about summary refs log tree commit diff
diff options
context:
space:
mode:
-rw-r--r--python/pyspark/rdd.py13
-rw-r--r--python/pyspark/tests.py10
2 files changed, 20 insertions, 3 deletions
diff --git a/python/pyspark/rdd.py b/python/pyspark/rdd.py
index ed81eb16df..0e2ae19ca3 100644
--- a/python/pyspark/rdd.py
+++ b/python/pyspark/rdd.py
@@ -2017,8 +2017,7 @@ class RDD(object):
>>> len(rdd.repartition(10).glom().collect())
10
"""
- jrdd = self._jrdd.repartition(numPartitions)
- return RDD(jrdd, self.ctx, self._jrdd_deserializer)
+ return self.coalesce(numPartitions, shuffle=True)
def coalesce(self, numPartitions, shuffle=False):
"""
@@ -2029,7 +2028,15 @@ class RDD(object):
>>> sc.parallelize([1, 2, 3, 4, 5], 3).coalesce(1).glom().collect()
[[1, 2, 3, 4, 5]]
"""
- jrdd = self._jrdd.coalesce(numPartitions, shuffle)
+ if shuffle:
+ # In Scala's repartition code, we will distribute elements evenly across output
+            # partitions. However, an RDD coming from Python is serialized as single binary
+            # blobs, so the distribution fails and produces highly skewed partitions. We need
+            # to convert it to an RDD of Java objects before repartitioning.
+ data_java_rdd = self._to_java_object_rdd().coalesce(numPartitions, shuffle)
+ jrdd = self.ctx._jvm.SerDeUtil.javaToPython(data_java_rdd)
+ else:
+ jrdd = self._jrdd.coalesce(numPartitions, shuffle)
return RDD(jrdd, self.ctx, self._jrdd_deserializer)
def zip(self, other):
diff --git a/python/pyspark/tests.py b/python/pyspark/tests.py
index b0756911bf..3e0bd16d85 100644
--- a/python/pyspark/tests.py
+++ b/python/pyspark/tests.py
@@ -914,6 +914,16 @@ class RDDTests(ReusedPySparkTestCase):
self.assertEqual(partitions[0], [(0, 5), (0, 8), (2, 6)])
self.assertEqual(partitions[1], [(1, 3), (3, 8), (3, 8)])
+ def test_repartition_no_skewed(self):
+ num_partitions = 20
+ a = self.sc.parallelize(range(int(1000)), 2)
+ l = a.repartition(num_partitions).glom().map(len).collect()
+ zeros = len([x for x in l if x == 0])
+ self.assertTrue(zeros == 0)
+ l = a.coalesce(num_partitions, True).glom().map(len).collect()
+ zeros = len([x for x in l if x == 0])
+ self.assertTrue(zeros == 0)
+
def test_distinct(self):
rdd = self.sc.parallelize((1, 2, 3)*10, 10)
self.assertEqual(rdd.getNumPartitions(), 10)