path: root/python/pyspark/tests.py
author     Davies Liu <davies.liu@gmail.com>  2014-07-24 22:53:47 -0700
committer  Matei Zaharia <matei@databricks.com>  2014-07-24 22:53:47 -0700
commit     14174abd421318e71c16edd24224fd5094bdfed4 (patch)
tree       0af1cd49e09c80d8b7dd62d9de90ff03842db382 /python/pyspark/tests.py
parent     eff9714e1c88e39e28317358ca9ec87677f121dc (diff)
[SPARK-2538] [PySpark] Hash based disk spilling aggregation
During aggregation in the Python worker, if memory usage rises above spark.executor.memory, the worker falls back to disk spilling aggregation. The aggregation is split into multiple stages: in each stage, the aggregated data are partitioned by hash of the key and dumped to disk. After all the data have been aggregated, the spilled stages are merged together, partition by partition.

Author: Davies Liu <davies.liu@gmail.com>

Closes #1460 from davies/spill and squashes the following commits:

cad91bf [Davies Liu] call gc.collect() after data.clear() to release memory as much as possible.
37d71f7 [Davies Liu] balance the partitions
902f036 [Davies Liu] add shuffle.py into run-tests
dcf03a9 [Davies Liu] fix memory_info() of psutil
67e6eba [Davies Liu] comment for MAX_TOTAL_PARTITIONS
f6bd5d6 [Davies Liu] rollback next_limit() again, the performance difference is huge:
e74b785 [Davies Liu] fix code style and change next_limit to memory_limit
400be01 [Davies Liu] address all the comments
6178844 [Davies Liu] refactor and improve docs
fdd0a49 [Davies Liu] add long doc string for ExternalMerger
1a97ce4 [Davies Liu] limit used memory and size of objects in partitionBy()
e6cc7f9 [Davies Liu] Merge branch 'master' into spill
3652583 [Davies Liu] address comments
e78a0a0 [Davies Liu] fix style
24cec6a [Davies Liu] get local directory by SPARK_LOCAL_DIR
57ee7ef [Davies Liu] update docs
286aaff [Davies Liu] let spilled aggregation in Python configurable
e9a40f6 [Davies Liu] recursive merger
6edbd1f [Davies Liu] Hash based disk spilling aggregation
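For illustration only, below is a minimal Python sketch of the hash-partitioned spill-and-merge scheme the commit message describes. It is not the actual pyspark.shuffle.ExternalMerger; the names SimpleExternalMerger, memory_limit, and partitions are made up for this example, and the "memory limit" is a simple key-count stand-in for a real byte budget. Combiners are kept in a dict; whenever the limit is exceeded, they are split by hash of the key and dumped to per-partition spill files (one "stage"), and the final pass merges the spilled stages one partition at a time.

# Illustrative sketch only: NOT pyspark.shuffle.ExternalMerger. Names are hypothetical.
import os
import pickle
import shutil
import tempfile
from collections import defaultdict


class SimpleExternalMerger(object):
    def __init__(self, create, combine, merge, memory_limit=1000, partitions=4):
        self.create = create              # value -> combiner
        self.combine = combine            # (combiner, value) -> combiner
        self.merge = merge                # (combiner, combiner) -> combiner
        self.memory_limit = memory_limit  # max keys held in memory (stand-in for a byte budget)
        self.partitions = partitions
        self.data = {}                    # in-memory partial aggregation
        self.spill_dir = tempfile.mkdtemp()
        self.spills = 0                   # number of completed spill stages

    def merge_values(self, iterator):
        # Aggregate (key, value) pairs in memory, spilling whenever the limit is hit.
        for k, v in iterator:
            self.data[k] = self.combine(self.data[k], v) if k in self.data else self.create(v)
            if len(self.data) > self.memory_limit:
                self._spill()

    def _spill(self):
        # One "stage": split the in-memory combiners by hash of the key and dump
        # each bucket into its own file, named by (stage, partition).
        buckets = defaultdict(dict)
        for k, c in self.data.items():
            buckets[hash(k) % self.partitions][k] = c
        for p, bucket in buckets.items():
            with open(os.path.join(self.spill_dir, "%d_%d" % (self.spills, p)), "wb") as f:
                pickle.dump(bucket, f)
        self.data.clear()
        self.spills += 1

    def items(self):
        if not self.spills:
            # Nothing was spilled: the result is simply what is in memory.
            shutil.rmtree(self.spill_dir, ignore_errors=True)
            for item in self.data.items():
                yield item
            return
        # Otherwise flush what is left, then merge the stages partition by partition,
        # so only one partition's worth of combiners is held in memory at a time.
        self._spill()
        for p in range(self.partitions):
            merged = {}
            for s in range(self.spills):
                path = os.path.join(self.spill_dir, "%d_%d" % (s, p))
                if not os.path.exists(path):
                    continue
                with open(path, "rb") as f:
                    for k, c in pickle.load(f).items():
                        merged[k] = self.merge(merged[k], c) if k in merged else c
            for item in merged.items():
                yield item
        shutil.rmtree(self.spill_dir, ignore_errors=True)

With the same list-building Aggregator functions the new TestMerger.setUp uses (create a one-element list, append, extend), this sketch should yield the same per-key lists that the tests below check against the real ExternalMerger. Naming spill files by (stage, partition) is what allows the final merge to touch only one partition's data at a time instead of reloading everything at once.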
Diffstat (limited to 'python/pyspark/tests.py')
-rw-r--r--  python/pyspark/tests.py  57
1 files changed, 57 insertions, 0 deletions
diff --git a/python/pyspark/tests.py b/python/pyspark/tests.py
index 9c5ecd0bb0..a92abbf371 100644
--- a/python/pyspark/tests.py
+++ b/python/pyspark/tests.py
@@ -34,6 +34,7 @@ import zipfile
from pyspark.context import SparkContext
from pyspark.files import SparkFiles
from pyspark.serializers import read_int
+from pyspark.shuffle import Aggregator, InMemoryMerger, ExternalMerger
_have_scipy = False
try:
@@ -47,6 +48,62 @@ except:
SPARK_HOME = os.environ["SPARK_HOME"]
+class TestMerger(unittest.TestCase):
+
+ def setUp(self):
+ self.N = 1 << 16
+ self.l = [i for i in xrange(self.N)]
+ self.data = zip(self.l, self.l)
+ self.agg = Aggregator(lambda x: [x],
+ lambda x, y: x.append(y) or x,
+ lambda x, y: x.extend(y) or x)
+
+ def test_in_memory(self):
+ m = InMemoryMerger(self.agg)
+ m.mergeValues(self.data)
+ self.assertEqual(sum(sum(v) for k, v in m.iteritems()),
+ sum(xrange(self.N)))
+
+ m = InMemoryMerger(self.agg)
+ m.mergeCombiners(map(lambda (x, y): (x, [y]), self.data))
+ self.assertEqual(sum(sum(v) for k, v in m.iteritems()),
+ sum(xrange(self.N)))
+
+ def test_small_dataset(self):
+ m = ExternalMerger(self.agg, 1000)
+ m.mergeValues(self.data)
+ self.assertEqual(m.spills, 0)
+ self.assertEqual(sum(sum(v) for k, v in m.iteritems()),
+ sum(xrange(self.N)))
+
+ m = ExternalMerger(self.agg, 1000)
+ m.mergeCombiners(map(lambda (x, y): (x, [y]), self.data))
+ self.assertEqual(m.spills, 0)
+ self.assertEqual(sum(sum(v) for k, v in m.iteritems()),
+ sum(xrange(self.N)))
+
+ def test_medium_dataset(self):
+ m = ExternalMerger(self.agg, 10)
+ m.mergeValues(self.data)
+ self.assertTrue(m.spills >= 1)
+ self.assertEqual(sum(sum(v) for k, v in m.iteritems()),
+ sum(xrange(self.N)))
+
+ m = ExternalMerger(self.agg, 10)
+ m.mergeCombiners(map(lambda (x, y): (x, [y]), self.data * 3))
+ self.assertTrue(m.spills >= 1)
+ self.assertEqual(sum(sum(v) for k, v in m.iteritems()),
+ sum(xrange(self.N)) * 3)
+
+ def test_huge_dataset(self):
+ m = ExternalMerger(self.agg, 10)
+ m.mergeCombiners(map(lambda (k, v): (k, [str(v)]), self.data * 10))
+ self.assertTrue(m.spills >= 1)
+ self.assertEqual(sum(len(v) for k, v in m._recursive_merged_items(0)),
+ self.N * 10)
+ m._cleanup()
+
+
class PySparkTestCase(unittest.TestCase):
def setUp(self):