author     Josh Rosen <joshrosen@databricks.com>  2015-09-19 21:40:21 -0700
committer  Reynold Xin <rxin@databricks.com>      2015-09-19 21:40:21 -0700
commit     2117eea71ece825fbc3797c8b38184ae221f5223 (patch)
tree       06481ef1968367118e89779335e24245f57f2017 /python/pyspark/shuffle.py
parent     e789000b88a6bd840f821c53f42c08b97dc02496 (diff)
[SPARK-10710] Remove ability to disable spilling in core and SQL
It does not make much sense to set `spark.shuffle.spill` or `spark.sql.planner.externalSort` to false: I believe that these configurations were initially added as "escape hatches" to guard against bugs in the external operators, but these operators are now mature and well-tested. In addition, these configurations are no longer handled consistently: SQL's Tungsten codepath ignores them and will continue to use spilling operators. Similarly, Spark Core's `tungsten-sort` shuffle manager does not respect `spark.shuffle.spill=false`.

This pull request removes these configurations, adds warnings at the appropriate places, and deletes a large amount of code which was only used in code paths that did not support spilling.

Author: Josh Rosen <joshrosen@databricks.com>

Closes #8831 from JoshRosen/remove-ability-to-disable-spilling.
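For context, these are the two settings the patch retires. A sketch of how a user might have set them before this change (illustrative only; per the commit message, setting them after this patch only produces a warning rather than disabling spilling):

```python
from pyspark import SparkConf

# Both settings are removed by SPARK-10710: Spark now always spills,
# and setting these keys no longer changes behavior.
conf = (SparkConf()
        .set("spark.shuffle.spill", "false")
        .set("spark.sql.planner.externalSort", "false"))
```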
Diffstat (limited to 'python/pyspark/shuffle.py')
-rw-r--r--  python/pyspark/shuffle.py  30 ------------------------------
1 file changed, 0 insertions(+), 30 deletions(-)
diff --git a/python/pyspark/shuffle.py b/python/pyspark/shuffle.py
index b8118bdb7c..e974cda9fc 100644
--- a/python/pyspark/shuffle.py
+++ b/python/pyspark/shuffle.py
@@ -131,36 +131,6 @@ class Merger(object):
raise NotImplementedError
-class InMemoryMerger(Merger):
-
- """
- In memory merger based on in-memory dict.
- """
-
- def __init__(self, aggregator):
- Merger.__init__(self, aggregator)
- self.data = {}
-
- def mergeValues(self, iterator):
- """ Combine the items by creator and combiner """
- # speed up attributes lookup
- d, creator = self.data, self.agg.createCombiner
- comb = self.agg.mergeValue
- for k, v in iterator:
- d[k] = comb(d[k], v) if k in d else creator(v)
-
- def mergeCombiners(self, iterator):
- """ Merge the combined items by mergeCombiners """
- # speed up attributes lookup
- d, comb = self.data, self.agg.mergeCombiners
- for k, v in iterator:
- d[k] = comb(d[k], v) if k in d else v
-
- def items(self):
- """ Return the merged items as an iterator """
- return iter(self.data.items())
-
-
def _compressed_serializer(self, serializer=None):
# always use PickleSerializer to simplify implementation
ser = PickleSerializer()
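With `InMemoryMerger` removed, PySpark aggregation always goes through the spilling `ExternalMerger` defined later in this same file. As a rough usage sketch (assuming the `Aggregator` and `ExternalMerger` signatures present in `python/pyspark/shuffle.py` at this commit; this example is not part of the patch itself):

```python
from pyspark.shuffle import Aggregator, ExternalMerger

# Word-count-style aggregation: the combiner for each key is a running sum.
agg = Aggregator(
    createCombiner=lambda v: v,             # first value seen for a key
    mergeValue=lambda c, v: c + v,          # fold a new value into a combiner
    mergeCombiners=lambda c1, c2: c1 + c2,  # merge two partial combiners
)

# Unlike the deleted InMemoryMerger, ExternalMerger spills partitioned
# data to disk once the memory limit (in MB) is exceeded, so spilling
# can no longer be disabled.
merger = ExternalMerger(agg, memory_limit=512)
merger.mergeValues(iter([("a", 1), ("b", 1), ("a", 1)]))
print(sorted(merger.items()))  # [('a', 2), ('b', 1)]
```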