author     Patrick Wendell <pwendell@gmail.com>  2014-01-13 13:24:04 -0800
committer  Patrick Wendell <pwendell@gmail.com>  2014-01-13 13:25:06 -0800
commit     c3816de5040e3c48e58ed4762d2f4eb606812938 (patch)
tree       9fe23220986be4ac1a404168f4839a4c6948e99b /core
parent     5d61e051c2ad5955f0101de6f0ecdf5d243e4f5e (diff)
Changing option wording per discussion with Andrew
Diffstat (limited to 'core')
-rw-r--r--  core/src/main/scala/org/apache/spark/Aggregator.scala                           2
-rw-r--r--  core/src/main/scala/org/apache/spark/storage/BlockManager.scala                 4
-rw-r--r--  core/src/main/scala/org/apache/spark/util/collection/ExternalAppendOnlyMap.scala 4
3 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/core/src/main/scala/org/apache/spark/Aggregator.scala b/core/src/main/scala/org/apache/spark/Aggregator.scala
index ee4467085f..38073707cd 100644
--- a/core/src/main/scala/org/apache/spark/Aggregator.scala
+++ b/core/src/main/scala/org/apache/spark/Aggregator.scala
@@ -32,7 +32,7 @@ case class Aggregator[K, V, C] (
     mergeCombiners: (C, C) => C) {
 
   private val sparkConf = SparkEnv.get.conf
-  private val externalSorting = sparkConf.getBoolean("spark.shuffle.external", true)
+  private val externalSorting = sparkConf.getBoolean("spark.shuffle.spill", true)
 
   def combineValuesByKey(iter: Iterator[_ <: Product2[K, V]]) : Iterator[(K, C)] = {
     if (!externalSorting) {
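
For context, a minimal sketch of setting the renamed option from application code; the app name and master shown here are illustrative, not part of this commit:

    import org.apache.spark.SparkConf

    // Sketch: disable shuffle spilling under the new key (formerly
    // "spark.shuffle.external"), keeping aggregation purely in memory.
    val conf = new SparkConf()
      .setAppName("example")   // illustrative
      .setMaster("local[*]")   // illustrative
      .set("spark.shuffle.spill", "false")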
diff --git a/core/src/main/scala/org/apache/spark/storage/BlockManager.scala b/core/src/main/scala/org/apache/spark/storage/BlockManager.scala
index 0e770ed152..6461deee32 100644
--- a/core/src/main/scala/org/apache/spark/storage/BlockManager.scala
+++ b/core/src/main/scala/org/apache/spark/storage/BlockManager.scala
@@ -81,7 +81,7 @@ private[spark] class BlockManager(
   // Whether to compress RDD partitions that are stored serialized
   val compressRdds = conf.getBoolean("spark.rdd.compress", false)
   // Whether to compress shuffle output temporarily spilled to disk
-  val compressExternalShuffle = conf.getBoolean("spark.shuffle.external.compress", false)
+  val compressShuffleSpill = conf.getBoolean("spark.shuffle.spill.compress", false)
 
   val heartBeatFrequency = BlockManager.getHeartBeatFrequency(conf)
@@ -792,7 +792,7 @@ private[spark] class BlockManager(
     case ShuffleBlockId(_, _, _) => compressShuffle
     case BroadcastBlockId(_) => compressBroadcast
     case RDDBlockId(_, _) => compressRdds
-    case TempBlockId(_) => compressExternalShuffle
+    case TempBlockId(_) => compressShuffleSpill
     case _ => false
   }
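
Likewise, a hedged sketch of enabling compression for spilled shuffle blocks under the renamed key; the SparkConf usage is illustrative:

    import org.apache.spark.SparkConf

    // Sketch: compress temporary blocks spilled to disk during a shuffle
    // (formerly "spark.shuffle.external.compress"; default false per the
    // diff above).
    val conf = new SparkConf()
      .set("spark.shuffle.spill.compress", "true")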
diff --git a/core/src/main/scala/org/apache/spark/util/collection/ExternalAppendOnlyMap.scala b/core/src/main/scala/org/apache/spark/util/collection/ExternalAppendOnlyMap.scala
index fd17413952..2eef6a7c10 100644
--- a/core/src/main/scala/org/apache/spark/util/collection/ExternalAppendOnlyMap.scala
+++ b/core/src/main/scala/org/apache/spark/util/collection/ExternalAppendOnlyMap.scala
@@ -87,9 +87,9 @@ private[spark] class ExternalAppendOnlyMap[K, V, C](
   // batches, with each batch using its own serialization stream. This cuts down on the size
   // of reference-tracking maps constructed when deserializing a stream.
   //
-  // NOTE: Setting this too low can cause excess copying when serializing, since some serailizers
+  // NOTE: Setting this too low can cause excess copying when serializing, since some serializers
   // grow internal data structures by growing + copying every time the number of objects doubles.
-  private val serializerBatchSize = sparkConf.getLong("spark.shuffle.external.batchSize", 10000)
+  private val serializerBatchSize = sparkConf.getLong("spark.shuffle.spill.batchSize", 10000)
 
   // How many times we have spilled so far
   private var spillCount = 0
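
Finally, a hedged sketch of tuning the renamed batch-size option; the value 20000 is illustrative, and the 10000 default comes from the code above:

    import org.apache.spark.SparkConf

    // Sketch: raise the spill serialization batch size. Fewer, larger
    // batches reduce copying in serializers that grow by doubling, at the
    // cost of bigger reference-tracking maps when streams are read back.
    val conf = new SparkConf()
      .set("spark.shuffle.spill.batchSize", "20000")  // default: 10000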