author     Cheng Lian <lian@databricks.com>          2014-11-24 12:43:45 -0800
committer  Michael Armbrust <michael@databricks.com>  2014-11-24 12:43:58 -0800
commit     ee1bc892a32bb969b051b3bc3eaaf9a54af1c7a3
tree       0d5034fadfdcf6fdd480f6f597b234e87e7a27c3 /core
parent     1a12ca339cf038c44f5d7402d63851f48a055b35
[SPARK-4479][SQL] Avoids unnecessary defensive copies when sort based shuffle is on
This PR is a workaround for SPARK-4479. Two changes are introduced for the case where merge sort is bypassed in `ExternalSorter`:

1. Also bypass RDD element buffering, since buffering is the reason that `MutableRow`-backed row objects must be copied.
2. Avoid defensive copies in the `Exchange` operator.

Author: Cheng Lian <lian@databricks.com>

Closes #3422 from liancheng/avoids-defensive-copies and squashes the following commits:

591f2e9 [Cheng Lian] Passes all shuffle suites
0c3c91e [Cheng Lian] Fixes shuffle write metrics when merge sort is bypassed
ed5df3c [Cheng Lian] Fixes styling changes
f75089b [Cheng Lian] Avoids unnecessary defensive copies when sort based shuffle is on

(cherry picked from commit a6d7b61f92dc7c1f9632cecb232afa8040ab2b4d)
Signed-off-by: Michael Armbrust <michael@databricks.com>
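The copying problem described above is easiest to see with a toy example. The sketch below is illustrative only (plain Scala, no Spark dependencies; ReusedRow, reusingIterator and DefensiveCopyDemo are made-up names, not Spark APIs): a producer reuses one mutable object across its iterator, buffering such records without a defensive copy leaves every buffered slot aliasing the same object, while streaming each record out before pulling the next one, as the bypass path in this patch does, needs no copy at all.

// Illustrative only: a toy mutable "row" in the spirit of Spark SQL's MutableRow.
// ReusedRow, reusingIterator and DefensiveCopyDemo are made-up names, not Spark APIs.
final class ReusedRow(var value: Int)

object DefensiveCopyDemo {
  // The producer hands out the SAME object on every next(), only mutating it in place.
  def reusingIterator(n: Int): Iterator[ReusedRow] = {
    val row = new ReusedRow(0)
    Iterator.tabulate(n) { i => row.value = i; row }
  }

  def main(args: Array[String]): Unit = {
    // Buffering without a defensive copy: every slot aliases the same object,
    // so all entries observe the last written value -> "4, 4, 4, 4, 4".
    val buffered = reusingIterator(5).toArray
    println(buffered.map(_.value).mkString(", "))

    // Streaming each record out before pulling the next one (what the bypass
    // path in this patch does) needs no copy at all -> prints 0 through 4.
    reusingIterator(5).foreach(r => println(r.value))

    // The alternative the patch avoids: copy defensively before buffering.
    val copied = reusingIterator(5).map(r => new ReusedRow(r.value)).toArray
    println(copied.map(_.value).mkString(", "))
  }
}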
Diffstat (limited to 'core')
-rw-r--r--  core/src/main/scala/org/apache/spark/util/collection/ExternalSorter.scala | 23
-rw-r--r--  core/src/test/scala/org/apache/spark/ShuffleSuite.scala                   | 12
2 files changed, 26 insertions(+), 9 deletions(-)
diff --git a/core/src/main/scala/org/apache/spark/util/collection/ExternalSorter.scala b/core/src/main/scala/org/apache/spark/util/collection/ExternalSorter.scala
index c617ff5c51..15bda1c9cc 100644
--- a/core/src/main/scala/org/apache/spark/util/collection/ExternalSorter.scala
+++ b/core/src/main/scala/org/apache/spark/util/collection/ExternalSorter.scala
@@ -205,6 +205,13 @@ private[spark] class ExternalSorter[K, V, C](
map.changeValue((getPartition(kv._1), kv._1), update)
maybeSpillCollection(usingMap = true)
}
+ } else if (bypassMergeSort) {
+ // SPARK-4479: Also bypass buffering if merge sort is bypassed to avoid defensive copies
+ if (records.hasNext) {
+ spillToPartitionFiles(records.map { kv =>
+ ((getPartition(kv._1), kv._1), kv._2.asInstanceOf[C])
+ })
+ }
} else {
// Stick values into our buffer
while (records.hasNext) {
@@ -336,6 +343,10 @@ private[spark] class ExternalSorter[K, V, C](
* @param collection whichever collection we're using (map or buffer)
*/
private def spillToPartitionFiles(collection: SizeTrackingPairCollection[(Int, K), C]): Unit = {
+ spillToPartitionFiles(collection.iterator)
+ }
+
+ private def spillToPartitionFiles(iterator: Iterator[((Int, K), C)]): Unit = {
assert(bypassMergeSort)
// Create our file writers if we haven't done so yet
@@ -350,9 +361,9 @@ private[spark] class ExternalSorter[K, V, C](
}
}
- val it = collection.iterator // No need to sort stuff, just write each element out
- while (it.hasNext) {
- val elem = it.next()
+ // No need to sort stuff, just write each element out
+ while (iterator.hasNext) {
+ val elem = iterator.next()
val partitionId = elem._1._1
val key = elem._1._2
val value = elem._2
@@ -748,6 +759,12 @@ private[spark] class ExternalSorter[K, V, C](
context.taskMetrics.memoryBytesSpilled += memoryBytesSpilled
context.taskMetrics.diskBytesSpilled += diskBytesSpilled
+ context.taskMetrics.shuffleWriteMetrics.filter(_ => bypassMergeSort).foreach { m =>
+ if (curWriteMetrics != null) {
+ m.shuffleBytesWritten += curWriteMetrics.shuffleBytesWritten
+ m.shuffleWriteTime += curWriteMetrics.shuffleWriteTime
+ }
+ }
lengths
}
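Taken together, the ExternalSorter hunks above do two things: insertAll now streams records straight into per-partition spill files when merge sort is bypassed, instead of buffering them first, and the final hunk folds the bypass writers' bytes written and write time into the task's shuffle write metrics (the "fixes shuffle write metrics" commit). Below is a minimal, self-contained sketch of that dispatch; SorterSketch, its fields and the println-based "spill" are stand-ins for illustration, not Spark's actual internals.

object BypassDispatchSketch {
  type K = Int; type V = Int; type C = Int

  final class SorterSketch(numPartitions: Int, shouldCombine: Boolean, bypassMergeSort: Boolean) {
    private val buffer = scala.collection.mutable.ArrayBuffer.empty[((Int, K), C)]
    private def getPartition(key: K): Int = math.abs(key.hashCode) % numPartitions

    // Stand-in for spillToPartitionFiles(iterator): each element is consumed as soon
    // as it is produced, so a reused mutable record never needs a defensive copy.
    private def spillToPartitionFiles(it: Iterator[((Int, K), C)]): Unit =
      it.foreach { case ((pid, k), c) => println(s"partition $pid <- ($k, $c)") }

    def insertAll(records: Iterator[(K, V)]): Unit = {
      if (shouldCombine) {
        // Combine path (map-backed in the real code); omitted in this sketch.
      } else if (bypassMergeSort) {
        // SPARK-4479: skip buffering entirely and stream straight to partition files.
        if (records.hasNext) {
          spillToPartitionFiles(records.map { kv =>
            ((getPartition(kv._1), kv._1), kv._2.asInstanceOf[C])
          })
        }
      } else {
        // Default path: buffer elements first, which is why copies were needed before.
        records.foreach { kv =>
          val elem = ((getPartition(kv._1), kv._1), kv._2.asInstanceOf[C])
          buffer += elem
        }
      }
    }
  }

  def main(args: Array[String]): Unit = {
    val sorter = new SorterSketch(numPartitions = 4, shouldCombine = false, bypassMergeSort = true)
    sorter.insertAll(Iterator((1, 2), (2, 4), (3, 6), (4, 8)))
  }
}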
diff --git a/core/src/test/scala/org/apache/spark/ShuffleSuite.scala b/core/src/test/scala/org/apache/spark/ShuffleSuite.scala
index cda942e15a..85e5f9ab44 100644
--- a/core/src/test/scala/org/apache/spark/ShuffleSuite.scala
+++ b/core/src/test/scala/org/apache/spark/ShuffleSuite.scala
@@ -95,14 +95,14 @@ abstract class ShuffleSuite extends FunSuite with Matchers with LocalSparkContext
// Use a local cluster with 2 processes to make sure there are both local and remote blocks
sc = new SparkContext("local-cluster[2,1,512]", "test", conf)
- // 10 partitions from 4 keys
- val NUM_BLOCKS = 10
+ // 201 partitions (greater than "spark.shuffle.sort.bypassMergeThreshold") from 4 keys
+ val NUM_BLOCKS = 201
val a = sc.parallelize(1 to 4, NUM_BLOCKS)
val b = a.map(x => (x, x*2))
// NOTE: The default Java serializer doesn't create zero-sized blocks.
// So, use Kryo
- val c = new ShuffledRDD[Int, Int, Int](b, new HashPartitioner(10))
+ val c = new ShuffledRDD[Int, Int, Int](b, new HashPartitioner(NUM_BLOCKS))
.setSerializer(new KryoSerializer(conf))
val shuffleId = c.dependencies.head.asInstanceOf[ShuffleDependency[_, _, _]].shuffleId
@@ -122,13 +122,13 @@ abstract class ShuffleSuite extends FunSuite with Matchers with LocalSparkContext
// Use a local cluster with 2 processes to make sure there are both local and remote blocks
sc = new SparkContext("local-cluster[2,1,512]", "test", conf)
- // 10 partitions from 4 keys
- val NUM_BLOCKS = 10
+ // 201 partitions (greater than "spark.shuffle.sort.bypassMergeThreshold") from 4 keys
+ val NUM_BLOCKS = 201
val a = sc.parallelize(1 to 4, NUM_BLOCKS)
val b = a.map(x => (x, x*2))
// NOTE: The default Java serializer should create zero-sized blocks
- val c = new ShuffledRDD[Int, Int, Int](b, new HashPartitioner(10))
+ val c = new ShuffledRDD[Int, Int, Int](b, new HashPartitioner(NUM_BLOCKS))
val shuffleId = c.dependencies.head.asInstanceOf[ShuffleDependency[_, _, _]].shuffleId
assert(c.count === 4)
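The test changes above raise the partition count from 10 to 201 so that it exceeds spark.shuffle.sort.bypassMergeThreshold (200 by default in this line of Spark), presumably to keep these zero-sized-block tests on the regular sort path now that the bypass path writes partition files directly. A standalone sketch of that setup follows; it mirrors the suite's local-cluster master string and Spark 1.x-era APIs and is an illustration, not the suite itself.

import org.apache.spark.{HashPartitioner, SparkConf, SparkContext}
import org.apache.spark.rdd.ShuffledRDD
import org.apache.spark.serializer.KryoSerializer

object BypassThresholdSketch {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
      .setMaster("local-cluster[2,1,512]") // mirrors the suite; needs a built Spark distribution
      .setAppName("bypass-threshold-sketch")
      .set("spark.shuffle.manager", "sort")
    val sc = new SparkContext(conf)
    try {
      // 201 > spark.shuffle.sort.bypassMergeThreshold (200 by default), so the
      // sort shuffle does NOT take the bypass path exercised by the patch above.
      val NUM_BLOCKS = 201
      val a = sc.parallelize(1 to 4, NUM_BLOCKS)
      val b = a.map(x => (x, x * 2))
      val c = new ShuffledRDD[Int, Int, Int](b, new HashPartitioner(NUM_BLOCKS))
        .setSerializer(new KryoSerializer(conf))
      assert(c.count() == 4)
    } finally {
      sc.stop()
    }
  }
}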