From 59b0606f334a192e110e6a79003145931f62b928 Mon Sep 17 00:00:00 2001
From: Cheng Lian
Date: Thu, 8 Oct 2015 09:20:36 -0700
Subject: [SPARK-10999] [SQL] Coalesce should be able to handle UnsafeRow

Author: Cheng Lian

Closes #9024 from liancheng/spark-10999.coalesce-unsafe-row-handling.
---
 .../scala/org/apache/spark/sql/execution/basicOperators.scala | 9 +++------
 1 file changed, 3 insertions(+), 6 deletions(-)

diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/basicOperators.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/basicOperators.scala
index d4bbbeb39e..7804b67ac2 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/basicOperators.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/basicOperators.scala
@@ -17,20 +17,15 @@

 package org.apache.spark.sql.execution

-import java.util.Random
-
 import org.apache.spark.annotation.DeveloperApi
 import org.apache.spark.rdd.{PartitionwiseSampledRDD, RDD, ShuffledRDD}
-import org.apache.spark.serializer.Serializer
 import org.apache.spark.shuffle.sort.SortShuffleManager
-import org.apache.spark.sql.Row
 import org.apache.spark.sql.catalyst.InternalRow
-import org.apache.spark.sql.catalyst.CatalystTypeConverters
 import org.apache.spark.sql.catalyst.expressions._
 import org.apache.spark.sql.catalyst.plans.physical._
 import org.apache.spark.sql.execution.metric.SQLMetrics
-import org.apache.spark.util.random.PoissonSampler
 import org.apache.spark.util.MutablePair
+import org.apache.spark.util.random.PoissonSampler
 import org.apache.spark.{HashPartitioner, SparkEnv}

 /**
@@ -294,6 +289,8 @@ case class Coalesce(numPartitions: Int, child: SparkPlan) extends UnaryNode {
   protected override def doExecute(): RDD[InternalRow] = {
     child.execute().map(_.copy()).coalesce(numPartitions, shuffle = false)
   }
+
+  override def canProcessUnsafeRows: Boolean = true
 }

 /**
--
cgit v1.2.3
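
A minimal usage sketch (not part of the patch) of the code path this change affects: calling coalesce() on a DataFrame whose upstream Tungsten operators emit UnsafeRows, which is planned as the Coalesce physical node patched above. The SQLContext parameter and the column name "bucket" are assumptions chosen for illustration, not taken from the commit.

import org.apache.spark.sql.SQLContext

def coalesceUnsafeRows(sqlContext: SQLContext): Unit = {
  import sqlContext.implicits._

  // Projections under Tungsten produce UnsafeRows, so the planned
  // Coalesce(2, child) node must report canProcessUnsafeRows = true
  // for rows to flow through without an extra format conversion.
  val df = sqlContext.range(0, 1000).select(($"id" % 10).as("bucket"))
  val coalesced = df.coalesce(2)  // planned as the Coalesce physical operator
  coalesced.collect()             // exercises Coalesce.doExecute on UnsafeRows
}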