author    Cheng Lian <lian@databricks.com>  2015-10-08 09:20:36 -0700
committer Cheng Lian <lian@databricks.com>  2015-10-08 09:20:36 -0700
commit    59b0606f334a192e110e6a79003145931f62b928 (patch)
tree      c3db637fbe2ef1d99b56572e7bec767bc328130c
parent    60150cf00a70e684d2cad864ab055ad53106938b (diff)
[SPARK-10999] [SQL] Coalesce should be able to handle UnsafeRow
Author: Cheng Lian <lian@databricks.com>

Closes #9024 from liancheng/spark-10999.coalesce-unsafe-row-handling.
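For context, the physical Coalesce operator patched below is what Spark plans for DataFrame.coalesce(n). A minimal usage sketch that exercises this code path, assuming the Spark 1.5-era API; the local master, SQLContext setup, and the "id" column name are illustrative, not part of this commit:

    import org.apache.spark.{SparkConf, SparkContext}
    import org.apache.spark.sql.SQLContext

    object CoalesceDemo {
      def main(args: Array[String]): Unit = {
        val sc = new SparkContext(
          new SparkConf().setAppName("coalesce-demo").setMaster("local[4]"))
        val sqlContext = new SQLContext(sc)
        import sqlContext.implicits._

        // With Tungsten enabled, upstream operators emit UnsafeRows, which the
        // patched Coalesce operator can now consume directly.
        val df = sc.parallelize(1 to 100, numSlices = 8).toDF("id")
        val coalesced = df.coalesce(2)            // plans the physical Coalesce operator
        println(coalesced.rdd.partitions.length)  // 2
        sc.stop()
      }
    }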
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/execution/basicOperators.scala | 9
1 file changed, 3 insertions(+), 6 deletions(-)
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/basicOperators.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/basicOperators.scala
index d4bbbeb39e..7804b67ac2 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/basicOperators.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/basicOperators.scala
@@ -17,20 +17,15 @@
 
 package org.apache.spark.sql.execution
 
-import java.util.Random
-
 import org.apache.spark.annotation.DeveloperApi
 import org.apache.spark.rdd.{PartitionwiseSampledRDD, RDD, ShuffledRDD}
-import org.apache.spark.serializer.Serializer
 import org.apache.spark.shuffle.sort.SortShuffleManager
-import org.apache.spark.sql.Row
 import org.apache.spark.sql.catalyst.InternalRow
-import org.apache.spark.sql.catalyst.CatalystTypeConverters
 import org.apache.spark.sql.catalyst.expressions._
 import org.apache.spark.sql.catalyst.plans.physical._
 import org.apache.spark.sql.execution.metric.SQLMetrics
-import org.apache.spark.util.random.PoissonSampler
 import org.apache.spark.util.MutablePair
+import org.apache.spark.util.random.PoissonSampler
 import org.apache.spark.{HashPartitioner, SparkEnv}
 
 /**
@@ -294,6 +289,8 @@ case class Coalesce(numPartitions: Int, child: SparkPlan) extends UnaryNode {
   protected override def doExecute(): RDD[InternalRow] = {
     child.execute().map(_.copy()).coalesce(numPartitions, shuffle = false)
   }
+
+  override def canProcessUnsafeRows: Boolean = true
 }
 
 /**
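A note on why the map(_.copy()) in doExecute above (unchanged by this commit) matters once the operator accepts unsafe rows: an UnsafeRow is typically backed by a buffer that the producing iterator reuses across rows, so any operator that holds on to rows must snapshot them first. The standalone sketch below illustrates the aliasing hazard; MutableRow and reusingIterator are hypothetical stand-ins, not Spark classes:

    // Illustrative only: a tiny mutable "row" mimicking UnsafeRow buffer reuse.
    object RowReuseDemo {
      final class MutableRow(var value: Int) {
        def copy(): MutableRow = new MutableRow(value)
        override def toString: String = s"Row($value)"
      }

      // Returns the SAME row object on every call to next(), mutating it in
      // place, as an UnsafeRow-producing iterator may do.
      def reusingIterator(values: Seq[Int]): Iterator[MutableRow] = {
        val shared = new MutableRow(0)
        values.iterator.map { v => shared.value = v; shared }
      }

      def main(args: Array[String]): Unit = {
        // Buffering without copying: every element aliases the shared buffer,
        // so all entries report the last value written.
        println(reusingIterator(Seq(1, 2, 3)).toList)
        // List(Row(3), Row(3), Row(3))

        // Copying each row first, as doExecute does with map(_.copy()),
        // snapshots the buffer and preserves the distinct values.
        println(reusingIterator(Seq(1, 2, 3)).map(_.copy()).toList)
        // List(Row(1), Row(2), Row(3))
      }
    }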