author    Reynold Xin <rxin@databricks.com>  2017-03-24 23:57:29 +0100
committer Reynold Xin <rxin@databricks.com>  2017-03-24 23:57:29 +0100
commit    b5c5bd98ea5e8dbfebcf86c5459bdf765f5ceb53
tree      9fbe647521a433a2275616ac881ee2f94babd143
parent    91fa80fe8a2480d64c430bd10f97b3d44c007bcc
Disable generate codegen since it fails my workload.
Diffstat (limited to 'sql/core')
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/execution/GenerateExec.scala          |  2 +-
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/execution/WholeStageCodegenSuite.scala | 28 ----------------------------
2 files changed, 1 insertion(+), 29 deletions(-)
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/GenerateExec.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/GenerateExec.scala
index 69be7094d2..f87d05884b 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/GenerateExec.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/GenerateExec.scala
@@ -119,7 +119,7 @@ case class GenerateExec(
     }
   }
 
-  override def supportCodegen: Boolean = generator.supportCodegen
+  override def supportCodegen: Boolean = false
 
   override def inputRDDs(): Seq[RDD[InternalRow]] = {
     child.asInstanceOf[CodegenSupport].inputRDDs()
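
The change above hard-codes GenerateExec.supportCodegen to false, so Generate operators are always planned outside whole-stage code generation. A minimal sketch of how to observe this, assuming a SparkSession bound to `spark` (as in spark-shell) and reusing the query from the test removed below:

import org.apache.spark.sql.functions._

// Same explode() query as in the removed "generate should be included in
// WholeStageCodegen" test below.
val ds = spark.range(2).select(
  col("id"),
  explode(array(col("id") + 1, col("id") + 2)).as("value"))

// With supportCodegen = false, the Generate node should show up outside of any
// WholeStageCodegen stage in the physical plan.
ds.explain()
ds.show()
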
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/WholeStageCodegenSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/WholeStageCodegenSuite.scala
index 4d9203556d..a4b30a2f8c 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/WholeStageCodegenSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/WholeStageCodegenSuite.scala
@@ -116,34 +116,6 @@ class WholeStageCodegenSuite extends SparkPlanTest with SharedSQLContext {
     assert(ds.collect() === Array(("a", 10.0), ("b", 3.0), ("c", 1.0)))
   }
 
-  test("generate should be included in WholeStageCodegen") {
-    import org.apache.spark.sql.functions._
-    val ds = spark.range(2).select(
-      col("id"),
-      explode(array(col("id") + 1, col("id") + 2)).as("value"))
-    val plan = ds.queryExecution.executedPlan
-    assert(plan.find(p =>
-      p.isInstanceOf[WholeStageCodegenExec] &&
-        p.asInstanceOf[WholeStageCodegenExec].child.isInstanceOf[GenerateExec]).isDefined)
-    assert(ds.collect() === Array(Row(0, 1), Row(0, 2), Row(1, 2), Row(1, 3)))
-  }
-
-  test("large stack generator should not use WholeStageCodegen") {
-    def createStackGenerator(rows: Int): SparkPlan = {
-      val id = UnresolvedAttribute("id")
-      val stack = Stack(Literal(rows) +: Seq.tabulate(rows)(i => Add(id, Literal(i))))
-      spark.range(500).select(Column(stack)).queryExecution.executedPlan
-    }
-    val isCodeGenerated: SparkPlan => Boolean = {
-      case WholeStageCodegenExec(_: GenerateExec) => true
-      case _ => false
-    }
-
-    // Only 'stack' generators that produce 50 rows or less are code generated.
-    assert(createStackGenerator(50).find(isCodeGenerated).isDefined)
-    assert(createStackGenerator(100).find(isCodeGenerated).isEmpty)
-  }
-
   test("SPARK-19512 codegen for comparing structs is incorrect") {
     // this would raise CompileException before the fix
     spark.range(10)