author    Dilip Biswal <dbiswal@us.ibm.com>    2017-04-12 12:18:01 +0800
committer Wenchen Fan <wenchen@databricks.com> 2017-04-12 12:18:01 +0800
commit    b14bfc3f8e97479ac5927c071b00ed18f2104c95
tree      0ee9e829fa341baa93677ecd578055d3267cc4d5 /sql/core/src
parent    8ad63ee158815de5ffff7bf03cdf25aef312095f
[SPARK-19993][SQL] Caching logical plans containing subquery expressions does not work.
## What changes were proposed in this pull request?

The sameResult() method does not work when the logical plan contains subquery expressions.

**Before the fix**

```SQL
scala> val ds = spark.sql("select * from s1 where s1.c1 in (select s2.c1 from s2 where s1.c1 = s2.c1)")
ds: org.apache.spark.sql.DataFrame = [c1: int]

scala> ds.cache
res13: ds.type = [c1: int]

scala> spark.sql("select * from s1 where s1.c1 in (select s2.c1 from s2 where s1.c1 = s2.c1)").explain(true)
== Analyzed Logical Plan ==
c1: int
Project [c1#86]
+- Filter c1#86 IN (list#78 [c1#86])
   :  +- Project [c1#87]
   :     +- Filter (outer(c1#86) = c1#87)
   :        +- SubqueryAlias s2
   :           +- Relation[c1#87] parquet
   +- SubqueryAlias s1
      +- Relation[c1#86] parquet

== Optimized Logical Plan ==
Join LeftSemi, ((c1#86 = c1#87) && (c1#86 = c1#87))
:- Relation[c1#86] parquet
+- Relation[c1#87] parquet
```

**Plan after fix**

```SQL
== Analyzed Logical Plan ==
c1: int
Project [c1#22]
+- Filter c1#22 IN (list#14 [c1#22])
   :  +- Project [c1#23]
   :     +- Filter (outer(c1#22) = c1#23)
   :        +- SubqueryAlias s2
   :           +- Relation[c1#23] parquet
   +- SubqueryAlias s1
      +- Relation[c1#22] parquet

== Optimized Logical Plan ==
InMemoryRelation [c1#22], true, 10000, StorageLevel(disk, memory, deserialized, 1 replicas)
   +- *BroadcastHashJoin [c1#1, c1#1], [c1#2, c1#2], LeftSemi, BuildRight
      :- *FileScan parquet default.s1[c1#1] Batched: true, Format: Parquet, Location: InMemoryFileIndex[file:/Users/dbiswal/mygit/apache/spark/bin/spark-warehouse/s1], PartitionFilters: [], PushedFilters: [], ReadSchema: struct<c1:int>
      +- BroadcastExchange HashedRelationBroadcastMode(List((shiftleft(cast(input[0, int, true] as bigint), 32) | (cast(input[0, int, true] as bigint) & 4294967295))))
         +- *FileScan parquet default.s2[c1#2] Batched: true, Format: Parquet, Location: InMemoryFileIndex[file:/Users/dbiswal/mygit/apache/spark/bin/spark-warehouse/s2], PartitionFilters: [], PushedFilters: [], ReadSchema: struct<c1:int>
```

## How was this patch tested?

New tests are added to CachedTableSuite.

Author: Dilip Biswal <dbiswal@us.ibm.com>

Closes #17330 from dilipbiswal/subquery_cache_final.
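To make the repro concrete, here is a minimal spark-shell sketch of the check described above. It assumes Parquet tables s1 and s2 (each with an int column c1) already exist, as in the description; the cache probe via InMemoryRelation is illustrative, not part of the patch:

```scala
import org.apache.spark.sql.execution.columnar.InMemoryRelation

val query =
  "select * from s1 where s1.c1 in (select s2.c1 from s2 where s1.c1 = s2.c1)"

// Cache the result of a query whose plan contains a correlated subquery.
spark.sql(query).cache()

// Re-run the identical query. Before the fix, sameResult() failed on the
// subquery expression and the cache was bypassed; after the fix, the
// optimized plan should contain an InMemoryRelation.
val optimized = spark.sql(query).queryExecution.optimizedPlan
val hitCache = optimized.collect { case r: InMemoryRelation => r }.nonEmpty
println(s"served from cache: $hitCache")
```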
Diffstat (limited to 'sql/core/src')
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/execution/DataSourceScanExec.scala |   7 +-
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/CachedTableSuite.scala             | 143 +++++++++++++-
2 files changed, 146 insertions(+), 4 deletions(-)
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/DataSourceScanExec.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/DataSourceScanExec.scala
index 3a9132d74a..866fa98533 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/DataSourceScanExec.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/DataSourceScanExec.scala
@@ -28,6 +28,7 @@ import org.apache.spark.sql.catalyst.{InternalRow, TableIdentifier}
import org.apache.spark.sql.catalyst.catalog.BucketSpec
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.codegen.CodegenContext
+import org.apache.spark.sql.catalyst.plans.QueryPlan
import org.apache.spark.sql.catalyst.plans.physical.{HashPartitioning, Partitioning, UnknownPartitioning}
import org.apache.spark.sql.execution.datasources._
import org.apache.spark.sql.execution.datasources.parquet.{ParquetFileFormat => ParquetSource}
@@ -516,10 +517,10 @@ case class FileSourceScanExec(
override lazy val canonicalized: FileSourceScanExec = {
FileSourceScanExec(
relation,
- output.map(normalizeExprId(_, output)),
+ output.map(QueryPlan.normalizeExprId(_, output)),
requiredSchema,
- partitionFilters.map(normalizeExprId(_, output)),
- dataFilters.map(normalizeExprId(_, output)),
+ partitionFilters.map(QueryPlan.normalizeExprId(_, output)),
+ dataFilters.map(QueryPlan.normalizeExprId(_, output)),
None)
}
}
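The substance of this change is that canonicalization now calls the static QueryPlan.normalizeExprId, which maps each attribute's per-session expression ID (the #86 vs. #22 suffixes in the plans above) to its ordinal position in the plan's output, so that two structurally identical plans compare equal under sameResult(). A simplified sketch of that idea, not Spark's exact implementation (which also recurses into subquery plans, the actual crux of this fix):

```scala
import org.apache.spark.sql.catalyst.expressions.{AttributeReference, AttributeSeq, ExprId, Expression}

// Rewrite per-session ExprIds to position-based ones, so that e.g.
// c1#86 and c1#22 both canonicalize to c1#0.
def normalizeExprIdSketch(e: Expression, input: AttributeSeq): Expression = {
  e.transformUp {
    case ar: AttributeReference =>
      val ordinal = input.indexOf(ar.exprId)
      // Attributes not found in the output (e.g. outer references) stay as-is.
      if (ordinal == -1) ar else ar.withExprId(ExprId(ordinal))
  }
}
```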
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/CachedTableSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/CachedTableSuite.scala
index 7a7d52b214..e66fe97afa 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/CachedTableSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/CachedTableSuite.scala
@@ -26,7 +26,7 @@ import org.scalatest.concurrent.Eventually._
import org.apache.spark.CleanerListener
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.expressions.SubqueryExpression
-import org.apache.spark.sql.execution.RDDScanExec
+import org.apache.spark.sql.execution.{RDDScanExec, SparkPlan}
import org.apache.spark.sql.execution.columnar._
import org.apache.spark.sql.execution.exchange.ShuffleExchange
import org.apache.spark.sql.functions._
@@ -76,6 +76,13 @@ class CachedTableSuite extends QueryTest with SQLTestUtils with SharedSQLContext
sum
}
+ private def getNumInMemoryTablesRecursively(plan: SparkPlan): Int = {
+ plan.collect {
+ case InMemoryTableScanExec(_, _, relation) =>
+ getNumInMemoryTablesRecursively(relation.child) + 1
+ }.sum
+ }
+
test("withColumn doesn't invalidate cached dataframe") {
var evalCount = 0
val myUDF = udf((x: String) => { evalCount += 1; "result" })
@@ -670,4 +677,138 @@ class CachedTableSuite extends QueryTest with SQLTestUtils with SharedSQLContext
assert(spark.read.parquet(path).filter($"id" > 4).count() == 15)
}
}
+
+ test("SPARK-19993 simple subquery caching") {
+ withTempView("t1", "t2") {
+ Seq(1).toDF("c1").createOrReplaceTempView("t1")
+ Seq(2).toDF("c1").createOrReplaceTempView("t2")
+
+ sql(
+ """
+ |SELECT * FROM t1
+ |WHERE
+ |NOT EXISTS (SELECT * FROM t2)
+ """.stripMargin).cache()
+
+ val cachedDs =
+ sql(
+ """
+ |SELECT * FROM t1
+ |WHERE
+ |NOT EXISTS (SELECT * FROM t2)
+ """.stripMargin)
+ assert(getNumInMemoryRelations(cachedDs) == 1)
+
+ // Additional predicate in the subquery plan should cause a cache miss
+ val cachedMissDs =
+ sql(
+ """
+ |SELECT * FROM t1
+ |WHERE
+ |NOT EXISTS (SELECT * FROM t2 where c1 = 0)
+ """.stripMargin)
+ assert(getNumInMemoryRelations(cachedMissDs) == 0)
+ }
+ }
+
+ test("SPARK-19993 subquery caching with correlated predicates") {
+ withTempView("t1", "t2") {
+ Seq(1).toDF("c1").createOrReplaceTempView("t1")
+ Seq(1).toDF("c1").createOrReplaceTempView("t2")
+
+ // Simple correlated predicate in subquery
+ sql(
+ """
+ |SELECT * FROM t1
+ |WHERE
+ |t1.c1 in (SELECT t2.c1 FROM t2 where t1.c1 = t2.c1)
+ """.stripMargin).cache()
+
+ val cachedDs =
+ sql(
+ """
+ |SELECT * FROM t1
+ |WHERE
+ |t1.c1 in (SELECT t2.c1 FROM t2 where t1.c1 = t2.c1)
+ """.stripMargin)
+ assert(getNumInMemoryRelations(cachedDs) == 1)
+ }
+ }
+
+ test("SPARK-19993 subquery with cached underlying relation") {
+ withTempView("t1") {
+ Seq(1).toDF("c1").createOrReplaceTempView("t1")
+ spark.catalog.cacheTable("t1")
+
+ // underlying table t1 is cached as well as the query that refers to it.
+ val ds =
+ sql(
+ """
+ |SELECT * FROM t1
+ |WHERE
+ |NOT EXISTS (SELECT * FROM t1)
+ """.stripMargin)
+ assert(getNumInMemoryRelations(ds) == 2)
+
+ val cachedDs =
+ sql(
+ """
+ |SELECT * FROM t1
+ |WHERE
+ |NOT EXISTS (SELECT * FROM t1)
+ """.stripMargin).cache()
+ assert(getNumInMemoryTablesRecursively(cachedDs.queryExecution.sparkPlan) == 3)
+ }
+ }
+
+ test("SPARK-19993 nested subquery caching and scalar + predicate subqueris") {
+ withTempView("t1", "t2", "t3", "t4") {
+ Seq(1).toDF("c1").createOrReplaceTempView("t1")
+ Seq(2).toDF("c1").createOrReplaceTempView("t2")
+ Seq(1).toDF("c1").createOrReplaceTempView("t3")
+ Seq(1).toDF("c1").createOrReplaceTempView("t4")
+
+ // Nested predicate subquery
+ sql(
+ """
+ |SELECT * FROM t1
+ |WHERE
+ |c1 IN (SELECT c1 FROM t2 WHERE c1 IN (SELECT c1 FROM t3 WHERE c1 = 1))
+ """.stripMargin).cache()
+
+ val cachedDs =
+ sql(
+ """
+ |SELECT * FROM t1
+ |WHERE
+ |c1 IN (SELECT c1 FROM t2 WHERE c1 IN (SELECT c1 FROM t3 WHERE c1 = 1))
+ """.stripMargin)
+ assert(getNumInMemoryRelations(cachedDs) == 1)
+
+ // Scalar subquery and predicate subquery
+ sql(
+ """
+ |SELECT * FROM (SELECT max(c1) FROM t1 GROUP BY c1)
+ |WHERE
+ |c1 = (SELECT max(c1) FROM t2 GROUP BY c1)
+ |OR
+ |EXISTS (SELECT c1 FROM t3)
+ |OR
+ |c1 IN (SELECT c1 FROM t4)
+ """.stripMargin).cache()
+
+ val cachedDs2 =
+ sql(
+ """
+ |SELECT * FROM (SELECT max(c1) FROM t1 GROUP BY c1)
+ |WHERE
+ |c1 = (SELECT max(c1) FROM t2 GROUP BY c1)
+ |OR
+ |EXISTS (SELECT c1 FROM t3)
+ |OR
+ |c1 IN (SELECT c1 FROM t4)
+ """.stripMargin)
+ assert(getNumInMemoryRelations(cachedDs2) == 1)
+ }
+ }
}
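For reference, the new tests call a pre-existing helper, getNumInMemoryRelations, of which only the closing lines ("sum" and "}") are visible in the hunk above. A hedged reconstruction of its likely shape, assuming the suite's existing imports (Dataset, LogicalPlan, InMemoryRelation, SubqueryExpression):

```scala
// Count InMemoryRelation nodes in a plan, including plans nested inside
// subquery expressions (reconstructed; not shown in full in this diff).
private def getNumInMemoryRelations(plan: LogicalPlan): Int = {
  var sum = plan.collect { case _: InMemoryRelation => 1 }.sum
  plan.transformAllExpressions {
    case e: SubqueryExpression =>
      sum += getNumInMemoryRelations(e.plan)
      e
  }
  sum
}

// Convenience overload matching the calls in the tests above (assumption:
// the Dataset is resolved to its plan with cached data substituted in).
private def getNumInMemoryRelations(ds: Dataset[_]): Int =
  getNumInMemoryRelations(ds.queryExecution.withCachedData)
```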