 sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/LogicalRelation.scala |  5 +++++
 sql/hive/src/test/scala/org/apache/spark/sql/hive/CachedTableSuite.scala                 | 11 +++++++++++
 2 files changed, 16 insertions(+), 0 deletions(-)
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/LogicalRelation.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/LogicalRelation.scala
index 783252e0a2..219dae88e5 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/LogicalRelation.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/LogicalRelation.scala
@@ -62,6 +62,11 @@ case class LogicalRelation(
     case _ => false
   }
 
+  // When comparing two LogicalRelations from within LogicalPlan.sameResult, we only need
+  // LogicalRelation.cleanArgs to return Seq(relation), since expectedOutputAttributes'
+  // exprIds can differ while the relation is still the same.
+  override lazy val cleanArgs: Seq[Any] = Seq(relation)
+
   @transient override lazy val statistics: Statistics = Statistics(
     sizeInBytes = BigInt(relation.sizeInBytes)
   )
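
Why the one-line override is enough: the cache manager matches a new query plan against cached plans via LogicalPlan.sameResult, which compares the plans' cleaned constructor arguments. A LogicalRelation also carries expectedOutputAttributes, whose ExprIds are freshly assigned on every analysis of the same table, so two scans of one parquet table never compared equal and the cached data was never reused. Below is a minimal, self-contained sketch of that failure mode in plain Scala; the toy Attribute/Relation/LogicalRelation classes are illustrative stand-ins, not Spark's real ones.

object CleanArgsSketch {
  // exprId models Spark's ExprId: a fresh value is minted each time a plan is analyzed.
  final case class Attribute(name: String, exprId: Long)
  final case class Relation(path: String) // stands in for the underlying data source

  final case class LogicalRelation(relation: Relation, output: Seq[Attribute]) {
    // Comparing every argument fails: exprIds differ even for the same table.
    def sameResultRaw(other: LogicalRelation): Boolean = this == other
    // Comparing only the "clean" argument, the relation, matches as intended.
    def sameResultClean(other: LogicalRelation): Boolean = this.relation == other.relation
  }

  def main(args: Array[String]): Unit = {
    val rel = Relation("/warehouse/cachedTable")
    val first = LogicalRelation(rel, Seq(Attribute("c", exprId = 1L)))  // plan that was cached
    val second = LogicalRelation(rel, Seq(Attribute("c", exprId = 2L))) // re-analyzed plan
    assert(!first.sameResultRaw(second))  // raw comparison: cache lookup misses
    assert(first.sameResultClean(second)) // clean comparison: cache lookup hits
  }
}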
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/CachedTableSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/CachedTableSuite.scala
index 9adb3780a2..5c2fc7d82f 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/CachedTableSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/CachedTableSuite.scala
@@ -20,6 +20,7 @@ package org.apache.spark.sql.hive
 import java.io.File
 
 import org.apache.spark.sql.columnar.InMemoryColumnarTableScan
+import org.apache.spark.sql.execution.datasources.parquet.ParquetRelation
 import org.apache.spark.sql.hive.test.TestHiveSingleton
 import org.apache.spark.sql.{AnalysisException, QueryTest, SaveMode}
 import org.apache.spark.storage.RDDBlockId
@@ -203,4 +204,14 @@ class CachedTableSuite extends QueryTest with TestHiveSingleton {
     sql("DROP TABLE refreshTable")
     Utils.deleteRecursively(tempPath)
   }
+
+  test("SPARK-11246 cache parquet table") {
+    sql("CREATE TABLE cachedTable STORED AS PARQUET AS SELECT 1")
+
+    cacheTable("cachedTable")
+    val sparkPlan = sql("SELECT * FROM cachedTable").queryExecution.sparkPlan
+    assert(sparkPlan.collect { case e: InMemoryColumnarTableScan => e }.size === 1)
+
+    sql("DROP TABLE cachedTable")
+  }
 }
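
The test pins down the user-visible behavior: once a Hive parquet table is cached, a scan of it must be served by InMemoryColumnarTableScan rather than a fresh parquet read. A hedged spark-shell sketch of the same check against the 1.x API, assuming the Hive-enabled sqlContext the shell provides:

sqlContext.sql("CREATE TABLE cachedTable STORED AS PARQUET AS SELECT 1")
sqlContext.cacheTable("cachedTable")
// Before this fix, the re-analyzed plan's fresh exprIds made sameResult return false,
// so this query scanned parquet instead of the in-memory columnar cache.
// The printed physical plan should now contain an InMemoryColumnarTableScan node.
sqlContext.sql("SELECT * FROM cachedTable").explain()
sqlContext.sql("DROP TABLE cachedTable")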