author     gatorsmile <gatorsmile@gmail.com>    2016-06-16 10:01:59 -0700
committer  Cheng Lian <lian@databricks.com>     2016-06-16 10:01:59 -0700
commit     6451cf9270b55465d8ecea4c4031329a1058561a (patch)
tree       a85d5758f2be2ab562f75ae04ec3096d8f950120 /sql/core/src/test
parent     7c6c6926376c93acc42dd56a399d816f4838f28c (diff)
[SPARK-15862][SQL] Better Error Message When Having Database Name in CACHE TABLE AS SELECT
#### What changes were proposed in this pull request?

~~If the temp table already exists, we should not silently replace it when doing `CACHE TABLE AS SELECT`. This is inconsistent with the behavior of `CREATE VIEW` or `CREATE TABLE`. This PR is to fix this silent drop.~~

~~Maybe we could also introduce new syntax for replacing an existing table. For example, in Hive, a view is replaced with `ALTER VIEW AS SELECT` or `CREATE OR REPLACE VIEW AS SELECT`.~~

The table name in `CACHE TABLE AS SELECT` should NOT contain a database prefix like "database.table". Thus, this PR catches this case in the Parser and outputs a better error message, instead of reporting that the view already exists. In addition, it refactors the `Parser` to generate table identifiers instead of returning the table name as a string.

#### How was this patch tested?

- Added a test case for caching and uncaching qualified table names
- Fixed a few test cases that did not drop the temp table at the end
- Added the related test case for the issue resolved in this PR

Author: gatorsmile <gatorsmile@gmail.com>
Author: xiaoli <lixiao1983@gmail.com>
Author: Xiao Li <xiaoli@Xiaos-MacBook-Pro.local>

Closes #13572 from gatorsmile/cacheTableAsSelect.
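For illustration, the following is a minimal, self-contained sketch (not code from this patch) of the behavior described above: an unqualified name in `CACHE TABLE ... AS SELECT` works as before, while a database-qualified name is expected to be rejected at parse time with a clearer message. The session setup, the names `db1` and `cachedTable`, and the generic exception handling are illustrative assumptions.

```scala
// Hypothetical sketch of the behavior described in this commit message.
// Table/database names and exception handling below are assumptions for illustration.
import org.apache.spark.sql.SparkSession

object CacheTableAsSelectSketch {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .master("local[2]")
      .appName("CacheTableAsSelectSketch")
      .getOrCreate()
    import spark.implicits._

    // A small source table to select from.
    Seq((1, "a"), (2, "b")).toDF("key", "value").createOrReplaceTempView("testData")

    // Unqualified name: the statement parses, creates the temp table, and caches it eagerly.
    spark.sql("CACHE TABLE tempTable AS SELECT key FROM testData")
    assert(spark.catalog.isCached("tempTable"))

    // Qualified name: after this patch the parser is expected to reject the
    // "database.table" form with an explicit error instead of reporting that
    // the view already exists (exact message not reproduced here).
    try {
      spark.sql("CACHE TABLE db1.cachedTable AS SELECT key FROM testData")
      assert(false, "expected the qualified table name to be rejected")
    } catch {
      case e: Exception => println(s"Rejected as expected: ${e.getMessage}")
    }

    spark.catalog.uncacheTable("tempTable")
    spark.stop()
  }
}
```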
Diffstat (limited to 'sql/core/src/test')
-rw-r--r--   sql/core/src/test/scala/org/apache/spark/sql/CachedTableSuite.scala   68
1 file changed, 38 insertions(+), 30 deletions(-)
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/CachedTableSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/CachedTableSuite.scala
index d7df18ae1c..6f6abfa93c 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/CachedTableSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/CachedTableSuite.scala
@@ -73,11 +73,13 @@ class CachedTableSuite extends QueryTest with SQLTestUtils with SharedSQLContext
   }
 
   test("cache temp table") {
-    testData.select('key).createOrReplaceTempView("tempTable")
-    assertCached(sql("SELECT COUNT(*) FROM tempTable"), 0)
-    spark.catalog.cacheTable("tempTable")
-    assertCached(sql("SELECT COUNT(*) FROM tempTable"))
-    spark.catalog.uncacheTable("tempTable")
+    withTempTable("tempTable") {
+      testData.select('key).createOrReplaceTempView("tempTable")
+      assertCached(sql("SELECT COUNT(*) FROM tempTable"), 0)
+      spark.catalog.cacheTable("tempTable")
+      assertCached(sql("SELECT COUNT(*) FROM tempTable"))
+      spark.catalog.uncacheTable("tempTable")
+    }
   }
 
   test("unpersist an uncached table will not raise exception") {
@@ -95,9 +97,11 @@ class CachedTableSuite extends QueryTest with SQLTestUtils with SharedSQLContext
   }
 
   test("cache table as select") {
-    sql("CACHE TABLE tempTable AS SELECT key FROM testData")
-    assertCached(sql("SELECT COUNT(*) FROM tempTable"))
-    spark.catalog.uncacheTable("tempTable")
+    withTempTable("tempTable") {
+      sql("CACHE TABLE tempTable AS SELECT key FROM testData")
+      assertCached(sql("SELECT COUNT(*) FROM tempTable"))
+      spark.catalog.uncacheTable("tempTable")
+    }
   }
 
   test("uncaching temp table") {
@@ -223,32 +227,36 @@ class CachedTableSuite extends QueryTest with SQLTestUtils with SharedSQLContext
   }
 
   test("CACHE TABLE tableName AS SELECT * FROM anotherTable") {
-    sql("CACHE TABLE testCacheTable AS SELECT * FROM testData")
-    assertCached(spark.table("testCacheTable"))
-
-    val rddId = rddIdOf("testCacheTable")
-    assert(
-      isMaterialized(rddId),
-      "Eagerly cached in-memory table should have already been materialized")
-
-    spark.catalog.uncacheTable("testCacheTable")
-    eventually(timeout(10 seconds)) {
-      assert(!isMaterialized(rddId), "Uncached in-memory table should have been unpersisted")
+    withTempTable("testCacheTable") {
+      sql("CACHE TABLE testCacheTable AS SELECT * FROM testData")
+      assertCached(spark.table("testCacheTable"))
+
+      val rddId = rddIdOf("testCacheTable")
+      assert(
+        isMaterialized(rddId),
+        "Eagerly cached in-memory table should have already been materialized")
+
+      spark.catalog.uncacheTable("testCacheTable")
+      eventually(timeout(10 seconds)) {
+        assert(!isMaterialized(rddId), "Uncached in-memory table should have been unpersisted")
+      }
     }
   }
 
   test("CACHE TABLE tableName AS SELECT ...") {
-    sql("CACHE TABLE testCacheTable AS SELECT key FROM testData LIMIT 10")
-    assertCached(spark.table("testCacheTable"))
-
-    val rddId = rddIdOf("testCacheTable")
-    assert(
-      isMaterialized(rddId),
-      "Eagerly cached in-memory table should have already been materialized")
-
-    spark.catalog.uncacheTable("testCacheTable")
-    eventually(timeout(10 seconds)) {
-      assert(!isMaterialized(rddId), "Uncached in-memory table should have been unpersisted")
+    withTempTable("testCacheTable") {
+      sql("CACHE TABLE testCacheTable AS SELECT key FROM testData LIMIT 10")
+      assertCached(spark.table("testCacheTable"))
+
+      val rddId = rddIdOf("testCacheTable")
+      assert(
+        isMaterialized(rddId),
+        "Eagerly cached in-memory table should have already been materialized")
+
+      spark.catalog.uncacheTable("testCacheTable")
+      eventually(timeout(10 seconds)) {
+        assert(!isMaterialized(rddId), "Uncached in-memory table should have been unpersisted")
+      }
     }
   }
 