author     Wenchen Fan <wenchen@databricks.com>  2017-01-28 20:38:03 -0800
committer  gatorsmile <gatorsmile@gmail.com>     2017-01-28 20:38:03 -0800
commit     f7c07db852f22d694ca49792e4ceae04d45b71ef (patch)
tree       ac584ab6913a8149d47dd40da5b75028656cade6 /sql/hive/src/test/scala/org/apache
parent     cfcfc92f7bbdfd3a8b5e3948ae2f95c74d470434 (diff)
[SPARK-19152][SQL][FOLLOWUP] simplify CreateHiveTableAsSelectCommand
## What changes were proposed in this pull request?

After https://github.com/apache/spark/pull/16552, `CreateHiveTableAsSelectCommand` became very similar to `CreateDataSourceTableAsSelectCommand`, and we can simplify it further by creating the table only in the table-does-not-exist branch. This PR also adds the Hive provider check to `DataStreamReader`/`DataStreamWriter`, which was missed in #16552.

## How was this patch tested?

N/A

Author: Wenchen Fan <wenchen@databricks.com>

Closes #16693 from cloud-fan/minor.
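For readers unfamiliar with the provider check, here is a minimal, self-contained sketch of the kind of guard the streaming paths gain. The object, method name, and local exception class are hypothetical stand-ins (Spark's real `AnalysisException` constructor is not public outside the `org.apache.spark.sql` package); only the error-message prefix is taken from the tests in the diff below, and the rest of the message is assumed.

```scala
// Minimal sketch, not the verbatim Spark implementation: the streaming
// entry points (DataStreamReader.load / DataStreamWriter.start) can reject
// the reserved "hive" provider up front, because Hive data is only
// accessible through tables, never as raw files or streams.
object HiveProviderGuard {

  // Hypothetical stand-in for org.apache.spark.sql.AnalysisException.
  final class AnalysisException(val message: String) extends Exception(message)

  def assertNotHiveSource(source: String): Unit = {
    if (source.equalsIgnoreCase("hive")) {
      throw new AnalysisException(
        "Hive data source can only be used with tables, you can not " +
          "read files of Hive data source directly.")
    }
  }

  def main(args: Array[String]): Unit = {
    assertNotHiveSource("parquet")  // a file-based provider passes the guard
    try assertNotHiveSource("hive") // the "hive" provider is rejected
    catch { case e: AnalysisException => println(e.message) }
  }
}
```

The new `e3` and `e4` cases in the diff below exercise exactly these two paths: `e3` hits the streaming read side and `e4` the streaming write side, complementing the existing batch-side checks (such as `e2` in the surrounding context).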
Diffstat (limited to 'sql/hive/src/test/scala/org/apache')
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveDDLSuite.scala | 13
1 file changed, 12 insertions(+), 1 deletion(-)
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveDDLSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveDDLSuite.scala
index 2827183456..58be079d01 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveDDLSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveDDLSuite.scala
@@ -38,7 +38,7 @@ import org.apache.spark.sql.types.StructType
 class HiveDDLSuite
   extends QueryTest with SQLTestUtils with TestHiveSingleton with BeforeAndAfterEach {
-  import spark.implicits._
+  import testImplicits._
 
   override def afterEach(): Unit = {
     try {
@@ -1425,6 +1425,17 @@ class HiveDDLSuite
       Seq(1 -> "a").toDF("i", "j").write.format("hive").save(dir.getAbsolutePath)
     }
     assert(e2.message.contains("Hive data source can only be used with tables"))
+
+    val e3 = intercept[AnalysisException] {
+      spark.readStream.format("hive").load(dir.getAbsolutePath)
+    }
+    assert(e3.message.contains("Hive data source can only be used with tables"))
+
+    val e4 = intercept[AnalysisException] {
+      spark.readStream.schema(new StructType()).parquet(dir.getAbsolutePath)
+        .writeStream.format("hive").start(dir.getAbsolutePath)
+    }
+    assert(e4.message.contains("Hive data source can only be used with tables"))
   }
 }