diff options
author | Wenchen Fan <wenchen@databricks.com> | 2017-01-28 20:38:03 -0800 |
---|---|---|
committer | gatorsmile <gatorsmile@gmail.com> | 2017-01-28 20:38:03 -0800 |
commit | f7c07db852f22d694ca49792e4ceae04d45b71ef (patch) | |
tree | ac584ab6913a8149d47dd40da5b75028656cade6 /sql/hive/src/test/scala/org/apache | |
parent | cfcfc92f7bbdfd3a8b5e3948ae2f95c74d470434 (diff) | |
download | spark-f7c07db852f22d694ca49792e4ceae04d45b71ef.tar.gz spark-f7c07db852f22d694ca49792e4ceae04d45b71ef.tar.bz2 spark-f7c07db852f22d694ca49792e4ceae04d45b71ef.zip |
[SPARK-19152][SQL][FOLLOWUP] simplify CreateHiveTableAsSelectCommand
## What changes were proposed in this pull request?
After https://github.com/apache/spark/pull/16552 , `CreateHiveTableAsSelectCommand` becomes very similar to `CreateDataSourceTableAsSelectCommand`, and we can further simplify it by only creating table in the table-not-exist branch.
This PR also adds hive provider checking in DataStream reader/writer, which was missed in #16552.
## How was this patch tested?
N/A
Author: Wenchen Fan <wenchen@databricks.com>
Closes #16693 from cloud-fan/minor.
Diffstat (limited to 'sql/hive/src/test/scala/org/apache')
-rw-r--r-- | sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveDDLSuite.scala | 13 |
1 files changed, 12 insertions, 1 deletions
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveDDLSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveDDLSuite.scala index 2827183456..58be079d01 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveDDLSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveDDLSuite.scala @@ -38,7 +38,7 @@ import org.apache.spark.sql.types.StructType class HiveDDLSuite extends QueryTest with SQLTestUtils with TestHiveSingleton with BeforeAndAfterEach { - import spark.implicits._ + import testImplicits._ override def afterEach(): Unit = { try { @@ -1425,6 +1425,17 @@ class HiveDDLSuite Seq(1 -> "a").toDF("i", "j").write.format("hive").save(dir.getAbsolutePath) } assert(e2.message.contains("Hive data source can only be used with tables")) + + val e3 = intercept[AnalysisException] { + spark.readStream.format("hive").load(dir.getAbsolutePath) + } + assert(e3.message.contains("Hive data source can only be used with tables")) + + val e4 = intercept[AnalysisException] { + spark.readStream.schema(new StructType()).parquet(dir.getAbsolutePath) + .writeStream.format("hive").start(dir.getAbsolutePath) + } + assert(e4.message.contains("Hive data source can only be used with tables")) } } |