author    gatorsmile <gatorsmile@gmail.com>  2017-01-22 20:37:37 -0800
committer gatorsmile <gatorsmile@gmail.com>  2017-01-22 20:37:37 -0800
commit    772035e771a75593f031a8e78080bb58b8218e04 (patch)
tree      8f8c56f1b57c594624f2d372cd18a9df133c2731 /python
parent    74e65cb74a8c023870a2ac9b1216c9d89c02f014 (diff)
[SPARK-19229][SQL] Disallow Creating Hive Source Tables when Hive Support is Not Enabled
### What changes were proposed in this pull request?

It does not make sense to create Hive source tables when using InMemoryCatalog, since such tables cannot be operated on without Hive support. This PR blocks users from creating Hive source tables when Hive support is not enabled.

### How was this patch tested?

Fixed the existing test cases.

Author: gatorsmile <gatorsmile@gmail.com>

Closes #16587 from gatorsmile/blockHiveTable.
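For illustration, a minimal sketch of the behavior this change enforces, assuming a Spark 2.x PySpark session built without Hive support (i.e. backed by InMemoryCatalog); the table names and the exact exception message are hypothetical:

```python
# Minimal sketch, assuming a PySpark session created WITHOUT
# .enableHiveSupport(), i.e. backed by InMemoryCatalog.
from pyspark.sql import SparkSession
from pyspark.sql.utils import AnalysisException

spark = SparkSession.builder.master("local[1]").appName("no-hive-demo").getOrCreate()

# With no USING clause the statement is parsed as a Hive source table,
# which this change rejects when Hive support is not enabled.
try:
    spark.sql("CREATE TABLE hive_tab (name STRING, age INT)")
except AnalysisException as e:
    print("rejected:", e)

# Declaring a Spark-native data source still works.
spark.sql("CREATE TABLE native_tab (name STRING, age INT) USING parquet")
```

This is why the test cases in the diff below now append `USING parquet` to their `CREATE TABLE` statements.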
Diffstat (limited to 'python')
-rw-r--r--  python/pyspark/sql/tests.py | 8
1 file changed, 4 insertions, 4 deletions
diff --git a/python/pyspark/sql/tests.py b/python/pyspark/sql/tests.py
index 4bfe6e9eb3..a88e5a1cfb 100644
--- a/python/pyspark/sql/tests.py
+++ b/python/pyspark/sql/tests.py
@@ -1717,8 +1717,8 @@ class SQLTests(ReusedPySparkTestCase):
self.assertEquals(spark.catalog.listTables(), [])
self.assertEquals(spark.catalog.listTables("some_db"), [])
spark.createDataFrame([(1, 1)]).createOrReplaceTempView("temp_tab")
- spark.sql("CREATE TABLE tab1 (name STRING, age INT)")
- spark.sql("CREATE TABLE some_db.tab2 (name STRING, age INT)")
+ spark.sql("CREATE TABLE tab1 (name STRING, age INT) USING parquet")
+ spark.sql("CREATE TABLE some_db.tab2 (name STRING, age INT) USING parquet")
tables = sorted(spark.catalog.listTables(), key=lambda t: t.name)
tablesDefault = sorted(spark.catalog.listTables("default"), key=lambda t: t.name)
tablesSomeDb = sorted(spark.catalog.listTables("some_db"), key=lambda t: t.name)
@@ -1796,8 +1796,8 @@ class SQLTests(ReusedPySparkTestCase):
spark = self.spark
spark.catalog._reset()
spark.sql("CREATE DATABASE some_db")
- spark.sql("CREATE TABLE tab1 (name STRING, age INT)")
- spark.sql("CREATE TABLE some_db.tab2 (nickname STRING, tolerance FLOAT)")
+ spark.sql("CREATE TABLE tab1 (name STRING, age INT) USING parquet")
+ spark.sql("CREATE TABLE some_db.tab2 (nickname STRING, tolerance FLOAT) USING parquet")
columns = sorted(spark.catalog.listColumns("tab1"), key=lambda c: c.name)
columnsDefault = sorted(spark.catalog.listColumns("tab1", "default"), key=lambda c: c.name)
self.assertEquals(columns, columnsDefault)