From 772035e771a75593f031a8e78080bb58b8218e04 Mon Sep 17 00:00:00 2001 From: gatorsmile Date: Sun, 22 Jan 2017 20:37:37 -0800 Subject: [SPARK-19229][SQL] Disallow Creating Hive Source Tables when Hive Support is Not Enabled ### What changes were proposed in this pull request? It is weird to create Hive source tables when using InMemoryCatalog. We are unable to operate it. This PR is to block users to create Hive source tables. ### How was this patch tested? Fixed the test cases Author: gatorsmile Closes #16587 from gatorsmile/blockHiveTable. --- python/pyspark/sql/tests.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'python') diff --git a/python/pyspark/sql/tests.py b/python/pyspark/sql/tests.py index 4bfe6e9eb3..a88e5a1cfb 100644 --- a/python/pyspark/sql/tests.py +++ b/python/pyspark/sql/tests.py @@ -1717,8 +1717,8 @@ class SQLTests(ReusedPySparkTestCase): self.assertEquals(spark.catalog.listTables(), []) self.assertEquals(spark.catalog.listTables("some_db"), []) spark.createDataFrame([(1, 1)]).createOrReplaceTempView("temp_tab") - spark.sql("CREATE TABLE tab1 (name STRING, age INT)") - spark.sql("CREATE TABLE some_db.tab2 (name STRING, age INT)") + spark.sql("CREATE TABLE tab1 (name STRING, age INT) USING parquet") + spark.sql("CREATE TABLE some_db.tab2 (name STRING, age INT) USING parquet") tables = sorted(spark.catalog.listTables(), key=lambda t: t.name) tablesDefault = sorted(spark.catalog.listTables("default"), key=lambda t: t.name) tablesSomeDb = sorted(spark.catalog.listTables("some_db"), key=lambda t: t.name) @@ -1796,8 +1796,8 @@ class SQLTests(ReusedPySparkTestCase): spark = self.spark spark.catalog._reset() spark.sql("CREATE DATABASE some_db") - spark.sql("CREATE TABLE tab1 (name STRING, age INT)") - spark.sql("CREATE TABLE some_db.tab2 (nickname STRING, tolerance FLOAT)") + spark.sql("CREATE TABLE tab1 (name STRING, age INT) USING parquet") + spark.sql("CREATE TABLE some_db.tab2 (nickname STRING, tolerance FLOAT) USING parquet") columns = sorted(spark.catalog.listColumns("tab1"), key=lambda c: c.name) columnsDefault = sorted(spark.catalog.listColumns("tab1", "default"), key=lambda c: c.name) self.assertEquals(columns, columnsDefault) -- cgit v1.2.3