aboutsummaryrefslogtreecommitdiff
path: root/sql/hive/src
diff options
context:
space:
mode:
authorWenchen Fan <wenchen@databricks.com>2016-09-01 16:45:22 +0800
committerWenchen Fan <wenchen@databricks.com>2016-09-01 16:45:22 +0800
commit8e740ae44d55570a3e7b6eae1f0239ac1319b986 (patch)
treea4055f80643f9411cfb14d7ab51179ac0b83e047 /sql/hive/src
parent1f06a5b6a0584d0c9656f58eaf54e54e2383c82b (diff)
downloadspark-8e740ae44d55570a3e7b6eae1f0239ac1319b986.tar.gz
spark-8e740ae44d55570a3e7b6eae1f0239ac1319b986.tar.bz2
spark-8e740ae44d55570a3e7b6eae1f0239ac1319b986.zip
[SPARK-17257][SQL] the physical plan of CREATE TABLE or CTAS should take CatalogTable
## What changes were proposed in this pull request?

This is kind of a follow-up of https://github.com/apache/spark/pull/14482 . As we put `CatalogTable` in the logical plan directly, it makes sense to let physical plans take `CatalogTable` directly, instead of extracting some fields of `CatalogTable` in planner and then constructing a new `CatalogTable` in physical plan.

## How was this patch tested?

Existing tests.

Author: Wenchen Fan <wenchen@databricks.com>

Closes #14823 from cloud-fan/create-table.
Diffstat (limited to 'sql/hive/src')
-rw-r--r-- sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala | 6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala
index d77bb5cf95..7a71475a2f 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala
@@ -906,7 +906,7 @@ class MetastoreDataSourcesSuite extends QueryTest with SQLTestUtils with TestHiv
val e = intercept[AnalysisException] {
createDF(10, 19).write.mode(SaveMode.Append).format("orc").saveAsTable("appendOrcToParquet")
}
- assert(e.getMessage.contains("The file format of the existing table `appendOrcToParquet` " +
+ assert(e.getMessage.contains("The file format of the existing table appendOrcToParquet " +
"is `org.apache.spark.sql.execution.datasources.parquet.ParquetFileFormat`. " +
"It doesn't match the specified format `orc`"))
}
@@ -917,7 +917,7 @@ class MetastoreDataSourcesSuite extends QueryTest with SQLTestUtils with TestHiv
createDF(10, 19).write.mode(SaveMode.Append).format("parquet")
.saveAsTable("appendParquetToJson")
}
- assert(e.getMessage.contains("The file format of the existing table `appendParquetToJson` " +
+ assert(e.getMessage.contains("The file format of the existing table appendParquetToJson " +
"is `org.apache.spark.sql.execution.datasources.json.JsonFileFormat`. " +
"It doesn't match the specified format `parquet`"))
}
@@ -928,7 +928,7 @@ class MetastoreDataSourcesSuite extends QueryTest with SQLTestUtils with TestHiv
createDF(10, 19).write.mode(SaveMode.Append).format("text")
.saveAsTable("appendTextToJson")
}
- assert(e.getMessage.contains("The file format of the existing table `appendTextToJson` is " +
+ assert(e.getMessage.contains("The file format of the existing table appendTextToJson is " +
"`org.apache.spark.sql.execution.datasources.json.JsonFileFormat`. " +
"It doesn't match the specified format `text`"))
}