path: root/sql/core
author    ouyangxiaochen <ou.yangxiaochen@zte.com.cn>    2017-02-13 19:41:44 -0800
committer Xiao Li <gatorsmile@gmail.com>                 2017-02-13 19:41:44 -0800
commit    6e45b547ceadbbe8394bf149945b7942df82660a (patch)
tree      fda868dd2b8ee22c1ef941926f0ad07deb1cfc72 /sql/core
parent    e02ac303c6356cdf7fffec7361311d828a723afe (diff)
[SPARK-19115][SQL] Supporting Create Table Like Location
What changes were proposed in this pull request?

Support a LOCATION clause in CREATE [EXTERNAL] TABLE ... LIKE for Hive serde and data source tables. Following Spark SQL's design rules, the source of CREATE TABLE ... LIKE may be a view, a physical table, or a temporary view, and a LOCATION clause may be given in each case. When a location is specified, the new table is created as an external table rather than a managed table.

How was this patch tested?

Added new test cases and updated existing test cases.

Author: ouyangxiaochen <ou.yangxiaochen@zte.com.cn>

Closes #16868 from ouyangxiaochen/spark19115.
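For illustration, here is a minimal usage sketch of the new syntax. The table names, the path, and the local SparkSession are assumptions made up for this example, not part of the change itself:

import org.apache.spark.sql.SparkSession

// A local session for the demo; in spark-shell the `spark` value already exists.
val spark = SparkSession.builder().master("local[*]").appName("spark19115-demo").getOrCreate()

// Source table used as the schema template.
spark.sql("CREATE TABLE IF NOT EXISTS src (id INT, name STRING) USING parquet")

// New in this change: LIKE followed by a LOCATION clause. The target copies
// src's definition but is created as an EXTERNAL table rooted at the given path.
spark.sql("CREATE TABLE IF NOT EXISTS dst LIKE src LOCATION '/tmp/spark19115/dst'")

// Without LOCATION the behavior is unchanged: the new table is MANAGED.
spark.sql("CREATE TABLE IF NOT EXISTS dst_managed LIKE src")

// Inspect the catalog entry; the table type should read EXTERNAL for dst.
spark.sql("DESC EXTENDED dst").show(truncate = false)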
Diffstat (limited to 'sql/core')
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala   |  5
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala   | 14
2 files changed, 12 insertions(+), 7 deletions(-)
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala
index 41768d4512..ca76a10f79 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala
@@ -1141,13 +1141,14 @@ class SparkSqlAstBuilder(conf: SQLConf) extends AstBuilder {
* For example:
* {{{
* CREATE TABLE [IF NOT EXISTS] [db_name.]table_name
- * LIKE [other_db_name.]existing_table_name
+ * LIKE [other_db_name.]existing_table_name [locationSpec]
* }}}
*/
override def visitCreateTableLike(ctx: CreateTableLikeContext): LogicalPlan = withOrigin(ctx) {
val targetTable = visitTableIdentifier(ctx.target)
val sourceTable = visitTableIdentifier(ctx.source)
- CreateTableLikeCommand(targetTable, sourceTable, ctx.EXISTS != null)
+ val location = Option(ctx.locationSpec).map(visitLocationSpec)
+ CreateTableLikeCommand(targetTable, sourceTable, location, ctx.EXISTS != null)
}
/**
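As a rough check of the parser change above, a short sketch of what the extended grammar now produces. The SQL text and the use of spark.sessionState.sqlParser (reusing the session from the sketch further up) are assumptions for the example, not taken from this diff:

import org.apache.spark.sql.execution.command.CreateTableLikeCommand

// Parsing the extended syntax should now yield a CreateTableLikeCommand that
// carries Some(location); without a LOCATION clause the location stays None.
val plan = spark.sessionState.sqlParser.parsePlan(
  "CREATE TABLE t1 LIKE t2 LOCATION '/tmp/spark19115/t1'")

plan match {
  case CreateTableLikeCommand(_, _, location, _) =>
    assert(location.contains("/tmp/spark19115/t1"))
  case other =>
    sys.error(s"unexpected plan: $other")
}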
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala
index bc4b5b6258..d646a215c3 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala
@@ -42,7 +42,7 @@ import org.apache.spark.sql.types._
import org.apache.spark.util.Utils
/**
- * A command to create a MANAGED table with the same definition of the given existing table.
+ * A command to create a table with the same definition of the given existing table.
* In the target table definition, the table comment is always empty but the column comments
* are identical to the ones defined in the source table.
*
@@ -52,12 +52,13 @@ import org.apache.spark.util.Utils
* The syntax of using this command in SQL is:
* {{{
* CREATE TABLE [IF NOT EXISTS] [db_name.]table_name
- * LIKE [other_db_name.]existing_table_name
+ * LIKE [other_db_name.]existing_table_name [locationSpec]
* }}}
*/
case class CreateTableLikeCommand(
targetTable: TableIdentifier,
sourceTable: TableIdentifier,
+ location: Option[String],
ifNotExists: Boolean) extends RunnableCommand {
override def run(sparkSession: SparkSession): Seq[Row] = {
@@ -70,12 +71,15 @@ case class CreateTableLikeCommand(
sourceTableDesc.provider
}
+ // If the location is specified, we create an external table internally.
+ // Otherwise create a managed table.
+ val tblType = if (location.isEmpty) CatalogTableType.MANAGED else CatalogTableType.EXTERNAL
+
val newTableDesc =
CatalogTable(
identifier = targetTable,
- tableType = CatalogTableType.MANAGED,
- // We are creating a new managed table, which should not have custom table location.
- storage = sourceTableDesc.storage.copy(locationUri = None),
+ tableType = tblType,
+ storage = sourceTableDesc.storage.copy(locationUri = location),
schema = sourceTableDesc.schema,
provider = newProvider,
partitionColumnNames = sourceTableDesc.partitionColumnNames,