author    Yin Huai <yhuai@databricks.com>    2016-04-24 20:48:01 -0700
committer Reynold Xin <rxin@databricks.com>    2016-04-24 20:48:01 -0700
commit    35319d326488b3bf9235dfcf9ac4533ce846f21f (patch)
tree      1056fe10d64c44e429c7edda1f90b177cfc39c6f
parent    d34d6503786bbe429c10ddb1879519cc9bd709b6 (diff)
[SPARK-14885][SQL] When creating a CatalogColumn, we should use the catalogString of a DataType object.
## What changes were proposed in this pull request?

Right now, the data type field of a CatalogColumn is stored as a string representation. When we create this string from a DataType object, there are places where we use simpleString instead of catalogString. Although catalogString is the same as simpleString right now, it is still better to use catalogString, so that we do not silently introduce issues when the semantics of simpleString or the implementation of catalogString change.

## How was this patch tested?

Existing tests.

Author: Yin Huai <yhuai@databricks.com>

Closes #12654 from yhuai/useCatalogString.
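For readers unfamiliar with the two renderings: both `simpleString` and `catalogString` are methods on `DataType` that produce a SQL-style type string, and at the time of this commit they agree. A minimal sketch of the distinction being guarded against (assumes spark-catalyst on the classpath; the commented output is illustrative, not quoted from a run):

```scala
import org.apache.spark.sql.types._

object CatalogStringDemo {
  def main(args: Array[String]): Unit = {
    val schema = StructType(Seq(
      StructField("id", LongType),
      StructField("tags", ArrayType(StringType))))

    // Both render the type in SQL-ish form, e.g. struct<id:bigint,tags:array<string>>.
    println(schema.simpleString)

    // Identical today, but catalogString is the representation meant to be
    // persisted in the catalog, so it is the one that must stay stable even if
    // simpleString's behavior ever changes (e.g. truncation for display).
    println(schema.catalogString)
  }
}
```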
 sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala | 2 +-
 sql/core/src/main/scala/org/apache/spark/sql/execution/command/views.scala  | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala
index 2b301a68db..f22ed432cd 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala
@@ -1073,7 +1073,7 @@ class SparkSqlAstBuilder(conf: SQLConf) extends AstBuilder {
       // just convert the whole type string to lower case, otherwise the struct field names
       // will no longer be case sensitive. Instead, we rely on our parser to get the proper
       // case before passing it to Hive.
-      CatalystSqlParser.parseDataType(col.dataType.getText).simpleString,
+      CatalystSqlParser.parseDataType(col.dataType.getText).catalogString,
       nullable = true,
       Option(col.STRING).map(string))
     }
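As a companion to the hunk above, a minimal sketch of the parse-then-render round trip it touches; `CatalystSqlParser.parseDataType` is the entry point used in the diff, while the type string and the expected output are illustrative:

```scala
import org.apache.spark.sql.catalyst.parser.CatalystSqlParser

// Parse a type string whose struct field names carry meaningful case, then
// render it back with catalogString, as the changed line now does.
val dt = CatalystSqlParser.parseDataType("struct<myFirstName: string, myLastName: string>")
println(dt.catalogString)  // expected: struct<myFirstName:string,myLastName:string>
```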
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/views.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/views.scala
index 7542f9d6c3..07cc4a9482 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/views.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/views.scala
@@ -144,11 +144,11 @@ case class CreateViewCommand(
     val viewSchema: Seq[CatalogColumn] = {
       if (tableDesc.schema.isEmpty) {
         analyzedPlan.output.map { a =>
-          CatalogColumn(a.name, a.dataType.simpleString)
+          CatalogColumn(a.name, a.dataType.catalogString)
         }
       } else {
         analyzedPlan.output.zip(tableDesc.schema).map { case (a, col) =>
-          CatalogColumn(col.name, a.dataType.simpleString, nullable = true, col.comment)
+          CatalogColumn(col.name, a.dataType.catalogString, nullable = true, col.comment)
         }
       }
     }
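For context on why the string rendering matters at all: `CatalogColumn` (in `org.apache.spark.sql.catalyst.catalog` at this point in Spark's history) stores the column type as a plain `String`, so whichever rendering is chosen here is what gets persisted. A minimal sketch of the pattern the hunk adopts; the case-class shape is paraphrased from this era of the codebase, not quoted:

```scala
import org.apache.spark.sql.catalyst.catalog.CatalogColumn
import org.apache.spark.sql.types.{DecimalType, StructField}

// The catalog keeps the type as text, so render it with catalogString
// rather than simpleString before handing it over.
val field = StructField("price", DecimalType(10, 2))
val column = CatalogColumn(
  name = field.name,
  dataType = field.dataType.catalogString,  // e.g. "decimal(10,2)"
  nullable = true,
  comment = None)
```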