diff options
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala | 2 +-
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/execution/command/views.scala  | 4 ++--
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala
index 2b301a68db..f22ed432cd 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala
@@ -1073,7 +1073,7 @@ class SparkSqlAstBuilder(conf: SQLConf) extends AstBuilder {
         // just convert the whole type string to lower case, otherwise the struct field names
         // will no longer be case sensitive. Instead, we rely on our parser to get the proper
         // case before passing it to Hive.
-        CatalystSqlParser.parseDataType(col.dataType.getText).simpleString,
+        CatalystSqlParser.parseDataType(col.dataType.getText).catalogString,
         nullable = true,
         Option(col.STRING).map(string))
     }
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/views.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/views.scala
index 7542f9d6c3..07cc4a9482 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/views.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/views.scala
@@ -144,11 +144,11 @@ case class CreateViewCommand(
     val viewSchema: Seq[CatalogColumn] = {
       if (tableDesc.schema.isEmpty) {
         analyzedPlan.output.map { a =>
-          CatalogColumn(a.name, a.dataType.simpleString)
+          CatalogColumn(a.name, a.dataType.catalogString)
         }
       } else {
         analyzedPlan.output.zip(tableDesc.schema).map { case (a, col) =>
-          CatalogColumn(col.name, a.dataType.simpleString, nullable = true, col.comment)
+          CatalogColumn(col.name, a.dataType.catalogString, nullable = true, col.comment)
         }
       }
     }