diff options
author    : Yin Huai <yhuai@databricks.com>  2016-04-29 22:49:12 -0700
committer : Reynold Xin <rxin@databricks.com>  2016-04-29 22:49:12 -0700
commit    : ac41fc648de584f08863313fbac0c5bb6fc6a65e (patch)
tree      : f72da33e155967bda250166f9083940bbe32c845 /sql/hive/src/main
parent    : 7945f9f6d431453a192bea66f66fec813913e4c8 (diff)
download  : spark-ac41fc648de584f08863313fbac0c5bb6fc6a65e.tar.gz
            spark-ac41fc648de584f08863313fbac0c5bb6fc6a65e.tar.bz2
            spark-ac41fc648de584f08863313fbac0c5bb6fc6a65e.zip
[SPARK-14591][SQL] Remove DataTypeParser and add more keywords to the nonReserved list.
## What changes were proposed in this pull request?
CatalystSqlParser can parse data types. So, we do not need to have an individual DataTypeParser.
## How was this patch tested?
Existing tests
Author: Yin Huai <yhuai@databricks.com>
Closes #12796 from yhuai/removeDataTypeParser.
Diffstat (limited to 'sql/hive/src/main')
-rw-r--r-- | sql/hive/src/main/scala/org/apache/spark/sql/hive/MetastoreRelation.scala | 4 | ||||
-rw-r--r-- | sql/hive/src/main/scala/org/apache/spark/sql/hive/orc/OrcFileOperator.scala | 4 |
2 files changed, 4 insertions, 4 deletions
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/MetastoreRelation.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/MetastoreRelation.scala
index 5b580d0ef9..1671228fd9 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/MetastoreRelation.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/MetastoreRelation.scala
@@ -30,7 +30,7 @@ import org.apache.spark.sql.SparkSession
 import org.apache.spark.sql.catalyst.analysis.MultiInstanceRelation
 import org.apache.spark.sql.catalyst.catalog._
 import org.apache.spark.sql.catalyst.expressions.{AttributeMap, AttributeReference, Expression}
-import org.apache.spark.sql.catalyst.parser.DataTypeParser
+import org.apache.spark.sql.catalyst.parser.CatalystSqlParser
 import org.apache.spark.sql.catalyst.plans.logical.{LeafNode, LogicalPlan, Statistics}
 import org.apache.spark.sql.execution.FileRelation
 import org.apache.spark.sql.hive.client.HiveClient
@@ -188,7 +188,7 @@ private[hive] case class MetastoreRelation(
   implicit class SchemaAttribute(f: CatalogColumn) {
     def toAttribute: AttributeReference = AttributeReference(
       f.name,
-      DataTypeParser.parse(f.dataType),
+      CatalystSqlParser.parseDataType(f.dataType),
       // Since data can be dumped in randomly with no validation, everything is nullable.
       nullable = true
     )(qualifier = Some(alias.getOrElse(tableName)))
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/orc/OrcFileOperator.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/orc/OrcFileOperator.scala
index da7b73ae64..13d2bed606 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/orc/OrcFileOperator.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/orc/OrcFileOperator.scala
@@ -24,7 +24,7 @@ import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector
 import org.apache.spark.deploy.SparkHadoopUtil
 import org.apache.spark.internal.Logging
-import org.apache.spark.sql.catalyst.parser.DataTypeParser
+import org.apache.spark.sql.catalyst.parser.CatalystSqlParser
 import org.apache.spark.sql.types.StructType
 private[orc] object OrcFileOperator extends Logging {
@@ -78,7 +78,7 @@ private[orc] object OrcFileOperator extends Logging {
     val readerInspector = reader.getObjectInspector.asInstanceOf[StructObjectInspector]
     val schema = readerInspector.getTypeName
     logDebug(s"Reading schema from file $paths, got Hive schema string: $schema")
-    DataTypeParser.parse(schema).asInstanceOf[StructType]
+    CatalystSqlParser.parseDataType(schema).asInstanceOf[StructType]
   }
 }