| author | gatorsmile <gatorsmile@gmail.com> | 2016-04-09 14:10:44 -0700 |
|---|---|---|
| committer | Yin Huai <yhuai@databricks.com> | 2016-04-09 14:10:44 -0700 |
| commit | 9be5558e009069925d1f2d737d42e1683ed6b47f (patch) | |
| tree | 4506ed79f1edb0be9529cb27f2da95382fa1be27 /sql/hive/src/main/scala/org | |
| parent | 415446cc9b2652f6da11ee8ead5eb4e66685c45f (diff) | |
[SPARK-14481][SQL] Issue Exceptions for All Unsupported Options during Parsing
#### What changes were proposed in this pull request?
"Not good to slightly ignore all the un-supported options/clauses. We should either support it or throw an exception." A comment from yhuai in another PR https://github.com/apache/spark/pull/12146
- Can `Explain` be an exception? Its `Formatted` clause is used in `HiveCompatibilitySuite`.
- Two unsupported clauses in `Drop Table` are handled in a separate PR: https://github.com/apache/spark/pull/12146
#### How was this patch tested?
Test cases have been added covering all of the affected clauses.
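
For illustration, the kind of check such tests perform might look like the sketch below. This is not the code added by this PR: it assumes a ScalaTest suite with a working `HiveContext` bound to `hiveContext`, and the `ParseException` import path is a guess at the package layout at the time.

```scala
// Sketch only: verifies that a clause which used to be silently dropped now
// fails at parse time. Assumes ScalaTest's `intercept` and a live `hiveContext`.
import org.apache.spark.sql.catalyst.parser.ParseException  // import path is an assumption

val e = intercept[ParseException] {
  hiveContext.sql(
    "CREATE TABLE t (key INT, value STRING) " +
      "CLUSTERED BY (key) INTO 4 BUCKETS")
}
// The new error messages begin with "Unsupported operation" or "Operation not allowed".
assert(e.getMessage.contains("Unsupported operation"))
```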
Author: gatorsmile <gatorsmile@gmail.com>
Closes #12255 from gatorsmile/warningToException.
Diffstat (limited to 'sql/hive/src/main/scala/org')
| -rw-r--r-- | sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/HiveSqlParser.scala | 16 |

1 file changed, 10 insertions(+), 6 deletions(-)
```diff
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/HiveSqlParser.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/HiveSqlParser.scala
index ab69d3502e..657edb493a 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/HiveSqlParser.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/HiveSqlParser.scala
@@ -162,14 +162,16 @@ class HiveSqlAstBuilder extends SparkSqlAstBuilder {
 
     // Unsupported clauses.
     if (temp) {
-      logWarning("TEMPORARY clause is ignored.")
+      throw new ParseException(s"Unsupported operation: TEMPORARY clause.", ctx)
     }
     if (ctx.bucketSpec != null) {
       // TODO add this - we need cluster columns in the CatalogTable for this to work.
-      logWarning("CLUSTERED BY ... [ORDERED BY ...] INTO ... BUCKETS clause is ignored.")
+      throw new ParseException("Unsupported operation: " +
+        "CLUSTERED BY ... [ORDERED BY ...] INTO ... BUCKETS clause.", ctx)
     }
     if (ctx.skewSpec != null) {
-      logWarning("SKEWED BY ... ON ... [STORED AS DIRECTORIES] clause is ignored.")
+      throw new ParseException("Operation not allowed: " +
+        "SKEWED BY ... ON ... [STORED AS DIRECTORIES] clause.", ctx)
     }
 
     // Create the schema.
@@ -230,7 +232,7 @@ class HiveSqlAstBuilder extends SparkSqlAstBuilder {
       throw new ParseException(s"Operation not allowed: partitioned views", ctx)
     } else {
       if (ctx.STRING != null) {
-        logWarning("COMMENT clause is ignored.")
+        throw new ParseException("Unsupported operation: COMMENT clause", ctx)
       }
       val identifiers = Option(ctx.identifierCommentList).toSeq.flatMap(_.identifierComment.asScala)
       val schema = identifiers.map { ic =>
@@ -296,7 +298,8 @@ class HiveSqlAstBuilder extends SparkSqlAstBuilder {
       recordReader: Token,
       schemaLess: Boolean): HiveScriptIOSchema = {
     if (recordWriter != null || recordReader != null) {
-      logWarning("Used defined record reader/writer classes are currently ignored.")
+      throw new ParseException(
+        "Unsupported operation: Used defined record reader/writer classes.", ctx)
     }
 
     // Decode and input/output format.
@@ -370,7 +373,8 @@ class HiveSqlAstBuilder extends SparkSqlAstBuilder {
       ctx: TableFileFormatContext): CatalogStorageFormat = withOrigin(ctx) {
     import ctx._
     if (inDriver != null || outDriver != null) {
-      logWarning("INPUTDRIVER ... OUTPUTDRIVER ... clauses are ignored.")
+      throw new ParseException(
+        s"Operation not allowed: INPUTDRIVER ... OUTPUTDRIVER ... clauses", ctx)
     }
     EmptyStorageFormat.copy(
       inputFormat = Option(string(inFmt)),
```
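
Each change site follows the same pattern: instead of logging that a clause is ignored, the AST builder raises a `ParseException` carrying the ANTLR rule context, so the error can point at the offending clause in the original SQL text. Below is a condensed sketch of that pattern; `failUnsupported` is a hypothetical helper (the patch inlines each throw), and the `ParseException` import path is an assumption about the package layout at the time.

```scala
// Hypothetical helper illustrating the pattern applied at each change site in
// this patch; the real code inlines the throw at every call site.
import org.antlr.v4.runtime.ParserRuleContext
import org.apache.spark.sql.catalyst.parser.ParseException  // import path is an assumption

def failUnsupported(clause: String, ctx: ParserRuleContext): Nothing =
  // The (message, ctx) constructor lets the exception report the position of
  // the offending clause in the SQL statement being parsed.
  throw new ParseException(s"Unsupported operation: $clause clause.", ctx)
```

Failing fast here is preferable to a warning because clauses such as `CLUSTERED BY` or `SKEWED BY` affect the physical table layout; accepting the statement while dropping them would create a table that does not match what the user asked for.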