aboutsummaryrefslogtreecommitdiff
path: root/sql
diff options
context:
space:
mode:
authorCheng Lian <lian@databricks.com>2016-04-29 14:52:32 -0700
committerMichael Armbrust <michael@databricks.com>2016-04-29 14:52:32 -0700
commita04b1de5faa5270f48ef0ca1fbaf630ed72c3918 (patch)
tree9f4c2ad1eb6ce72c6a9bdc543047e35d2945b26b /sql
parent8ebae466a399f4373b4cc881936d8de9d4b946ed (diff)
downloadspark-a04b1de5faa5270f48ef0ca1fbaf630ed72c3918.tar.gz
spark-a04b1de5faa5270f48ef0ca1fbaf630ed72c3918.tar.bz2
spark-a04b1de5faa5270f48ef0ca1fbaf630ed72c3918.zip
[SPARK-14981][SQL] Throws exception if DESC is specified for sorting columns
## What changes were proposed in this pull request?

Currently Spark SQL doesn't support sorting columns in descending order. However, the parser accepts the syntax and silently drops sorting directions. This PR fixes this by throwing an exception if `DESC` is specified as sorting direction of a sorting column.

## How was this patch tested?

A test case is added to test the invalid sorting order by checking exception message.

Author: Cheng Lian <lian@databricks.com>

Closes #12759 from liancheng/spark-14981.
Diffstat (limited to 'sql')
-rw-r--r--sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala13
-rw-r--r--sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala26
-rw-r--r--sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala17
3 files changed, 41 insertions, 15 deletions
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala
index c8e1003c02..8128a6efe3 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala
@@ -746,9 +746,18 @@ class SparkSqlAstBuilder(conf: SQLConf) extends AstBuilder {
BucketSpec(
ctx.INTEGER_VALUE.getText.toInt,
visitIdentifierList(ctx.identifierList),
- Option(ctx.orderedIdentifierList).toSeq
+ Option(ctx.orderedIdentifierList)
+ .toSeq
.flatMap(_.orderedIdentifier.asScala)
- .map(_.identifier.getText))
+ .map { orderedIdCtx =>
+ Option(orderedIdCtx.ordering).map(_.getText).foreach { dir =>
+ if (dir.toLowerCase != "asc") {
+ throw parseException("Only ASC ordering is supported for sorting columns", ctx)
+ }
+ }
+
+ orderedIdCtx.identifier.getText
+ })
}
/**
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala
index cb100021be..d6c98ea619 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala
@@ -948,9 +948,9 @@ class MetastoreDataSourcesSuite extends QueryTest with SQLTestUtils with TestHiv
sql(
s"""CREATE TABLE t USING PARQUET
- |OPTIONS (PATH '$path')
- |PARTITIONED BY (a)
- |AS SELECT 1 AS a, 2 AS b
+ |OPTIONS (PATH '$path')
+ |PARTITIONED BY (a)
+ |AS SELECT 1 AS a, 2 AS b
""".stripMargin
)
@@ -972,9 +972,9 @@ class MetastoreDataSourcesSuite extends QueryTest with SQLTestUtils with TestHiv
sql(
s"""CREATE TABLE t USING PARQUET
- |OPTIONS (PATH '$path')
- |CLUSTERED BY (a) SORTED BY (b) INTO 2 BUCKETS
- |AS SELECT 1 AS a, 2 AS b
+ |OPTIONS (PATH '$path')
+ |CLUSTERED BY (a) SORTED BY (b) INTO 2 BUCKETS
+ |AS SELECT 1 AS a, 2 AS b
""".stripMargin
)
@@ -992,9 +992,9 @@ class MetastoreDataSourcesSuite extends QueryTest with SQLTestUtils with TestHiv
sql(
s"""CREATE TABLE t USING PARQUET
- |OPTIONS (PATH '$path')
- |CLUSTERED BY (a) INTO 2 BUCKETS
- |AS SELECT 1 AS a, 2 AS b
+ |OPTIONS (PATH '$path')
+ |CLUSTERED BY (a) INTO 2 BUCKETS
+ |AS SELECT 1 AS a, 2 AS b
""".stripMargin
)
@@ -1016,10 +1016,10 @@ class MetastoreDataSourcesSuite extends QueryTest with SQLTestUtils with TestHiv
sql(
s"""CREATE TABLE t USING PARQUET
- |OPTIONS (PATH '$path')
- |PARTITIONED BY (a)
- |CLUSTERED BY (b) SORTED BY (c) INTO 2 BUCKETS
- |AS SELECT 1 AS a, 2 AS b, 3 AS c
+ |OPTIONS (PATH '$path')
+ |PARTITIONED BY (a)
+ |CLUSTERED BY (b) SORTED BY (c) INTO 2 BUCKETS
+ |AS SELECT 1 AS a, 2 AS b, 3 AS c
""".stripMargin
)
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala
index 100cb3cef8..f20ab36efb 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala
@@ -24,6 +24,7 @@ import org.apache.hadoop.fs.Path
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.analysis.{EliminateSubqueryAliases, FunctionRegistry}
+import org.apache.spark.sql.catalyst.parser.ParseException
import org.apache.spark.sql.execution.datasources.{HadoopFsRelation, LogicalRelation}
import org.apache.spark.sql.functions._
import org.apache.spark.sql.hive.{HiveUtils, MetastoreRelation}
@@ -1488,4 +1489,20 @@ class SQLQuerySuite extends QueryTest with SQLTestUtils with TestHiveSingleton {
"Once a managed table has been dropped, " +
"dirs of this table should also have been deleted.")
}
+
+ test("SPARK-14981: DESC not supported for sorting columns") {
+ withTable("t") {
+ val cause = intercept[ParseException] {
+ sql(
+ """CREATE TABLE t USING PARQUET
+ |OPTIONS (PATH '/path/to/file')
+ |CLUSTERED BY (a) SORTED BY (b DESC) INTO 2 BUCKETS
+ |AS SELECT 1 AS a, 2 AS b
+ """.stripMargin
+ )
+ }
+
+ assert(cause.getMessage.contains("Only ASC ordering is supported for sorting columns"))
+ }
+ }
}