aboutsummaryrefslogtreecommitdiff
path: root/sql/core
diff options
context:
space:
mode:
authorgatorsmile <gatorsmile@gmail.com>2016-04-05 22:33:44 -0700
committerAndrew Or <andrew@databricks.com>2016-04-05 22:33:44 -0700
commit68be5b9e8a5ac1fc4d243bb54c2ca95fee3f74dc (patch)
tree9ba0f8e3dc8af4ade430b5c96ce29ec6db0f574d /sql/core
parent48467f4eb02209a884adbcf052670a057a75fcbd (diff)
downloadspark-68be5b9e8a5ac1fc4d243bb54c2ca95fee3f74dc.tar.gz
spark-68be5b9e8a5ac1fc4d243bb54c2ca95fee3f74dc.tar.bz2
spark-68be5b9e8a5ac1fc4d243bb54c2ca95fee3f74dc.zip
[SPARK-14396][SQL] Throw Exceptions for DDLs of Partitioned Views
#### What changes were proposed in this pull request? Because the concept of partitioning is associated with physical tables, we disable all support for partitioned views, which are defined in the following three commands in [Hive DDL Manual](https://cwiki.apache.org/confluence/display/Hive/LanguageManual+DDL#LanguageManualDDL-Create/Drop/AlterView): ``` ALTER VIEW view DROP [IF EXISTS] PARTITION spec1[, PARTITION spec2, ...]; ALTER VIEW view ADD [IF NOT EXISTS] PARTITION spec; CREATE VIEW [IF NOT EXISTS] [db_name.]view_name [(column_name [COMMENT column_comment], ...) ] [COMMENT view_comment] [TBLPROPERTIES (property_name = property_value, ...)] AS SELECT ...; ``` An exception is thrown when users issue any of these three DDL commands. #### How was this patch tested? Added test cases for parsing create view and changed the existing test cases to verify if the exceptions are thrown. Author: gatorsmile <gatorsmile@gmail.com> Author: xiaoli <lixiao1983@gmail.com> Author: Xiao Li <xiaoli@Xiaos-MacBook-Pro.local> Closes #12169 from gatorsmile/viewPartition.
Diffstat (limited to 'sql/core')
-rw-r--r--sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala10
-rw-r--r--sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLCommandSuite.scala48
2 files changed, 22 insertions, 36 deletions
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala
index d3086fc91e..3de8aa0276 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala
@@ -20,7 +20,7 @@ import scala.collection.JavaConverters._
import org.apache.spark.sql.{AnalysisException, SaveMode}
import org.apache.spark.sql.catalyst.TableIdentifier
-import org.apache.spark.sql.catalyst.parser.{AbstractSqlParser, AstBuilder, ParseException}
+import org.apache.spark.sql.catalyst.parser._
import org.apache.spark.sql.catalyst.parser.SqlBaseParser._
import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, OneRowRelation}
import org.apache.spark.sql.execution.command.{DescribeCommand => _, _}
@@ -474,9 +474,13 @@ class SparkSqlAstBuilder extends AstBuilder {
* ALTER TABLE table ADD [IF NOT EXISTS] PARTITION spec [LOCATION 'loc1']
* ALTER VIEW view ADD [IF NOT EXISTS] PARTITION spec
* }}}
+ *
+ * ALTER VIEW ... ADD PARTITION ... is not supported because the concept of partitioning
+ * is associated with physical tables
*/
override def visitAddTablePartition(
ctx: AddTablePartitionContext): LogicalPlan = withOrigin(ctx) {
+ if (ctx.VIEW != null) throw new ParseException(s"Operation not allowed: partitioned views", ctx)
// Create partition spec to location mapping.
val specsAndLocs = if (ctx.partitionSpec.isEmpty) {
ctx.partitionSpecLocation.asScala.map {
@@ -538,9 +542,13 @@ class SparkSqlAstBuilder extends AstBuilder {
* ALTER TABLE table DROP [IF EXISTS] PARTITION spec1[, PARTITION spec2, ...] [PURGE];
* ALTER VIEW view DROP [IF EXISTS] PARTITION spec1[, PARTITION spec2, ...];
* }}}
+ *
+ * ALTER VIEW ... DROP PARTITION ... is not supported because the concept of partitioning
+ * is associated with physical tables
*/
override def visitDropTablePartitions(
ctx: DropTablePartitionsContext): LogicalPlan = withOrigin(ctx) {
+ if (ctx.VIEW != null) throw new ParseException(s"Operation not allowed: partitioned views", ctx)
AlterTableDropPartition(
visitTableIdentifier(ctx.tableIdentifier),
ctx.partitionSpec.asScala.map(visitNonOptionalPartitionSpec),
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLCommandSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLCommandSuite.scala
index 618c9a58a6..46dcadd690 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLCommandSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLCommandSuite.scala
@@ -351,22 +351,12 @@ class DDLCommandSuite extends PlanTest {
|(col1=NULL, cOL2='f', col3=5, COL4=true)
""".stripMargin
- val parsed1 = parser.parsePlan(sql1)
- val parsed2 = parser.parsePlan(sql2)
-
- val expected1 = AlterTableAddPartition(
- TableIdentifier("view_name", None),
- Seq(
- (Map("dt" -> "2008-08-08", "country" -> "us"), None),
- (Map("dt" -> "2009-09-09", "country" -> "uk"), None)),
- ifNotExists = true)(sql1)
- val expected2 = AlterTableAddPartition(
- TableIdentifier("view_name", None),
- Seq((Map("col1" -> "NULL", "col2" -> "f", "col3" -> "5", "col4" -> "true"), None)),
- ifNotExists = false)(sql2)
-
- comparePlans(parsed1, expected1)
- comparePlans(parsed2, expected2)
+ intercept[ParseException] {
+ parser.parsePlan(sql1)
+ }
+ intercept[ParseException] {
+ parser.parsePlan(sql2)
+ }
}
test("alter table: rename partition") {
@@ -416,8 +406,13 @@ class DDLCommandSuite extends PlanTest {
val parsed1_table = parser.parsePlan(sql1_table)
val parsed2_table = parser.parsePlan(sql2_table)
- val parsed1_view = parser.parsePlan(sql1_view)
- val parsed2_view = parser.parsePlan(sql2_view)
+
+ intercept[ParseException] {
+ parser.parsePlan(sql1_view)
+ }
+ intercept[ParseException] {
+ parser.parsePlan(sql2_view)
+ }
val tableIdent = TableIdentifier("table_name", None)
val expected1_table = AlterTableDropPartition(
@@ -435,25 +430,8 @@ class DDLCommandSuite extends PlanTest {
ifExists = false,
purge = true)(sql2_table)
- val expected1_view = AlterTableDropPartition(
- tableIdent,
- Seq(
- Map("dt" -> "2008-08-08", "country" -> "us"),
- Map("dt" -> "2009-09-09", "country" -> "uk")),
- ifExists = true,
- purge = false)(sql1_view)
- val expected2_view = AlterTableDropPartition(
- tableIdent,
- Seq(
- Map("dt" -> "2008-08-08", "country" -> "us"),
- Map("dt" -> "2009-09-09", "country" -> "uk")),
- ifExists = false,
- purge = false)(sql2_table)
-
comparePlans(parsed1_table, expected1_table)
comparePlans(parsed2_table, expected2_table)
- comparePlans(parsed1_view, expected1_view)
- comparePlans(parsed2_view, expected2_view)
}
test("alter table: archive partition") {