about summary refs log tree commit diff
path: root/sql/hive/src/test/scala/org
diff options
context:
space:
mode:
authorWenchen Fan <wenchen@databricks.com>2016-11-08 22:28:29 +0800
committerWenchen Fan <wenchen@databricks.com>2016-11-08 22:28:29 +0800
commit73feaa30ebfb62c81c7ce2c60ce2163611dd8852 (patch)
treef7053406a7f29d8851920d9acef96de3ccce4ee4 /sql/hive/src/test/scala/org
parent344dcad70173abcb348c68fdb0219960b5b06635 (diff)
downloadspark-73feaa30ebfb62c81c7ce2c60ce2163611dd8852.tar.gz
spark-73feaa30ebfb62c81c7ce2c60ce2163611dd8852.tar.bz2
spark-73feaa30ebfb62c81c7ce2c60ce2163611dd8852.zip
[SPARK-18346][SQL] TRUNCATE TABLE should fail if no partition is matched for the given non-partial partition spec
## What changes were proposed in this pull request? a follow-up of https://github.com/apache/spark/pull/15688 ## How was this patch tested? updated test in `DDLSuite` Author: Wenchen Fan <wenchen@databricks.com> Closes #15805 from cloud-fan/truncate.
Diffstat (limited to 'sql/hive/src/test/scala/org')
-rw-r--r--sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveDDLSuite.scala12
1 file changed, 5 insertions(+), 7 deletions(-)
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveDDLSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveDDLSuite.scala
index 4150e649be..0076a77868 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveDDLSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveDDLSuite.scala
@@ -23,11 +23,10 @@ import org.apache.hadoop.fs.Path
import org.scalatest.BeforeAndAfterEach
import org.apache.spark.sql.{AnalysisException, QueryTest, Row, SaveMode}
-import org.apache.spark.sql.catalyst.analysis.TableAlreadyExistsException
+import org.apache.spark.sql.catalyst.analysis.{NoSuchPartitionException, TableAlreadyExistsException}
import org.apache.spark.sql.catalyst.catalog.{CatalogDatabase, CatalogTable, CatalogTableType}
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.execution.command.DDLUtils
-import org.apache.spark.sql.execution.datasources.CaseInsensitiveMap
import org.apache.spark.sql.hive.HiveExternalCatalog
import org.apache.spark.sql.hive.test.TestHiveSingleton
import org.apache.spark.sql.internal.SQLConf
@@ -1149,11 +1148,10 @@ class HiveDDLSuite
sql("TRUNCATE TABLE partTable PARTITION (width=100)")
assert(spark.table("partTable").count() == data.count())
- // do nothing if no partition is matched for the given non-partial partition spec
- // TODO: This behaviour is different from Hive, we should decide whether we need to follow
- // Hive's behaviour or stick with our existing behaviour later.
- sql("TRUNCATE TABLE partTable PARTITION (width=100, length=100)")
- assert(spark.table("partTable").count() == data.count())
+ // throw exception if no partition is matched for the given non-partial partition spec.
+ intercept[NoSuchPartitionException] {
+ sql("TRUNCATE TABLE partTable PARTITION (width=100, length=100)")
+ }
// throw exception if the column in partition spec is not a partition column.
val e = intercept[AnalysisException] {