path: root/sql/hive/src/test
author     Eric Liang <ekl@databricks.com>    2016-12-02 21:59:02 +0800
committer  Wenchen Fan <wenchen@databricks.com>    2016-12-02 21:59:02 +0800
commit     7935c8470c5c162ef7213e394fe8588e5dd42ca2 (patch)
tree       0b35862d5113a400a2464c22282f6040b6bcc0d5 /sql/hive/src/test
parent     55d528f2ba0ba689dbb881616d9436dc7958e943 (diff)
[SPARK-18659][SQL] Incorrect behaviors in overwrite table for datasource tables
## What changes were proposed in this pull request?

Two bugs are addressed here:

1. INSERT OVERWRITE TABLE sometimes crashed when catalog partition management was enabled. This was because, when dropping partitions after an overwrite operation, the Hive client would attempt to delete the partition files; if the entire partition directory had already been removed, this deletion failed. The PR fixes this by adding a flag that controls whether the Hive client should attempt to delete files.
2. The static partition spec for OVERWRITE TABLE was not correctly resolved to the case-sensitive original partition names. This resulted in the entire table being overwritten if you did not correctly capitalize your partition names.

cc yhuai cloud-fan

## How was this patch tested?

Unit tests. Surprisingly, the existing overwrite table tests did not catch these edge cases.

Author: Eric Liang <ekl@databricks.com>

Closes #16088 from ericl/spark-18659.
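For context on the first fix: the new flag surfaces as a `retainData` parameter on the Hive client's `dropPartitions` call, which is why the VersionsSuite hunk below changes. A minimal sketch of the call shape, assuming `client` and `spec` come from surrounding test setup as in that suite (this is an illustration, not part of the patch):

import org.apache.spark.sql.hive.client.HiveClient

// Sketch: drop a partition's catalog entry without asking the Hive client
// to delete its files, e.g. when an INSERT OVERWRITE has already removed
// the partition directory and a delete attempt would fail.
def dropWithoutDeletingFiles(client: HiveClient, spec: Map[String, String]): Unit = {
  client.dropPartitions("default", "src_part", Seq(spec),
    ignoreIfNotExists = true,  // the partition may already be gone
    purge = false,             // no trash-bypassing purge
    retainData = true)         // the new flag: leave data files in place
}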
Diffstat (limited to 'sql/hive/src/test')
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/PartitionProviderCompatibilitySuite.scala  35
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/client/VersionsSuite.scala  4
2 files changed, 37 insertions, 2 deletions
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/PartitionProviderCompatibilitySuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/PartitionProviderCompatibilitySuite.scala
index e8e4238d1c..c2ac032760 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/PartitionProviderCompatibilitySuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/PartitionProviderCompatibilitySuite.scala
@@ -259,6 +259,41 @@ class PartitionProviderCompatibilitySuite
         }
       }
     }
+
+    test(s"SPARK-18659 insert overwrite table files - partition management $enabled") {
+      withSQLConf(SQLConf.HIVE_MANAGE_FILESOURCE_PARTITIONS.key -> enabled.toString) {
+        withTable("test") {
+          spark.range(10)
+            .selectExpr("id", "id as A", "'x' as B")
+            .write.partitionBy("A", "B").mode("overwrite")
+            .saveAsTable("test")
+          spark.sql("insert overwrite table test select id, id, 'x' from range(1)")
+          assert(spark.sql("select * from test").count() == 1)
+
+          spark.range(10)
+            .selectExpr("id", "id as A", "'x' as B")
+            .write.partitionBy("A", "B").mode("overwrite")
+            .saveAsTable("test")
+          spark.sql(
+            "insert overwrite table test partition (A, B) select id, id, 'x' from range(1)")
+          assert(spark.sql("select * from test").count() == 1)
+        }
+      }
+    }
+
+    test(s"SPARK-18659 insert overwrite table with lowercase - partition management $enabled") {
+      withSQLConf(SQLConf.HIVE_MANAGE_FILESOURCE_PARTITIONS.key -> enabled.toString) {
+        withTable("test") {
+          spark.range(10)
+            .selectExpr("id", "id as A", "'x' as B")
+            .write.partitionBy("A", "B").mode("overwrite")
+            .saveAsTable("test")
+          // note that 'A', 'B' are lowercase instead of their original case here
+          spark.sql("insert overwrite table test partition (a=1, b) select id, 'x' from range(1)")
+          assert(spark.sql("select * from test").count() == 10)
+        }
+      }
+    }
   }
 }
 
 /**
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/VersionsSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/VersionsSuite.scala
index 081b0ed9bd..16ae345de6 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/VersionsSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/VersionsSuite.scala
@@ -352,13 +352,13 @@ class VersionsSuite extends SparkFunSuite with Logging {
       // with a version that is older than the minimum (1.2 in this case).
       try {
         client.dropPartitions("default", "src_part", Seq(spec), ignoreIfNotExists = true,
-          purge = true)
+          purge = true, retainData = false)
         assert(!versionsWithoutPurge.contains(version))
       } catch {
         case _: UnsupportedOperationException =>
           assert(versionsWithoutPurge.contains(version))
           client.dropPartitions("default", "src_part", Seq(spec), ignoreIfNotExists = true,
-            purge = false)
+            purge = false, retainData = false)
       }
       assert(client.getPartitionOption("default", "src_part", spec).isEmpty)
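As for the second fix, the lowercase test in PartitionProviderCompatibilitySuite above pins down the intended behavior. A rough illustration of how the bug manifested, using the same `test` table with partition columns `A` and `B` as in the suite:

// Partition columns were declared as A and B, but the spec uses a/b.
spark.sql("insert overwrite table test partition (a=1, b) select id, 'x' from range(1)")
// Before the fix: the spec did not resolve to the original names A/B and
// the entire table was overwritten, leaving a single row.
// After the fix: only the A=1 partitions are replaced, so 10 rows remain,
// which is exactly what the count() == 10 assertion above checks.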