about | summary | refs | log | tree | commit | diff
path: root/sql/hive/src/test/scala/org/apache
diff options
context:
space:
mode:
authorDongjoon Hyun <dongjoon@apache.org>2017-03-02 00:45:59 +0100
committerSean Owen <sowen@cloudera.com>2017-03-02 00:45:59 +0100
commitdb0ddce523bb823cba996e92ef36ceca31492d2c (patch)
treebea313c008832f75c0638f174afaeaf365c56064 /sql/hive/src/test/scala/org/apache
parent2ff1467d676c9671da231db86bdc8e09c7450f80 (diff)
downloadspark-db0ddce523bb823cba996e92ef36ceca31492d2c.tar.gz
spark-db0ddce523bb823cba996e92ef36ceca31492d2c.tar.bz2
spark-db0ddce523bb823cba996e92ef36ceca31492d2c.zip
[SPARK-19775][SQL] Remove an obsolete `partitionBy().insertInto()` test case
## What changes were proposed in this pull request?

This issue removes [a test case](https://github.com/apache/spark/blame/master/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertIntoHiveTableSuite.scala#L287-L298) which was introduced by [SPARK-14459](https://github.com/apache/spark/commit/652bbb1bf62722b08a062c7a2bf72019f85e179e) and was superseded by [SPARK-16033](https://github.com/apache/spark/blame/master/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertIntoHiveTableSuite.scala#L365-L371). Basically, we cannot use `partitionBy` and `insertInto` together.

```scala
test("Reject partitioning that does not match table") {
  withSQLConf(("hive.exec.dynamic.partition.mode", "nonstrict")) {
    sql("CREATE TABLE partitioned (id bigint, data string) PARTITIONED BY (part string)")
    val data = (1 to 10).map(i => (i, s"data-$i", if ((i % 2) == 0) "even" else "odd"))
      .toDF("id", "data", "part")

    intercept[AnalysisException] {
      // cannot partition by 2 fields when there is only one in the table definition
      data.write.partitionBy("part", "data").insertInto("partitioned")
    }
  }
}
```

## How was this patch tested?

This only removes a test case. Pass the existing Jenkins test.

Author: Dongjoon Hyun <dongjoon@apache.org>

Closes #17106 from dongjoon-hyun/SPARK-19775.
Diffstat (limited to 'sql/hive/src/test/scala/org/apache')
-rw-r--r--sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertIntoHiveTableSuite.scala13
1 file changed, 0 insertions, 13 deletions
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertIntoHiveTableSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertIntoHiveTableSuite.scala
index 71ce5a7c4a..d6999af84e 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertIntoHiveTableSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertIntoHiveTableSuite.scala
@@ -284,19 +284,6 @@ class InsertIntoHiveTableSuite extends QueryTest with TestHiveSingleton with Bef
sql("DROP TABLE hiveTableWithStructValue")
}
- test("Reject partitioning that does not match table") {
- withSQLConf(("hive.exec.dynamic.partition.mode", "nonstrict")) {
- sql("CREATE TABLE partitioned (id bigint, data string) PARTITIONED BY (part string)")
- val data = (1 to 10).map(i => (i, s"data-$i", if ((i % 2) == 0) "even" else "odd"))
- .toDF("id", "data", "part")
-
- intercept[AnalysisException] {
- // cannot partition by 2 fields when there is only one in the table definition
- data.write.partitionBy("part", "data").insertInto("partitioned")
- }
- }
- }
-
test("Test partition mode = strict") {
withSQLConf(("hive.exec.dynamic.partition.mode", "strict")) {
sql("CREATE TABLE partitioned (id bigint, data string) PARTITIONED BY (part string)")