author     Cheng Lian <lian@databricks.com>    2016-06-21 11:58:33 -0700
committer  Yin Huai <yhuai@databricks.com>     2016-06-21 11:58:33 -0700
commit     f4a3d45e38f18278bbdb7cc32486ded50f76d54b
tree       702f01e34700d8fd4fea36609cabb91940b45f53 /sql/hive
parent     b76e3553760b3c68bebc2c71b0851598718e6f87
[SPARK-16037][SQL] Follow-up: add DataFrameWriter.insertInto() test cases for by position resolution
## What changes were proposed in this pull request?

This PR migrates some test cases introduced in #12313 as a follow-up of #13754 and #13766. These test cases cover `DataFrameWriter.insertInto()`, while the former two only cover SQL `INSERT` statements.

Note that the `testPartitionedTable` utility method tests both Hive SerDe tables and data source tables.

## How was this patch tested?

N/A

Author: Cheng Lian <lian@databricks.com>

Closes #13810 from liancheng/spark-16037-follow-up-tests.
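For context, here is a minimal standalone sketch (not part of this commit) of the by-position semantics these tests exercise: `DataFrameWriter.insertInto()` ignores DataFrame column names and matches columns to the target table purely by position. The table name `target` and the local-session setup are illustrative assumptions.

```scala
import org.apache.spark.sql.SparkSession

object InsertIntoByPositionSketch {
  def main(args: Array[String]): Unit = {
    // Illustrative local session; any SparkSession whose catalog supports
    // CREATE TABLE ... USING will do.
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("insertInto-by-position-sketch")
      .getOrCreate()
    import spark.implicits._

    spark.sql("CREATE TABLE target (a INT, b INT) USING parquet")

    // DataFrame column names are deliberately swapped relative to the table.
    // insertInto() ignores the names and writes by position, so the first
    // DataFrame column (named `b`, value 1) lands in table column `a`.
    val df = Seq((1, 2)).toDF("b", "a")
    df.write.insertInto("target")

    spark.sql("SELECT a, b FROM target").show() // a = 1, b = 2

    spark.stop()
  }
}
```

By contrast, `saveAsTable()` in append mode resolves columns by name, which is exactly the distinction these follow-up tests pin down.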
Diffstat (limited to 'sql/hive')
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertIntoHiveTableSuite.scala | 48
1 file changed, 48 insertions, 0 deletions
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertIntoHiveTableSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertIntoHiveTableSuite.scala
index 46432512ba..d9ce1c3dc1 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertIntoHiveTableSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertIntoHiveTableSuite.scala
@@ -469,4 +469,52 @@ class InsertIntoHiveTableSuite extends QueryTest with TestHiveSingleton with Bef
)
}
}
+
+ testPartitionedTable("insertInto() should match columns by position and ignore column names") {
+ tableName =>
+ withSQLConf("hive.exec.dynamic.partition.mode" -> "nonstrict") {
+ // Columns `df.c` and `df.d` are resolved by position, and thus mapped to partition columns
+ // `b` and `c` of the target table.
+ val df = Seq((1, 2, 3, 4)).toDF("a", "b", "c", "d")
+ df.write.insertInto(tableName)
+
+ checkAnswer(
+ sql(s"SELECT a, b, c, d FROM $tableName"),
+ Row(1, 3, 4, 2)
+ )
+ }
+ }
+
+ testPartitionedTable("insertInto() should match unnamed columns by position") {
+ tableName =>
+ withSQLConf("hive.exec.dynamic.partition.mode" -> "nonstrict") {
+ // Columns `c + 1` and `d + 1` are resolved by position, and thus mapped to partition
+ // columns `b` and `c` of the target table.
+ val df = Seq((1, 2, 3, 4)).toDF("a", "b", "c", "d")
+ df.select('a + 1, 'b + 1, 'c + 1, 'd + 1).write.insertInto(tableName)
+
+ checkAnswer(
+ sql(s"SELECT a, b, c, d FROM $tableName"),
+ Row(2, 4, 5, 3)
+ )
+ }
+ }
+
+ testPartitionedTable("insertInto() should reject missing columns") {
+ tableName =>
+ sql("CREATE TABLE t (a INT, b INT)")
+
+ intercept[AnalysisException] {
+ spark.table("t").write.insertInto(tableName)
+ }
+ }
+
+ testPartitionedTable("insertInto() should reject extra columns") {
+ tableName =>
+ sql("CREATE TABLE t (a INT, b INT, c INT, d INT, e INT)")
+
+ intercept[AnalysisException] {
+ spark.table("t").write.insertInto(tableName)
+ }
+ }
}
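The `testPartitionedTable` helper invoked above is defined elsewhere in `InsertIntoHiveTableSuite` and is not shown in this diff. Inferred from its call sites and the expected rows, a hedged sketch of its likely shape follows; the helper body, table names, and DDL here are assumptions, not the committed code. The key point is that it runs each test body twice, against a Hive SerDe table and a data source table, both with data columns `(a, d)` followed by partition columns `(b, c)` — which is why the expected rows above come out reordered.

```scala
// Hypothetical reconstruction, inferred from the call sites in this diff.
// Assumes it lives inside the suite, so test(), sql(), and withTable() come
// from the ScalaTest / SQLTestUtils mixins already in scope.
private def testPartitionedTable(testName: String)(f: String => Unit): Unit = {
  test(s"Hive SerDe table - $testName") {
    withTable("hive_serde_table") {
      // Hive SerDe flavor: partition columns declared outside the schema.
      sql("CREATE TABLE hive_serde_table (a INT, d INT) PARTITIONED BY (b INT, c INT)")
      f("hive_serde_table")
    }
  }

  test(s"data source table - $testName") {
    withTable("data_source_table") {
      // Data source flavor: partition columns are part of the schema.
      sql(
        """CREATE TABLE data_source_table (a INT, d INT, b INT, c INT)
          |USING PARQUET
          |PARTITIONED BY (b, c)
        """.stripMargin)
      f("data_source_table")
    }
  }
}
```

Under this column layout, inserting `Seq((1, 2, 3, 4)).toDF("a", "b", "c", "d")` maps positions to table columns `(a, d, b, c)`, so `SELECT a, b, c, d` returns `(1, 3, 4, 2)` — matching the first test's expected row.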