about summary refs log tree commit diff
path: root/sql/core/src/test
diff options
context:
space:
mode:
author: Yin Huai <yhuai@databricks.com> 2016-06-19 21:45:53 -0700
committer: Yin Huai <yhuai@databricks.com> 2016-06-19 21:45:53 -0700
commit 6d0f921aedfdd3b7e8472b6776d0c7d8299190bd (patch)
tree 29f6f34219596d37c44927ff386a77550854bf41 /sql/core/src/test
parent 4f17fddcd57adeae0d7e31bd14423283d4b625e9 (diff)
download spark-6d0f921aedfdd3b7e8472b6776d0c7d8299190bd.tar.gz
spark-6d0f921aedfdd3b7e8472b6776d0c7d8299190bd.tar.bz2
spark-6d0f921aedfdd3b7e8472b6776d0c7d8299190bd.zip
[SPARK-16036][SPARK-16037][SPARK-16034][SQL] Follow up code clean up and improvement
## What changes were proposed in this pull request?

This PR is the follow-up PR for https://github.com/apache/spark/pull/13754/files and https://github.com/apache/spark/pull/13749. I will comment inline to explain my changes.

## How was this patch tested?

Existing tests.

Author: Yin Huai <yhuai@databricks.com>

Closes #13766 from yhuai/caseSensitivity.
Diffstat (limited to 'sql/core/src/test')
-rw-r--r-- sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala | 20
1 file changed, 18 insertions(+), 2 deletions(-)
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala
index 8827649d0a..f40ddcc95a 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala
@@ -1337,8 +1337,24 @@ class DDLSuite extends QueryTest with SharedSQLContext with BeforeAndAfterEach {
assert(sql("select * from partitionedTable").collect().size == 1)
// Inserts new data successfully when partition columns are correctly specified in
// partitionBy(...).
- df.write.mode("append").partitionBy("a", "b").saveAsTable("partitionedTable")
- assert(sql("select * from partitionedTable").collect().size == 2)
+ // TODO: Right now, partition columns are always treated in a case-insensitive way.
+ // See the write method in DataSource.scala.
+ Seq((4, 5, 6)).toDF("a", "B", "c")
+ .write
+ .mode("append")
+ .partitionBy("a", "B")
+ .saveAsTable("partitionedTable")
+
+ Seq((7, 8, 9)).toDF("a", "b", "c")
+ .write
+ .mode("append")
+ .partitionBy("a", "b")
+ .saveAsTable("partitionedTable")
+
+ checkAnswer(
+ sql("select a, b, c from partitionedTable"),
+ Row(1, 2, 3) :: Row(4, 5, 6) :: Row(7, 8, 9) :: Nil
+ )
}
}
}