diff options
author | Brian Cho <bcho@fb.com> | 2016-09-02 11:12:34 +0800 |
---|---|---|
committer | Wenchen Fan <wenchen@databricks.com> | 2016-09-02 11:12:34 +0800 |
commit | f2d6e2ef23b3f862c336ce5f7b98c43c3fde1e36 (patch) | |
tree | c755d6fce75c57ef1a294819ce1ffe15c3868377 /sql/hive | |
parent | 06e33985c631fe91e1c4cef6039b8752548cc435 (diff) | |
download | spark-f2d6e2ef23b3f862c336ce5f7b98c43c3fde1e36.tar.gz spark-f2d6e2ef23b3f862c336ce5f7b98c43c3fde1e36.tar.bz2 spark-f2d6e2ef23b3f862c336ce5f7b98c43c3fde1e36.zip |
[SPARK-16926][SQL] Add unit test to compare table and partition column metadata.
## What changes were proposed in this pull request?
Add unit test for changes made in PR #14515. It makes sure that a newly created table has the same number of columns in table and partition metadata. This test fails before the changes introduced in #14515.
## How was this patch tested?
Run new unit test.
Author: Brian Cho <bcho@fb.com>
Closes #14930 from dafrista/partition-metadata-unit-test.
Diffstat (limited to 'sql/hive')
-rw-r--r-- | sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveTableScanSuite.scala | 35 |
1 file changed, 35 insertions, 0 deletions
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveTableScanSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveTableScanSuite.scala index 5b464764f0..5c460d25f3 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveTableScanSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveTableScanSuite.scala @@ -18,6 +18,7 @@ package org.apache.spark.sql.hive.execution import org.apache.spark.sql.Row +import org.apache.spark.sql.hive.MetastoreRelation import org.apache.spark.sql.hive.test.{TestHive, TestHiveSingleton} import org.apache.spark.sql.hive.test.TestHive._ import org.apache.spark.sql.hive.test.TestHive.implicits._ @@ -143,4 +144,38 @@ class HiveTableScanSuite extends HiveComparisonTest with SQLTestUtils with TestH } } } + + test("SPARK-16926: number of table and partition columns match for new partitioned table") { + val view = "src" + withTempView(view) { + spark.range(1, 5).createOrReplaceTempView(view) + val table = "table_with_partition" + withTable(table) { + sql( + s""" + |CREATE TABLE $table(id string) + |PARTITIONED BY (p1 string,p2 string,p3 string,p4 string,p5 string) + """.stripMargin) + sql( + s""" + |FROM $view v + |INSERT INTO TABLE $table + |PARTITION (p1='a',p2='b',p3='c',p4='d',p5='e') + |SELECT v.id + |INSERT INTO TABLE $table + |PARTITION (p1='a',p2='c',p3='c',p4='d',p5='e') + |SELECT v.id + """.stripMargin) + val plan = sql( + s""" + |SELECT * FROM $table + """.stripMargin).queryExecution.sparkPlan + val relation = plan.collectFirst { + case p: HiveTableScanExec => p.relation + }.get + val tableCols = relation.hiveQlTable.getCols + relation.getHiveQlPartitions().foreach(p => assert(p.getCols.size == tableCols.size)) + } + } + } } |