about summary refs log tree commit diff
path: root/sql/core/src/test/scala
diff options
context:
space:
mode:
Diffstat (limited to 'sql/core/src/test/scala')
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala  17
1 file changed, 12 insertions, 5 deletions
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala
index e6ae42258d..b343454b12 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala
@@ -265,7 +265,7 @@ class DDLSuite extends QueryTest with SharedSQLContext with BeforeAndAfterEach {
userSpecifiedPartitionCols.map(p => s"PARTITIONED BY ($p)").getOrElse("")
val schemaClause = userSpecifiedSchema.map(s => s"($s)").getOrElse("")
val uri = path.toURI
- sql(
+ val sqlCreateTable =
s"""
|CREATE TABLE $tabName $schemaClause
|USING parquet
@@ -273,11 +273,18 @@ class DDLSuite extends QueryTest with SharedSQLContext with BeforeAndAfterEach {
| path '$uri'
|)
|$partitionClause
- """.stripMargin)
- val tableMetadata = spark.sessionState.catalog.getTableMetadata(TableIdentifier(tabName))
+ """.stripMargin
+ if (userSpecifiedSchema.isEmpty && userSpecifiedPartitionCols.nonEmpty) {
+ val e = intercept[AnalysisException](sql(sqlCreateTable)).getMessage
+ assert(e.contains(
+ "not allowed to specify partition columns when the table schema is not defined"))
+ } else {
+ sql(sqlCreateTable)
+ val tableMetadata = spark.sessionState.catalog.getTableMetadata(TableIdentifier(tabName))
- assert(expectedSchema == tableMetadata.schema)
- assert(expectedPartitionCols == tableMetadata.partitionColumnNames)
+ assert(expectedSchema == tableMetadata.schema)
+ assert(expectedPartitionCols == tableMetadata.partitionColumnNames)
+ }
}
}