aboutsummaryrefslogtreecommitdiff
path: root/sql/hive/src/test
diff options
context:
space:
mode:
authorWenchen Fan <wenchen@databricks.com>2016-11-15 20:24:36 -0800
committerReynold Xin <rxin@databricks.com>2016-11-15 20:24:36 -0800
commit4ac9759f807d217b6f67badc6d5f6b7138eb92d2 (patch)
treed3d2827914e2b4a9a2ceb2f4b40515afe9f4e9ce /sql/hive/src/test
parent4b35d13baca189a50cdaa2ba435d10a1f953e3f8 (diff)
downloadspark-4ac9759f807d217b6f67badc6d5f6b7138eb92d2.tar.gz
spark-4ac9759f807d217b6f67badc6d5f6b7138eb92d2.tar.bz2
spark-4ac9759f807d217b6f67badc6d5f6b7138eb92d2.zip
[SPARK-18377][SQL] warehouse path should be a static conf
## What changes were proposed in this pull request?
It is weird that every session can set its own warehouse path at runtime; we should forbid it and make it a static conf.
## How was this patch tested?
Existing tests.
Author: Wenchen Fan <wenchen@databricks.com>
Closes #15825 from cloud-fan/warehouse.
Diffstat (limited to 'sql/hive/src/test')
-rw-r--r--sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveDDLSuite.scala85
1 file changed, 39 insertions(+), 46 deletions(-)
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveDDLSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveDDLSuite.scala
index a2b04863d3..15e3927b75 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveDDLSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveDDLSuite.scala
@@ -722,53 +722,46 @@ class HiveDDLSuite
}
private def dropDatabase(cascade: Boolean, tableExists: Boolean): Unit = {
- withTempPath { tmpDir =>
- val path = tmpDir.toString
- withSQLConf(SQLConf.WAREHOUSE_PATH.key -> path) {
- val dbName = "db1"
- val fs = new Path(path).getFileSystem(spark.sessionState.newHadoopConf())
- val dbPath = new Path(path)
- // the database directory does not exist
- assert(!fs.exists(dbPath))
-
- sql(s"CREATE DATABASE $dbName")
- val catalog = spark.sessionState.catalog
- val expectedDBLocation = "file:" + appendTrailingSlash(dbPath.toString) + s"$dbName.db"
- val db1 = catalog.getDatabaseMetadata(dbName)
- assert(db1 == CatalogDatabase(
- dbName,
- "",
- expectedDBLocation,
- Map.empty))
- // the database directory was created
- assert(fs.exists(dbPath) && fs.isDirectory(dbPath))
- sql(s"USE $dbName")
-
- val tabName = "tab1"
- assert(!tableDirectoryExists(TableIdentifier(tabName), Option(expectedDBLocation)))
- sql(s"CREATE TABLE $tabName as SELECT 1")
- assert(tableDirectoryExists(TableIdentifier(tabName), Option(expectedDBLocation)))
-
- if (!tableExists) {
- sql(s"DROP TABLE $tabName")
- assert(!tableDirectoryExists(TableIdentifier(tabName), Option(expectedDBLocation)))
- }
+ val dbName = "db1"
+ val dbPath = new Path(spark.sessionState.conf.warehousePath)
+ val fs = dbPath.getFileSystem(spark.sessionState.newHadoopConf())
- sql(s"USE default")
- val sqlDropDatabase = s"DROP DATABASE $dbName ${if (cascade) "CASCADE" else "RESTRICT"}"
- if (tableExists && !cascade) {
- val message = intercept[AnalysisException] {
- sql(sqlDropDatabase)
- }.getMessage
- assert(message.contains(s"Database $dbName is not empty. One or more tables exist."))
- // the database directory was not removed
- assert(fs.exists(new Path(expectedDBLocation)))
- } else {
- sql(sqlDropDatabase)
- // the database directory was removed and the inclusive table directories are also removed
- assert(!fs.exists(new Path(expectedDBLocation)))
- }
- }
+ sql(s"CREATE DATABASE $dbName")
+ val catalog = spark.sessionState.catalog
+ val expectedDBLocation = "file:" + appendTrailingSlash(dbPath.toString) + s"$dbName.db"
+ val db1 = catalog.getDatabaseMetadata(dbName)
+ assert(db1 == CatalogDatabase(
+ dbName,
+ "",
+ expectedDBLocation,
+ Map.empty))
+ // the database directory was created
+ assert(fs.exists(dbPath) && fs.isDirectory(dbPath))
+ sql(s"USE $dbName")
+
+ val tabName = "tab1"
+ assert(!tableDirectoryExists(TableIdentifier(tabName), Option(expectedDBLocation)))
+ sql(s"CREATE TABLE $tabName as SELECT 1")
+ assert(tableDirectoryExists(TableIdentifier(tabName), Option(expectedDBLocation)))
+
+ if (!tableExists) {
+ sql(s"DROP TABLE $tabName")
+ assert(!tableDirectoryExists(TableIdentifier(tabName), Option(expectedDBLocation)))
+ }
+
+ sql(s"USE default")
+ val sqlDropDatabase = s"DROP DATABASE $dbName ${if (cascade) "CASCADE" else "RESTRICT"}"
+ if (tableExists && !cascade) {
+ val message = intercept[AnalysisException] {
+ sql(sqlDropDatabase)
+ }.getMessage
+ assert(message.contains(s"Database $dbName is not empty. One or more tables exist."))
+ // the database directory was not removed
+ assert(fs.exists(new Path(expectedDBLocation)))
+ } else {
+ sql(sqlDropDatabase)
+ // the database directory was removed and the inclusive table directories are also removed
+ assert(!fs.exists(new Path(expectedDBLocation)))
}
}