author    Sean Owen <sowen@cloudera.com>  2015-03-20 14:16:21 +0000
committer Sean Owen <sowen@cloudera.com>  2015-03-20 14:16:21 +0000
commit    6f80c3e8880340597f161f87e64697bec86cc586 (patch)
tree      c019ca07ed3b4dd178c102aac00418485da5e679 /sql/hive
parent    d08e3eb3dc455970b685a7b8b7e00c537c89a8e4 (diff)
SPARK-6338 [CORE] Use standard temp dir mechanisms in tests to avoid orphaned temp files
Use `Utils.createTempDir()` to replace other temp file mechanisms used in some tests, to further ensure they are cleaned up, and simplify

Author: Sean Owen <sowen@cloudera.com>

Closes #5029 from srowen/SPARK-6338 and squashes the following commits:

27b740a [Sean Owen] Fix hive-thriftserver tests that don't expect an existing dir
4a212fa [Sean Owen] Standardize a bit more temp dir management
9004081 [Sean Owen] Revert some added recursive-delete calls
57609e4 [Sean Owen] Use Utils.createTempDir() to replace other temp file mechanisms used in some tests, to further ensure they are cleaned up, and simplify
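The pattern the commit standardizes on is sketched below. This is an illustrative before/after only, assuming (as the removed `registerShutdownDeleteDir` calls suggest) that `Utils.createTempDir()` both creates the directory and registers it for recursive deletion at JVM shutdown:

    import java.io.File
    import org.apache.spark.util.Utils

    // Before: tests reserved a temp *file* name, then swapped it for a
    // directory by hand, with no automatic cleanup.
    val oldStyle = File.createTempFile("parquettests", "sparksql")
    oldStyle.delete()
    oldStyle.mkdir()

    // After: one call creates the directory and schedules it for
    // recursive deletion when the JVM exits.
    val newStyle: File = Utils.createTempDir()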
Diffstat (limited to 'sql/hive')
-rw-r--r--  sql/hive/src/main/scala/org/apache/spark/sql/hive/test/TestHive.scala             | 16
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertIntoHiveTableSuite.scala  |  5
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala | 14
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/parquetSuites.scala             | 22
4 files changed, 21 insertions(+), 36 deletions(-)
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/test/TestHive.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/test/TestHive.scala
index 4859991e23..b4aee78046 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/test/TestHive.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/test/TestHive.scala
@@ -30,7 +30,6 @@ import org.apache.hadoop.hive.serde2.avro.AvroSerDe
import org.apache.spark.sql.SQLConf
import org.apache.spark.sql.catalyst.analysis._
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
-import org.apache.spark.sql.catalyst.util._
import org.apache.spark.sql.execution.CacheTableCommand
import org.apache.spark.sql.hive._
import org.apache.spark.sql.hive.execution.HiveNativeCommand
@@ -69,22 +68,19 @@ class TestHiveContext(sc: SparkContext) extends HiveContext(sc) {
hiveconf.set("hive.plan.serialization.format", "javaXML")
- lazy val warehousePath = getTempFilePath("sparkHiveWarehouse").getCanonicalPath
- lazy val metastorePath = getTempFilePath("sparkHiveMetastore").getCanonicalPath
+ lazy val warehousePath = Utils.createTempDir()
+ lazy val metastorePath = Utils.createTempDir()
/** Sets up the system initially or after a RESET command */
protected def configure(): Unit = {
+ warehousePath.delete()
+ metastorePath.delete()
setConf("javax.jdo.option.ConnectionURL",
s"jdbc:derby:;databaseName=$metastorePath;create=true")
- setConf("hive.metastore.warehouse.dir", warehousePath)
- Utils.registerShutdownDeleteDir(new File(warehousePath))
- Utils.registerShutdownDeleteDir(new File(metastorePath))
+ setConf("hive.metastore.warehouse.dir", warehousePath.toString)
}
- val testTempDir = File.createTempFile("testTempFiles", "spark.hive.tmp")
- testTempDir.delete()
- testTempDir.mkdir()
- Utils.registerShutdownDeleteDir(testTempDir)
+ val testTempDir = Utils.createTempDir()
// For some hive test case which contain ${system:test.tmp.dir}
System.setProperty("test.tmp.dir", testTempDir.getCanonicalPath)
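The `warehousePath.delete()` / `metastorePath.delete()` calls above tie back to the first squashed commit ("Fix hive-thriftserver tests that don't expect an existing dir"): `Utils.createTempDir()` actually creates the directory, while Derby and the Hive warehouse setup expect to create their paths themselves. A minimal sketch of the resulting idiom, on the assumption that the deleted path stays registered for shutdown cleanup:

    lazy val metastorePath = Utils.createTempDir()

    protected def configure(): Unit = {
      // Reserve a shutdown-tracked name, but hand Derby a path that does
      // not yet exist so that create=true can build the database there.
      metastorePath.delete()
      setConf("javax.jdo.option.ConnectionURL",
        s"jdbc:derby:;databaseName=$metastorePath;create=true")
    }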
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertIntoHiveTableSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertIntoHiveTableSuite.scala
index d4b175fa44..381cd2a291 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertIntoHiveTableSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertIntoHiveTableSuite.scala
@@ -21,12 +21,11 @@ import java.io.File
import org.scalatest.BeforeAndAfter
-import com.google.common.io.Files
-
import org.apache.spark.sql.execution.QueryExecutionException
import org.apache.spark.sql.{QueryTest, _}
import org.apache.spark.sql.hive.test.TestHive
import org.apache.spark.sql.types._
+import org.apache.spark.util.Utils
/* Implicits */
import org.apache.spark.sql.hive.test.TestHive._
@@ -112,7 +111,7 @@ class InsertIntoHiveTableSuite extends QueryTest with BeforeAndAfter {
test("SPARK-4203:random partition directory order") {
sql("CREATE TABLE tmp_table (key int, value string)")
- val tmpDir = Files.createTempDir()
+ val tmpDir = Utils.createTempDir()
sql(s"CREATE TABLE table_with_partition(c1 string) PARTITIONED by (p1 string,p2 string,p3 string,p4 string,p5 string) location '${tmpDir.toURI.toString}' ")
sql("INSERT OVERWRITE TABLE table_with_partition partition (p1='a',p2='b',p3='c',p4='c',p5='1') SELECT 'blarr' FROM tmp_table")
sql("INSERT OVERWRITE TABLE table_with_partition partition (p1='a',p2='b',p3='c',p4='c',p5='2') SELECT 'blarr' FROM tmp_table")
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala
index 5d6a6f3b64..ff2e6ea9ea 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala
@@ -19,13 +19,14 @@ package org.apache.spark.sql.hive
import java.io.File
+import scala.collection.mutable.ArrayBuffer
+
import org.scalatest.BeforeAndAfterEach
import org.apache.commons.io.FileUtils
import org.apache.hadoop.fs.Path
import org.apache.hadoop.mapred.InvalidInputException
-import org.apache.spark.sql.catalyst.util
import org.apache.spark.sql._
import org.apache.spark.util.Utils
import org.apache.spark.sql.types._
@@ -34,8 +35,6 @@ import org.apache.spark.sql.hive.test.TestHive.implicits._
import org.apache.spark.sql.parquet.ParquetRelation2
import org.apache.spark.sql.sources.LogicalRelation
-import scala.collection.mutable.ArrayBuffer
-
/**
* Tests for persisting tables created though the data sources API into the metastore.
*/
@@ -43,11 +42,12 @@ class MetastoreDataSourcesSuite extends QueryTest with BeforeAndAfterEach {
override def afterEach(): Unit = {
reset()
- if (tempPath.exists()) Utils.deleteRecursively(tempPath)
+ Utils.deleteRecursively(tempPath)
}
val filePath = Utils.getSparkClassLoader.getResource("sample.json").getFile
- var tempPath: File = util.getTempFilePath("jsonCTAS").getCanonicalFile
+ var tempPath: File = Utils.createTempDir()
+ tempPath.delete()
test ("persistent JSON table") {
sql(
@@ -154,7 +154,7 @@ class MetastoreDataSourcesSuite extends QueryTest with BeforeAndAfterEach {
}
test("check change without refresh") {
- val tempDir = File.createTempFile("sparksql", "json")
+ val tempDir = File.createTempFile("sparksql", "json", Utils.createTempDir())
tempDir.delete()
sparkContext.parallelize(("a", "b") :: Nil).toDF()
.toJSON.saveAsTextFile(tempDir.getCanonicalPath)
@@ -192,7 +192,7 @@ class MetastoreDataSourcesSuite extends QueryTest with BeforeAndAfterEach {
}
test("drop, change, recreate") {
- val tempDir = File.createTempFile("sparksql", "json")
+ val tempDir = File.createTempFile("sparksql", "json", Utils.createTempDir())
tempDir.delete()
sparkContext.parallelize(("a", "b") :: Nil).toDF()
.toJSON.saveAsTextFile(tempDir.getCanonicalPath)
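Note the three-argument `File.createTempFile(prefix, suffix, directory)` overload used in both JSON tests: it reserves the name inside the given parent directory. Because that parent comes from `Utils.createTempDir()`, anything written there is cleaned up at shutdown even though `tempDir` itself is deleted mid-test. A sketch of the idiom:

    // Reserve a path inside a shutdown-tracked parent directory...
    val tempDir = File.createTempFile("sparksql", "json", Utils.createTempDir())
    tempDir.delete() // ...then free it so saveAsTextFile can recreate it as a directory
    sparkContext.parallelize(("a", "b") :: Nil).toDF()
      .toJSON.saveAsTextFile(tempDir.getCanonicalPath)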
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/parquetSuites.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/parquetSuites.scala
index 1904f5faef..d891c4e890 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/parquetSuites.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/parquetSuites.scala
@@ -32,6 +32,7 @@ import org.apache.spark.sql.sources.{InsertIntoDataSource, LogicalRelation}
import org.apache.spark.sql.parquet.{ParquetRelation2, ParquetTableScan}
import org.apache.spark.sql.SaveMode
import org.apache.spark.sql.types._
+import org.apache.spark.util.Utils
// The data where the partitioning key exists only in the directory structure.
case class ParquetData(intField: Int, stringField: String)
@@ -579,13 +580,8 @@ abstract class ParquetPartitioningTest extends QueryTest with BeforeAndAfterAll
var partitionedTableDirWithKeyAndComplexTypes: File = null
override def beforeAll(): Unit = {
- partitionedTableDir = File.createTempFile("parquettests", "sparksql")
- partitionedTableDir.delete()
- partitionedTableDir.mkdir()
-
- normalTableDir = File.createTempFile("parquettests", "sparksql")
- normalTableDir.delete()
- normalTableDir.mkdir()
+ partitionedTableDir = Utils.createTempDir()
+ normalTableDir = Utils.createTempDir()
(1 to 10).foreach { p =>
val partDir = new File(partitionedTableDir, s"p=$p")
@@ -601,9 +597,7 @@ abstract class ParquetPartitioningTest extends QueryTest with BeforeAndAfterAll
.toDF()
.saveAsParquetFile(new File(normalTableDir, "normal").getCanonicalPath)
- partitionedTableDirWithKey = File.createTempFile("parquettests", "sparksql")
- partitionedTableDirWithKey.delete()
- partitionedTableDirWithKey.mkdir()
+ partitionedTableDirWithKey = Utils.createTempDir()
(1 to 10).foreach { p =>
val partDir = new File(partitionedTableDirWithKey, s"p=$p")
@@ -613,9 +607,7 @@ abstract class ParquetPartitioningTest extends QueryTest with BeforeAndAfterAll
.saveAsParquetFile(partDir.getCanonicalPath)
}
- partitionedTableDirWithKeyAndComplexTypes = File.createTempFile("parquettests", "sparksql")
- partitionedTableDirWithKeyAndComplexTypes.delete()
- partitionedTableDirWithKeyAndComplexTypes.mkdir()
+ partitionedTableDirWithKeyAndComplexTypes = Utils.createTempDir()
(1 to 10).foreach { p =>
val partDir = new File(partitionedTableDirWithKeyAndComplexTypes, s"p=$p")
@@ -625,9 +617,7 @@ abstract class ParquetPartitioningTest extends QueryTest with BeforeAndAfterAll
}.toDF().saveAsParquetFile(partDir.getCanonicalPath)
}
- partitionedTableDirWithComplexTypes = File.createTempFile("parquettests", "sparksql")
- partitionedTableDirWithComplexTypes.delete()
- partitionedTableDirWithComplexTypes.mkdir()
+ partitionedTableDirWithComplexTypes = Utils.createTempDir()
(1 to 10).foreach { p =>
val partDir = new File(partitionedTableDirWithComplexTypes, s"p=$p")