author     Sean Owen <sowen@cloudera.com>  2015-03-20 14:16:21 +0000
committer  Sean Owen <sowen@cloudera.com>  2015-03-20 14:16:21 +0000
commit     6f80c3e8880340597f161f87e64697bec86cc586 (patch)
tree       c019ca07ed3b4dd178c102aac00418485da5e679 /sql/catalyst
parent     d08e3eb3dc455970b685a7b8b7e00c537c89a8e4 (diff)
SPARK-6338 [CORE] Use standard temp dir mechanisms in tests to avoid orphaned temp files
Use `Utils.createTempDir()` to replace other temp file mechanisms used in
some tests, to further ensure they are cleaned up, and simplify

Author: Sean Owen <sowen@cloudera.com>

Closes #5029 from srowen/SPARK-6338 and squashes the following commits:

27b740a [Sean Owen] Fix hive-thriftserver tests that don't expect an existing dir
4a212fa [Sean Owen] Standardize a bit more temp dir management
9004081 [Sean Owen] Revert some added recursive-delete calls
57609e4 [Sean Owen] Use Utils.createTempDir() to replace other temp file mechanisms used in some tests, to further ensure they are cleaned up, and simplify
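Both patterns involved are small; here is a minimal sketch of the two side by side, using a `Files.createTempDirectory`-based stand-in for Spark's `Utils.createTempDir()` (the real helper also registers the directory for recursive deletion on JVM shutdown, which this sketch only approximates):

    import java.io.File
    import java.nio.file.Files

    // Removed pattern: create a temp file, delete it, and return the
    // now-missing path. Nothing ever cleans up whatever later gets written
    // there, and another process can claim the same name in the window
    // after delete().
    def getTempFilePath(prefix: String, suffix: String = ""): File = {
      val tempFile = File.createTempFile(prefix, suffix)
      tempFile.delete()
      tempFile
    }

    // Rough stand-in for Utils.createTempDir(): the directory is created
    // atomically and scheduled for deletion when the JVM exits. (Spark's
    // real helper deletes recursively; this sketch only removes an empty
    // directory.)
    def createTempDirSketch(namePrefix: String = "spark"): File = {
      val dir = Files.createTempDirectory(namePrefix).toFile
      sys.addShutdownHook(dir.delete())
      dir
    }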
Diffstat (limited to 'sql/catalyst')
-rw-r--r--  sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/package.scala   4
-rw-r--r--  sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/package.scala                 15
2 files changed, 4 insertions, 15 deletions
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/package.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/package.scala
index 80c7dfd376..528e38a50a 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/package.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/package.scala
@@ -19,7 +19,7 @@ package org.apache.spark.sql.catalyst.expressions
import org.apache.spark.annotation.DeveloperApi
import org.apache.spark.sql.catalyst.rules
-import org.apache.spark.sql.catalyst.util
+import org.apache.spark.util.Utils
/**
* A collection of generators that build custom bytecode at runtime for performing the evaluation
@@ -52,7 +52,7 @@ package object codegen {
@DeveloperApi
object DumpByteCode {
import scala.sys.process._
- val dumpDirectory = util.getTempFilePath("sparkSqlByteCode")
+ val dumpDirectory = Utils.createTempDir()
dumpDirectory.mkdir()
def apply(obj: Any): Unit = {
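Since `Utils.createTempDir()` returns a directory that already exists, the `dumpDirectory.mkdir()` call left in the patched code is a harmless no-op. For context, a hedged sketch of how a dump step might use the `scala.sys.process` import visible above to disassemble a class into that directory; the helper name and arguments here are hypothetical, not the actual `DumpByteCode` body:

    import java.io.File
    import scala.sys.process._

    // Hypothetical illustration only: run javap (assumed to be on the PATH)
    // against a compiled class under `classDir` and redirect its output into
    // a file in the managed temp directory.
    def dumpDisassembly(classDir: File, className: String, dumpDirectory: File): Unit = {
      val outFile = new File(dumpDirectory, s"$className.txt")
      val cmd = Seq("javap", "-c", "-classpath", classDir.getAbsolutePath, className)
      (Process(cmd) #> outFile).!  // exit code ignored in this sketch
    }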
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/package.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/package.scala
index d8da45ae70..feed50f9a2 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/package.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/package.scala
@@ -19,20 +19,9 @@ package org.apache.spark.sql.catalyst
import java.io.{PrintWriter, ByteArrayOutputStream, FileInputStream, File}
-import org.apache.spark.util.{Utils => SparkUtils}
+import org.apache.spark.util.Utils
package object util {
- /**
- * Returns a path to a temporary file that probably does not exist.
- * Note, there is always the race condition that someone created this
- * file since the last time we checked. Thus, this shouldn't be used
- * for anything security conscious.
- */
- def getTempFilePath(prefix: String, suffix: String = ""): File = {
- val tempFile = File.createTempFile(prefix, suffix)
- tempFile.delete()
- tempFile
- }
def fileToString(file: File, encoding: String = "UTF-8") = {
val inStream = new FileInputStream(file)
@@ -56,7 +45,7 @@ package object util {
def resourceToString(
resource:String,
encoding: String = "UTF-8",
- classLoader: ClassLoader = SparkUtils.getSparkClassLoader) = {
+ classLoader: ClassLoader = Utils.getSparkClassLoader) = {
val inStream = classLoader.getResourceAsStream(resource)
val outStream = new ByteArrayOutputStream
try {
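The hunk above is truncated inside `resourceToString`'s try block. The idiom it uses, reading an `InputStream` fully into a `ByteArrayOutputStream` and closing it in a finally block, looks roughly like this self-contained sketch (not the file's exact body):

    import java.io.{ByteArrayOutputStream, InputStream}

    // Read an InputStream fully into a byte array, always closing the
    // stream, even if the read fails partway through.
    def slurp(inStream: InputStream): Array[Byte] = {
      val outStream = new ByteArrayOutputStream
      try {
        val buf = new Array[Byte](8192)
        var n = inStream.read(buf)
        while (n != -1) {
          outStream.write(buf, 0, n)
          n = inStream.read(buf)
        }
      } finally {
        inStream.close()
      }
      outStream.toByteArray
    }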