-rw-r--r--  core/src/test/scala/org/apache/spark/ContextCleanerSuite.scala               4
-rw-r--r--  examples/src/main/scala/org/apache/spark/examples/ml/DataFrameExample.scala  4
2 files changed, 4 insertions, 4 deletions
diff --git a/core/src/test/scala/org/apache/spark/ContextCleanerSuite.scala b/core/src/test/scala/org/apache/spark/ContextCleanerSuite.scala
index 69ff6c7c28..6724af9525 100644
--- a/core/src/test/scala/org/apache/spark/ContextCleanerSuite.scala
+++ b/core/src/test/scala/org/apache/spark/ContextCleanerSuite.scala
@@ -32,6 +32,7 @@ import org.apache.spark.internal.Logging
 import org.apache.spark.rdd.{RDD, ReliableRDDCheckpointData}
 import org.apache.spark.shuffle.sort.SortShuffleManager
 import org.apache.spark.storage._
+import org.apache.spark.util.Utils
 
 /**
  * An abstract base class for context cleaner tests, which sets up a context with a config
@@ -206,8 +207,7 @@ class ContextCleanerSuite extends ContextCleanerSuiteBase {
   }
 
   test("automatically cleanup normal checkpoint") {
-    val checkpointDir = java.io.File.createTempFile("temp", "")
-    checkpointDir.deleteOnExit()
+    val checkpointDir = Utils.createTempDir()
     checkpointDir.delete()
     var rdd = newPairRDD()
     sc.setCheckpointDir(checkpointDir.toString)
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/DataFrameExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/DataFrameExample.scala
index c69027babb..11faa6192b 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/DataFrameExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/DataFrameExample.scala
@@ -28,6 +28,7 @@ import org.apache.spark.ml.linalg.Vector
 import org.apache.spark.mllib.linalg.Vectors
 import org.apache.spark.mllib.stat.MultivariateOnlineSummarizer
 import org.apache.spark.sql.{DataFrame, Row, SparkSession}
+import org.apache.spark.util.Utils
 
 /**
  * An example of how to use [[org.apache.spark.sql.DataFrame]] for ML. Run with
@@ -86,8 +87,7 @@ object DataFrameExample {
     println(s"Selected features column with average values:\n ${featureSummary.mean.toString}")
 
     // Save the records in a parquet file.
-    val tmpDir = Files.createTempDir()
-    tmpDir.deleteOnExit()
+    val tmpDir = Utils.createTempDir()
     val outputDir = new File(tmpDir, "dataframe").toString
     println(s"Saving to $outputDir as Parquet file.")
    df.write.parquet(outputDir)
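
For context, a minimal Scala sketch of the behavior being swapped, assuming (per
Spark's util sources) that Utils.createTempDir registers the new directory for
recursive deletion at JVM shutdown:

    import java.io.File
    import org.apache.spark.util.Utils

    // Old pattern: createTempFile yields a *file*; deleteOnExit() removes only
    // that single path at exit and silently skips non-empty directories, so
    // checkpoint/parquet data written under the path would be leaked.
    val leaky = File.createTempFile("temp", "")
    leaky.deleteOnExit()
    leaky.delete() // the file must go before the path can be reused as a dir

    // New pattern: one call returns a fresh directory that Spark cleans up
    // recursively via a shutdown hook, children included.
    val tmpDir: File = Utils.createTempDir()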