about summary refs log tree commit diff
path: root/sql/core/src/test/scala/org/apache/spark/sql/execution
diff options
context:
space:
mode:
Diffstat (limited to 'sql/core/src/test/scala/org/apache/spark/sql/execution')
-rw-r--r-- sql/core/src/test/scala/org/apache/spark/sql/execution/DataSourceScanExecRedactionSuite.scala | 11
-rw-r--r-- sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/FileSourceStrategySuite.scala | 2
-rw-r--r-- sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/CompactibleFileStreamLogSuite.scala | 4
-rw-r--r-- sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/HDFSMetadataLogSuite.scala | 4
4 files changed, 8 insertions, 13 deletions
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/DataSourceScanExecRedactionSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/DataSourceScanExecRedactionSuite.scala
index 05a2b2c862..f7f1ccea28 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/DataSourceScanExecRedactionSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/DataSourceScanExecRedactionSuite.scala
@@ -18,22 +18,17 @@ package org.apache.spark.sql.execution
import org.apache.hadoop.fs.Path
+import org.apache.spark.SparkConf
import org.apache.spark.sql.QueryTest
import org.apache.spark.sql.test.SharedSQLContext
-import org.apache.spark.util.Utils
/**
* Suite that tests the redaction of DataSourceScanExec
*/
class DataSourceScanExecRedactionSuite extends QueryTest with SharedSQLContext {
- import Utils._
-
- override def beforeAll(): Unit = {
- sparkConf.set("spark.redaction.string.regex",
- "file:/[\\w_]+")
- super.beforeAll()
- }
+ override protected def sparkConf: SparkConf = super.sparkConf
+ .set("spark.redaction.string.regex", "file:/[\\w_]+")
test("treeString is redacted") {
withTempDir { dir =>
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/FileSourceStrategySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/FileSourceStrategySuite.scala
index f36162858b..8703fe96e5 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/FileSourceStrategySuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/FileSourceStrategySuite.scala
@@ -42,7 +42,7 @@ import org.apache.spark.util.Utils
class FileSourceStrategySuite extends QueryTest with SharedSQLContext with PredicateHelper {
import testImplicits._
- protected override val sparkConf = new SparkConf().set("spark.default.parallelism", "1")
+ protected override def sparkConf = super.sparkConf.set("spark.default.parallelism", "1")
test("unpartitioned table, single partition") {
val table =
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/CompactibleFileStreamLogSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/CompactibleFileStreamLogSuite.scala
index 20ac06f048..3d480b148d 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/CompactibleFileStreamLogSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/CompactibleFileStreamLogSuite.scala
@@ -28,8 +28,8 @@ import org.apache.spark.sql.test.SharedSQLContext
class CompactibleFileStreamLogSuite extends SparkFunSuite with SharedSQLContext {
/** To avoid caching of FS objects */
- override protected val sparkConf =
- new SparkConf().set(s"spark.hadoop.fs.$scheme.impl.disable.cache", "true")
+ override protected def sparkConf =
+ super.sparkConf.set(s"spark.hadoop.fs.$scheme.impl.disable.cache", "true")
import CompactibleFileStreamLog._
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/HDFSMetadataLogSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/HDFSMetadataLogSuite.scala
index 662c4466b2..7689bc03a4 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/HDFSMetadataLogSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/HDFSMetadataLogSuite.scala
@@ -38,8 +38,8 @@ import org.apache.spark.util.UninterruptibleThread
class HDFSMetadataLogSuite extends SparkFunSuite with SharedSQLContext {
/** To avoid caching of FS objects */
- override protected val sparkConf =
- new SparkConf().set(s"spark.hadoop.fs.$scheme.impl.disable.cache", "true")
+ override protected def sparkConf =
+ super.sparkConf.set(s"spark.hadoop.fs.$scheme.impl.disable.cache", "true")
private implicit def toOption[A](a: A): Option[A] = Option(a)