Diffstat (limited to 'core/src/test/scala/org')
-rw-r--r--  core/src/test/scala/org/apache/spark/AccumulatorSuite.scala                           |  2
-rw-r--r--  core/src/test/scala/org/apache/spark/SparkConfSuite.scala                             |  2
-rw-r--r--  core/src/test/scala/org/apache/spark/rdd/ParallelCollectionSplitSuite.scala           | 12
-rw-r--r--  core/src/test/scala/org/apache/spark/rdd/RDDSuite.scala                               |  6
-rw-r--r--  core/src/test/scala/org/apache/spark/scheduler/MapStatusSuite.scala                   |  2
-rw-r--r--  core/src/test/scala/org/apache/spark/scheduler/TaskResultGetterSuite.scala            |  2
-rw-r--r--  core/src/test/scala/org/apache/spark/shuffle/sort/BypassMergeSortShuffleWriterSuite.scala |  2
-rw-r--r--  core/src/test/scala/org/apache/spark/util/SparkConfWithEnv.scala                      |  4
8 files changed, 15 insertions, 17 deletions
diff --git a/core/src/test/scala/org/apache/spark/AccumulatorSuite.scala b/core/src/test/scala/org/apache/spark/AccumulatorSuite.scala
index 8acd0439b6..4ff8ae57ab 100644
--- a/core/src/test/scala/org/apache/spark/AccumulatorSuite.scala
+++ b/core/src/test/scala/org/apache/spark/AccumulatorSuite.scala
@@ -347,7 +347,7 @@ private class SaveInfoListener extends SparkListener {
def getCompletedStageInfos: Seq[StageInfo] = completedStageInfos.toArray.toSeq
def getCompletedTaskInfos: Seq[TaskInfo] = completedTaskInfos.values.flatten.toSeq
def getCompletedTaskInfos(stageId: StageId, stageAttemptId: StageAttemptId): Seq[TaskInfo] =
- completedTaskInfos.get((stageId, stageAttemptId)).getOrElse(Seq.empty[TaskInfo])
+ completedTaskInfos.getOrElse((stageId, stageAttemptId), Seq.empty[TaskInfo])
/**
* If `jobCompletionCallback` is set, block until the next call has finished.
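
The change above relies on Map.getOrElse taking the key and a by-name default directly, which makes the get(...).getOrElse(...) chain redundant. A minimal sketch of the idiom, with hypothetical keys and values (not taken from the suite):

    // Illustrative only; the map and key are made up.
    val infos = Map((1, 0) -> Seq("task-a", "task-b"))
    // Before: two lookups expressed as a chain.
    val verbose = infos.get((2, 0)).getOrElse(Seq.empty[String])
    // After: Map.getOrElse does the same in one call; the default is
    // only evaluated when the key is absent (by-name parameter).
    val concise = infos.getOrElse((2, 0), Seq.empty[String])
    assert(verbose == concise)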
diff --git a/core/src/test/scala/org/apache/spark/SparkConfSuite.scala b/core/src/test/scala/org/apache/spark/SparkConfSuite.scala
index 2fe99e3f81..79881f30b2 100644
--- a/core/src/test/scala/org/apache/spark/SparkConfSuite.scala
+++ b/core/src/test/scala/org/apache/spark/SparkConfSuite.scala
@@ -237,7 +237,7 @@ class SparkConfSuite extends SparkFunSuite with LocalSparkContext with ResetSyst
conf.set(newName, "4")
assert(conf.get(newName) === "4")
- val count = conf.getAll.filter { case (k, v) => k.startsWith("spark.history.") }.size
+ val count = conf.getAll.count { case (k, v) => k.startsWith("spark.history.") }
assert(count === 4)
conf.set("spark.yarn.applicationMaster.waitTries", "42")
diff --git a/core/src/test/scala/org/apache/spark/rdd/ParallelCollectionSplitSuite.scala b/core/src/test/scala/org/apache/spark/rdd/ParallelCollectionSplitSuite.scala
index e7cc1617cd..31ce9483cf 100644
--- a/core/src/test/scala/org/apache/spark/rdd/ParallelCollectionSplitSuite.scala
+++ b/core/src/test/scala/org/apache/spark/rdd/ParallelCollectionSplitSuite.scala
@@ -101,7 +101,7 @@ class ParallelCollectionSplitSuite extends SparkFunSuite with Checkers {
val data = 1 until 100
val slices = ParallelCollectionRDD.slice(data, 3)
assert(slices.size === 3)
- assert(slices.map(_.size).reduceLeft(_ + _) === 99)
+ assert(slices.map(_.size).sum === 99)
assert(slices.forall(_.isInstanceOf[Range]))
}
@@ -109,7 +109,7 @@ class ParallelCollectionSplitSuite extends SparkFunSuite with Checkers {
val data = 1 to 100
val slices = ParallelCollectionRDD.slice(data, 3)
assert(slices.size === 3)
- assert(slices.map(_.size).reduceLeft(_ + _) === 100)
+ assert(slices.map(_.size).sum === 100)
assert(slices.forall(_.isInstanceOf[Range]))
}
@@ -202,7 +202,7 @@ class ParallelCollectionSplitSuite extends SparkFunSuite with Checkers {
val data = 1L until 100L
val slices = ParallelCollectionRDD.slice(data, 3)
assert(slices.size === 3)
- assert(slices.map(_.size).reduceLeft(_ + _) === 99)
+ assert(slices.map(_.size).sum === 99)
assert(slices.forall(_.isInstanceOf[NumericRange[_]]))
}
@@ -210,7 +210,7 @@ class ParallelCollectionSplitSuite extends SparkFunSuite with Checkers {
val data = 1L to 100L
val slices = ParallelCollectionRDD.slice(data, 3)
assert(slices.size === 3)
- assert(slices.map(_.size).reduceLeft(_ + _) === 100)
+ assert(slices.map(_.size).sum === 100)
assert(slices.forall(_.isInstanceOf[NumericRange[_]]))
}
@@ -218,7 +218,7 @@ class ParallelCollectionSplitSuite extends SparkFunSuite with Checkers {
val data = 1.0 until 100.0 by 1.0
val slices = ParallelCollectionRDD.slice(data, 3)
assert(slices.size === 3)
- assert(slices.map(_.size).reduceLeft(_ + _) === 99)
+ assert(slices.map(_.size).sum === 99)
assert(slices.forall(_.isInstanceOf[NumericRange[_]]))
}
@@ -226,7 +226,7 @@ class ParallelCollectionSplitSuite extends SparkFunSuite with Checkers {
val data = 1.0 to 100.0 by 1.0
val slices = ParallelCollectionRDD.slice(data, 3)
assert(slices.size === 3)
- assert(slices.map(_.size).reduceLeft(_ + _) === 100)
+ assert(slices.map(_.size).sum === 100)
assert(slices.forall(_.isInstanceOf[NumericRange[_]]))
}
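
All six hunks above swap reduceLeft(_ + _) for sum. The two agree on non-empty input; sum additionally returns 0 on an empty collection, where reduceLeft would throw. A short sketch of the idiom:

    // Sketch of the idiom, not the test itself.
    val sizes = Seq(33, 33, 33)
    assert(sizes.reduceLeft(_ + _) == sizes.sum)   // same result on non-empty input
    // The two differ on empty input:
    assert(Seq.empty[Int].sum == 0)
    // Seq.empty[Int].reduceLeft(_ + _) would throw UnsupportedOperationException.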
diff --git a/core/src/test/scala/org/apache/spark/rdd/RDDSuite.scala b/core/src/test/scala/org/apache/spark/rdd/RDDSuite.scala
index 80347b800a..24daedab20 100644
--- a/core/src/test/scala/org/apache/spark/rdd/RDDSuite.scala
+++ b/core/src/test/scala/org/apache/spark/rdd/RDDSuite.scala
@@ -54,16 +54,16 @@ class RDDSuite extends SparkFunSuite with SharedSparkContext {
assert(!nums.isEmpty())
assert(nums.max() === 4)
assert(nums.min() === 1)
- val partitionSums = nums.mapPartitions(iter => Iterator(iter.reduceLeft(_ + _)))
+ val partitionSums = nums.mapPartitions(iter => Iterator(iter.sum))
assert(partitionSums.collect().toList === List(3, 7))
val partitionSumsWithSplit = nums.mapPartitionsWithIndex {
- case(split, iter) => Iterator((split, iter.reduceLeft(_ + _)))
+ case(split, iter) => Iterator((split, iter.sum))
}
assert(partitionSumsWithSplit.collect().toList === List((0, 3), (1, 7)))
val partitionSumsWithIndex = nums.mapPartitionsWithIndex {
- case(split, iter) => Iterator((split, iter.reduceLeft(_ + _)))
+ case(split, iter) => Iterator((split, iter.sum))
}
assert(partitionSumsWithIndex.collect().toList === List((0, 3), (1, 7)))
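
The same sum idiom applies to the partition iterators here; Iterator.sum, like reduceLeft, consumes the iterator in a single pass, so the behavior inside mapPartitions is unchanged. An illustrative sketch over plain Scala iterators (no SparkContext involved):

    // Illustrative only: sums per "partition" over plain iterators.
    val partitions = Seq(Iterator(1, 2), Iterator(3, 4))
    val sums = partitions.map(iter => iter.sum)   // each iterator is consumed once
    assert(sums == Seq(3, 7))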
diff --git a/core/src/test/scala/org/apache/spark/scheduler/MapStatusSuite.scala b/core/src/test/scala/org/apache/spark/scheduler/MapStatusSuite.scala
index 56e0f01b3b..759d52fca5 100644
--- a/core/src/test/scala/org/apache/spark/scheduler/MapStatusSuite.scala
+++ b/core/src/test/scala/org/apache/spark/scheduler/MapStatusSuite.scala
@@ -79,7 +79,7 @@ class MapStatusSuite extends SparkFunSuite {
test("HighlyCompressedMapStatus: estimated size should be the average non-empty block size") {
val sizes = Array.tabulate[Long](3000) { i => i.toLong }
- val avg = sizes.sum / sizes.filter(_ != 0).length
+ val avg = sizes.sum / sizes.count(_ != 0)
val loc = BlockManagerId("a", "b", 10)
val status = MapStatus(loc, sizes)
val status1 = compressAndDecompressMapStatus(status)
diff --git a/core/src/test/scala/org/apache/spark/scheduler/TaskResultGetterSuite.scala b/core/src/test/scala/org/apache/spark/scheduler/TaskResultGetterSuite.scala
index b5385c11a9..935e280e60 100644
--- a/core/src/test/scala/org/apache/spark/scheduler/TaskResultGetterSuite.scala
+++ b/core/src/test/scala/org/apache/spark/scheduler/TaskResultGetterSuite.scala
@@ -243,7 +243,7 @@ class TaskResultGetterSuite extends SparkFunSuite with BeforeAndAfter with Local
val resAfter = captor.getValue
val resSizeBefore = resBefore.accumUpdates.find(_.name == Some(RESULT_SIZE)).flatMap(_.update)
val resSizeAfter = resAfter.accumUpdates.find(_.name == Some(RESULT_SIZE)).flatMap(_.update)
- assert(resSizeBefore.exists(_ == 0L))
+ assert(resSizeBefore.contains(0L))
assert(resSizeAfter.exists(_.toString.toLong > 0L))
}
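
Option.contains (available since Scala 2.11) states the membership test directly instead of spelling it as exists with an equality predicate. A small sketch with a hypothetical value:

    // Hypothetical value; mirrors the Option idiom only.
    val resSize: Option[Any] = Some(0L)
    assert(resSize.exists(_ == 0L))   // before: predicate spelling
    assert(resSize.contains(0L))      // after: direct membership test
    assert(!Option.empty[Any].contains(0L))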
diff --git a/core/src/test/scala/org/apache/spark/shuffle/sort/BypassMergeSortShuffleWriterSuite.scala b/core/src/test/scala/org/apache/spark/shuffle/sort/BypassMergeSortShuffleWriterSuite.scala
index fdacd8c9f5..cf9f9da1e6 100644
--- a/core/src/test/scala/org/apache/spark/shuffle/sort/BypassMergeSortShuffleWriterSuite.scala
+++ b/core/src/test/scala/org/apache/spark/shuffle/sort/BypassMergeSortShuffleWriterSuite.scala
@@ -166,7 +166,7 @@ class BypassMergeSortShuffleWriterSuite extends SparkFunSuite with BeforeAndAfte
writer.stop( /* success = */ true)
assert(temporaryFilesCreated.nonEmpty)
assert(writer.getPartitionLengths.sum === outputFile.length())
- assert(writer.getPartitionLengths.filter(_ == 0L).size === 4) // should be 4 zero length files
+ assert(writer.getPartitionLengths.count(_ == 0L) === 4) // should be 4 zero length files
assert(temporaryFilesCreated.count(_.exists()) === 0) // check that temporary files were deleted
val shuffleWriteMetrics = taskContext.taskMetrics().shuffleWriteMetrics.get
assert(shuffleWriteMetrics.bytesWritten === outputFile.length())
diff --git a/core/src/test/scala/org/apache/spark/util/SparkConfWithEnv.scala b/core/src/test/scala/org/apache/spark/util/SparkConfWithEnv.scala
index ddd5edf4f7..0c8b8cfdd5 100644
--- a/core/src/test/scala/org/apache/spark/util/SparkConfWithEnv.scala
+++ b/core/src/test/scala/org/apache/spark/util/SparkConfWithEnv.scala
@@ -23,9 +23,7 @@ import org.apache.spark.SparkConf
* Customized SparkConf that allows env variables to be overridden.
*/
class SparkConfWithEnv(env: Map[String, String]) extends SparkConf(false) {
- override def getenv(name: String): String = {
- env.get(name).getOrElse(super.getenv(name))
- }
+ override def getenv(name: String): String = env.getOrElse(name, super.getenv(name))
override def clone: SparkConf = {
new SparkConfWithEnv(env).setAll(getAll)