Diffstat (limited to 'core/src/test')
-rw-r--r-- core/src/test/java/org/apache/spark/memory/TaskMemoryManagerSuite.java | 36
-rw-r--r-- core/src/test/java/org/apache/spark/shuffle/sort/ShuffleInMemorySorterSuite.java | 2
-rw-r--r-- core/src/test/java/org/apache/spark/shuffle/sort/UnsafeShuffleWriterSuite.java | 6
-rw-r--r-- core/src/test/java/org/apache/spark/util/collection/unsafe/sort/UnsafeExternalSorterSuite.java | 22
-rw-r--r-- core/src/test/java/org/apache/spark/util/collection/unsafe/sort/UnsafeInMemorySorterSuite.java | 3
-rw-r--r-- core/src/test/scala/org/apache/spark/AccumulatorSuite.scala | 2
-rw-r--r-- core/src/test/scala/org/apache/spark/SparkConfSuite.scala | 2
-rw-r--r-- core/src/test/scala/org/apache/spark/rdd/ParallelCollectionSplitSuite.scala | 12
-rw-r--r-- core/src/test/scala/org/apache/spark/rdd/RDDSuite.scala | 6
-rw-r--r-- core/src/test/scala/org/apache/spark/scheduler/MapStatusSuite.scala | 2
-rw-r--r-- core/src/test/scala/org/apache/spark/scheduler/TaskResultGetterSuite.scala | 2
-rw-r--r-- core/src/test/scala/org/apache/spark/shuffle/sort/BypassMergeSortShuffleWriterSuite.scala | 2
-rw-r--r-- core/src/test/scala/org/apache/spark/util/SparkConfWithEnv.scala | 4
13 files changed, 50 insertions(+), 51 deletions(-)
diff --git a/core/src/test/java/org/apache/spark/memory/TaskMemoryManagerSuite.java b/core/src/test/java/org/apache/spark/memory/TaskMemoryManagerSuite.java
index 776a2997cf..127789b632 100644
--- a/core/src/test/java/org/apache/spark/memory/TaskMemoryManagerSuite.java
+++ b/core/src/test/java/org/apache/spark/memory/TaskMemoryManagerSuite.java
@@ -73,37 +73,37 @@ public class TaskMemoryManagerSuite {
TestMemoryConsumer c1 = new TestMemoryConsumer(manager);
TestMemoryConsumer c2 = new TestMemoryConsumer(manager);
c1.use(100);
- assert(c1.getUsed() == 100);
+ Assert.assertEquals(100, c1.getUsed());
c2.use(100);
- assert(c2.getUsed() == 100);
- assert(c1.getUsed() == 0); // spilled
+ Assert.assertEquals(100, c2.getUsed());
+ Assert.assertEquals(0, c1.getUsed()); // spilled
c1.use(100);
- assert(c1.getUsed() == 100);
- assert(c2.getUsed() == 0); // spilled
+ Assert.assertEquals(100, c1.getUsed());
+ Assert.assertEquals(0, c2.getUsed()); // spilled
c1.use(50);
- assert(c1.getUsed() == 50); // spilled
- assert(c2.getUsed() == 0);
+ Assert.assertEquals(50, c1.getUsed()); // spilled
+ Assert.assertEquals(0, c2.getUsed());
c2.use(50);
- assert(c1.getUsed() == 50);
- assert(c2.getUsed() == 50);
+ Assert.assertEquals(50, c1.getUsed());
+ Assert.assertEquals(50, c2.getUsed());
c1.use(100);
- assert(c1.getUsed() == 100);
- assert(c2.getUsed() == 0); // spilled
+ Assert.assertEquals(100, c1.getUsed());
+ Assert.assertEquals(0, c2.getUsed()); // spilled
c1.free(20);
- assert(c1.getUsed() == 80);
+ Assert.assertEquals(80, c1.getUsed());
c2.use(10);
- assert(c1.getUsed() == 80);
- assert(c2.getUsed() == 10);
+ Assert.assertEquals(80, c1.getUsed());
+ Assert.assertEquals(10, c2.getUsed());
c2.use(100);
- assert(c2.getUsed() == 100);
- assert(c1.getUsed() == 0); // spilled
+ Assert.assertEquals(100, c2.getUsed());
+ Assert.assertEquals(0, c1.getUsed()); // spilled
c1.free(0);
c2.free(100);
- assert(manager.cleanUpAllAllocatedMemory() == 0);
+ Assert.assertEquals(0, manager.cleanUpAllAllocatedMemory());
}
@Test
@@ -114,7 +114,7 @@ public class TaskMemoryManagerSuite {
.set("spark.unsafe.offHeap", "true")
.set("spark.memory.offHeap.size", "1000");
final TaskMemoryManager manager = new TaskMemoryManager(new TestMemoryManager(conf), 0);
- assert(manager.tungstenMemoryMode == MemoryMode.OFF_HEAP);
+ Assert.assertSame(MemoryMode.OFF_HEAP, manager.tungstenMemoryMode);
}
}
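Note: the motivation for dropping the Java `assert` keyword above is that JVM assertions are disabled by default (they only run under -ea), so a failing condition can pass silently, and on failure they carry no diagnostic message. A minimal, hypothetical Scala sketch of the difference (not part of this patch; assumes JUnit 4 on the classpath, and the object name is invented):

    import org.junit.Assert

    object JUnitAssertSketch {
      def main(args: Array[String]): Unit = {
        val used = 99L
        // In Java, `assert used == 100;` compiles to a check that is skipped
        // unless the JVM runs with -ea, so a regression slips through.
        // Assert.assertEquals always executes and reports both values:
        try Assert.assertEquals(100L, used)
        catch {
          case e: AssertionError => println(e.getMessage) // expected:<100> but was:<99>
        }
      }
    }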
diff --git a/core/src/test/java/org/apache/spark/shuffle/sort/ShuffleInMemorySorterSuite.java b/core/src/test/java/org/apache/spark/shuffle/sort/ShuffleInMemorySorterSuite.java
index eb1da8e1b4..b4fa33f32a 100644
--- a/core/src/test/java/org/apache/spark/shuffle/sort/ShuffleInMemorySorterSuite.java
+++ b/core/src/test/java/org/apache/spark/shuffle/sort/ShuffleInMemorySorterSuite.java
@@ -48,7 +48,7 @@ public class ShuffleInMemorySorterSuite {
public void testSortingEmptyInput() {
final ShuffleInMemorySorter sorter = new ShuffleInMemorySorter(consumer, 100);
final ShuffleInMemorySorter.ShuffleSorterIterator iter = sorter.getSortedIterator();
- assert(!iter.hasNext());
+ Assert.assertFalse(iter.hasNext());
}
@Test
diff --git a/core/src/test/java/org/apache/spark/shuffle/sort/UnsafeShuffleWriterSuite.java b/core/src/test/java/org/apache/spark/shuffle/sort/UnsafeShuffleWriterSuite.java
index 876c3a2283..add9d937d3 100644
--- a/core/src/test/java/org/apache/spark/shuffle/sort/UnsafeShuffleWriterSuite.java
+++ b/core/src/test/java/org/apache/spark/shuffle/sort/UnsafeShuffleWriterSuite.java
@@ -139,7 +139,7 @@ public class UnsafeShuffleWriterSuite {
new Answer<InputStream>() {
@Override
public InputStream answer(InvocationOnMock invocation) throws Throwable {
- assert (invocation.getArguments()[0] instanceof TempShuffleBlockId);
+ assertTrue(invocation.getArguments()[0] instanceof TempShuffleBlockId);
InputStream is = (InputStream) invocation.getArguments()[1];
if (conf.getBoolean("spark.shuffle.compress", true)) {
return CompressionCodec$.MODULE$.createCodec(conf).compressedInputStream(is);
@@ -154,7 +154,7 @@ public class UnsafeShuffleWriterSuite {
new Answer<OutputStream>() {
@Override
public OutputStream answer(InvocationOnMock invocation) throws Throwable {
- assert (invocation.getArguments()[0] instanceof TempShuffleBlockId);
+ assertTrue(invocation.getArguments()[0] instanceof TempShuffleBlockId);
OutputStream os = (OutputStream) invocation.getArguments()[1];
if (conf.getBoolean("spark.shuffle.compress", true)) {
return CompressionCodec$.MODULE$.createCodec(conf).compressedOutputStream(os);
@@ -252,7 +252,7 @@ public class UnsafeShuffleWriterSuite {
createWriter(false).stop(false);
}
- class PandaException extends RuntimeException {
+ static class PandaException extends RuntimeException {
}
@Test(expected=PandaException.class)
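Note: marking PandaException static matters because a non-static inner class in Java keeps an implicit reference to its enclosing instance (here the whole test suite). A rough Scala analog of the same distinction (hypothetical names, not from the patch):

    class SuiteLike {
      class Inner extends RuntimeException            // each Inner carries a SuiteLike outer reference
    }
    object SuiteLike {
      class PandaException extends RuntimeException   // no outer reference, like a static nested class
    }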
diff --git a/core/src/test/java/org/apache/spark/util/collection/unsafe/sort/UnsafeExternalSorterSuite.java b/core/src/test/java/org/apache/spark/util/collection/unsafe/sort/UnsafeExternalSorterSuite.java
index 32f5a1a7e6..492fe49ba4 100644
--- a/core/src/test/java/org/apache/spark/util/collection/unsafe/sort/UnsafeExternalSorterSuite.java
+++ b/core/src/test/java/org/apache/spark/util/collection/unsafe/sort/UnsafeExternalSorterSuite.java
@@ -323,23 +323,23 @@ public class UnsafeExternalSorterSuite {
record[0] = (long) i;
sorter.insertRecord(record, Platform.LONG_ARRAY_OFFSET, recordSize, 0);
}
- assert(sorter.getNumberOfAllocatedPages() >= 2);
+ assertTrue(sorter.getNumberOfAllocatedPages() >= 2);
UnsafeExternalSorter.SpillableIterator iter =
(UnsafeExternalSorter.SpillableIterator) sorter.getSortedIterator();
int lastv = 0;
for (int i = 0; i < n / 3; i++) {
iter.hasNext();
iter.loadNext();
- assert(Platform.getLong(iter.getBaseObject(), iter.getBaseOffset()) == i);
+ assertTrue(Platform.getLong(iter.getBaseObject(), iter.getBaseOffset()) == i);
lastv = i;
}
- assert(iter.spill() > 0);
- assert(iter.spill() == 0);
- assert(Platform.getLong(iter.getBaseObject(), iter.getBaseOffset()) == lastv);
+ assertTrue(iter.spill() > 0);
+ assertEquals(0, iter.spill());
+ assertTrue(Platform.getLong(iter.getBaseObject(), iter.getBaseOffset()) == lastv);
for (int i = n / 3; i < n; i++) {
iter.hasNext();
iter.loadNext();
- assert(Platform.getLong(iter.getBaseObject(), iter.getBaseOffset()) == i);
+ assertEquals(i, Platform.getLong(iter.getBaseObject(), iter.getBaseOffset()));
}
sorter.cleanupResources();
assertSpillFilesWereCleanedUp();
@@ -355,15 +355,15 @@ public class UnsafeExternalSorterSuite {
record[0] = (long) i;
sorter.insertRecord(record, Platform.LONG_ARRAY_OFFSET, recordSize, 0);
}
- assert(sorter.getNumberOfAllocatedPages() >= 2);
+ assertTrue(sorter.getNumberOfAllocatedPages() >= 2);
UnsafeExternalSorter.SpillableIterator iter =
(UnsafeExternalSorter.SpillableIterator) sorter.getSortedIterator();
- assert(iter.spill() > 0);
- assert(iter.spill() == 0);
+ assertTrue(iter.spill() > 0);
+ assertEquals(0, iter.spill());
for (int i = 0; i < n; i++) {
iter.hasNext();
iter.loadNext();
- assert(Platform.getLong(iter.getBaseObject(), iter.getBaseOffset()) == i);
+ assertEquals(i, Platform.getLong(iter.getBaseObject(), iter.getBaseOffset()));
}
sorter.cleanupResources();
assertSpillFilesWereCleanedUp();
@@ -394,7 +394,7 @@ public class UnsafeExternalSorterSuite {
for (int i = 0; i < n; i++) {
iter.hasNext();
iter.loadNext();
- assert(Platform.getLong(iter.getBaseObject(), iter.getBaseOffset()) == i);
+ assertEquals(i, Platform.getLong(iter.getBaseObject(), iter.getBaseOffset()));
}
sorter.cleanupResources();
assertSpillFilesWereCleanedUp();
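Note: this file mixes assertTrue(x == y) and assertEquals(y, x); the latter is generally preferable because a failed assertTrue carries no message while assertEquals prints both values. A small hypothetical Scala demo (assumes JUnit 4; the object name is invented):

    import org.junit.Assert._

    object FailureMessages {
      def main(args: Array[String]): Unit = {
        val actual = 7L
        try assertTrue(actual == 8L)
        catch { case e: AssertionError => println(e.getMessage) } // null -- no detail
        try assertEquals(8L, actual)
        catch { case e: AssertionError => println(e.getMessage) } // expected:<8> but was:<7>
      }
    }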
diff --git a/core/src/test/java/org/apache/spark/util/collection/unsafe/sort/UnsafeInMemorySorterSuite.java b/core/src/test/java/org/apache/spark/util/collection/unsafe/sort/UnsafeInMemorySorterSuite.java
index 8e557ec0ab..ff41768df1 100644
--- a/core/src/test/java/org/apache/spark/util/collection/unsafe/sort/UnsafeInMemorySorterSuite.java
+++ b/core/src/test/java/org/apache/spark/util/collection/unsafe/sort/UnsafeInMemorySorterSuite.java
@@ -19,6 +19,7 @@ package org.apache.spark.util.collection.unsafe.sort;
import java.util.Arrays;
+import org.junit.Assert;
import org.junit.Test;
import org.apache.spark.HashPartitioner;
@@ -54,7 +55,7 @@ public class UnsafeInMemorySorterSuite {
mock(PrefixComparator.class),
100);
final UnsafeSorterIterator iter = sorter.getSortedIterator();
- assert(!iter.hasNext());
+ Assert.assertFalse(iter.hasNext());
}
@Test
diff --git a/core/src/test/scala/org/apache/spark/AccumulatorSuite.scala b/core/src/test/scala/org/apache/spark/AccumulatorSuite.scala
index 8acd0439b6..4ff8ae57ab 100644
--- a/core/src/test/scala/org/apache/spark/AccumulatorSuite.scala
+++ b/core/src/test/scala/org/apache/spark/AccumulatorSuite.scala
@@ -347,7 +347,7 @@ private class SaveInfoListener extends SparkListener {
def getCompletedStageInfos: Seq[StageInfo] = completedStageInfos.toArray.toSeq
def getCompletedTaskInfos: Seq[TaskInfo] = completedTaskInfos.values.flatten.toSeq
def getCompletedTaskInfos(stageId: StageId, stageAttemptId: StageAttemptId): Seq[TaskInfo] =
- completedTaskInfos.get((stageId, stageAttemptId)).getOrElse(Seq.empty[TaskInfo])
+ completedTaskInfos.getOrElse((stageId, stageAttemptId), Seq.empty[TaskInfo])
/**
* If `jobCompletionCallback` is set, block until the next call has finished.
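Note: map.get(key).getOrElse(default) and map.getOrElse(key, default) are equivalent -- both take the default by name, so it is only computed on a miss -- and the single call is the idiomatic form. A quick hypothetical sketch:

    val m = Map(("stage", 0) -> Seq("task1"))
    m.get(("stage", 0)).getOrElse(Seq.empty)  // Seq(task1), two calls
    m.getOrElse(("stage", 1), Seq.empty)      // Seq() -- default, computed only on a miss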
diff --git a/core/src/test/scala/org/apache/spark/SparkConfSuite.scala b/core/src/test/scala/org/apache/spark/SparkConfSuite.scala
index 2fe99e3f81..79881f30b2 100644
--- a/core/src/test/scala/org/apache/spark/SparkConfSuite.scala
+++ b/core/src/test/scala/org/apache/spark/SparkConfSuite.scala
@@ -237,7 +237,7 @@ class SparkConfSuite extends SparkFunSuite with LocalSparkContext with ResetSyst
conf.set(newName, "4")
assert(conf.get(newName) === "4")
- val count = conf.getAll.filter { case (k, v) => k.startsWith("spark.history.") }.size
+ val count = conf.getAll.count { case (k, v) => k.startsWith("spark.history.") }
assert(count === 4)
conf.set("spark.yarn.applicationMaster.waitTries", "42")
diff --git a/core/src/test/scala/org/apache/spark/rdd/ParallelCollectionSplitSuite.scala b/core/src/test/scala/org/apache/spark/rdd/ParallelCollectionSplitSuite.scala
index e7cc1617cd..31ce9483cf 100644
--- a/core/src/test/scala/org/apache/spark/rdd/ParallelCollectionSplitSuite.scala
+++ b/core/src/test/scala/org/apache/spark/rdd/ParallelCollectionSplitSuite.scala
@@ -101,7 +101,7 @@ class ParallelCollectionSplitSuite extends SparkFunSuite with Checkers {
val data = 1 until 100
val slices = ParallelCollectionRDD.slice(data, 3)
assert(slices.size === 3)
- assert(slices.map(_.size).reduceLeft(_ + _) === 99)
+ assert(slices.map(_.size).sum === 99)
assert(slices.forall(_.isInstanceOf[Range]))
}
@@ -109,7 +109,7 @@ class ParallelCollectionSplitSuite extends SparkFunSuite with Checkers {
val data = 1 to 100
val slices = ParallelCollectionRDD.slice(data, 3)
assert(slices.size === 3)
- assert(slices.map(_.size).reduceLeft(_ + _) === 100)
+ assert(slices.map(_.size).sum === 100)
assert(slices.forall(_.isInstanceOf[Range]))
}
@@ -202,7 +202,7 @@ class ParallelCollectionSplitSuite extends SparkFunSuite with Checkers {
val data = 1L until 100L
val slices = ParallelCollectionRDD.slice(data, 3)
assert(slices.size === 3)
- assert(slices.map(_.size).reduceLeft(_ + _) === 99)
+ assert(slices.map(_.size).sum === 99)
assert(slices.forall(_.isInstanceOf[NumericRange[_]]))
}
@@ -210,7 +210,7 @@ class ParallelCollectionSplitSuite extends SparkFunSuite with Checkers {
val data = 1L to 100L
val slices = ParallelCollectionRDD.slice(data, 3)
assert(slices.size === 3)
- assert(slices.map(_.size).reduceLeft(_ + _) === 100)
+ assert(slices.map(_.size).sum === 100)
assert(slices.forall(_.isInstanceOf[NumericRange[_]]))
}
@@ -218,7 +218,7 @@ class ParallelCollectionSplitSuite extends SparkFunSuite with Checkers {
val data = 1.0 until 100.0 by 1.0
val slices = ParallelCollectionRDD.slice(data, 3)
assert(slices.size === 3)
- assert(slices.map(_.size).reduceLeft(_ + _) === 99)
+ assert(slices.map(_.size).sum === 99)
assert(slices.forall(_.isInstanceOf[NumericRange[_]]))
}
@@ -226,7 +226,7 @@ class ParallelCollectionSplitSuite extends SparkFunSuite with Checkers {
val data = 1.0 to 100.0 by 1.0
val slices = ParallelCollectionRDD.slice(data, 3)
assert(slices.size === 3)
- assert(slices.map(_.size).reduceLeft(_ + _) === 100)
+ assert(slices.map(_.size).sum === 100)
assert(slices.forall(_.isInstanceOf[NumericRange[_]]))
}
diff --git a/core/src/test/scala/org/apache/spark/rdd/RDDSuite.scala b/core/src/test/scala/org/apache/spark/rdd/RDDSuite.scala
index 80347b800a..24daedab20 100644
--- a/core/src/test/scala/org/apache/spark/rdd/RDDSuite.scala
+++ b/core/src/test/scala/org/apache/spark/rdd/RDDSuite.scala
@@ -54,16 +54,16 @@ class RDDSuite extends SparkFunSuite with SharedSparkContext {
assert(!nums.isEmpty())
assert(nums.max() === 4)
assert(nums.min() === 1)
- val partitionSums = nums.mapPartitions(iter => Iterator(iter.reduceLeft(_ + _)))
+ val partitionSums = nums.mapPartitions(iter => Iterator(iter.sum))
assert(partitionSums.collect().toList === List(3, 7))
val partitionSumsWithSplit = nums.mapPartitionsWithIndex {
- case(split, iter) => Iterator((split, iter.reduceLeft(_ + _)))
+ case(split, iter) => Iterator((split, iter.sum))
}
assert(partitionSumsWithSplit.collect().toList === List((0, 3), (1, 7)))
val partitionSumsWithIndex = nums.mapPartitionsWithIndex {
- case(split, iter) => Iterator((split, iter.reduceLeft(_ + _)))
+ case(split, iter) => Iterator((split, iter.sum))
}
assert(partitionSumsWithIndex.collect().toList === List((0, 3), (1, 7)))
diff --git a/core/src/test/scala/org/apache/spark/scheduler/MapStatusSuite.scala b/core/src/test/scala/org/apache/spark/scheduler/MapStatusSuite.scala
index 56e0f01b3b..759d52fca5 100644
--- a/core/src/test/scala/org/apache/spark/scheduler/MapStatusSuite.scala
+++ b/core/src/test/scala/org/apache/spark/scheduler/MapStatusSuite.scala
@@ -79,7 +79,7 @@ class MapStatusSuite extends SparkFunSuite {
test("HighlyCompressedMapStatus: estimated size should be the average non-empty block size") {
val sizes = Array.tabulate[Long](3000) { i => i.toLong }
- val avg = sizes.sum / sizes.filter(_ != 0).length
+ val avg = sizes.sum / sizes.count(_ != 0)
val loc = BlockManagerId("a", "b", 10)
val status = MapStatus(loc, sizes)
val status1 = compressAndDecompressMapStatus(status)
diff --git a/core/src/test/scala/org/apache/spark/scheduler/TaskResultGetterSuite.scala b/core/src/test/scala/org/apache/spark/scheduler/TaskResultGetterSuite.scala
index b5385c11a9..935e280e60 100644
--- a/core/src/test/scala/org/apache/spark/scheduler/TaskResultGetterSuite.scala
+++ b/core/src/test/scala/org/apache/spark/scheduler/TaskResultGetterSuite.scala
@@ -243,7 +243,7 @@ class TaskResultGetterSuite extends SparkFunSuite with BeforeAndAfter with Local
val resAfter = captor.getValue
val resSizeBefore = resBefore.accumUpdates.find(_.name == Some(RESULT_SIZE)).flatMap(_.update)
val resSizeAfter = resAfter.accumUpdates.find(_.name == Some(RESULT_SIZE)).flatMap(_.update)
- assert(resSizeBefore.exists(_ == 0L))
+ assert(resSizeBefore.contains(0L))
assert(resSizeAfter.exists(_.toString.toLong > 0L))
}
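Note: Option.contains(x), available since Scala 2.11, expresses exists(_ == x) directly. A hypothetical sketch:

    val resSize: Option[Any] = Some(0L)
    resSize.exists(_ == 0L)            // true, via a predicate
    resSize.contains(0L)               // true, same result, clearer intent
    (None: Option[Any]).contains(0L)   // false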
diff --git a/core/src/test/scala/org/apache/spark/shuffle/sort/BypassMergeSortShuffleWriterSuite.scala b/core/src/test/scala/org/apache/spark/shuffle/sort/BypassMergeSortShuffleWriterSuite.scala
index fdacd8c9f5..cf9f9da1e6 100644
--- a/core/src/test/scala/org/apache/spark/shuffle/sort/BypassMergeSortShuffleWriterSuite.scala
+++ b/core/src/test/scala/org/apache/spark/shuffle/sort/BypassMergeSortShuffleWriterSuite.scala
@@ -166,7 +166,7 @@ class BypassMergeSortShuffleWriterSuite extends SparkFunSuite with BeforeAndAfte
writer.stop( /* success = */ true)
assert(temporaryFilesCreated.nonEmpty)
assert(writer.getPartitionLengths.sum === outputFile.length())
- assert(writer.getPartitionLengths.filter(_ == 0L).size === 4) // should be 4 zero length files
+ assert(writer.getPartitionLengths.count(_ == 0L) === 4) // should be 4 zero length files
assert(temporaryFilesCreated.count(_.exists()) === 0) // check that temporary files were deleted
val shuffleWriteMetrics = taskContext.taskMetrics().shuffleWriteMetrics.get
assert(shuffleWriteMetrics.bytesWritten === outputFile.length())
diff --git a/core/src/test/scala/org/apache/spark/util/SparkConfWithEnv.scala b/core/src/test/scala/org/apache/spark/util/SparkConfWithEnv.scala
index ddd5edf4f7..0c8b8cfdd5 100644
--- a/core/src/test/scala/org/apache/spark/util/SparkConfWithEnv.scala
+++ b/core/src/test/scala/org/apache/spark/util/SparkConfWithEnv.scala
@@ -23,9 +23,7 @@ import org.apache.spark.SparkConf
* Customized SparkConf that allows env variables to be overridden.
*/
class SparkConfWithEnv(env: Map[String, String]) extends SparkConf(false) {
- override def getenv(name: String): String = {
- env.get(name).getOrElse(super.getenv(name))
- }
+ override def getenv(name: String): String = env.getOrElse(name, super.getenv(name))
override def clone: SparkConf = {
new SparkConfWithEnv(env).setAll(getAll)