author     Sean Owen <sowen@cloudera.com>        2016-06-14 09:40:07 -0700
committer  Wenchen Fan <wenchen@databricks.com>  2016-06-14 09:40:07 -0700
commit     6151d2641f91c8e3ec0c324e78afb46cdb2ef111 (patch)
tree       e183d9aa95c468b340d8e0b1b4a19ca0982a4148 /core/src
parent     6e8cdef0cf36f6e921d9e1a65c61b66196935820 (diff)
[MINOR] Clean up several build warnings, mostly due to internal use of old accumulators
## What changes were proposed in this pull request?

Another PR to clean up recent build warnings. In particular, this one cleans up several instances of old accumulator API usage in tests that are straightforward to update. I think this qualifies as "minor".

## How was this patch tested?

Jenkins

Author: Sean Owen <sowen@cloudera.com>

Closes #13642 from srowen/BuildWarnings.
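For illustration (not part of this commit), a minimal sketch of the API migration involved, using public SparkContext helpers from Spark 2.x: the deprecated `Accumulator`/`+=` pattern being removed versus the `AccumulatorV2`-based replacement:

```scala
// Old (pre-2.0, deprecated) accumulator API: the pattern these tests move away from.
val oldAcc = sc.accumulator(0L, "counter")   // org.apache.spark.Accumulator[Long]
sc.parallelize(1 to 100).foreach { _ => oldAcc += 1 }

// New AccumulatorV2-based API: the pattern the tests move to.
val newAcc = sc.longAccumulator("counter")   // org.apache.spark.util.LongAccumulator
sc.parallelize(1 to 100).foreach { _ => newAcc.add(1) }
println(newAcc.value)                        // read on the driver after the job completes
```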
Diffstat (limited to 'core/src')
-rw-r--r--  core/src/test/scala/org/apache/spark/scheduler/DAGSchedulerSuite.scala         | 12
-rw-r--r--  core/src/test/scala/org/apache/spark/scheduler/SchedulerIntegrationSuite.scala |  1
-rw-r--r--  core/src/test/scala/org/apache/spark/scheduler/TaskContextSuite.scala          |  9
3 files changed, 10 insertions(+), 12 deletions(-)
diff --git a/core/src/test/scala/org/apache/spark/scheduler/DAGSchedulerSuite.scala b/core/src/test/scala/org/apache/spark/scheduler/DAGSchedulerSuite.scala
index f28f429e0c..3c30ec8ee8 100644
--- a/core/src/test/scala/org/apache/spark/scheduler/DAGSchedulerSuite.scala
+++ b/core/src/test/scala/org/apache/spark/scheduler/DAGSchedulerSuite.scala
@@ -1602,13 +1602,11 @@ class DAGSchedulerSuite extends SparkFunSuite with LocalSparkContext with Timeou
   }
 
   test("misbehaved accumulator should not crash DAGScheduler and SparkContext") {
-    val acc = new Accumulator[Int](0, new AccumulatorParam[Int] {
-      override def addAccumulator(t1: Int, t2: Int): Int = t1 + t2
-      override def zero(initialValue: Int): Int = 0
-      override def addInPlace(r1: Int, r2: Int): Int = {
-        throw new DAGSchedulerSuiteDummyException
-      }
-    })
+    val acc = new LongAccumulator {
+      override def add(v: java.lang.Long): Unit = throw new DAGSchedulerSuiteDummyException
+      override def add(v: Long): Unit = throw new DAGSchedulerSuiteDummyException
+    }
+    sc.register(acc)
 
     // Run this on executors
     sc.parallelize(1 to 10, 2).foreach { item => acc.add(1) }
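For context on the `sc.register(acc)` call above: with AccumulatorV2, an accumulator is constructed directly and then registered with the SparkContext (optionally under a name) before use. A minimal sketch of that pattern, independent of this test:

```scala
import org.apache.spark.util.LongAccumulator

val acc = new LongAccumulator
sc.register(acc, "myCounter")   // the name is optional; sc.register(acc) also works
sc.parallelize(1 to 5).foreach(i => acc.add(i))
// acc.value == 15 on the driver once the job completes
```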
diff --git a/core/src/test/scala/org/apache/spark/scheduler/SchedulerIntegrationSuite.scala b/core/src/test/scala/org/apache/spark/scheduler/SchedulerIntegrationSuite.scala
index 5271a5671a..54b7312991 100644
--- a/core/src/test/scala/org/apache/spark/scheduler/SchedulerIntegrationSuite.scala
+++ b/core/src/test/scala/org/apache/spark/scheduler/SchedulerIntegrationSuite.scala
@@ -22,6 +22,7 @@ import java.util.concurrent.atomic.AtomicBoolean
 import scala.collection.mutable.{ArrayBuffer, HashMap, HashSet}
 import scala.concurrent.{Await, Future}
 import scala.concurrent.duration.{Duration, SECONDS}
+import scala.language.existentials
 import scala.reflect.ClassTag
 
 import org.scalactic.TripleEquals
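The `scala.language.existentials` import silences the `-feature` warning the compiler emits when it infers an existential type. A standalone sketch (the `Box` class is hypothetical, not from this suite) of the kind of inference that can trigger it:

```scala
import scala.language.existentials

class Box[T](val value: T)

// The inferred element type here is existential (roughly Box[_]); under
// -feature, such inference can warn unless the language import is in scope.
val boxes = Seq(new Box(1), new Box("a"))
```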
diff --git a/core/src/test/scala/org/apache/spark/scheduler/TaskContextSuite.scala b/core/src/test/scala/org/apache/spark/scheduler/TaskContextSuite.scala
index 368668bc7e..9eda79ace1 100644
--- a/core/src/test/scala/org/apache/spark/scheduler/TaskContextSuite.scala
+++ b/core/src/test/scala/org/apache/spark/scheduler/TaskContextSuite.scala
@@ -146,14 +146,13 @@ class TaskContextSuite extends SparkFunSuite with BeforeAndAfter with LocalSpark
test("accumulators are updated on exception failures") {
// This means use 1 core and 4 max task failures
sc = new SparkContext("local[1,4]", "test")
- val param = AccumulatorParam.LongAccumulatorParam
// Create 2 accumulators, one that counts failed values and another that doesn't
- val acc1 = new Accumulator(0L, param, Some("x"), countFailedValues = true)
- val acc2 = new Accumulator(0L, param, Some("y"), countFailedValues = false)
+ val acc1 = AccumulatorSuite.createLongAccum("x", true)
+ val acc2 = AccumulatorSuite.createLongAccum("y", false)
// Fail first 3 attempts of every task. This means each task should be run 4 times.
sc.parallelize(1 to 10, 10).map { i =>
- acc1 += 1
- acc2 += 1
+ acc1.add(1)
+ acc2.add(1)
if (TaskContext.get.attemptNumber() <= 2) {
throw new Exception("you did something wrong")
} else {
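A note on `AccumulatorSuite.createLongAccum`: it is a test-only helper, needed because `countFailedValues` is carried in accumulator metadata and is not settable through the public `sc.register` overloads. A hedged reconstruction of what such a helper has to do (the `register` call below is `private[spark]`, which is why this lives in Spark's test sources rather than user code):

```scala
// Hypothetical sketch: create a LongAccumulator whose metadata records
// countFailedValues, i.e. whether updates from failed task attempts are
// merged back on the driver.
def createLongAccum(name: String, countFailedValues: Boolean = false): LongAccumulator = {
  val acc = new LongAccumulator
  acc.register(sc, Some(name), countFailedValues)   // private[spark] API
  acc
}
```

In the test above, each of the 10 tasks runs 4 times (3 induced failures plus 1 success), so `acc1` (`countFailedValues = true`) should end up counting all 40 attempted increments while `acc2` counts only the 10 from successful attempts.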