Diffstat (limited to 'core')
 core/pom.xml                                                                   |  6 +++---
 core/src/test/scala/org/apache/spark/scheduler/DAGSchedulerSuite.scala         | 12 +++++-------
 core/src/test/scala/org/apache/spark/scheduler/SchedulerIntegrationSuite.scala |  1 +
 core/src/test/scala/org/apache/spark/scheduler/TaskContextSuite.scala          |  9 ++++-----
 4 files changed, 13 insertions(+), 15 deletions(-)
diff --git a/core/pom.xml b/core/pom.xml
index f5fdb40696..90c8f97f2b 100644
--- a/core/pom.xml
+++ b/core/pom.xml
@@ -356,12 +356,12 @@
             <phase>generate-resources</phase>
             <configuration>
               <!-- Execute the shell script to generate the spark build information. -->
-              <tasks>
+              <target>
                 <exec executable="${project.basedir}/../build/spark-build-info">
                   <arg value="${project.build.directory}/extra-resources"/>
-                  <arg value="${pom.version}"/>
+                  <arg value="${project.version}"/>
                 </exec>
-              </tasks>
+              </target>
             </configuration>
             <goals>
               <goal>run</goal>
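Note on this hunk: maven-antrun-plugin deprecated the <tasks> configuration element in favor of <target>, and ${pom.version} is a deprecated legacy alias for ${project.version}; both replacements are the supported spellings.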
diff --git a/core/src/test/scala/org/apache/spark/scheduler/DAGSchedulerSuite.scala b/core/src/test/scala/org/apache/spark/scheduler/DAGSchedulerSuite.scala
index f28f429e0c..3c30ec8ee8 100644
--- a/core/src/test/scala/org/apache/spark/scheduler/DAGSchedulerSuite.scala
+++ b/core/src/test/scala/org/apache/spark/scheduler/DAGSchedulerSuite.scala
@@ -1602,13 +1602,11 @@ class DAGSchedulerSuite extends SparkFunSuite with LocalSparkContext with Timeou
   }

   test("misbehaved accumulator should not crash DAGScheduler and SparkContext") {
-    val acc = new Accumulator[Int](0, new AccumulatorParam[Int] {
-      override def addAccumulator(t1: Int, t2: Int): Int = t1 + t2
-      override def zero(initialValue: Int): Int = 0
-      override def addInPlace(r1: Int, r2: Int): Int = {
-        throw new DAGSchedulerSuiteDummyException
-      }
-    })
+    val acc = new LongAccumulator {
+      override def add(v: java.lang.Long): Unit = throw new DAGSchedulerSuiteDummyException
+      override def add(v: Long): Unit = throw new DAGSchedulerSuiteDummyException
+    }
+    sc.register(acc)

     // Run this on executors
     sc.parallelize(1 to 10, 2).foreach { item => acc.add(1) }
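For readers unfamiliar with the API this test migrates to: in Spark 2.x the AccumulatorV2 family (LongAccumulator and friends) replaces Accumulator/AccumulatorParam, and an accumulator is constructed directly and then registered on the SparkContext. A minimal, self-contained sketch of the well-behaved version of the pattern (object and accumulator names are ours, not from the patch):

import org.apache.spark.SparkContext
import org.apache.spark.util.LongAccumulator

object AccumulatorV2Sketch {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext("local[2]", "accumulator-v2-sketch")
    // New style: construct the accumulator directly, then register it so that
    // updates made on executors are merged back into this driver-side instance.
    val acc = new LongAccumulator
    sc.register(acc, "counted")
    sc.parallelize(1 to 10, 2).foreach(_ => acc.add(1))
    assert(acc.sum == 10)  // all 10 task-side increments are reflected here
    sc.stop()
  }
}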
diff --git a/core/src/test/scala/org/apache/spark/scheduler/SchedulerIntegrationSuite.scala b/core/src/test/scala/org/apache/spark/scheduler/SchedulerIntegrationSuite.scala
index 5271a5671a..54b7312991 100644
--- a/core/src/test/scala/org/apache/spark/scheduler/SchedulerIntegrationSuite.scala
+++ b/core/src/test/scala/org/apache/spark/scheduler/SchedulerIntegrationSuite.scala
@@ -22,6 +22,7 @@ import java.util.concurrent.atomic.AtomicBoolean
 import scala.collection.mutable.{ArrayBuffer, HashMap, HashSet}
 import scala.concurrent.{Await, Future}
 import scala.concurrent.duration.{Duration, SECONDS}
+import scala.language.existentials
 import scala.reflect.ClassTag

 import org.scalactic.TripleEquals
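The added import enables Scala's existential-types language feature: without it, a definition whose inferred type contains a forSome clause draws a feature warning under -feature, which breaks builds that treat warnings as errors. Presumably some type inferred in this suite becomes existential after the accumulator rework.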
diff --git a/core/src/test/scala/org/apache/spark/scheduler/TaskContextSuite.scala b/core/src/test/scala/org/apache/spark/scheduler/TaskContextSuite.scala
index 368668bc7e..9eda79ace1 100644
--- a/core/src/test/scala/org/apache/spark/scheduler/TaskContextSuite.scala
+++ b/core/src/test/scala/org/apache/spark/scheduler/TaskContextSuite.scala
@@ -146,14 +146,13 @@ class TaskContextSuite extends SparkFunSuite with BeforeAndAfter with LocalSpark
test("accumulators are updated on exception failures") {
// This means use 1 core and 4 max task failures
sc = new SparkContext("local[1,4]", "test")
- val param = AccumulatorParam.LongAccumulatorParam
// Create 2 accumulators, one that counts failed values and another that doesn't
- val acc1 = new Accumulator(0L, param, Some("x"), countFailedValues = true)
- val acc2 = new Accumulator(0L, param, Some("y"), countFailedValues = false)
+ val acc1 = AccumulatorSuite.createLongAccum("x", true)
+ val acc2 = AccumulatorSuite.createLongAccum("y", false)
// Fail first 3 attempts of every task. This means each task should be run 4 times.
sc.parallelize(1 to 10, 10).map { i =>
- acc1 += 1
- acc2 += 1
+ acc1.add(1)
+ acc2.add(1)
if (TaskContext.get.attemptNumber() <= 2) {
throw new Exception("you did something wrong")
} else {
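As a check on the test's arithmetic: local[1,4] allows 4 attempts per task, and the first 3 attempts of each of the 10 single-element tasks throw, so every task runs 4 times. The accumulator created with countFailedValues = true should therefore end at 10 * 4 = 40 updates, while the one with countFailedValues = false keeps only the updates from successful attempts and ends at 10.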