aboutsummaryrefslogtreecommitdiff
path: root/core
diff options
context:
space:
mode:
authorSean Owen <sowen@cloudera.com>2016-02-18 12:14:30 -0800
committerAndrew Or <andrew@databricks.com>2016-02-18 12:14:30 -0800
commit78562535feb6e214520b29e0bbdd4b1302f01e93 (patch)
treef3d776010ebdffbe0a8457271d495943800a404e /core
parent892b2dd6dd00d2088c75ab3c8443e8b3e44e5803 (diff)
downloadspark-78562535feb6e214520b29e0bbdd4b1302f01e93.tar.gz
spark-78562535feb6e214520b29e0bbdd4b1302f01e93.tar.bz2
spark-78562535feb6e214520b29e0bbdd4b1302f01e93.zip
[SPARK-13371][CORE][STRING] TaskSetManager.dequeueSpeculativeTask compares Option and String directly.
## What changes were proposed in this pull request? Fix some comparisons between unequal types that cause IJ warnings and in at least one case a likely bug (TaskSetManager) ## How was this patch tested? Running Jenkins tests Author: Sean Owen <sowen@cloudera.com> Closes #11253 from srowen/SPARK-13371.
Diffstat (limited to 'core')
-rw-r--r--core/src/main/scala/org/apache/spark/deploy/FaultToleranceTest.scala2
-rw-r--r--core/src/main/scala/org/apache/spark/scheduler/TaskSetManager.scala2
-rw-r--r--core/src/test/scala/org/apache/spark/CheckpointSuite.scala4
-rw-r--r--core/src/test/scala/org/apache/spark/PartitioningSuite.scala4
-rw-r--r--core/src/test/scala/org/apache/spark/ui/UISeleniumSuite.scala6
5 files changed, 11 insertions, 7 deletions
diff --git a/core/src/main/scala/org/apache/spark/deploy/FaultToleranceTest.scala b/core/src/main/scala/org/apache/spark/deploy/FaultToleranceTest.scala
index 15d220d01b..434aadd2c6 100644
--- a/core/src/main/scala/org/apache/spark/deploy/FaultToleranceTest.scala
+++ b/core/src/main/scala/org/apache/spark/deploy/FaultToleranceTest.scala
@@ -252,7 +252,7 @@ private object FaultToleranceTest extends App with Logging {
val f = Future {
try {
val res = sc.parallelize(0 until 10).collect()
- assertTrue(res.toList == (0 until 10))
+ assertTrue(res.toList == (0 until 10).toList)
true
} catch {
case e: Exception =>
diff --git a/core/src/main/scala/org/apache/spark/scheduler/TaskSetManager.scala b/core/src/main/scala/org/apache/spark/scheduler/TaskSetManager.scala
index 05cfa52f16..2b0eab7169 100644
--- a/core/src/main/scala/org/apache/spark/scheduler/TaskSetManager.scala
+++ b/core/src/main/scala/org/apache/spark/scheduler/TaskSetManager.scala
@@ -338,7 +338,7 @@ private[spark] class TaskSetManager(
if (TaskLocality.isAllowed(locality, TaskLocality.RACK_LOCAL)) {
for (rack <- sched.getRackForHost(host)) {
for (index <- speculatableTasks if canRunOnHost(index)) {
- val racks = tasks(index).preferredLocations.map(_.host).map(sched.getRackForHost)
+ val racks = tasks(index).preferredLocations.map(_.host).flatMap(sched.getRackForHost)
if (racks.contains(rack)) {
speculatableTasks -= index
return Some((index, TaskLocality.RACK_LOCAL))
diff --git a/core/src/test/scala/org/apache/spark/CheckpointSuite.scala b/core/src/test/scala/org/apache/spark/CheckpointSuite.scala
index ce35856dce..9f94e36324 100644
--- a/core/src/test/scala/org/apache/spark/CheckpointSuite.scala
+++ b/core/src/test/scala/org/apache/spark/CheckpointSuite.scala
@@ -54,7 +54,7 @@ trait RDDCheckpointTester { self: SparkFunSuite =>
// Generate the final RDD using given RDD operation
val baseRDD = generateFatRDD()
val operatedRDD = op(baseRDD)
- val parentRDD = operatedRDD.dependencies.headOption.orNull
+ val parentDependency = operatedRDD.dependencies.headOption.orNull
val rddType = operatedRDD.getClass.getSimpleName
val numPartitions = operatedRDD.partitions.length
@@ -82,7 +82,7 @@ trait RDDCheckpointTester { self: SparkFunSuite =>
}
// Test whether dependencies have been changed from its earlier parent RDD
- assert(operatedRDD.dependencies.head.rdd != parentRDD)
+ assert(operatedRDD.dependencies.head != parentDependency)
// Test whether the partitions have been changed from its earlier partitions
assert(operatedRDD.partitions.toList != partitionsBeforeCheckpoint.toList)
diff --git a/core/src/test/scala/org/apache/spark/PartitioningSuite.scala b/core/src/test/scala/org/apache/spark/PartitioningSuite.scala
index aa8028792c..3d31c7864e 100644
--- a/core/src/test/scala/org/apache/spark/PartitioningSuite.scala
+++ b/core/src/test/scala/org/apache/spark/PartitioningSuite.scala
@@ -163,8 +163,8 @@ class PartitioningSuite extends SparkFunSuite with SharedSparkContext with Priva
val hashP2 = new HashPartitioner(2)
assert(rangeP2 === rangeP2)
assert(hashP2 === hashP2)
- assert(hashP2 != rangeP2)
- assert(rangeP2 != hashP2)
+ assert(hashP2 !== rangeP2)
+ assert(rangeP2 !== hashP2)
}
test("partitioner preservation") {
diff --git a/core/src/test/scala/org/apache/spark/ui/UISeleniumSuite.scala b/core/src/test/scala/org/apache/spark/ui/UISeleniumSuite.scala
index aa22f3ba2b..b0a35fe8c3 100644
--- a/core/src/test/scala/org/apache/spark/ui/UISeleniumSuite.scala
+++ b/core/src/test/scala/org/apache/spark/ui/UISeleniumSuite.scala
@@ -289,7 +289,11 @@ class UISeleniumSuite extends SparkFunSuite with WebBrowser with Matchers with B
JInt(stageId) <- stage \ "stageId"
JInt(attemptId) <- stage \ "attemptId"
} {
- val exp = if (attemptId == 0 && stageId == 1) StageStatus.FAILED else StageStatus.COMPLETE
+ val exp = if (attemptId.toInt == 0 && stageId.toInt == 1) {
+ StageStatus.FAILED
+ } else {
+ StageStatus.COMPLETE
+ }
status should be (exp.name())
}