Diffstat (limited to 'core/src')
 core/src/main/scala/org/apache/spark/api/r/RBackendHandler.scala                                      |  4 ++--
 core/src/main/scala/org/apache/spark/deploy/Client.scala                                              | 14 ++++++++------
 core/src/main/scala/org/apache/spark/rdd/CoalescedRDD.scala                                           | 14 +++++++-------
 core/src/main/scala/org/apache/spark/rdd/HadoopRDD.scala                                              |  4 ++--
 core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosCoarseGrainedSchedulerBackend.scala |  4 +---
 core/src/test/scala/org/apache/spark/ImplicitOrderingSuite.scala                                      |  4 ++--
 core/src/test/scala/org/apache/spark/PartitioningSuite.scala                                          |  6 +++---
 core/src/test/scala/org/apache/spark/rdd/PipedRDDSuite.scala                                          |  4 ++--
 core/src/test/scala/org/apache/spark/util/TimeStampedHashMapSuite.scala                               |  4 ++--
 9 files changed, 29 insertions(+), 29 deletions(-)
diff --git a/core/src/main/scala/org/apache/spark/api/r/RBackendHandler.scala b/core/src/main/scala/org/apache/spark/api/r/RBackendHandler.scala
index c416e835a9..7d5348266b 100644
--- a/core/src/main/scala/org/apache/spark/api/r/RBackendHandler.scala
+++ b/core/src/main/scala/org/apache/spark/api/r/RBackendHandler.scala
@@ -198,7 +198,7 @@ private[r] class RBackendHandler(server: RBackend)
args: Array[Object]): Option[Int] = {
val numArgs = args.length
- for (index <- 0 until parameterTypesOfMethods.length) {
+ for (index <- parameterTypesOfMethods.indices) {
val parameterTypes = parameterTypesOfMethods(index)
if (parameterTypes.length == numArgs) {
@@ -240,7 +240,7 @@ private[r] class RBackendHandler(server: RBackend)
// Convert args if needed
val parameterTypes = parameterTypesOfMethods(index)
- (0 until numArgs).map { i =>
+ for (i <- 0 until numArgs) {
if (parameterTypes(i) == classOf[Seq[Any]] && args(i).getClass.isArray) {
// Convert a Java array to scala Seq
args(i) = args(i).asInstanceOf[Array[_]].toSeq
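A minimal standalone sketch of the two idioms these hunks adopt (the array contents and names are illustrative, not the Spark sources): `.indices` instead of spelling out `0 until xs.length`, and a `for` loop instead of `.map` when the body runs only for its side effects.

object IndicesAndForSketch {
  def main(args: Array[String]): Unit = {
    val parameterTypes = Array("Int", "String", "Double")

    // Before: for (index <- 0 until parameterTypes.length)
    // After:  .indices gives the same range without repeating the length.
    for (index <- parameterTypes.indices) {
      println(s"parameter $index has declared type ${parameterTypes(index)}")
    }

    // (0 until numArgs).map { i => ... } would build an IndexedSeq[Unit]
    // that is immediately discarded; a for loop says that only the side
    // effect of the body matters.
    val numArgs = 3
    for (i <- 0 until numArgs) {
      println(s"converting argument $i if needed")
    }
  }
}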
diff --git a/core/src/main/scala/org/apache/spark/deploy/Client.scala b/core/src/main/scala/org/apache/spark/deploy/Client.scala
index 640f25f504..bf2dab6e71 100644
--- a/core/src/main/scala/org/apache/spark/deploy/Client.scala
+++ b/core/src/main/scala/org/apache/spark/deploy/Client.scala
@@ -116,7 +116,7 @@ private class ClientEndpoint(
}
/* Find out driver status then exit the JVM */
- def pollAndReportStatus(driverId: String) {
+ def pollAndReportStatus(driverId: String): Unit = {
// Since ClientEndpoint is the only RpcEndpoint in the process, blocking the event loop thread
// is fine.
logInfo("... waiting before polling master for driver state")
@@ -137,12 +137,14 @@ private class ClientEndpoint(
case _ =>
}
// Exception, if present
- statusResponse.exception.map { e =>
- logError(s"Exception from cluster was: $e")
- e.printStackTrace()
- System.exit(-1)
+ statusResponse.exception match {
+ case Some(e) =>
+ logError(s"Exception from cluster was: $e")
+ e.printStackTrace()
+ System.exit(-1)
+ case _ =>
+ System.exit(0)
}
- System.exit(0)
}
}
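A self-contained sketch of the control flow this change makes explicit (the `report` helper and exit statuses are illustrative): `Option.map` runs its body only for `Some`, so the old code relied on `System.exit(-1)` never returning to keep the trailing `System.exit(0)` from firing; matching on the option puts both outcomes in one place.

object ExitOnExceptionSketch {
  // Decide the exit status from an optional failure, handling both branches
  // explicitly instead of side-effecting inside .map and falling through.
  def report(maybeError: Option[Exception]): Int =
    maybeError match {
      case Some(e) =>
        Console.err.println(s"Exception from cluster was: $e")
        -1 // failure exit status
      case None =>
        0  // success exit status
    }

  def main(args: Array[String]): Unit = {
    assert(report(None) == 0)
    assert(report(Some(new RuntimeException("driver failed"))) == -1)
    println("both branches handled in one match")
  }
}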
diff --git a/core/src/main/scala/org/apache/spark/rdd/CoalescedRDD.scala b/core/src/main/scala/org/apache/spark/rdd/CoalescedRDD.scala
index 2ec9846e33..9c198a61f3 100644
--- a/core/src/main/scala/org/apache/spark/rdd/CoalescedRDD.scala
+++ b/core/src/main/scala/org/apache/spark/rdd/CoalescedRDD.scala
@@ -183,14 +183,14 @@ private class DefaultPartitionCoalescer(val balanceSlack: Double = 0.10)
getAllPrefLocs(prev)
- // gets all the preffered locations of the previous RDD and splits them into partitions
+ // gets all the preferred locations of the previous RDD and splits them into partitions
// with preferred locations and ones without
- def getAllPrefLocs(prev: RDD[_]) {
+ def getAllPrefLocs(prev: RDD[_]): Unit = {
val tmpPartsWithLocs = mutable.LinkedHashMap[Partition, Seq[String]]()
// first get the locations for each partition, only do this once since it can be expensive
prev.partitions.foreach(p => {
val locs = currPrefLocs(p, prev)
- if (locs.size > 0) {
+ if (locs.nonEmpty) {
tmpPartsWithLocs.put(p, locs)
} else {
partsWithoutLocs += p
@@ -198,13 +198,13 @@ private class DefaultPartitionCoalescer(val balanceSlack: Double = 0.10)
}
)
// convert it into an array of host to partition
- (0 to 2).map(x =>
- tmpPartsWithLocs.foreach(parts => {
+ for (x <- 0 to 2) {
+ tmpPartsWithLocs.foreach { parts =>
val p = parts._1
val locs = parts._2
if (locs.size > x) partsWithLocs += ((locs(x), p))
- } )
- )
+ }
+ }
}
}
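A small sketch with toy partition data (the host and partition names are made up) of the two idioms adopted here: `nonEmpty` rather than `size > 0`, and a `for`/`foreach` nesting rather than `(0 to 2).map(...)`, which would have built and discarded a collection of `Unit`.

import scala.collection.mutable

object PrefLocsSketch {
  def main(args: Array[String]): Unit = {
    // Toy stand-in for partitions and their preferred host lists.
    val tmpPartsWithLocs = mutable.LinkedHashMap(
      "part-0" -> Seq("hostA", "hostB"),
      "part-1" -> Seq("hostC")
    )
    val partsWithLocs = mutable.ArrayBuffer[(String, String)]()

    // nonEmpty reads more directly than size > 0.
    assert(tmpPartsWithLocs.nonEmpty)

    // Round-robin over up to three preferred locations per partition.
    for (x <- 0 to 2) {
      tmpPartsWithLocs.foreach { case (part, locs) =>
        if (locs.size > x) partsWithLocs += ((locs(x), part))
      }
    }
    println(partsWithLocs.mkString(", "))
  }
}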
diff --git a/core/src/main/scala/org/apache/spark/rdd/HadoopRDD.scala b/core/src/main/scala/org/apache/spark/rdd/HadoopRDD.scala
index 515fd6f4e2..99afe0250c 100644
--- a/core/src/main/scala/org/apache/spark/rdd/HadoopRDD.scala
+++ b/core/src/main/scala/org/apache/spark/rdd/HadoopRDD.scala
@@ -155,7 +155,7 @@ class HadoopRDD[K, V](
logDebug("Cloning Hadoop Configuration")
val newJobConf = new JobConf(conf)
if (!conf.isInstanceOf[JobConf]) {
- initLocalJobConfFuncOpt.map(f => f(newJobConf))
+ initLocalJobConfFuncOpt.foreach(f => f(newJobConf))
}
newJobConf
}
@@ -174,7 +174,7 @@ class HadoopRDD[K, V](
HadoopRDD.CONFIGURATION_INSTANTIATION_LOCK.synchronized {
logDebug("Creating new JobConf and caching it for later re-use")
val newJobConf = new JobConf(conf)
- initLocalJobConfFuncOpt.map(f => f(newJobConf))
+ initLocalJobConfFuncOpt.foreach(f => f(newJobConf))
HadoopRDD.putCachedMetadata(jobConfCacheKey, newJobConf)
newJobConf
}
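A minimal sketch of the `Option.foreach` idiom used in both hunks (the `Conf` class below is a placeholder, not Hadoop's `JobConf`): on an `Option`, `map` implies a transformed value is wanted, while `foreach` says the function is applied only for its side effect.

object OptionForeachSketch {
  // Placeholder for a mutable configuration object.
  final class Conf { var entries = Map.empty[String, String] }

  def main(args: Array[String]): Unit = {
    val initConfFuncOpt: Option[Conf => Unit] =
      Some(conf => conf.entries += ("example.key" -> "example.value"))

    val newConf = new Conf

    // Before: initConfFuncOpt.map(f => f(newConf))   // builds an Option[Unit]
    // After:  foreach makes the side-effecting intent explicit.
    initConfFuncOpt.foreach(f => f(newConf))

    println(newConf.entries)
  }
}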
diff --git a/core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosCoarseGrainedSchedulerBackend.scala b/core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosCoarseGrainedSchedulerBackend.scala
index 959d6fd46d..263e6197a6 100644
--- a/core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosCoarseGrainedSchedulerBackend.scala
+++ b/core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosCoarseGrainedSchedulerBackend.scala
@@ -220,9 +220,7 @@ private[spark] class MesosCoarseGrainedSchedulerBackend(
command.addUris(CommandInfo.URI.newBuilder().setValue(uri.get))
}
- conf.getOption("spark.mesos.uris").map { uris =>
- setupUris(uris, command)
- }
+ conf.getOption("spark.mesos.uris").foreach(setupUris(_, command))
command.build()
}
diff --git a/core/src/test/scala/org/apache/spark/ImplicitOrderingSuite.scala b/core/src/test/scala/org/apache/spark/ImplicitOrderingSuite.scala
index 939f12f94f..b9d18119b5 100644
--- a/core/src/test/scala/org/apache/spark/ImplicitOrderingSuite.scala
+++ b/core/src/test/scala/org/apache/spark/ImplicitOrderingSuite.scala
@@ -30,11 +30,11 @@ class ImplicitOrderingSuite extends SparkFunSuite with LocalSparkContext {
// Infer orderings after basic maps to particular types
val basicMapExpectations = ImplicitOrderingSuite.basicMapExpectations(rdd)
- basicMapExpectations.map({case (met, explain) => assert(met, explain)})
+ basicMapExpectations.foreach { case (met, explain) => assert(met, explain) }
// Infer orderings for other RDD methods
val otherRDDMethodExpectations = ImplicitOrderingSuite.otherRDDMethodExpectations(rdd)
- otherRDDMethodExpectations.map({case (met, explain) => assert(met, explain)})
+ otherRDDMethodExpectations.foreach { case (met, explain) => assert(met, explain) }
}
}
diff --git a/core/src/test/scala/org/apache/spark/PartitioningSuite.scala b/core/src/test/scala/org/apache/spark/PartitioningSuite.scala
index c5d4968ef7..34c017806f 100644
--- a/core/src/test/scala/org/apache/spark/PartitioningSuite.scala
+++ b/core/src/test/scala/org/apache/spark/PartitioningSuite.scala
@@ -71,9 +71,9 @@ class PartitioningSuite extends SparkFunSuite with SharedSparkContext with Priva
val partitionSizes = List(1, 2, 10, 100, 500, 1000, 1500)
val partitioners = partitionSizes.map(p => (p, new RangePartitioner(p, rdd)))
val decoratedRangeBounds = PrivateMethod[Array[Int]]('rangeBounds)
- partitioners.map { case (numPartitions, partitioner) =>
+ partitioners.foreach { case (numPartitions, partitioner) =>
val rangeBounds = partitioner.invokePrivate(decoratedRangeBounds())
- 1.to(1000).map { element => {
+ for (element <- 1 to 1000) {
val partition = partitioner.getPartition(element)
if (numPartitions > 1) {
if (partition < rangeBounds.size) {
@@ -85,7 +85,7 @@ class PartitioningSuite extends SparkFunSuite with SharedSparkContext with Priva
} else {
assert(partition === 0)
}
- }}
+ }
}
}
diff --git a/core/src/test/scala/org/apache/spark/rdd/PipedRDDSuite.scala b/core/src/test/scala/org/apache/spark/rdd/PipedRDDSuite.scala
index f8d523fa2c..59b90974ae 100644
--- a/core/src/test/scala/org/apache/spark/rdd/PipedRDDSuite.scala
+++ b/core/src/test/scala/org/apache/spark/rdd/PipedRDDSuite.scala
@@ -96,7 +96,7 @@ class PipedRDDSuite extends SparkFunSuite with SharedSparkContext {
val piped = nums.pipe(Seq("cat"),
Map[String, String](),
(f: String => Unit) => {
- bl.value.map(f(_)); f("\u0001")
+ bl.value.foreach(f); f("\u0001")
},
(i: Int, f: String => Unit) => f(i + "_"))
@@ -117,7 +117,7 @@ class PipedRDDSuite extends SparkFunSuite with SharedSparkContext {
pipe(Seq("cat"),
Map[String, String](),
(f: String => Unit) => {
- bl.value.map(f(_)); f("\u0001")
+ bl.value.foreach(f); f("\u0001")
},
(i: Tuple2[String, Iterable[String]], f: String => Unit) => {
for (e <- i._2) {
diff --git a/core/src/test/scala/org/apache/spark/util/TimeStampedHashMapSuite.scala b/core/src/test/scala/org/apache/spark/util/TimeStampedHashMapSuite.scala
index 25fc15dd54..fd9add7690 100644
--- a/core/src/test/scala/org/apache/spark/util/TimeStampedHashMapSuite.scala
+++ b/core/src/test/scala/org/apache/spark/util/TimeStampedHashMapSuite.scala
@@ -171,8 +171,8 @@ class TimeStampedHashMapSuite extends SparkFunSuite {
})
test(name + " - threading safety test") {
- threads.map(_.start)
- threads.map(_.join)
+ threads.foreach(_.start())
+ threads.foreach(_.join())
assert(!error)
}
}
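A self-contained sketch of the last pattern (the thread bodies are illustrative, and the `Runnable` lambda assumes Scala 2.12+): `foreach` rather than `map`, since starting and joining threads is pure side effect, plus explicit `()` on `start()` and `join()` to mark them as side-effecting calls.

object ThreadForeachSketch {
  def main(args: Array[String]): Unit = {
    val counter = new java.util.concurrent.atomic.AtomicInteger(0)
    val threads = (1 to 4).map { _ =>
      new Thread(() => counter.incrementAndGet())
    }

    // map would collect a Seq[Unit]; foreach states the intent directly.
    threads.foreach(_.start())
    threads.foreach(_.join())

    assert(counter.get() == 4)
    println(s"all ${threads.size} threads finished")
  }
}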