From 0dc4310b470c7e4355c0da67ca3373c3013cc9dd Mon Sep 17 00:00:00 2001
From: Sean Owen
Date: Sat, 30 Jul 2016 04:42:38 -0700
Subject: [SPARK-16694][CORE] Use for/foreach rather than map for Unit
 expressions whose side effects are required

## What changes were proposed in this pull request?

Use foreach/for instead of map where the operation requires execution of the body for its side effects, rather than defining a transformation.
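For illustration, a minimal standalone sketch of the distinction (not code from this patch; the names are invented for the example). `map` builds and returns a new collection, so calling it only for side effects allocates a throwaway `Seq[Unit]`, and on a lazy collection the body may never run at all; `foreach` (or a `for` without `yield`) executes the body directly:

```scala
object MapVsForeach {
  def main(args: Array[String]): Unit = {
    val xs = Seq(1, 2, 3)

    // Anti-pattern: allocates and discards a Seq[Unit]; only the side effect matters.
    xs.map(x => println(x))

    // Intent-revealing: returns Unit and runs the body once per element.
    xs.foreach(x => println(x))

    // On a lazy view, map merely defines the transformation; nothing prints
    // until the view is forced, so the side effect can be lost entirely.
    val pending = xs.view.map(x => println(x))

    // A for-comprehension without yield desugars to foreach.
    for (x <- xs) println(x)
  }
}
```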
## How was this patch tested?

Jenkins

Author: Sean Owen

Closes #14332 from srowen/SPARK-16694.
---
 .../scala/org/apache/spark/api/r/RBackendHandler.scala      |  4 ++--
 core/src/main/scala/org/apache/spark/deploy/Client.scala    | 14 ++++++++------
 .../src/main/scala/org/apache/spark/rdd/CoalescedRDD.scala  | 14 +++++++-------
 core/src/main/scala/org/apache/spark/rdd/HadoopRDD.scala    |  4 ++--
 .../cluster/mesos/MesosCoarseGrainedSchedulerBackend.scala  |  4 +---
 .../scala/org/apache/spark/ImplicitOrderingSuite.scala      |  4 ++--
 .../test/scala/org/apache/spark/PartitioningSuite.scala     |  6 +++---
 .../test/scala/org/apache/spark/rdd/PipedRDDSuite.scala     |  4 ++--
 .../org/apache/spark/util/TimeStampedHashMapSuite.scala     |  4 ++--
 .../org/apache/spark/examples/ml/DataFrameExample.scala     |  9 ++++-----
 .../org/apache/spark/examples/ml/DecisionTreeExample.scala  |  9 ++++-----
 .../scala/org/apache/spark/examples/ml/GBTExample.scala     |  9 ++++-----
 .../apache/spark/examples/ml/LinearRegressionExample.scala  |  9 ++++-----
 .../spark/examples/ml/LogisticRegressionExample.scala       |  9 ++++-----
 .../org/apache/spark/examples/ml/RandomForestExample.scala  |  9 ++++-----
 .../apache/spark/examples/mllib/BinaryClassification.scala  |  9 ++++-----
 .../org/apache/spark/examples/mllib/Correlations.scala      |  9 ++++-----
 .../org/apache/spark/examples/mllib/CosineSimilarity.scala  |  9 ++++-----
 .../apache/spark/examples/mllib/DecisionTreeRunner.scala    |  9 ++++-----
 .../org/apache/spark/examples/mllib/DenseKMeans.scala       |  9 ++++-----
 .../org/apache/spark/examples/mllib/FPGrowthExample.scala   |  9 ++++-----
 .../spark/examples/mllib/GradientBoostedTreesRunner.scala   |  9 ++++-----
 .../scala/org/apache/spark/examples/mllib/LDAExample.scala  | 10 ++++------
 .../org/apache/spark/examples/mllib/LinearRegression.scala  |  9 ++++-----
 .../org/apache/spark/examples/mllib/MovieLensALS.scala      |  9 ++++-----
 .../spark/examples/mllib/MultivariateSummarizer.scala       |  9 ++++-----
 .../examples/mllib/PowerIterationClusteringExample.scala    |  9 ++++-----
 .../org/apache/spark/examples/mllib/SampledRDDs.scala       |  9 ++++-----
 .../org/apache/spark/examples/mllib/SparseNaiveBayes.scala  |  9 ++++-----
 .../test/scala/org/apache/spark/graphx/GraphSuite.scala     |  2 +-
 .../scala/org/apache/spark/mllib/clustering/KMeans.scala    |  2 +-
 .../scala/org/apache/spark/ml/feature/Word2VecSuite.scala   |  4 ++--
 .../spark/mllib/classification/NaiveBayesSuite.scala        |  2 +-
 .../spark/mllib/random/RandomDataGeneratorSuite.scala       |  6 +++---
 .../spark/sql/catalyst/expressions/PredicateSuite.scala     |  4 ++--
 .../columnar/compression/CompressionSchemeBenchmark.scala   |  4 ++--
 .../columnar/compression/IntegralDeltaSuite.scala           |  2 +-
 .../execution/datasources/FileSourceStrategySuite.scala     |  4 ++--
 .../apache/spark/streaming/ReceiverInputDStreamSuite.scala  |  2 +-
 39 files changed, 125 insertions(+), 146 deletions(-)

diff --git a/core/src/main/scala/org/apache/spark/api/r/RBackendHandler.scala b/core/src/main/scala/org/apache/spark/api/r/RBackendHandler.scala
index c416e835a9..7d5348266b 100644
--- a/core/src/main/scala/org/apache/spark/api/r/RBackendHandler.scala
+++ b/core/src/main/scala/org/apache/spark/api/r/RBackendHandler.scala
@@ -198,7 +198,7 @@ private[r] class RBackendHandler(server: RBackend)
       args: Array[Object]): Option[Int] = {
     val numArgs = args.length

-    for (index <- 0 until parameterTypesOfMethods.length) {
+    for (index <- parameterTypesOfMethods.indices) {
       val parameterTypes = parameterTypesOfMethods(index)

       if (parameterTypes.length == numArgs) {
@@ -240,7 +240,7 @@ private[r] class RBackendHandler(server: RBackend)
           // Convert args if needed
           val parameterTypes = parameterTypesOfMethods(index)

-          (0 until numArgs).map { i =>
+          for (i <- 0 until numArgs) {
             if (parameterTypes(i) == classOf[Seq[Any]] && args(i).getClass.isArray) {
               // Convert a Java array to scala Seq
               args(i) = args(i).asInstanceOf[Array[_]].toSeq
diff --git a/core/src/main/scala/org/apache/spark/deploy/Client.scala b/core/src/main/scala/org/apache/spark/deploy/Client.scala
index 640f25f504..bf2dab6e71 100644
--- a/core/src/main/scala/org/apache/spark/deploy/Client.scala
+++ b/core/src/main/scala/org/apache/spark/deploy/Client.scala
@@ -116,7 +116,7 @@ private class ClientEndpoint(
   }

   /* Find out driver status then exit the JVM */
-  def pollAndReportStatus(driverId: String) {
+  def pollAndReportStatus(driverId: String): Unit = {
     // Since ClientEndpoint is the only RpcEndpoint in the process, blocking the event loop thread
     // is fine.
     logInfo("... waiting before polling master for driver state")
@@ -137,12 +137,14 @@ private class ClientEndpoint(
         case _ =>
       }
       // Exception, if present
-      statusResponse.exception.map { e =>
-        logError(s"Exception from cluster was: $e")
-        e.printStackTrace()
-        System.exit(-1)
+      statusResponse.exception match {
+        case Some(e) =>
+          logError(s"Exception from cluster was: $e")
+          e.printStackTrace()
+          System.exit(-1)
+        case _ =>
+          System.exit(0)
       }
-      System.exit(0)
     }
   }
diff --git a/core/src/main/scala/org/apache/spark/rdd/CoalescedRDD.scala b/core/src/main/scala/org/apache/spark/rdd/CoalescedRDD.scala
index 2ec9846e33..9c198a61f3 100644
--- a/core/src/main/scala/org/apache/spark/rdd/CoalescedRDD.scala
+++ b/core/src/main/scala/org/apache/spark/rdd/CoalescedRDD.scala
@@ -183,14 +183,14 @@ private class DefaultPartitionCoalescer(val balanceSlack: Double = 0.10)

     getAllPrefLocs(prev)

-    // gets all the preffered locations of the previous RDD and splits them into partitions
+    // gets all the preferred locations of the previous RDD and splits them into partitions
     // with preferred locations and ones without
-    def getAllPrefLocs(prev: RDD[_]) {
+    def getAllPrefLocs(prev: RDD[_]): Unit = {
       val tmpPartsWithLocs = mutable.LinkedHashMap[Partition, Seq[String]]()
       // first get the locations for each partition, only do this once since it can be expensive
       prev.partitions.foreach(p => {
         val locs = currPrefLocs(p, prev)
-        if (locs.size > 0) {
+        if (locs.nonEmpty) {
           tmpPartsWithLocs.put(p, locs)
         } else {
           partsWithoutLocs += p
@@ -198,13 +198,13 @@ private class DefaultPartitionCoalescer(val balanceSlack: Double = 0.10)
         }
       )
       // convert it into an array of host to partition
-      (0 to 2).map(x =>
-        tmpPartsWithLocs.foreach(parts => {
+      for (x <- 0 to 2) {
+        tmpPartsWithLocs.foreach { parts =>
          val p = parts._1
          val locs = parts._2
          if (locs.size > x) partsWithLocs += ((locs(x), p))
-        } )
-      )
+        }
+      }
     }
   }
diff --git a/core/src/main/scala/org/apache/spark/rdd/HadoopRDD.scala b/core/src/main/scala/org/apache/spark/rdd/HadoopRDD.scala
index 515fd6f4e2..99afe0250c 100644
--- a/core/src/main/scala/org/apache/spark/rdd/HadoopRDD.scala
+++ b/core/src/main/scala/org/apache/spark/rdd/HadoopRDD.scala
@@ -155,7 +155,7 @@ class HadoopRDD[K, V](
       logDebug("Cloning Hadoop Configuration")
       val newJobConf = new JobConf(conf)
       if (!conf.isInstanceOf[JobConf]) {
-        initLocalJobConfFuncOpt.map(f => f(newJobConf))
+        initLocalJobConfFuncOpt.foreach(f => f(newJobConf))
       }
       newJobConf
     }
@@ -174,7 +174,7 @@ class HadoopRDD[K, V](
       HadoopRDD.CONFIGURATION_INSTANTIATION_LOCK.synchronized {
         logDebug("Creating new JobConf and caching it for later re-use")
         val newJobConf = new JobConf(conf)
-        initLocalJobConfFuncOpt.map(f => f(newJobConf))
+        initLocalJobConfFuncOpt.foreach(f => f(newJobConf))
         HadoopRDD.putCachedMetadata(jobConfCacheKey, newJobConf)
         newJobConf
       }
diff --git a/core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosCoarseGrainedSchedulerBackend.scala b/core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosCoarseGrainedSchedulerBackend.scala
index 959d6fd46d..263e6197a6 100644
--- a/core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosCoarseGrainedSchedulerBackend.scala
+++ b/core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosCoarseGrainedSchedulerBackend.scala
@@ -220,9 +220,7 @@ private[spark] class MesosCoarseGrainedSchedulerBackend(
       command.addUris(CommandInfo.URI.newBuilder().setValue(uri.get))
     }

-    conf.getOption("spark.mesos.uris").map { uris =>
-      setupUris(uris, command)
-    }
+    conf.getOption("spark.mesos.uris").foreach(setupUris(_, command))

     command.build()
   }
diff --git a/core/src/test/scala/org/apache/spark/ImplicitOrderingSuite.scala b/core/src/test/scala/org/apache/spark/ImplicitOrderingSuite.scala
index 939f12f94f..b9d18119b5 100644
--- a/core/src/test/scala/org/apache/spark/ImplicitOrderingSuite.scala
+++ b/core/src/test/scala/org/apache/spark/ImplicitOrderingSuite.scala
@@ -30,11 +30,11 @@ class ImplicitOrderingSuite extends SparkFunSuite with LocalSparkContext {
     // Infer orderings after basic maps to particular types
     val basicMapExpectations = ImplicitOrderingSuite.basicMapExpectations(rdd)
-    basicMapExpectations.map({case (met, explain) => assert(met, explain)})
+    basicMapExpectations.foreach { case (met, explain) => assert(met, explain) }

     // Infer orderings for other RDD methods
     val otherRDDMethodExpectations = ImplicitOrderingSuite.otherRDDMethodExpectations(rdd)
-    otherRDDMethodExpectations.map({case (met, explain) => assert(met, explain)})
+    otherRDDMethodExpectations.foreach { case (met, explain) => assert(met, explain) }
   }
 }
diff --git a/core/src/test/scala/org/apache/spark/PartitioningSuite.scala b/core/src/test/scala/org/apache/spark/PartitioningSuite.scala
index c5d4968ef7..34c017806f 100644
--- a/core/src/test/scala/org/apache/spark/PartitioningSuite.scala
+++ b/core/src/test/scala/org/apache/spark/PartitioningSuite.scala
@@ -71,9 +71,9 @@ class PartitioningSuite extends SparkFunSuite with SharedSparkContext with Priva
     val partitionSizes = List(1, 2, 10, 100, 500, 1000, 1500)
     val partitioners = partitionSizes.map(p => (p, new RangePartitioner(p, rdd)))
     val decoratedRangeBounds = PrivateMethod[Array[Int]]('rangeBounds)
-    partitioners.map { case (numPartitions, partitioner) =>
+    partitioners.foreach { case (numPartitions, partitioner) =>
       val rangeBounds = partitioner.invokePrivate(decoratedRangeBounds())
-      1.to(1000).map { element => {
+      for (element <- 1 to 1000) {
         val partition = partitioner.getPartition(element)
         if (numPartitions > 1) {
           if (partition < rangeBounds.size) {
@@ -85,7 +85,7 @@ class PartitioningSuite extends SparkFunSuite with SharedSparkContext with Priva
         } else {
           assert(partition === 0)
         }
-      }}
+      }
     }
   }
diff --git a/core/src/test/scala/org/apache/spark/rdd/PipedRDDSuite.scala b/core/src/test/scala/org/apache/spark/rdd/PipedRDDSuite.scala
index f8d523fa2c..59b90974ae 100644
--- a/core/src/test/scala/org/apache/spark/rdd/PipedRDDSuite.scala
+++ b/core/src/test/scala/org/apache/spark/rdd/PipedRDDSuite.scala
@@ -96,7 +96,7 @@ class PipedRDDSuite extends SparkFunSuite with SharedSparkContext {
     val piped = nums.pipe(Seq("cat"),
       Map[String, String](),
       (f: String => Unit) => {
-        bl.value.map(f(_)); f("\u0001")
+        bl.value.foreach(f); f("\u0001")
       },
       (i: Int, f: String => Unit) => f(i + "_"))

@@ -117,7 +117,7 @@ class PipedRDDSuite extends SparkFunSuite with SharedSparkContext {
       pipe(Seq("cat"),
         Map[String, String](),
         (f: String => Unit) => {
-          bl.value.map(f(_)); f("\u0001")
+          bl.value.foreach(f); f("\u0001")
         },
         (i: Tuple2[String, Iterable[String]], f: String => Unit) => {
           for (e <- i._2) {
diff --git a/core/src/test/scala/org/apache/spark/util/TimeStampedHashMapSuite.scala b/core/src/test/scala/org/apache/spark/util/TimeStampedHashMapSuite.scala
index 25fc15dd54..fd9add7690 100644
--- a/core/src/test/scala/org/apache/spark/util/TimeStampedHashMapSuite.scala
+++ b/core/src/test/scala/org/apache/spark/util/TimeStampedHashMapSuite.scala
@@ -171,8 +171,8 @@ class TimeStampedHashMapSuite extends SparkFunSuite {
     })

     test(name + " - threading safety test") {
-      threads.map(_.start)
-      threads.map(_.join)
+      threads.foreach(_.start())
+      threads.foreach(_.join())
       assert(!error)
     }
   }
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/DataFrameExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/DataFrameExample.scala
index 38c1c1c186..e07c9a4717 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/DataFrameExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/DataFrameExample.scala
@@ -54,14 +54,13 @@ object DataFrameExample {
       }
     }

-    parser.parse(args, defaultParams).map { params =>
-      run(params)
-    }.getOrElse {
-      sys.exit(1)
+    parser.parse(args, defaultParams) match {
+      case Some(params) => run(params)
+      case _ => sys.exit(1)
     }
   }

-  def run(params: Params) {
+  def run(params: Params): Unit = {
     val spark = SparkSession
       .builder
       .appName(s"DataFrameExample with $params")
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/DecisionTreeExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/DecisionTreeExample.scala
index de4474555d..1745281c26 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/DecisionTreeExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/DecisionTreeExample.scala
@@ -124,10 +124,9 @@ object DecisionTreeExample {
       }
     }

-    parser.parse(args, defaultParams).map { params =>
-      run(params)
-    }.getOrElse {
-      sys.exit(1)
+    parser.parse(args, defaultParams) match {
+      case Some(params) => run(params)
+      case _ => sys.exit(1)
     }
   }

@@ -197,7 +196,7 @@ object DecisionTreeExample {
     (training, test)
   }

-  def run(params: Params) {
+  def run(params: Params): Unit = {
     val spark = SparkSession
       .builder
       .appName(s"DecisionTreeExample with $params")
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/GBTExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/GBTExample.scala
index a4274ae954..db55298d8e 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/GBTExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/GBTExample.scala
@@ -127,14 +127,13 @@ object GBTExample {
       }
     }

-    parser.parse(args, defaultParams).map { params =>
-      run(params)
-    }.getOrElse {
-      sys.exit(1)
+    parser.parse(args, defaultParams) match {
+      case Some(params) => run(params)
+      case _ => sys.exit(1)
     }
   }

-  def run(params: Params) {
+  def run(params: Params): Unit = {
     val spark = SparkSession
       .builder
       .appName(s"GBTExample with $params")
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/LinearRegressionExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/LinearRegressionExample.scala
index de96fb2979..31ba180335 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/LinearRegressionExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/LinearRegressionExample.scala
@@ -96,14 +96,13 @@ object LinearRegressionExample {
       }
     }

-    parser.parse(args, defaultParams).map { params =>
-      run(params)
-    }.getOrElse {
-      sys.exit(1)
+    parser.parse(args, defaultParams) match {
+      case Some(params) => run(params)
+      case _ => sys.exit(1)
     }
   }

-  def run(params: Params) {
+  def run(params: Params): Unit = {
     val spark = SparkSession
       .builder
       .appName(s"LinearRegressionExample with $params")
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/LogisticRegressionExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/LogisticRegressionExample.scala
index c2a87e1ddf..c67b53899c 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/LogisticRegressionExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/LogisticRegressionExample.scala
@@ -103,14 +103,13 @@ object LogisticRegressionExample {
       }
     }

-    parser.parse(args, defaultParams).map { params =>
-      run(params)
-    }.getOrElse {
-      sys.exit(1)
+    parser.parse(args, defaultParams) match {
+      case Some(params) => run(params)
+      case _ => sys.exit(1)
     }
   }

-  def run(params: Params) {
+  def run(params: Params): Unit = {
     val spark = SparkSession
       .builder
       .appName(s"LogisticRegressionExample with $params")
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/RandomForestExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/RandomForestExample.scala
index 2419dc49cd..a9e07c0705 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/RandomForestExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/RandomForestExample.scala
@@ -133,14 +133,13 @@ object RandomForestExample {
       }
     }

-    parser.parse(args, defaultParams).map { params =>
-      run(params)
-    }.getOrElse {
-      sys.exit(1)
+    parser.parse(args, defaultParams) match {
+      case Some(params) => run(params)
+      case _ => sys.exit(1)
     }
   }

-  def run(params: Params) {
+  def run(params: Params): Unit = {
     val spark = SparkSession
       .builder
       .appName(s"RandomForestExample with $params")
diff --git a/examples/src/main/scala/org/apache/spark/examples/mllib/BinaryClassification.scala b/examples/src/main/scala/org/apache/spark/examples/mllib/BinaryClassification.scala
index 2282bd2b7d..a1a5b59152 100644
--- a/examples/src/main/scala/org/apache/spark/examples/mllib/BinaryClassification.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/mllib/BinaryClassification.scala
@@ -95,14 +95,13 @@ object BinaryClassification {
       """.stripMargin)
     }

-    parser.parse(args, defaultParams).map { params =>
-      run(params)
-    } getOrElse {
-      sys.exit(1)
+    parser.parse(args, defaultParams) match {
+      case Some(params) => run(params)
+      case _ => sys.exit(1)
     }
   }

-  def run(params: Params) {
+  def run(params: Params): Unit = {
     val conf = new SparkConf().setAppName(s"BinaryClassification with $params")
     val sc = new SparkContext(conf)
diff --git a/examples/src/main/scala/org/apache/spark/examples/mllib/Correlations.scala b/examples/src/main/scala/org/apache/spark/examples/mllib/Correlations.scala
index e003f35ed3..0b44c339ef 100644
--- a/examples/src/main/scala/org/apache/spark/examples/mllib/Correlations.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/mllib/Correlations.scala
@@ -56,14 +56,13 @@ object Correlations {
       """.stripMargin)
     }

-    parser.parse(args, defaultParams).map { params =>
-      run(params)
-    } getOrElse {
-      sys.exit(1)
+    parser.parse(args, defaultParams) match {
+      case Some(params) => run(params)
+      case _ => sys.exit(1)
     }
   }

-  def run(params: Params) {
+  def run(params: Params): Unit = {
     val conf = new SparkConf().setAppName(s"Correlations with $params")
     val sc = new SparkContext(conf)
diff --git a/examples/src/main/scala/org/apache/spark/examples/mllib/CosineSimilarity.scala b/examples/src/main/scala/org/apache/spark/examples/mllib/CosineSimilarity.scala
index 5ff3d36242..681465d217 100644
--- a/examples/src/main/scala/org/apache/spark/examples/mllib/CosineSimilarity.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/mllib/CosineSimilarity.scala
@@ -68,14 +68,13 @@ object CosineSimilarity {
       """.stripMargin)
     }

-    parser.parse(args, defaultParams).map { params =>
-      run(params)
-    } getOrElse {
-      System.exit(1)
+    parser.parse(args, defaultParams) match {
+      case Some(params) => run(params)
+      case _ => sys.exit(1)
     }
   }

-  def run(params: Params) {
+  def run(params: Params): Unit = {
     val conf = new SparkConf().setAppName("CosineSimilarity")
     val sc = new SparkContext(conf)
diff --git a/examples/src/main/scala/org/apache/spark/examples/mllib/DecisionTreeRunner.scala b/examples/src/main/scala/org/apache/spark/examples/mllib/DecisionTreeRunner.scala
index a85aa2cac9..0ad0465a02 100644
--- a/examples/src/main/scala/org/apache/spark/examples/mllib/DecisionTreeRunner.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/mllib/DecisionTreeRunner.scala
@@ -149,10 +149,9 @@ object DecisionTreeRunner {
       }
     }

-    parser.parse(args, defaultParams).map { params =>
-      run(params)
-    }.getOrElse {
-      sys.exit(1)
+    parser.parse(args, defaultParams) match {
+      case Some(params) => run(params)
+      case _ => sys.exit(1)
     }
   }

@@ -253,7 +252,7 @@ object DecisionTreeRunner {
     (training, test, numClasses)
   }

-  def run(params: Params) {
+  def run(params: Params): Unit = {
     val conf = new SparkConf().setAppName(s"DecisionTreeRunner with $params")
     val sc = new SparkContext(conf)
diff --git a/examples/src/main/scala/org/apache/spark/examples/mllib/DenseKMeans.scala b/examples/src/main/scala/org/apache/spark/examples/mllib/DenseKMeans.scala
index 380d85d60e..b228827e58 100644
--- a/examples/src/main/scala/org/apache/spark/examples/mllib/DenseKMeans.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/mllib/DenseKMeans.scala
@@ -69,14 +69,13 @@ object DenseKMeans {
       .action((x, c) => c.copy(input = x))
   }

-    parser.parse(args, defaultParams).map { params =>
-      run(params)
-    }.getOrElse {
-      sys.exit(1)
+    parser.parse(args, defaultParams) match {
+      case Some(params) => run(params)
+      case _ => sys.exit(1)
     }
   }

-  def run(params: Params) {
+  def run(params: Params): Unit = {
     val conf = new SparkConf().setAppName(s"DenseKMeans with $params")
     val sc = new SparkContext(conf)
diff --git a/examples/src/main/scala/org/apache/spark/examples/mllib/FPGrowthExample.scala b/examples/src/main/scala/org/apache/spark/examples/mllib/FPGrowthExample.scala
index a7a3eade04..6435abc127 100644
--- a/examples/src/main/scala/org/apache/spark/examples/mllib/FPGrowthExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/mllib/FPGrowthExample.scala
@@ -53,14 +53,13 @@ object FPGrowthExample {
       .action((x, c) => c.copy(input = x))
   }

-    parser.parse(args, defaultParams).map { params =>
-      run(params)
-    }.getOrElse {
-      sys.exit(1)
+    parser.parse(args, defaultParams) match {
+      case Some(params) => run(params)
+      case _ => sys.exit(1)
     }
   }

-  def run(params: Params) {
+  def run(params: Params): Unit = {
     val conf = new SparkConf().setAppName(s"FPGrowthExample with $params")
     val sc = new SparkContext(conf)
     val transactions = sc.textFile(params.input).map(_.split(" ")).cache()
diff --git a/examples/src/main/scala/org/apache/spark/examples/mllib/GradientBoostedTreesRunner.scala b/examples/src/main/scala/org/apache/spark/examples/mllib/GradientBoostedTreesRunner.scala
index 90e4687c1f..4020c6b6bc 100644
--- a/examples/src/main/scala/org/apache/spark/examples/mllib/GradientBoostedTreesRunner.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/mllib/GradientBoostedTreesRunner.scala
@@ -85,14 +85,13 @@ object GradientBoostedTreesRunner {
       }
     }

-    parser.parse(args, defaultParams).map { params =>
-      run(params)
-    }.getOrElse {
-      sys.exit(1)
+    parser.parse(args, defaultParams) match {
+      case Some(params) => run(params)
+      case _ => sys.exit(1)
     }
   }

-  def run(params: Params) {
+  def run(params: Params): Unit = {
     val conf = new SparkConf().setAppName(s"GradientBoostedTreesRunner with $params")
     val sc = new SparkContext(conf)
diff --git a/examples/src/main/scala/org/apache/spark/examples/mllib/LDAExample.scala b/examples/src/main/scala/org/apache/spark/examples/mllib/LDAExample.scala
index 3fbf8e0333..7e50b122e6 100644
--- a/examples/src/main/scala/org/apache/spark/examples/mllib/LDAExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/mllib/LDAExample.scala
@@ -98,15 +98,13 @@ object LDAExample {
       .action((x, c) => c.copy(input = c.input :+ x))
   }

-    parser.parse(args, defaultParams).map { params =>
-      run(params)
-    }.getOrElse {
-      parser.showUsageAsError
-      sys.exit(1)
+    parser.parse(args, defaultParams) match {
+      case Some(params) => run(params)
+      case _ => sys.exit(1)
     }
   }

-  private def run(params: Params) {
+  private def run(params: Params): Unit = {
     val conf = new SparkConf().setAppName(s"LDAExample with $params")
     val sc = new SparkContext(conf)
diff --git a/examples/src/main/scala/org/apache/spark/examples/mllib/LinearRegression.scala b/examples/src/main/scala/org/apache/spark/examples/mllib/LinearRegression.scala
index a70203028c..86aec363ea 100644
--- a/examples/src/main/scala/org/apache/spark/examples/mllib/LinearRegression.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/mllib/LinearRegression.scala
@@ -82,14 +82,13 @@ object LinearRegression {
       """.stripMargin)
     }

-    parser.parse(args, defaultParams).map { params =>
-      run(params)
-    } getOrElse {
-      sys.exit(1)
+    parser.parse(args, defaultParams) match {
+      case Some(params) => run(params)
+      case _ => sys.exit(1)
     }
   }

-  def run(params: Params) {
+  def run(params: Params): Unit = {
     val conf = new SparkConf().setAppName(s"LinearRegression with $params")
     val sc = new SparkContext(conf)
diff --git a/examples/src/main/scala/org/apache/spark/examples/mllib/MovieLensALS.scala b/examples/src/main/scala/org/apache/spark/examples/mllib/MovieLensALS.scala
index 09750e53cb..9bd6927fb7 100644
--- a/examples/src/main/scala/org/apache/spark/examples/mllib/MovieLensALS.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/mllib/MovieLensALS.scala
@@ -89,14 +89,13 @@ object MovieLensALS {
       """.stripMargin)
     }

-    parser.parse(args, defaultParams).map { params =>
-      run(params)
-    } getOrElse {
-      System.exit(1)
+    parser.parse(args, defaultParams) match {
+      case Some(params) => run(params)
+      case _ => sys.exit(1)
     }
   }

-  def run(params: Params) {
+  def run(params: Params): Unit = {
     val conf = new SparkConf().setAppName(s"MovieLensALS with $params")
     if (params.kryo) {
       conf.registerKryoClasses(Array(classOf[mutable.BitSet], classOf[Rating]))
diff --git a/examples/src/main/scala/org/apache/spark/examples/mllib/MultivariateSummarizer.scala b/examples/src/main/scala/org/apache/spark/examples/mllib/MultivariateSummarizer.scala
index 3c598172da..f9e47e485e 100644
--- a/examples/src/main/scala/org/apache/spark/examples/mllib/MultivariateSummarizer.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/mllib/MultivariateSummarizer.scala
@@ -57,14 +57,13 @@ object MultivariateSummarizer {
       """.stripMargin)
     }

-    parser.parse(args, defaultParams).map { params =>
-      run(params)
-    } getOrElse {
-      sys.exit(1)
+    parser.parse(args, defaultParams) match {
+      case Some(params) => run(params)
+      case _ => sys.exit(1)
     }
   }

-  def run(params: Params) {
+  def run(params: Params): Unit = {
     val conf = new SparkConf().setAppName(s"MultivariateSummarizer with $params")
     val sc = new SparkContext(conf)
diff --git a/examples/src/main/scala/org/apache/spark/examples/mllib/PowerIterationClusteringExample.scala b/examples/src/main/scala/org/apache/spark/examples/mllib/PowerIterationClusteringExample.scala
index a81c9b383d..986496c0d9 100644
--- a/examples/src/main/scala/org/apache/spark/examples/mllib/PowerIterationClusteringExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/mllib/PowerIterationClusteringExample.scala
@@ -77,14 +77,13 @@ object PowerIterationClusteringExample {
       .action((x, c) => c.copy(maxIterations = x))
   }

-    parser.parse(args, defaultParams).map { params =>
-      run(params)
-    }.getOrElse {
-      sys.exit(1)
+    parser.parse(args, defaultParams) match {
+      case Some(params) => run(params)
+      case _ => sys.exit(1)
     }
   }

-  def run(params: Params) {
+  def run(params: Params): Unit = {
     val conf = new SparkConf()
       .setMaster("local")
       .setAppName(s"PowerIterationClustering with $params")
diff --git a/examples/src/main/scala/org/apache/spark/examples/mllib/SampledRDDs.scala b/examples/src/main/scala/org/apache/spark/examples/mllib/SampledRDDs.scala
index 0da4005977..ba3deae5d6 100644
--- a/examples/src/main/scala/org/apache/spark/examples/mllib/SampledRDDs.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/mllib/SampledRDDs.scala
@@ -52,14 +52,13 @@ object SampledRDDs {
       """.stripMargin)
     }

-    parser.parse(args, defaultParams).map { params =>
-      run(params)
-    } getOrElse {
-      sys.exit(1)
+    parser.parse(args, defaultParams) match {
+      case Some(params) => run(params)
+      case _ => sys.exit(1)
     }
   }

-  def run(params: Params) {
+  def run(params: Params): Unit = {
     val conf = new SparkConf().setAppName(s"SampledRDDs with $params")
     val sc = new SparkContext(conf)
diff --git a/examples/src/main/scala/org/apache/spark/examples/mllib/SparseNaiveBayes.scala b/examples/src/main/scala/org/apache/spark/examples/mllib/SparseNaiveBayes.scala
index f81fc292a3..b76add2f9b 100644
--- a/examples/src/main/scala/org/apache/spark/examples/mllib/SparseNaiveBayes.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/mllib/SparseNaiveBayes.scala
@@ -60,14 +60,13 @@ object SparseNaiveBayes {
       .action((x, c) => c.copy(input = x))
   }

-    parser.parse(args, defaultParams).map { params =>
-      run(params)
-    }.getOrElse {
-      sys.exit(1)
+    parser.parse(args, defaultParams) match {
+      case Some(params) => run(params)
+      case _ => sys.exit(1)
     }
   }

-  def run(params: Params) {
+  def run(params: Params): Unit = {
     val conf = new SparkConf().setAppName(s"SparseNaiveBayes with $params")
     val sc = new SparkContext(conf)
diff --git a/graphx/src/test/scala/org/apache/spark/graphx/GraphSuite.scala b/graphx/src/test/scala/org/apache/spark/graphx/GraphSuite.scala
index 96aa262a39..88b59a343a 100644
--- a/graphx/src/test/scala/org/apache/spark/graphx/GraphSuite.scala
+++ b/graphx/src/test/scala/org/apache/spark/graphx/GraphSuite.scala
@@ -62,7 +62,7 @@ class GraphSuite extends SparkFunSuite with LocalSparkContext {
     assert( graph.edges.count() === rawEdges.size )
     // Vertices not explicitly provided but referenced by edges should be created automatically
     assert( graph.vertices.count() === 100)
-    graph.triplets.collect().map { et =>
+    graph.triplets.collect().foreach { et =>
       assert((et.srcId < 10 && et.srcAttr) || (et.srcId >= 10 && !et.srcAttr))
       assert((et.dstId < 10 && et.dstAttr) || (et.dstId >= 10 && !et.dstAttr))
     }
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/clustering/KMeans.scala b/mllib/src/main/scala/org/apache/spark/mllib/clustering/KMeans.scala
index 871b1c7d21..9a3d64fca5 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/clustering/KMeans.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/clustering/KMeans.scala
@@ -268,7 +268,7 @@ class KMeans private (

     val iterationStartTime = System.nanoTime()

-    instr.map(_.logNumFeatures(centers(0)(0).vector.size))
+    instr.foreach(_.logNumFeatures(centers(0)(0).vector.size))

     // Execute iterations of Lloyd's algorithm until all runs have converged
     while (iteration < maxIterations && !activeRuns.isEmpty) {
diff --git a/mllib/src/test/scala/org/apache/spark/ml/feature/Word2VecSuite.scala b/mllib/src/test/scala/org/apache/spark/ml/feature/Word2VecSuite.scala
index 16c74f6785..0b441f8b80 100644
--- a/mllib/src/test/scala/org/apache/spark/ml/feature/Word2VecSuite.scala
+++ b/mllib/src/test/scala/org/apache/spark/ml/feature/Word2VecSuite.scala
@@ -138,8 +138,8 @@ class Word2VecSuite extends SparkFunSuite with MLlibTestSparkContext with Defaul
       case Row(w: String, sim: Double) => (w, sim)
     }.collect().unzip

-    assert(synonyms.toArray === Array("b", "c"))
-    expectedSimilarity.zip(similarity).map {
+    assert(synonyms === Array("b", "c"))
+    expectedSimilarity.zip(similarity).foreach {
       case (expected, actual) => assert(math.abs((expected - actual) / expected) < 1E-5)
     }
   }
diff --git a/mllib/src/test/scala/org/apache/spark/mllib/classification/NaiveBayesSuite.scala b/mllib/src/test/scala/org/apache/spark/mllib/classification/NaiveBayesSuite.scala
index 0c0aefc52b..5ec4c15387 100644
--- a/mllib/src/test/scala/org/apache/spark/mllib/classification/NaiveBayesSuite.scala
+++ b/mllib/src/test/scala/org/apache/spark/mllib/classification/NaiveBayesSuite.scala
@@ -307,7 +307,7 @@ class NaiveBayesSuite extends SparkFunSuite with MLlibTestSparkContext {
     val tempDir = Utils.createTempDir()
     val path = tempDir.toURI.toString

-    Seq(NaiveBayesSuite.binaryBernoulliModel, NaiveBayesSuite.binaryMultinomialModel).map {
+    Seq(NaiveBayesSuite.binaryBernoulliModel, NaiveBayesSuite.binaryMultinomialModel).foreach {
       model =>
         // Save model, load it back, and compare.
         try {
diff --git a/mllib/src/test/scala/org/apache/spark/mllib/random/RandomDataGeneratorSuite.scala b/mllib/src/test/scala/org/apache/spark/mllib/random/RandomDataGeneratorSuite.scala
index 8416771552..e30ad15967 100644
--- a/mllib/src/test/scala/org/apache/spark/mllib/random/RandomDataGeneratorSuite.scala
+++ b/mllib/src/test/scala/org/apache/spark/mllib/random/RandomDataGeneratorSuite.scala
@@ -80,7 +80,7 @@ class RandomDataGeneratorSuite extends SparkFunSuite {
   }

   test("LogNormalGenerator") {
-    List((0.0, 1.0), (0.0, 2.0), (2.0, 1.0), (2.0, 2.0)).map {
+    List((0.0, 1.0), (0.0, 2.0), (2.0, 1.0), (2.0, 2.0)).foreach {
       case (mean: Double, vari: Double) =>
         val normal = new LogNormalGenerator(mean, math.sqrt(vari))
         apiChecks(normal)
@@ -125,7 +125,7 @@ class RandomDataGeneratorSuite extends SparkFunSuite {

   test("GammaGenerator") {
     // mean = 0.0 will not pass the API checks since 0.0 is always deterministically produced.
-    List((1.0, 2.0), (2.0, 2.0), (3.0, 2.0), (5.0, 1.0), (9.0, 0.5)).map {
+    List((1.0, 2.0), (2.0, 2.0), (3.0, 2.0), (5.0, 1.0), (9.0, 0.5)).foreach {
       case (shape: Double, scale: Double) =>
         val gamma = new GammaGenerator(shape, scale)
         apiChecks(gamma)
@@ -138,7 +138,7 @@ class RandomDataGeneratorSuite extends SparkFunSuite {
   }

   test("WeibullGenerator") {
-    List((1.0, 2.0), (2.0, 3.0), (2.5, 3.5), (10.4, 2.222)).map {
+    List((1.0, 2.0), (2.0, 3.0), (2.5, 3.5), (10.4, 2.222)).foreach {
       case (alpha: Double, beta: Double) =>
         val weibull = new WeibullGenerator(alpha, beta)
         apiChecks(weibull)
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/PredicateSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/PredicateSuite.scala
index b3f20692b2..2a445b8cdb 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/PredicateSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/PredicateSuite.scala
@@ -141,7 +141,7 @@ class PredicateSuite extends SparkFunSuite with ExpressionEvalHelper {
     val primitiveTypes = Seq(IntegerType, FloatType, DoubleType, StringType, ByteType, ShortType,
       LongType, BinaryType, BooleanType, DecimalType.USER_DEFAULT, TimestampType)
-    primitiveTypes.map { t =>
+    primitiveTypes.foreach { t =>
       val dataGen = RandomDataGenerator.forType(t, nullable = true).get
       val inputData = Seq.fill(10) {
         val value = dataGen.apply()
@@ -182,7 +182,7 @@ class PredicateSuite extends SparkFunSuite with ExpressionEvalHelper {
     val primitiveTypes = Seq(IntegerType, FloatType, DoubleType, StringType, ByteType, ShortType,
       LongType, BinaryType, BooleanType, DecimalType.USER_DEFAULT, TimestampType)
-    primitiveTypes.map { t =>
+    primitiveTypes.foreach { t =>
       val dataGen = RandomDataGenerator.forType(t, nullable = true).get
       val inputData = Seq.fill(10) {
         val value = dataGen.apply()
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/compression/CompressionSchemeBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/compression/CompressionSchemeBenchmark.scala
index 1aadd700d7..babf944e6a 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/compression/CompressionSchemeBenchmark.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/compression/CompressionSchemeBenchmark.scala
@@ -79,7 +79,7 @@ object CompressionSchemeBenchmark extends AllCompressionSchemes {
       input: ByteBuffer): Unit = {
     val benchmark = new Benchmark(name, iters * count)

-    schemes.filter(_.supports(tpe)).map { scheme =>
+    schemes.filter(_.supports(tpe)).foreach { scheme =>
=> + schemes.filter(_.supports(tpe)).foreach { scheme => val (compressFunc, compressionRatio, buf) = prepareEncodeInternal(count, tpe, scheme, input) val label = s"${getFormattedClassName(scheme)}(${compressionRatio.formatted("%.3f")})" @@ -103,7 +103,7 @@ object CompressionSchemeBenchmark extends AllCompressionSchemes { input: ByteBuffer): Unit = { val benchmark = new Benchmark(name, iters * count) - schemes.filter(_.supports(tpe)).map { scheme => + schemes.filter(_.supports(tpe)).foreach { scheme => val (compressFunc, _, buf) = prepareEncodeInternal(count, tpe, scheme, input) val compressedBuf = compressFunc(input, buf) val label = s"${getFormattedClassName(scheme)}" diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/compression/IntegralDeltaSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/compression/IntegralDeltaSuite.scala index 988a577a7b..a530e27074 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/compression/IntegralDeltaSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/compression/IntegralDeltaSuite.scala @@ -47,7 +47,7 @@ class IntegralDeltaSuite extends SparkFunSuite { } } - input.map { value => + input.foreach { value => val row = new GenericMutableRow(1) columnType.setField(row, 0, value) builder.appendFrom(row, 0) diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/FileSourceStrategySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/FileSourceStrategySuite.scala index ddcc24a7f5..2f551b1a01 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/FileSourceStrategySuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/FileSourceStrategySuite.scala @@ -343,7 +343,7 @@ class FileSourceStrategySuite extends QueryTest with SharedSQLContext with Predi test("SPARK-15654 do not split non-splittable files") { // Check if a non-splittable file is not assigned into partitions - Seq("gz", "snappy", "lz4").map { suffix => + Seq("gz", "snappy", "lz4").foreach { suffix => val table = createTable( files = Seq(s"file1.${suffix}" -> 3, s"file2.${suffix}" -> 1, s"file3.${suffix}" -> 1) ) @@ -359,7 +359,7 @@ class FileSourceStrategySuite extends QueryTest with SharedSQLContext with Predi } // Check if a splittable compressed file is assigned into multiple partitions - Seq("bz2").map { suffix => + Seq("bz2").foreach { suffix => val table = createTable( files = Seq(s"file1.${suffix}" -> 3, s"file2.${suffix}" -> 1, s"file3.${suffix}" -> 1) ) diff --git a/streaming/src/test/scala/org/apache/spark/streaming/ReceiverInputDStreamSuite.scala b/streaming/src/test/scala/org/apache/spark/streaming/ReceiverInputDStreamSuite.scala index 6763ac64da..0349e11224 100644 --- a/streaming/src/test/scala/org/apache/spark/streaming/ReceiverInputDStreamSuite.scala +++ b/streaming/src/test/scala/org/apache/spark/streaming/ReceiverInputDStreamSuite.scala @@ -34,7 +34,7 @@ class ReceiverInputDStreamSuite extends TestSuiteBase with BeforeAndAfterAll { override def afterAll(): Unit = { try { - StreamingContext.getActive().map { _.stop() } + StreamingContext.getActive().foreach(_.stop()) } finally { super.afterAll() } -- cgit v1.2.3