-rw-r--r--  core/src/main/scala/org/apache/spark/deploy/Client.scala | 41
-rw-r--r--  mllib/src/test/scala/org/apache/spark/mllib/clustering/KMeansSuite.scala | 9
-rw-r--r--  project/SparkBuild.scala | 7
-rw-r--r--  sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/trees/TreeNode.scala | 7
-rw-r--r--  sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/StringKeyHashMap.scala | 7
-rw-r--r--  sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/AnalysisErrorSuite.scala | 9
-rw-r--r--  streaming/src/main/scala/org/apache/spark/streaming/api/java/JavaPairDStream.scala | 7
7 files changed, 44 insertions, 43 deletions
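
Every change below is the same mechanical refactoring: a two-branch match on a Boolean becomes a plain if/else. A minimal before/after sketch of the pattern (the ready flag is illustrative, not taken from the patch):

    val ready = true  // hypothetical flag, for illustration only

    // Before: pattern-matching on a Boolean
    val labelBefore = ready match {
      case true  => "ready"
      case false => "pending"
    }

    // After: the equivalent if/else, as applied throughout this commit
    val labelAfter = if (ready) "ready" else "pending"
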
diff --git a/core/src/main/scala/org/apache/spark/deploy/Client.scala b/core/src/main/scala/org/apache/spark/deploy/Client.scala
index bf2dab6e71..ee276e1b71 100644
--- a/core/src/main/scala/org/apache/spark/deploy/Client.scala
+++ b/core/src/main/scala/org/apache/spark/deploy/Client.scala
@@ -124,27 +124,26 @@ private class ClientEndpoint(
logInfo("... polling master for driver state")
val statusResponse =
activeMasterEndpoint.askWithRetry[DriverStatusResponse](RequestDriverStatus(driverId))
- statusResponse.found match {
- case false =>
- logError(s"ERROR: Cluster master did not recognize $driverId")
- System.exit(-1)
- case true =>
- logInfo(s"State of $driverId is ${statusResponse.state.get}")
- // Worker node, if present
- (statusResponse.workerId, statusResponse.workerHostPort, statusResponse.state) match {
- case (Some(id), Some(hostPort), Some(DriverState.RUNNING)) =>
- logInfo(s"Driver running on $hostPort ($id)")
- case _ =>
- }
- // Exception, if present
- statusResponse.exception match {
- case Some(e) =>
- logError(s"Exception from cluster was: $e")
- e.printStackTrace()
- System.exit(-1)
- case _ =>
- System.exit(0)
- }
+ if (statusResponse.found) {
+ logInfo(s"State of $driverId is ${statusResponse.state.get}")
+ // Worker node, if present
+ (statusResponse.workerId, statusResponse.workerHostPort, statusResponse.state) match {
+ case (Some(id), Some(hostPort), Some(DriverState.RUNNING)) =>
+ logInfo(s"Driver running on $hostPort ($id)")
+ case _ =>
+ }
+ // Exception, if present
+ statusResponse.exception match {
+ case Some(e) =>
+ logError(s"Exception from cluster was: $e")
+ e.printStackTrace()
+ System.exit(-1)
+ case _ =>
+ System.exit(0)
+ }
+ } else {
+ logError(s"ERROR: Cluster master did not recognize $driverId")
+ System.exit(-1)
}
}
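
Note the rewrite keeps the tuple-match on the three Options, which still states the "all pieces present and RUNNING" case in a single pattern. A standalone sketch with hypothetical stand-in values (plain strings replace DriverState):

    val workerId: Option[String] = Some("worker-0001")      // illustrative
    val hostPort: Option[String] = Some("192.168.0.1:7078") // illustrative
    val state: Option[String]    = Some("RUNNING")

    (workerId, hostPort, state) match {
      case (Some(id), Some(hp), Some("RUNNING")) =>
        println(s"Driver running on $hp ($id)")
      case _ => // any piece missing, or a non-RUNNING state: report nothing
    }
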
diff --git a/mllib/src/test/scala/org/apache/spark/mllib/clustering/KMeansSuite.scala b/mllib/src/test/scala/org/apache/spark/mllib/clustering/KMeansSuite.scala
index 3003c62d98..2d35b31208 100644
--- a/mllib/src/test/scala/org/apache/spark/mllib/clustering/KMeansSuite.scala
+++ b/mllib/src/test/scala/org/apache/spark/mllib/clustering/KMeansSuite.scala
@@ -304,11 +304,10 @@ class KMeansSuite extends SparkFunSuite with MLlibTestSparkContext {
object KMeansSuite extends SparkFunSuite {
def createModel(dim: Int, k: Int, isSparse: Boolean): KMeansModel = {
- val singlePoint = isSparse match {
- case true =>
- Vectors.sparse(dim, Array.empty[Int], Array.empty[Double])
- case _ =>
- Vectors.dense(Array.fill[Double](dim)(0.0))
+ val singlePoint = if (isSparse) {
+ Vectors.sparse(dim, Array.empty[Int], Array.empty[Double])
+ } else {
+ Vectors.dense(Array.fill[Double](dim)(0.0))
}
new KMeansModel(Array.fill[Vector](k)(singlePoint))
}
diff --git a/project/SparkBuild.scala b/project/SparkBuild.scala
index 83a7c0864f..d164ead4ba 100644
--- a/project/SparkBuild.scala
+++ b/project/SparkBuild.scala
@@ -212,9 +212,10 @@ object SparkBuild extends PomBuild {
cachedFun(findFiles(scalaSource.in(config).value))
}
- private def findFiles(file: File): Set[File] = file.isDirectory match {
- case true => file.listFiles().toSet.flatMap(findFiles) + file
- case false => Set(file)
+ private def findFiles(file: File): Set[File] = if (file.isDirectory) {
+ file.listFiles().toSet.flatMap(findFiles) + file
+ } else {
+ Set(file)
}
def enableScalaStyle: Seq[sbt.Def.Setting[_]] = Seq(
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/trees/TreeNode.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/trees/TreeNode.scala
index 24a2dc9d3b..037f8cb287 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/trees/TreeNode.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/trees/TreeNode.scala
@@ -103,9 +103,10 @@ abstract class TreeNode[BaseType <: TreeNode[BaseType]] extends Product {
* Find the first [[TreeNode]] that satisfies the condition specified by `f`.
* The condition is recursively applied to this node and all of its children (pre-order).
*/
- def find(f: BaseType => Boolean): Option[BaseType] = f(this) match {
- case true => Some(this)
- case false => children.foldLeft(Option.empty[BaseType]) { (l, r) => l.orElse(r.find(f)) }
+ def find(f: BaseType => Boolean): Option[BaseType] = if (f(this)) {
+ Some(this)
+ } else {
+ children.foldLeft(Option.empty[BaseType]) { (l, r) => l.orElse(r.find(f)) }
}
/**
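
For context: find visits this node first, so the first match in pre-order wins, and because orElse is lazy in its argument, later children are never searched once a match is found. A self-contained sketch on a toy tree (not Catalyst's TreeNode):

    case class Node(value: Int, children: Seq[Node] = Nil) {
      def find(f: Node => Boolean): Option[Node] =
        if (f(this)) Some(this)
        else children.foldLeft(Option.empty[Node]) { (l, r) => l.orElse(r.find(f)) }
    }

    val tree = Node(1, Seq(Node(2), Node(3, Seq(Node(4)))))
    tree.find(_.value % 2 == 0)  // the node with value 2: first even value in pre-order
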
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/StringKeyHashMap.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/StringKeyHashMap.scala
index d5d151a580..a7ac613683 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/StringKeyHashMap.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/StringKeyHashMap.scala
@@ -22,9 +22,10 @@ package org.apache.spark.sql.catalyst.util
* sensitive or insensitive.
*/
object StringKeyHashMap {
- def apply[T](caseSensitive: Boolean): StringKeyHashMap[T] = caseSensitive match {
- case false => new StringKeyHashMap[T](_.toLowerCase)
- case true => new StringKeyHashMap[T](identity)
+ def apply[T](caseSensitive: Boolean): StringKeyHashMap[T] = if (caseSensitive) {
+ new StringKeyHashMap[T](identity)
+ } else {
+ new StringKeyHashMap[T](_.toLowerCase)
}
}
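
The two branches differ only in the key normalizer: identity when lookups are case-sensitive, _.toLowerCase otherwise. A standalone sketch of the idea, with illustrative method names rather than the real class's API:

    import scala.collection.mutable

    class KeyedMap[T](normalizer: String => String) {
      private val base = mutable.HashMap.empty[String, T]
      def put(key: String, value: T): Option[T] = base.put(normalizer(key), value)
      def get(key: String): Option[T] = base.get(normalizer(key))
    }

    val m = new KeyedMap[Int](_.toLowerCase)  // the caseSensitive = false branch
    m.put("Count", 1)
    m.get("COUNT")  // Some(1): both keys normalize to "count"
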
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/AnalysisErrorSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/AnalysisErrorSuite.scala
index e7c8615bc5..21afe9fec5 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/AnalysisErrorSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/AnalysisErrorSuite.scala
@@ -414,11 +414,10 @@ class AnalysisErrorSuite extends AnalysisTest {
AttributeReference("a", dataType)(exprId = ExprId(2)),
AttributeReference("b", IntegerType)(exprId = ExprId(1))))
- shouldSuccess match {
- case true =>
- assertAnalysisSuccess(plan, true)
- case false =>
- assertAnalysisError(plan, "expression `a` cannot be used as a grouping expression" :: Nil)
+ if (shouldSuccess) {
+ assertAnalysisSuccess(plan, true)
+ } else {
+ assertAnalysisError(plan, "expression `a` cannot be used as a grouping expression" :: Nil)
}
}
diff --git a/streaming/src/main/scala/org/apache/spark/streaming/api/java/JavaPairDStream.scala b/streaming/src/main/scala/org/apache/spark/streaming/api/java/JavaPairDStream.scala
index dec983165f..da9ff85885 100644
--- a/streaming/src/main/scala/org/apache/spark/streaming/api/java/JavaPairDStream.scala
+++ b/streaming/src/main/scala/org/apache/spark/streaming/api/java/JavaPairDStream.scala
@@ -471,9 +471,10 @@ class JavaPairDStream[K, V](val dstream: DStream[(K, V)])(
val list: JList[V] = values.asJava
val scalaState: Optional[S] = JavaUtils.optionToOptional(state)
val result: Optional[S] = in.apply(list, scalaState)
- result.isPresent match {
- case true => Some(result.get())
- case _ => None
+ if (result.isPresent) {
+ Some(result.get())
+ } else {
+ None
}
}
scalaFunc
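
The same isPresent branch appears wherever a Java-style Optional crosses into Scala. A minimal sketch using java.util.Optional as a stand-in for Spark's own Optional (both expose isPresent/get):

    import java.util.Optional

    def toScalaOption[S](result: Optional[S]): Option[S] =
      if (result.isPresent) Some(result.get()) else None

    toScalaOption(Optional.of("RUNNING"))    // Some("RUNNING")
    toScalaOption(Optional.empty[String]())  // None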