author     Shivansh <shiv4nsh@gmail.com>  2016-09-04 12:39:26 +0100
committer  Sean Owen <sowen@cloudera.com>  2016-09-04 12:39:26 +0100
commit     e75c162e9e510d74b07f28ccf6c7948ac317a7c6 (patch)
tree       3d424bc7733e9d7ccca8e929e914ceeced4c8e19
parent     6b156e2fcf9c0c1ed0770a7ad9c54fa374760e17 (diff)
download   spark-e75c162e9e510d74b07f28ccf6c7948ac317a7c6.tar.gz
           spark-e75c162e9e510d74b07f28ccf6c7948ac317a7c6.tar.bz2
           spark-e75c162e9e510d74b07f28ccf6c7948ac317a7c6.zip
[SPARK-17308] Improved the Spark core code by replacing all pattern matches on boolean values with if/else blocks.
## What changes were proposed in this pull request?

Improved the code quality of Spark by replacing all pattern matches on boolean values with if/else blocks.

## How was this patch tested?

By running the existing tests.

Author: Shivansh <shiv4nsh@gmail.com>

Closes #14873 from shiv4nsh/SPARK-17308.
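Every hunk below applies the same mechanical rewrite: a `match` on a `Boolean` scrutinee with `case true`/`case false` (or `case _`) arms becomes an `if`/`else` expression. A minimal self-contained sketch of the pattern follows; the `describe*` functions are hypothetical illustrations, not code from the patch.

```scala
object BooleanMatchExample {
  // Hypothetical example, not taken from the patch itself.

  // Before: pattern matching on a Boolean value — legal, but less idiomatic Scala.
  def describeOld(isEmpty: Boolean): String = isEmpty match {
    case true  => "empty"
    case false => "non-empty"
  }

  // After: the equivalent if/else expression, the form used throughout this patch.
  def describeNew(isEmpty: Boolean): String = if (isEmpty) {
    "empty"
  } else {
    "non-empty"
  }

  def main(args: Array[String]): Unit = {
    // Both forms produce the same result for both inputs.
    assert(describeOld(true) == describeNew(true))
    assert(describeOld(false) == describeNew(false))
  }
}
```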
-rw-r--r--  core/src/main/scala/org/apache/spark/deploy/Client.scala                                    41
-rw-r--r--  mllib/src/test/scala/org/apache/spark/mllib/clustering/KMeansSuite.scala                     9
-rw-r--r--  project/SparkBuild.scala                                                                      7
-rw-r--r--  sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/trees/TreeNode.scala                7
-rw-r--r--  sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/StringKeyHashMap.scala         7
-rw-r--r--  sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/AnalysisErrorSuite.scala   9
-rw-r--r--  streaming/src/main/scala/org/apache/spark/streaming/api/java/JavaPairDStream.scala            7
7 files changed, 44 insertions(+), 43 deletions(-)
diff --git a/core/src/main/scala/org/apache/spark/deploy/Client.scala b/core/src/main/scala/org/apache/spark/deploy/Client.scala
index bf2dab6e71..ee276e1b71 100644
--- a/core/src/main/scala/org/apache/spark/deploy/Client.scala
+++ b/core/src/main/scala/org/apache/spark/deploy/Client.scala
@@ -124,27 +124,26 @@ private class ClientEndpoint(
logInfo("... polling master for driver state")
val statusResponse =
activeMasterEndpoint.askWithRetry[DriverStatusResponse](RequestDriverStatus(driverId))
- statusResponse.found match {
- case false =>
- logError(s"ERROR: Cluster master did not recognize $driverId")
- System.exit(-1)
- case true =>
- logInfo(s"State of $driverId is ${statusResponse.state.get}")
- // Worker node, if present
- (statusResponse.workerId, statusResponse.workerHostPort, statusResponse.state) match {
- case (Some(id), Some(hostPort), Some(DriverState.RUNNING)) =>
- logInfo(s"Driver running on $hostPort ($id)")
- case _ =>
- }
- // Exception, if present
- statusResponse.exception match {
- case Some(e) =>
- logError(s"Exception from cluster was: $e")
- e.printStackTrace()
- System.exit(-1)
- case _ =>
- System.exit(0)
- }
+ if (statusResponse.found) {
+ logInfo(s"State of $driverId is ${statusResponse.state.get}")
+ // Worker node, if present
+ (statusResponse.workerId, statusResponse.workerHostPort, statusResponse.state) match {
+ case (Some(id), Some(hostPort), Some(DriverState.RUNNING)) =>
+ logInfo(s"Driver running on $hostPort ($id)")
+ case _ =>
+ }
+ // Exception, if present
+ statusResponse.exception match {
+ case Some(e) =>
+ logError(s"Exception from cluster was: $e")
+ e.printStackTrace()
+ System.exit(-1)
+ case _ =>
+ System.exit(0)
+ }
+ } else {
+ logError(s"ERROR: Cluster master did not recognize $driverId")
+ System.exit(-1)
}
}
diff --git a/mllib/src/test/scala/org/apache/spark/mllib/clustering/KMeansSuite.scala b/mllib/src/test/scala/org/apache/spark/mllib/clustering/KMeansSuite.scala
index 3003c62d98..2d35b31208 100644
--- a/mllib/src/test/scala/org/apache/spark/mllib/clustering/KMeansSuite.scala
+++ b/mllib/src/test/scala/org/apache/spark/mllib/clustering/KMeansSuite.scala
@@ -304,11 +304,10 @@ class KMeansSuite extends SparkFunSuite with MLlibTestSparkContext {
object KMeansSuite extends SparkFunSuite {
def createModel(dim: Int, k: Int, isSparse: Boolean): KMeansModel = {
- val singlePoint = isSparse match {
- case true =>
- Vectors.sparse(dim, Array.empty[Int], Array.empty[Double])
- case _ =>
- Vectors.dense(Array.fill[Double](dim)(0.0))
+ val singlePoint = if (isSparse) {
+ Vectors.sparse(dim, Array.empty[Int], Array.empty[Double])
+ } else {
+ Vectors.dense(Array.fill[Double](dim)(0.0))
}
new KMeansModel(Array.fill[Vector](k)(singlePoint))
}
diff --git a/project/SparkBuild.scala b/project/SparkBuild.scala
index 83a7c0864f..d164ead4ba 100644
--- a/project/SparkBuild.scala
+++ b/project/SparkBuild.scala
@@ -212,9 +212,10 @@ object SparkBuild extends PomBuild {
cachedFun(findFiles(scalaSource.in(config).value))
}
- private def findFiles(file: File): Set[File] = file.isDirectory match {
- case true => file.listFiles().toSet.flatMap(findFiles) + file
- case false => Set(file)
+ private def findFiles(file: File): Set[File] = if (file.isDirectory) {
+ file.listFiles().toSet.flatMap(findFiles) + file
+ } else {
+ Set(file)
}
def enableScalaStyle: Seq[sbt.Def.Setting[_]] = Seq(
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/trees/TreeNode.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/trees/TreeNode.scala
index 24a2dc9d3b..037f8cb287 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/trees/TreeNode.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/trees/TreeNode.scala
@@ -103,9 +103,10 @@ abstract class TreeNode[BaseType <: TreeNode[BaseType]] extends Product {
* Find the first [[TreeNode]] that satisfies the condition specified by `f`.
* The condition is recursively applied to this node and all of its children (pre-order).
*/
- def find(f: BaseType => Boolean): Option[BaseType] = f(this) match {
- case true => Some(this)
- case false => children.foldLeft(Option.empty[BaseType]) { (l, r) => l.orElse(r.find(f)) }
+ def find(f: BaseType => Boolean): Option[BaseType] = if (f(this)) {
+ Some(this)
+ } else {
+ children.foldLeft(Option.empty[BaseType]) { (l, r) => l.orElse(r.find(f)) }
}
/**
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/StringKeyHashMap.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/StringKeyHashMap.scala
index d5d151a580..a7ac613683 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/StringKeyHashMap.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/StringKeyHashMap.scala
@@ -22,9 +22,10 @@ package org.apache.spark.sql.catalyst.util
* sensitive or insensitive.
*/
object StringKeyHashMap {
- def apply[T](caseSensitive: Boolean): StringKeyHashMap[T] = caseSensitive match {
- case false => new StringKeyHashMap[T](_.toLowerCase)
- case true => new StringKeyHashMap[T](identity)
+ def apply[T](caseSensitive: Boolean): StringKeyHashMap[T] = if (caseSensitive) {
+ new StringKeyHashMap[T](identity)
+ } else {
+ new StringKeyHashMap[T](_.toLowerCase)
}
}
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/AnalysisErrorSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/AnalysisErrorSuite.scala
index e7c8615bc5..21afe9fec5 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/AnalysisErrorSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/AnalysisErrorSuite.scala
@@ -414,11 +414,10 @@ class AnalysisErrorSuite extends AnalysisTest {
AttributeReference("a", dataType)(exprId = ExprId(2)),
AttributeReference("b", IntegerType)(exprId = ExprId(1))))
- shouldSuccess match {
- case true =>
- assertAnalysisSuccess(plan, true)
- case false =>
- assertAnalysisError(plan, "expression `a` cannot be used as a grouping expression" :: Nil)
+ if (shouldSuccess) {
+ assertAnalysisSuccess(plan, true)
+ } else {
+ assertAnalysisError(plan, "expression `a` cannot be used as a grouping expression" :: Nil)
}
}
diff --git a/streaming/src/main/scala/org/apache/spark/streaming/api/java/JavaPairDStream.scala b/streaming/src/main/scala/org/apache/spark/streaming/api/java/JavaPairDStream.scala
index dec983165f..da9ff85885 100644
--- a/streaming/src/main/scala/org/apache/spark/streaming/api/java/JavaPairDStream.scala
+++ b/streaming/src/main/scala/org/apache/spark/streaming/api/java/JavaPairDStream.scala
@@ -471,9 +471,10 @@ class JavaPairDStream[K, V](val dstream: DStream[(K, V)])(
val list: JList[V] = values.asJava
val scalaState: Optional[S] = JavaUtils.optionToOptional(state)
val result: Optional[S] = in.apply(list, scalaState)
- result.isPresent match {
- case true => Some(result.get())
- case _ => None
+ if (result.isPresent) {
+ Some(result.get())
+ } else {
+ None
}
}
scalaFunc