From e75c162e9e510d74b07f28ccf6c7948ac317a7c6 Mon Sep 17 00:00:00 2001
From: Shivansh
Date: Sun, 4 Sep 2016 12:39:26 +0100
Subject: [SPARK-17308] Improved the Spark core code by replacing all pattern
 matches on boolean values with if/else blocks

## What changes were proposed in this pull request?

Improved the code quality of Spark by replacing all pattern matches on boolean values with if/else blocks.

## How was this patch tested?

By running the existing tests.

Author: Shivansh

Closes #14873 from shiv4nsh/SPARK-17308.
---
 .../scala/org/apache/spark/sql/catalyst/trees/TreeNode.scala          | 7 ++++---
 .../org/apache/spark/sql/catalyst/util/StringKeyHashMap.scala         | 7 ++++---
 .../apache/spark/sql/catalyst/analysis/AnalysisErrorSuite.scala       | 9 ++++-----
 3 files changed, 12 insertions(+), 11 deletions(-)

diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/trees/TreeNode.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/trees/TreeNode.scala
index 24a2dc9d3b..037f8cb287 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/trees/TreeNode.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/trees/TreeNode.scala
@@ -103,9 +103,10 @@ abstract class TreeNode[BaseType <: TreeNode[BaseType]] extends Product {
    * Find the first [[TreeNode]] that satisfies the condition specified by `f`.
    * The condition is recursively applied to this node and all of its children (pre-order).
    */
-  def find(f: BaseType => Boolean): Option[BaseType] = f(this) match {
-    case true => Some(this)
-    case false => children.foldLeft(Option.empty[BaseType]) { (l, r) => l.orElse(r.find(f)) }
+  def find(f: BaseType => Boolean): Option[BaseType] = if (f(this)) {
+    Some(this)
+  } else {
+    children.foldLeft(Option.empty[BaseType]) { (l, r) => l.orElse(r.find(f)) }
   }
 
   /**
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/StringKeyHashMap.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/StringKeyHashMap.scala
index d5d151a580..a7ac613683 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/StringKeyHashMap.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/StringKeyHashMap.scala
@@ -22,9 +22,10 @@ package org.apache.spark.sql.catalyst.util
  * sensitive or insensitive.
  */
 object StringKeyHashMap {
-  def apply[T](caseSensitive: Boolean): StringKeyHashMap[T] = caseSensitive match {
-    case false => new StringKeyHashMap[T](_.toLowerCase)
-    case true => new StringKeyHashMap[T](identity)
+  def apply[T](caseSensitive: Boolean): StringKeyHashMap[T] = if (caseSensitive) {
+    new StringKeyHashMap[T](identity)
+  } else {
+    new StringKeyHashMap[T](_.toLowerCase)
   }
 }
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/AnalysisErrorSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/AnalysisErrorSuite.scala
index e7c8615bc5..21afe9fec5 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/AnalysisErrorSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/AnalysisErrorSuite.scala
@@ -414,11 +414,10 @@ class AnalysisErrorSuite extends AnalysisTest {
         AttributeReference("a", dataType)(exprId = ExprId(2)),
         AttributeReference("b", IntegerType)(exprId = ExprId(1))))
 
-    shouldSuccess match {
-      case true =>
-        assertAnalysisSuccess(plan, true)
-      case false =>
-        assertAnalysisError(plan, "expression `a` cannot be used as a grouping expression" :: Nil)
+    if (shouldSuccess) {
+      assertAnalysisSuccess(plan, true)
+    } else {
+      assertAnalysisError(plan, "expression `a` cannot be used as a grouping expression" :: Nil)
     }
   }
--
cgit v1.2.3
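
The three hunks above all apply the same mechanical rewrite: a `match` over a Boolean scrutinee becomes the equivalent if/else expression. The following is a minimal, self-contained Scala sketch of that pattern; the `BooleanMatchRefactor` object and `normalizeKey` helpers are hypothetical names used only for illustration and are not part of the Spark codebase.

    // Illustrative only: object and method names here are hypothetical.
    object BooleanMatchRefactor {

      // Before: an exhaustive pattern match on a Boolean value.
      def normalizeKeyWithMatch(caseSensitive: Boolean, key: String): String =
        caseSensitive match {
          case true  => key
          case false => key.toLowerCase
        }

      // After: the same logic expressed as an if/else, mirroring the patch.
      def normalizeKey(caseSensitive: Boolean, key: String): String =
        if (caseSensitive) key else key.toLowerCase

      def main(args: Array[String]): Unit = {
        // Both variants behave identically.
        println(normalizeKeyWithMatch(false, "SparkSQL")) // prints: sparksql
        println(normalizeKey(false, "SparkSQL"))          // prints: sparksql
      }
    }

An if/else over a Boolean is exhaustive by construction, so nothing is lost relative to the match; the branches simply read more directly, which is the code-quality improvement the pull request description refers to.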