author    Dongjoon Hyun <dongjoon@apache.org>  2016-04-12 00:43:28 -0700
committer Reynold Xin <rxin@databricks.com>    2016-04-12 00:43:28 -0700
commit b0f5497e9520575e5082fa8ce8be5569f43abe74 (patch)
tree   efd349be7227cf20616712fc7376b7c2f11f6614 /sql
parent 678b96e77bf77a64b8df14b19db5a3bb18febfe3 (diff)
[SPARK-14508][BUILD] Add a new ScalaStyle Rule `OmitBracesInCase`
## What changes were proposed in this pull request?

According to the [Spark Code Style Guide](https://cwiki.apache.org/confluence/display/SPARK/Spark+Code+Style+Guide) and the [Scala Style Guide](http://docs.scala-lang.org/style/control-structures.html#curlybraces), we should enforce the following rule:

```
case: Always omit braces in case clauses.
```

This PR adds a new ScalaStyle rule, `OmitBracesInCase`, and applies it across the codebase.

## How was this patch tested?

Pass the Jenkins tests (including the Scala style checks).

Author: Dongjoon Hyun <dongjoon@apache.org>

Closes #12280 from dongjoon-hyun/SPARK-14508.
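To make the rule concrete, here is a minimal, self-contained Scala sketch; the object and value names are illustrative, not from the patch. The commented-out form is what the new check flags; the form below it is the style the rule enforces, and the diffs that follow apply exactly this transformation throughout the sql module.

```scala
// Illustrative only: the names here are hypothetical, not from this patch.
object OmitBracesInCaseExample {
  val pairs = Seq((1, 2), (3, 4))

  // Flagged by the new `OmitBracesInCase` check: redundant braces after `=>`.
  // val sums = pairs.map { case (a, b) => {
  //   a + b
  // }}

  // Enforced style: the case arrow already delimits the body, so the
  // extra braces are omitted.
  val sums = pairs.map { case (a, b) =>
    a + b
  }
}
```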
Diffstat (limited to 'sql')
-rw-r--r--  sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/Cast.scala                   |  3
-rw-r--r--  sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/EquivalentExpressions.scala  |  4
-rw-r--r--  sql/catalyst/src/test/scala/org/apache/spark/sql/RandomDataGenerator.scala                         | 18
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala                                  |  9
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/execution/vectorized/ColumnarBatchSuite.scala         |  8
5 files changed, 14 insertions(+), 28 deletions(-)
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/Cast.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/Cast.scala
index d842ffdc66..0f8876a9e6 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/Cast.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/Cast.scala
@@ -898,7 +898,7 @@ case class Cast(child: Expression, dataType: DataType) extends UnaryExpression w
val result = ctx.freshName("result")
val tmpRow = ctx.freshName("tmpRow")
- val fieldsEvalCode = fieldsCasts.zipWithIndex.map { case (cast, i) => {
+ val fieldsEvalCode = fieldsCasts.zipWithIndex.map { case (cast, i) =>
val fromFieldPrim = ctx.freshName("ffp")
val fromFieldNull = ctx.freshName("ffn")
val toFieldPrim = ctx.freshName("tfp")
@@ -920,7 +920,6 @@ case class Cast(child: Expression, dataType: DataType) extends UnaryExpression w
}
}
"""
- }
}.mkString("\n")
(c, evPrim, evNull) =>
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/EquivalentExpressions.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/EquivalentExpressions.scala
index affd1bdb32..8d8cc152ff 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/EquivalentExpressions.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/EquivalentExpressions.scala
@@ -97,11 +97,11 @@ class EquivalentExpressions {
def debugString(all: Boolean = false): String = {
val sb: mutable.StringBuilder = new StringBuilder()
sb.append("Equivalent expressions:\n")
- equivalenceMap.foreach { case (k, v) => {
+ equivalenceMap.foreach { case (k, v) =>
if (all || v.length > 1) {
sb.append(" " + v.mkString(", ")).append("\n")
}
- }}
+ }
sb.toString()
}
}
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/RandomDataGenerator.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/RandomDataGenerator.scala
index 8207d64798..711e870711 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/RandomDataGenerator.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/RandomDataGenerator.scala
@@ -196,12 +196,11 @@ object RandomDataGenerator {
case ShortType => randomNumeric[Short](
rand, _.nextInt().toShort, Seq(Short.MinValue, Short.MaxValue, 0.toShort))
case NullType => Some(() => null)
- case ArrayType(elementType, containsNull) => {
+ case ArrayType(elementType, containsNull) =>
forType(elementType, nullable = containsNull, rand).map {
elementGenerator => () => Seq.fill(rand.nextInt(MAX_ARR_SIZE))(elementGenerator())
}
- }
- case MapType(keyType, valueType, valueContainsNull) => {
+ case MapType(keyType, valueType, valueContainsNull) =>
for (
keyGenerator <- forType(keyType, nullable = false, rand);
valueGenerator <-
@@ -221,8 +220,7 @@ object RandomDataGenerator {
keys.zip(values).toMap
}
}
- }
- case StructType(fields) => {
+ case StructType(fields) =>
val maybeFieldGenerators: Seq[Option[() => Any]] = fields.map { field =>
forType(field.dataType, nullable = field.nullable, rand)
}
@@ -232,8 +230,7 @@ object RandomDataGenerator {
} else {
None
}
- }
- case udt: UserDefinedType[_] => {
+ case udt: UserDefinedType[_] =>
val maybeSqlTypeGenerator = forType(udt.sqlType, nullable, rand)
// Because the random data generator here returns a Scala value, we need to
// convert it to a Catalyst value before calling the UDT's deserialize.
@@ -253,7 +250,6 @@ object RandomDataGenerator {
} else {
None
}
- }
case unsupportedType => None
}
// Handle nullability by wrapping the non-null value generator:
@@ -277,7 +273,7 @@ object RandomDataGenerator {
val fields = mutable.ArrayBuffer.empty[Any]
schema.fields.foreach { f =>
f.dataType match {
- case ArrayType(childType, nullable) => {
+ case ArrayType(childType, nullable) =>
val data = if (f.nullable && rand.nextFloat() <= PROBABILITY_OF_NULL) {
null
} else {
@@ -294,10 +290,8 @@ object RandomDataGenerator {
arr
}
fields += data
- }
- case StructType(children) => {
+ case StructType(children) =>
fields += randomRow(rand, StructType(children))
- }
case _ =>
val generator = RandomDataGenerator.forType(f.dataType, f.nullable, rand)
assert(generator.isDefined, "Unsupported type")
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala
index 86c6405522..e953a6e8ef 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala
@@ -1153,14 +1153,12 @@ class DataFrameSuite extends QueryTest with SharedSQLContext {
private def verifyNonExchangingAgg(df: DataFrame) = {
var atFirstAgg: Boolean = false
df.queryExecution.executedPlan.foreach {
- case agg: TungstenAggregate => {
+ case agg: TungstenAggregate =>
atFirstAgg = !atFirstAgg
- }
- case _ => {
+ case _ =>
if (atFirstAgg) {
fail("Should not have operators between the two aggregations")
}
- }
}
}
@@ -1170,12 +1168,11 @@ class DataFrameSuite extends QueryTest with SharedSQLContext {
private def verifyExchangingAgg(df: DataFrame) = {
var atFirstAgg: Boolean = false
df.queryExecution.executedPlan.foreach {
- case agg: TungstenAggregate => {
+ case agg: TungstenAggregate =>
if (atFirstAgg) {
fail("Should not have back to back Aggregates")
}
atFirstAgg = true
- }
case e: ShuffleExchange => atFirstAgg = false
case _ =>
}
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/vectorized/ColumnarBatchSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/vectorized/ColumnarBatchSuite.scala
index 8a551cd78c..31b63f2ce1 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/vectorized/ColumnarBatchSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/vectorized/ColumnarBatchSuite.scala
@@ -612,23 +612,20 @@ class ColumnarBatchSuite extends SparkFunSuite {
val a2 = r2.getList(v._2).toArray
assert(a1.length == a2.length, "Seed = " + seed)
childType match {
- case DoubleType => {
+ case DoubleType =>
var i = 0
while (i < a1.length) {
assert(doubleEquals(a1(i).asInstanceOf[Double], a2(i).asInstanceOf[Double]),
"Seed = " + seed)
i += 1
}
- }
- case FloatType => {
+ case FloatType =>
var i = 0
while (i < a1.length) {
assert(doubleEquals(a1(i).asInstanceOf[Float], a2(i).asInstanceOf[Float]),
"Seed = " + seed)
i += 1
}
- }
-
case t: DecimalType =>
var i = 0
while (i < a1.length) {
@@ -640,7 +637,6 @@ class ColumnarBatchSuite extends SparkFunSuite {
}
i += 1
}
-
case _ => assert(a1 === a2, "Seed = " + seed)
}
case StructType(childFields) =>