author     Dongjoon Hyun <dongjoon@apache.org>  2016-03-10 15:57:22 -0800
committer  Andrew Or <andrew@databricks.com>    2016-03-10 15:57:22 -0800
commit     91fed8e9c57764eca9463d129ecd68196db7f566 (patch)
tree       b06c678dc15258af92116019760e6b9c98d81c2d /sql
parent     81d48532d954a8aea28d7e1fb3aa32a78c708b63 (diff)
[SPARK-3854][BUILD] Scala style: require spaces before `{`.
## What changes were proposed in this pull request?

Since the opening curly brace, '{', has many usages as discussed in [SPARK-3854](https://issues.apache.org/jira/browse/SPARK-3854), this PR adds a ScalaStyle rule that forbids the '){' pattern, the most common violation, and fixes the existing code accordingly. Enforcing this in ScalaStyle from now on will improve Scala code quality and reduce review time.

```
// Correct:
if (true) {
  println("Wow!")
}

// Incorrect:
if (true){
  println("Wow!")
}
```

IntelliJ also shows new warnings based on this rule.

## How was this patch tested?

Passed the Jenkins ScalaStyle test.

Author: Dongjoon Hyun <dongjoon@apache.org>

Closes #11637 from dongjoon-hyun/SPARK-3854.
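The same rule covers block arguments that follow a parenthesized argument list, which is the shape of most call sites fixed in this diff (e.g. `test(name) {` and `withPlan[T](other) {`). A minimal, self-contained Scala sketch of that case; the `withLabel` helper is a hypothetical stand-in, not code from this patch:

```scala
object BraceStyleExample {
  // A hypothetical higher-order helper, standing in for methods like
  // `test(...)` or `withPlan(...)` that take a trailing block argument.
  def withLabel(label: String)(body: => Unit): Unit = {
    println(s"[$label]")
    body
  }

  def main(args: Array[String]): Unit = {
    // Incorrect under the new rule: '){' with no separating space.
    // withLabel("query"){ println("running") }

    // Correct: a single space between ')' and '{'.
    withLabel("query") { println("running") }
  }
}
```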
Diffstat (limited to 'sql')
-rw-r--r--  sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/complexTypeCreator.scala | 2
-rw-r--r--  sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/mathExpressions.scala | 8
-rw-r--r--  sql/catalyst/src/test/scala/org/apache/spark/sql/RowTest.scala | 2
-rw-r--r--  sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/FilterPushdownSuite.scala | 2
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala | 2
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala | 2
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/CompressionCodecs.scala | 2
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/execution/ui/ExecutionPage.scala | 2
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetIOSuite.scala | 2
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/client/FiltersSuite.scala | 2
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveUDFSuite.scala | 2
11 files changed, 14 insertions(+), 14 deletions(-)
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/complexTypeCreator.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/complexTypeCreator.scala
index 87e43429e6..efd75295b2 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/complexTypeCreator.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/complexTypeCreator.scala
@@ -159,7 +159,7 @@ case class CreateNamedStruct(children: Seq[Expression]) extends Expression {
         TypeCheckResult.TypeCheckFailure(
           s"Only foldable StringType expressions are allowed to appear at odd position , got :" +
           s" ${invalidNames.mkString(",")}")
-      } else if (!names.contains(null)){
+      } else if (!names.contains(null)) {
         TypeCheckResult.TypeCheckSuccess
       } else {
         TypeCheckResult.TypeCheckFailure("Field name should not be null")
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/mathExpressions.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/mathExpressions.scala
index bc2df0fb4a..12fcc40376 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/mathExpressions.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/mathExpressions.scala
@@ -806,14 +806,14 @@ case class Round(child: Expression, scale: Expression)
       case FloatType => // if child eval to NaN or Infinity, just return it.
         if (_scale == 0) {
           s"""
-          if (Float.isNaN(${ce.value}) || Float.isInfinite(${ce.value})){
+          if (Float.isNaN(${ce.value}) || Float.isInfinite(${ce.value})) {
             ${ev.value} = ${ce.value};
           } else {
             ${ev.value} = Math.round(${ce.value});
           }"""
         } else {
           s"""
-          if (Float.isNaN(${ce.value}) || Float.isInfinite(${ce.value})){
+          if (Float.isNaN(${ce.value}) || Float.isInfinite(${ce.value})) {
             ${ev.value} = ${ce.value};
           } else {
             ${ev.value} = java.math.BigDecimal.valueOf(${ce.value}).
@@ -823,14 +823,14 @@ case class Round(child: Expression, scale: Expression)
       case DoubleType => // if child eval to NaN or Infinity, just return it.
         if (_scale == 0) {
           s"""
-          if (Double.isNaN(${ce.value}) || Double.isInfinite(${ce.value})){
+          if (Double.isNaN(${ce.value}) || Double.isInfinite(${ce.value})) {
             ${ev.value} = ${ce.value};
           } else {
             ${ev.value} = Math.round(${ce.value});
           }"""
         } else {
           s"""
-          if (Double.isNaN(${ce.value}) || Double.isInfinite(${ce.value})){
+          if (Double.isNaN(${ce.value}) || Double.isInfinite(${ce.value})) {
             ${ev.value} = ${ce.value};
           } else {
             ${ev.value} = java.math.BigDecimal.valueOf(${ce.value}).
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/RowTest.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/RowTest.scala
index 1e7118144f..d9577dea1b 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/RowTest.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/RowTest.scala
@@ -86,7 +86,7 @@ class RowTest extends FunSpec with Matchers {
       }
     }
 
-    it("getAs() on type extending AnyVal does not throw exception when value is null"){
+    it("getAs() on type extending AnyVal does not throw exception when value is null") {
       sampleRowWithoutCol3.getAs[String](sampleRowWithoutCol3.fieldIndex("col1")) shouldBe null
     }
   }
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/FilterPushdownSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/FilterPushdownSuite.scala
index 97a0cde381..a636d63012 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/FilterPushdownSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/FilterPushdownSuite.scala
@@ -535,7 +535,7 @@ class FilterPushdownSuite extends PlanTest {
     // Filter("c" > 6)
     assertResult(classOf[Filter])(optimized.getClass)
     assertResult(1)(optimized.asInstanceOf[Filter].condition.references.size)
-    assertResult("c"){
+    assertResult("c") {
       optimized.asInstanceOf[Filter].condition.references.toSeq(0).name
     }
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala b/sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala
index 339e61e572..24f61992d4 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala
@@ -1147,7 +1147,7 @@ class DataFrame private[sql](
    * columns of the input row are implicitly joined with each value that is output by the function.
    *
    * {{{
-   *   df.explode("words", "word"){words: String => words.split(" ")}
+   *   df.explode("words", "word") {words: String => words.split(" ")}
    * }}}
    * @group dfops
    * @since 1.3.0
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala b/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala
index dd1fbcf3c8..daddf6e0c5 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala
@@ -605,7 +605,7 @@ class Dataset[T] private[sql](
    * duplicate items. As such, it is analogous to `UNION ALL` in SQL.
    * @since 1.6.0
    */
-  def union(other: Dataset[T]): Dataset[T] = withPlan[T](other){ (left, right) =>
+  def union(other: Dataset[T]): Dataset[T] = withPlan[T](other) { (left, right) =>
     // This breaks caching, but it's usually ok because it addresses a very specific use case:
     // using union to union many files or partitions.
     CombineUnions(Union(left, right))
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/CompressionCodecs.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/CompressionCodecs.scala
index 032ba61d9d..41cff07472 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/CompressionCodecs.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/CompressionCodecs.scala
@@ -57,7 +57,7 @@ private[datasources] object CompressionCodecs {
    * `codec` should be a full class path
    */
   def setCodecConfiguration(conf: Configuration, codec: String): Unit = {
-    if (codec != null){
+    if (codec != null) {
       conf.set("mapreduce.output.fileoutputformat.compress", "true")
       conf.set("mapreduce.output.fileoutputformat.compress.type", CompressionType.BLOCK.toString)
       conf.set("mapreduce.output.fileoutputformat.compress.codec", codec)
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/ui/ExecutionPage.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/ui/ExecutionPage.scala
index 49915adf6c..9d3cd9bb14 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/ui/ExecutionPage.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/ui/ExecutionPage.scala
@@ -114,7 +114,7 @@ private[sql] class ExecutionPage(parent: SQLTab) extends WebUIPage("execution")
         {metadata}
       </div>
       {planVisualizationResources}
-      <script>$(function(){{ renderPlanViz(); }})</script>
+      <script>$(function() {{ renderPlanViz(); }})</script>
     </div>
   }
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetIOSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetIOSuite.scala
index 34e914cb1e..b7834d76cc 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetIOSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetIOSuite.scala
@@ -145,7 +145,7 @@ class ParquetIOSuite extends QueryTest with ParquetTest with SharedSQLContext {
     withTempPath { dir =>
       val data = makeDecimalRDD(DecimalType(precision, scale))
       data.write.parquet(dir.getCanonicalPath)
-      readParquetFile(dir.getCanonicalPath){ df => {
+      readParquetFile(dir.getCanonicalPath) { df => {
        checkAnswer(df, data.collect().toSeq)
      }}
    }
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/FiltersSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/FiltersSuite.scala
index 5e7b93d457..16b2d042a2 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/FiltersSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/FiltersSuite.scala
@@ -65,7 +65,7 @@ class FiltersSuite extends SparkFunSuite with Logging {
     "")
 
   private def filterTest(name: String, filters: Seq[Expression], result: String) = {
-    test(name){
+    test(name) {
      val converted = shim.convertFilters(testTable, filters)
      if (converted != result) {
        fail(
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveUDFSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveUDFSuite.scala
index 703cfffee1..d7c529ab0e 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveUDFSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveUDFSuite.scala
@@ -361,7 +361,7 @@ class HiveUDFSuite extends QueryTest with TestHiveSingleton with SQLTestUtils {
     }
   }
 
-  test("SPARK-11522 select input_file_name from non-parquet table"){
+  test("SPARK-11522 select input_file_name from non-parquet table") {
     withTempDir { tempDir =>