author    Kousuke Saruta <sarutak@oss.nttdata.co.jp>    2016-01-12 22:25:20 -0800
committer Reynold Xin <rxin@databricks.com>             2016-01-12 22:25:20 -0800
commit    cb7b864a24db4826e2942c186afe3cb8bd788b03 (patch)
tree      656631f751928b697ad2c48f45338d9dca0864fc
parent    dc7b3870fcfc2723319dbb8c53d721211a8116be (diff)
[SPARK-12692][BUILD][SQL] Scala style: Fix the style violation (Space before ",")
Fix the style violation (space before "," and ":"). This PR is a follow-up for #10643 and a rework of #10685.

Author: Kousuke Saruta <sarutak@oss.nttdata.co.jp>

Closes #10732 from sarutak/SPARK-12692-followup-sql.
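For context, the rule being enforced is purely about whitespace placement. A minimal illustration (mine, not from the patch):

    // Violates the checked rule: space before ","
    val bad = Seq(("a", 1) , ("b", 2))
    // Compliant: the comma attaches to the preceding token
    val good = Seq(("a", 1), ("b", 2))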
-rw-r--r--  sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/SqlParser.scala | 2
-rw-r--r--  sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/NumberConverter.scala | 2
-rw-r--r--  sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/BooleanSimplificationSuite.scala | 2
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala | 2
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/execution/Exchange.scala | 2
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/DatasetCacheSuite.scala | 2
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/DatasetSuite.scala | 24
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/JsonSuite.scala | 2
-rw-r--r--  sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLCLIDriver.scala | 2
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertIntoHiveTableSuite.scala | 4
10 files changed, 22 insertions, 22 deletions
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/SqlParser.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/SqlParser.scala
index 2a132d8b82..6ec408a673 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/SqlParser.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/SqlParser.scala
@@ -203,7 +203,7 @@ object SqlParser extends AbstractSparkSQLParser with DataTypeParser {
)
protected lazy val ordering: Parser[Seq[SortOrder]] =
- ( rep1sep(expression ~ direction.? , ",") ^^ {
+ ( rep1sep(expression ~ direction.?, ",") ^^ {
case exps => exps.map(pair => SortOrder(pair._1, pair._2.getOrElse(Ascending)))
}
)
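The rule above comes from Scala's parser-combinator library: `rep1sep(p, ",")` parses one or more `p` separated by commas, and `direction.?` makes the sort direction optional so it can default to `Ascending`. A standalone sketch of the same shape, with hypothetical names and assuming the scala-parser-combinators module is available:

    import scala.util.parsing.combinator.RegexParsers

    object OrderByDemo extends RegexParsers {
      val column: Parser[String] = "[a-zA-Z_]+".r
      val direction: Parser[String] = "ASC" | "DESC"
      // One or more "column [direction]" items separated by commas;
      // a missing direction defaults to ASC, like Ascending above.
      val ordering: Parser[Seq[(String, String)]] =
        rep1sep(column ~ direction.?, ",") ^^ { exps =>
          exps.map(pair => (pair._1, pair._2.getOrElse("ASC")))
        }
    }

    // OrderByDemo.parseAll(OrderByDemo.ordering, "a DESC, b")
    // yields Success(List((a,DESC), (b,ASC)), ...)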
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/NumberConverter.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/NumberConverter.scala
index 9fefc5656a..e4417e0955 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/NumberConverter.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/NumberConverter.scala
@@ -122,7 +122,7 @@ object NumberConverter {
* unsigned, otherwise it is signed.
* NB: This logic is borrowed from org.apache.hadoop.hive.ql.ud.UDFConv
*/
- def convert(n: Array[Byte] , fromBase: Int, toBase: Int ): UTF8String = {
+ def convert(n: Array[Byte], fromBase: Int, toBase: Int ): UTF8String = {
if (fromBase < Character.MIN_RADIX || fromBase > Character.MAX_RADIX
|| Math.abs(toBase) < Character.MIN_RADIX
|| Math.abs(toBase) > Character.MAX_RADIX) {
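The method being touched backs SQL's `conv` function: it reinterprets the digits in `n` from `fromBase` into `toBase`. A usage sketch, assuming spark-catalyst and its `UTF8String` dependency are on the classpath:

    import org.apache.spark.sql.catalyst.util.NumberConverter
    import org.apache.spark.unsafe.types.UTF8String

    // "100" in base 2 is 4 in base 10, mirroring Hive's conv() UDF.
    val result: UTF8String =
      NumberConverter.convert(UTF8String.fromString("100").getBytes, 2, 10)
    // result.toString == "4"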
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/BooleanSimplificationSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/BooleanSimplificationSuite.scala
index 000a3b7ecb..6932f185b9 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/BooleanSimplificationSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/BooleanSimplificationSuite.scala
@@ -80,7 +80,7 @@ class BooleanSimplificationSuite extends PlanTest with PredicateHelper {
checkCondition(('a < 2 || 'a > 3 || 'b > 5) && 'a < 2, 'a < 2)
- checkCondition('a < 2 && ('a < 2 || 'a > 3 || 'b > 5) , 'a < 2)
+ checkCondition('a < 2 && ('a < 2 || 'a > 3 || 'b > 5), 'a < 2)
checkCondition(('a < 2 || 'b > 3) && ('a < 2 || 'c > 5), 'a < 2 || ('b > 3 && 'c > 5))
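Both assertions in this hunk exercise the absorption law, A && (A || B) == A, which lets the optimizer drop the redundant disjunction. A quick standalone check of the law itself (illustrative, not the suite's helper):

    // Exhaustively verify A && (A || B) == A over all boolean inputs.
    val holds = for {
      a <- Seq(true, false)
      b <- Seq(true, false)
    } yield (a && (a || b)) == a
    assert(holds.forall(identity))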
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala b/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
index 2dd82358fb..b909765a7c 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
@@ -945,7 +945,7 @@ class SQLContext private[sql](
}
}
- // Register a succesfully instantiatd context to the singleton. This should be at the end of
+ // Register a successfully instantiated context to the singleton. This should be at the end of
// the class definition so that the singleton is updated only if there is no exception in the
// construction of the instance.
sparkContext.addSparkListener(new SparkListener {
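The comment describes a safe-publication idiom: register `this` in shared state only as the last statement of construction, so a constructor that throws never leaks a half-built instance. A minimal sketch of that pattern, with hypothetical names:

    import java.util.concurrent.atomic.AtomicReference

    object ContextRegistry {
      val instantiated = new AtomicReference[DemoContext]()
    }

    class DemoContext(failDuringInit: Boolean) {
      if (failDuringInit) throw new IllegalStateException("init failed")
      // Publish `this` only after every earlier initialization step succeeded,
      // mirroring the comment above: register at the very end of the class body.
      ContextRegistry.instantiated.set(this)
    }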
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/Exchange.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/Exchange.scala
index 6b10057707..058d147c7d 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/Exchange.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/Exchange.scala
@@ -223,7 +223,7 @@ case class Exchange(
new ShuffledRowRDD(shuffleDependency, specifiedPartitionStartIndices)
}
- protected override def doExecute(): RDD[InternalRow] = attachTree(this , "execute") {
+ protected override def doExecute(): RDD[InternalRow] = attachTree(this, "execute") {
coordinator match {
case Some(exchangeCoordinator) =>
val shuffleRDD = exchangeCoordinator.postShuffleRDD(this)
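`attachTree` wraps the body so failures are re-thrown with the operator tree attached for debugging; the change only fixes the spacing of its arguments. A simplified stand-in with the same shape (not catalyst's actual implementation):

    // Run `block`; on failure, re-throw with a message naming the failing context.
    def attachTreeSketch[T](context: String)(block: => T): T =
      try block catch {
        case e: Exception =>
          throw new RuntimeException(s"Error while $context", e)
      }

    // Usage, by analogy with the call above:
    // attachTreeSketch("execute") { buildShuffledRowRDD() }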
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DatasetCacheSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/DatasetCacheSuite.scala
index 3a283a4e1f..848f1af655 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/DatasetCacheSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/DatasetCacheSuite.scala
@@ -27,7 +27,7 @@ class DatasetCacheSuite extends QueryTest with SharedSQLContext {
import testImplicits._
test("persist and unpersist") {
- val ds = Seq(("a", 1) , ("b", 2), ("c", 3)).toDS().select(expr("_2 + 1").as[Int])
+ val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDS().select(expr("_2 + 1").as[Int])
val cached = ds.cache()
// count triggers the caching action. It should not throw.
cached.count()
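As the test's own comment notes, `cache()` is lazy: it only marks the Dataset for caching, and the first action (here `count()`) materializes it. The same flow outside the suite, assuming an active session available as `spark`:

    import spark.implicits._

    val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDS()
    val cached = ds.cache()  // lazy: only marks the plan for caching
    cached.count()           // the first action computes and populates the cache
    cached.unpersist()       // releases the cached data when done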
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DatasetSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/DatasetSuite.scala
index 53b5f45c2d..693f5aea2d 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/DatasetSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/DatasetSuite.scala
@@ -30,7 +30,7 @@ class DatasetSuite extends QueryTest with SharedSQLContext {
import testImplicits._
test("toDS") {
- val data = Seq(("a", 1) , ("b", 2), ("c", 3))
+ val data = Seq(("a", 1), ("b", 2), ("c", 3))
checkAnswer(
data.toDS(),
data: _*)
@@ -87,7 +87,7 @@ class DatasetSuite extends QueryTest with SharedSQLContext {
}
test("as case class / collect") {
- val ds = Seq(("a", 1) , ("b", 2), ("c", 3)).toDF("a", "b").as[ClassData]
+ val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDF("a", "b").as[ClassData]
checkAnswer(
ds,
ClassData("a", 1), ClassData("b", 2), ClassData("c", 3))
@@ -105,7 +105,7 @@ class DatasetSuite extends QueryTest with SharedSQLContext {
}
test("map") {
- val ds = Seq(("a", 1) , ("b", 2), ("c", 3)).toDS()
+ val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDS()
checkAnswer(
ds.map(v => (v._1, v._2 + 1)),
("a", 2), ("b", 3), ("c", 4))
@@ -124,14 +124,14 @@ class DatasetSuite extends QueryTest with SharedSQLContext {
}
test("select") {
- val ds = Seq(("a", 1) , ("b", 2), ("c", 3)).toDS()
+ val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDS()
checkAnswer(
ds.select(expr("_2 + 1").as[Int]),
2, 3, 4)
}
test("select 2") {
- val ds = Seq(("a", 1) , ("b", 2), ("c", 3)).toDS()
+ val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDS()
checkAnswer(
ds.select(
expr("_1").as[String],
@@ -140,7 +140,7 @@ class DatasetSuite extends QueryTest with SharedSQLContext {
}
test("select 2, primitive and tuple") {
- val ds = Seq(("a", 1) , ("b", 2), ("c", 3)).toDS()
+ val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDS()
checkAnswer(
ds.select(
expr("_1").as[String],
@@ -149,7 +149,7 @@ class DatasetSuite extends QueryTest with SharedSQLContext {
}
test("select 2, primitive and class") {
- val ds = Seq(("a", 1) , ("b", 2), ("c", 3)).toDS()
+ val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDS()
checkAnswer(
ds.select(
expr("_1").as[String],
@@ -158,7 +158,7 @@ class DatasetSuite extends QueryTest with SharedSQLContext {
}
test("select 2, primitive and class, fields reordered") {
- val ds = Seq(("a", 1) , ("b", 2), ("c", 3)).toDS()
+ val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDS()
checkDecoding(
ds.select(
expr("_1").as[String],
@@ -167,28 +167,28 @@ class DatasetSuite extends QueryTest with SharedSQLContext {
}
test("filter") {
- val ds = Seq(("a", 1) , ("b", 2), ("c", 3)).toDS()
+ val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDS()
checkAnswer(
ds.filter(_._1 == "b"),
("b", 2))
}
test("foreach") {
- val ds = Seq(("a", 1) , ("b", 2), ("c", 3)).toDS()
+ val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDS()
val acc = sparkContext.accumulator(0)
ds.foreach(v => acc += v._2)
assert(acc.value == 6)
}
test("foreachPartition") {
- val ds = Seq(("a", 1) , ("b", 2), ("c", 3)).toDS()
+ val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDS()
val acc = sparkContext.accumulator(0)
ds.foreachPartition(_.foreach(v => acc += v._2))
assert(acc.value == 6)
}
test("reduce") {
- val ds = Seq(("a", 1) , ("b", 2), ("c", 3)).toDS()
+ val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDS()
assert(ds.reduce((a, b) => ("sum", a._2 + b._2)) == ("sum", 6))
}
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/JsonSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/JsonSuite.scala
index 4ab148065a..860e07c68c 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/JsonSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/JsonSuite.scala
@@ -206,7 +206,7 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData {
StructType(
StructField("f1", IntegerType, true) ::
StructField("f2", IntegerType, true) :: Nil),
- StructType(StructField("f1", LongType, true) :: Nil) ,
+ StructType(StructField("f1", LongType, true) :: Nil),
StructType(
StructField("f1", LongType, true) ::
StructField("f2", IntegerType, true) :: Nil))
diff --git a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLCLIDriver.scala b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLCLIDriver.scala
index 03bc830df2..f279b78f47 100644
--- a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLCLIDriver.scala
+++ b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLCLIDriver.scala
@@ -369,7 +369,7 @@ private[hive] class SparkSQLCLIDriver extends CliDriver with Logging {
if (counter != 0) {
responseMsg += s", Fetched $counter row(s)"
}
- console.printInfo(responseMsg , null)
+ console.printInfo(responseMsg, null)
// Destroy the driver to release all the locks.
driver.destroy()
} else {
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertIntoHiveTableSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertIntoHiveTableSuite.scala
index da7303c791..40e9c9362c 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertIntoHiveTableSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertIntoHiveTableSuite.scala
@@ -154,8 +154,8 @@ class InsertIntoHiveTableSuite extends QueryTest with TestHiveSingleton with Bef
}
val expected = List(
"p1=a"::"p2=b"::"p3=c"::"p4=c"::"p5=2"::Nil,
- "p1=a"::"p2=b"::"p3=c"::"p4=c"::"p5=3"::Nil ,
- "p1=a"::"p2=b"::"p3=c"::"p4=c"::"p5=1"::Nil ,
+ "p1=a"::"p2=b"::"p3=c"::"p4=c"::"p5=3"::Nil,
+ "p1=a"::"p2=b"::"p3=c"::"p4=c"::"p5=1"::Nil,
"p1=a"::"p2=b"::"p3=c"::"p4=c"::"p5=4"::Nil
)
assert(listFolders(tmpDir, List()).sortBy(_.toString()) === expected.sortBy(_.toString))
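A side note on the expected value's syntax: `::` is Scala's right-associative list cons, so each row above is simply a `List` of `partition=value` path segments:

    // "p1=a" :: "p2=b" :: Nil builds the same value as List("p1=a", "p2=b").
    val segments = "p1=a" :: "p2=b" :: "p3=c" :: Nil
    assert(segments == List("p1=a", "p2=b", "p3=c"))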