author     Reynold Xin <rxin@databricks.com>    2015-05-28 23:00:02 -0700
committer  Reynold Xin <rxin@databricks.com>    2015-05-28 23:00:02 -0700
commit     97a60cf75d1fed654953eccedd04f3442389c5ca (patch)
tree       2b681949925a5acb86ff925ee2c86b9c2d6f867c /sql
parent     36067ce398e2949c2f122625e67fd5497febdee6 (diff)
[SPARK-7929] Turn whitespace checker on for more token types.
This is the last batch of changes to complete SPARK-7929. Previous related PRs:

https://github.com/apache/spark/pull/6480
https://github.com/apache/spark/pull/6478
https://github.com/apache/spark/pull/6477
https://github.com/apache/spark/pull/6476
https://github.com/apache/spark/pull/6475
https://github.com/apache/spark/pull/6474
https://github.com/apache/spark/pull/6473

Author: Reynold Xin <rxin@databricks.com>

Closes #6487 from rxin/whitespace-lint and squashes the following commits:

b33d43d [Reynold Xin] [SPARK-7929] Turn whitespace checker on for more token types.
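For context, this PR series enables Scalastyle's token-whitespace rules across the build (a single space after commas, single spaces around tokens like => and ->); the sql/ changes below are the resulting mechanical cleanups. The snippet below is a minimal sketch of the style being enforced, not code from this commit; the object and val names are invented for illustration.

    // Illustrative only: spacing the stricter checker rejects vs. accepts.
    object WhitespaceStyleSketch {
      def sum(xs: Int*): Int = xs.sum

      // Flagged under the new rules: no space after commas, none around "->",
      // and the varargs ascription written as ":_*".
      val bad = Map(1->2, 2->1)
      val badTotal = sum(Seq(1,2,3) :_*)

      // Clean under the new rules, matching the fixes in this diff.
      val good = Map(1 -> 2, 2 -> 1)
      val goodTotal = sum(Seq(1, 2, 3) : _*)
    }

Scalastyle drives this through per-token configuration (the "token types" in the title): checkers such as EnsureSingleSpaceAfterTokenChecker take a list of tokens (e.g. COMMA, ARROW) to enforce.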
Diffstat (limited to 'sql')

-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveInspectorSuite.scala             | 12
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertIntoHiveTableSuite.scala       |  2
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/ListTablesSuite.scala                |  2
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/UDFSuite.scala                       |  6
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveComparisonTest.scala   |  4
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveResolutionSuite.scala  |  6
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveTableScanSuite.scala   |  2
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala        |  2
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/parquetSuites.scala                  |  4

9 files changed, 20 insertions(+), 20 deletions(-)
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveInspectorSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveInspectorSuite.scala
index 2a7374cc17..80c2d32bf7 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveInspectorSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveInspectorSuite.scala
@@ -78,10 +78,10 @@ class HiveInspectorSuite extends FunSuite with HiveInspectors {
Literal(java.sql.Date.valueOf("2014-09-23")) ::
Literal(Decimal(BigDecimal(123.123))) ::
Literal(new java.sql.Timestamp(123123)) ::
- Literal(Array[Byte](1,2,3)) ::
- Literal.create(Seq[Int](1,2,3), ArrayType(IntegerType)) ::
- Literal.create(Map[Int, Int](1->2, 2->1), MapType(IntegerType, IntegerType)) ::
- Literal.create(Row(1,2.0d,3.0f),
+ Literal(Array[Byte](1, 2, 3)) ::
+ Literal.create(Seq[Int](1, 2, 3), ArrayType(IntegerType)) ::
+ Literal.create(Map[Int, Int](1 -> 2, 2 -> 1), MapType(IntegerType, IntegerType)) ::
+ Literal.create(Row(1, 2.0d, 3.0f),
StructType(StructField("c1", IntegerType) ::
StructField("c2", DoubleType) ::
StructField("c3", FloatType) :: Nil)) ::
@@ -111,8 +111,8 @@ class HiveInspectorSuite extends FunSuite with HiveInspectors {
case DecimalType() => PrimitiveObjectInspectorFactory.writableHiveDecimalObjectInspector
case StructType(fields) =>
ObjectInspectorFactory.getStandardStructObjectInspector(
- java.util.Arrays.asList(fields.map(f => f.name) :_*),
- java.util.Arrays.asList(fields.map(f => toWritableInspector(f.dataType)) :_*))
+ java.util.Arrays.asList(fields.map(f => f.name) : _*),
+ java.util.Arrays.asList(fields.map(f => toWritableInspector(f.dataType)) : _*))
}
def checkDataType(dt1: Seq[DataType], dt2: Seq[DataType]): Unit = {
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertIntoHiveTableSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertIntoHiveTableSuite.scala
index acf2f7da30..9cc4685499 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertIntoHiveTableSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertIntoHiveTableSuite.scala
@@ -160,7 +160,7 @@ class InsertIntoHiveTableSuite extends QueryTest with BeforeAndAfter {
"p1=a"::"p2=b"::"p3=c"::"p4=c"::"p5=1"::Nil ,
"p1=a"::"p2=b"::"p3=c"::"p4=c"::"p5=4"::Nil
)
- assert(listFolders(tmpDir,List()).sortBy(_.toString()) == expected.sortBy(_.toString))
+ assert(listFolders(tmpDir, List()).sortBy(_.toString()) == expected.sortBy(_.toString))
sql("DROP TABLE table_with_partition")
sql("DROP TABLE tmp_table")
}
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/ListTablesSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/ListTablesSuite.scala
index e12a6c21cc..1c15997ea8 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/ListTablesSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/ListTablesSuite.scala
@@ -29,7 +29,7 @@ class ListTablesSuite extends QueryTest with BeforeAndAfterAll {
import org.apache.spark.sql.hive.test.TestHive.implicits._
val df =
- sparkContext.parallelize((1 to 10).map(i => (i,s"str$i"))).toDF("key", "value")
+ sparkContext.parallelize((1 to 10).map(i => (i, s"str$i"))).toDF("key", "value")
override def beforeAll(): Unit = {
// The catalog in HiveContext is a case insensitive one.
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/UDFSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/UDFSuite.scala
index 85b6bc93d7..8245047626 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/UDFSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/UDFSuite.scala
@@ -26,9 +26,9 @@ case class FunctionResult(f1: String, f2: String)
class UDFSuite extends QueryTest {
test("UDF case insensitive") {
- udf.register("random0", () => { Math.random()})
- udf.register("RANDOM1", () => { Math.random()})
- udf.register("strlenScala", (_: String).length + (_:Int))
+ udf.register("random0", () => { Math.random() })
+ udf.register("RANDOM1", () => { Math.random() })
+ udf.register("strlenScala", (_: String).length + (_: Int))
assert(sql("SELECT RANDOM0() FROM src LIMIT 1").head().getDouble(0) >= 0.0)
assert(sql("SELECT RANDOm1() FROM src LIMIT 1").head().getDouble(0) >= 0.0)
assert(sql("SELECT strlenscala('test', 1) FROM src LIMIT 1").head().getInt(0) === 5)
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveComparisonTest.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveComparisonTest.scala
index 9c056e493b..55e5551b63 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveComparisonTest.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveComparisonTest.scala
@@ -273,7 +273,7 @@ abstract class HiveComparisonTest
}
val hiveCacheFiles = queryList.zipWithIndex.map {
- case (queryString, i) =>
+ case (queryString, i) =>
val cachedAnswerName = s"$testCaseName-$i-${getMd5(queryString)}"
new File(answerCache, cachedAnswerName)
}
@@ -304,7 +304,7 @@ abstract class HiveComparisonTest
// other DDL has not been executed yet.
hiveQueries.foreach(_.logical)
val computedResults = (queryList.zipWithIndex, hiveQueries, hiveCacheFiles).zipped.map {
- case ((queryString, i), hiveQuery, cachedAnswerFile)=>
+ case ((queryString, i), hiveQuery, cachedAnswerFile) =>
try {
// Hooks often break the harness and don't really affect our test anyway, don't
// even try running them.
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveResolutionSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveResolutionSuite.scala
index 3dfa6e72e1..b08db6de2d 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveResolutionSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveResolutionSuite.scala
@@ -77,7 +77,7 @@ class HiveResolutionSuite extends HiveComparisonTest {
test("case insensitivity with scala reflection") {
// Test resolution with Scala Reflection
- sparkContext.parallelize(Data(1, 2, Nested(1,2), Seq(Nested(1,2))) :: Nil)
+ sparkContext.parallelize(Data(1, 2, Nested(1, 2), Seq(Nested(1, 2))) :: Nil)
.toDF().registerTempTable("caseSensitivityTest")
val query = sql("SELECT a, b, A, B, n.a, n.b, n.A, n.B FROM caseSensitivityTest")
@@ -88,14 +88,14 @@ class HiveResolutionSuite extends HiveComparisonTest {
ignore("case insensitivity with scala reflection joins") {
// Test resolution with Scala Reflection
- sparkContext.parallelize(Data(1, 2, Nested(1,2), Seq(Nested(1,2))) :: Nil)
+ sparkContext.parallelize(Data(1, 2, Nested(1, 2), Seq(Nested(1, 2))) :: Nil)
.toDF().registerTempTable("caseSensitivityTest")
sql("SELECT * FROM casesensitivitytest a JOIN casesensitivitytest b ON a.a = b.a").collect()
}
test("nested repeated resolution") {
- sparkContext.parallelize(Data(1, 2, Nested(1,2), Seq(Nested(1,2))) :: Nil)
+ sparkContext.parallelize(Data(1, 2, Nested(1, 2), Seq(Nested(1, 2))) :: Nil)
.toDF().registerTempTable("nestedRepeatedTest")
assert(sql("SELECT nestedArray[0].a FROM nestedRepeatedTest").collect().head(0) === 1)
}
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveTableScanSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveTableScanSuite.scala
index ab53c6309e..0ba4d11478 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveTableScanSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveTableScanSuite.scala
@@ -76,7 +76,7 @@ class HiveTableScanSuite extends HiveComparisonTest {
TestHive.sql(s"LOAD DATA LOCAL INPATH '$location' INTO TABLE timestamp_query_null")
assert(TestHive.sql("SELECT time from timestamp_query_null limit 2").collect()
- === Array(Row(java.sql.Timestamp.valueOf("2014-12-11 00:00:00")),Row(null)))
+ === Array(Row(java.sql.Timestamp.valueOf("2014-12-11 00:00:00")), Row(null)))
TestHive.sql("DROP TABLE timestamp_query_null")
}
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala
index 538e66125c..27863a6014 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala
@@ -327,7 +327,7 @@ class SQLQuerySuite extends QueryTest {
"org.apache.hadoop.hive.ql.io.RCFileInputFormat",
"org.apache.hadoop.hive.ql.io.RCFileOutputFormat",
"org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe",
- "serde_p1=p1", "serde_p2=p2", "tbl_p1=p11", "tbl_p2=p22","MANAGED_TABLE"
+ "serde_p1=p1", "serde_p2=p2", "tbl_p1=p11", "tbl_p2=p22", "MANAGED_TABLE"
)
if (HiveShim.version =="0.13.1") {
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/parquetSuites.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/parquetSuites.scala
index 7851f38fd4..e62ac909cb 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/parquetSuites.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/parquetSuites.scala
@@ -38,7 +38,7 @@ case class ParquetData(intField: Int, stringField: String)
// The data that also includes the partitioning key
case class ParquetDataWithKey(p: Int, intField: Int, stringField: String)
-case class StructContainer(intStructField :Int, stringStructField: String)
+case class StructContainer(intStructField: Int, stringStructField: String)
case class ParquetDataWithComplexTypes(
intField: Int,
@@ -735,7 +735,7 @@ class ParquetDataSourceOnSourceSuite extends ParquetSourceSuiteBase {
val filePath = new File(tempDir, "testParquet").getCanonicalPath
val filePath2 = new File(tempDir, "testParquet2").getCanonicalPath
- val df = Seq(1,2,3).map(i => (i, i.toString)).toDF("int", "str")
+ val df = Seq(1, 2, 3).map(i => (i, i.toString)).toDF("int", "str")
val df2 = df.as('x).join(df.as('y), $"x.str" === $"y.str").groupBy("y.str").max("y.int")
intercept[Throwable](df2.write.parquet(filePath))