author     Reynold Xin <rxin@databricks.com>  2015-04-13 09:29:04 -0700
committer  Reynold Xin <rxin@databricks.com>  2015-04-13 09:29:04 -0700
commit     c5b0b296b842926b5c07531a5affe8984bc799c5 (patch)
tree       a01bcfeee05deb3c01436835a0263b84d9f03880
parent     77620be76e82b6cdaae406cd752d3272656f5fe0 (diff)
[SPARK-6765] Enable scalastyle on test code.
Turn scalastyle on for all test code. Most of the violations have been resolved in my previous pull requests:

Core: https://github.com/apache/spark/pull/5484
SQL: https://github.com/apache/spark/pull/5412
MLlib: https://github.com/apache/spark/pull/5411
GraphX: https://github.com/apache/spark/pull/5410
Streaming: https://github.com/apache/spark/pull/5409

Author: Reynold Xin <rxin@databricks.com>

Closes #5486 from rxin/test-style-enable and squashes the following commits:

01683de [Reynold Xin] Fixed new code.
a4ab46e [Reynold Xin] Fixed tests.
20adbc8 [Reynold Xin] Missed one violation.
5e36521 [Reynold Xin] [SPARK-6765] Enable scalastyle on test code.
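Most of the fixes below follow a handful of scalastyle rules: explicit result types on public members, a 100-character line limit, a space after the "//" comment marker, and ordered imports. As a minimal sketch (this suite is hypothetical and not part of the patch; the checker names in the comments are assumptions based on scalastyle's stock rule set), compliant test code looks like this:

    import org.scalatest.FunSuite

    class WidgetSuite extends FunSuite {
      // Space required after "//" (SpaceAfterCommentStartChecker).
      // Public defs need an explicit result type (PublicMethodsHaveTypeChecker).
      def fixture: Seq[Int] = Seq(1, 2, 3)

      test("widgets are non-empty") {
        assert(fixture.nonEmpty)
      }
    }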
-rw-r--r--  core/src/test/scala/org/apache/spark/deploy/ClientSuite.scala | 1
-rwxr-xr-x  dev/scalastyle | 5
-rw-r--r--  mllib/src/test/scala/org/apache/spark/ml/feature/VectorIndexerSuite.scala | 3
-rw-r--r--  sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/CliSuite.scala | 15
-rw-r--r--  sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/HiveThriftServer2Suites.scala | 7
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/QueryPartitionSuite.scala | 30
-rw-r--r--  streaming/src/test/scala/org/apache/spark/streaming/ui/StreamingJobProgressListenerSuite.scala | 4
-rw-r--r--  yarn/src/test/scala/org/apache/spark/deploy/yarn/ClientSuite.scala | 19
-rw-r--r--  yarn/src/test/scala/org/apache/spark/deploy/yarn/YarnAllocatorSuite.scala | 6
-rw-r--r--  yarn/src/test/scala/org/apache/spark/deploy/yarn/YarnSparkHadoopUtilSuite.scala | 2
10 files changed, 52 insertions, 40 deletions
diff --git a/core/src/test/scala/org/apache/spark/deploy/ClientSuite.scala b/core/src/test/scala/org/apache/spark/deploy/ClientSuite.scala
index 518073dcbb..745f9eeee7 100644
--- a/core/src/test/scala/org/apache/spark/deploy/ClientSuite.scala
+++ b/core/src/test/scala/org/apache/spark/deploy/ClientSuite.scala
@@ -46,5 +46,4 @@ class ClientSuite extends FunSuite with Matchers {
// Invalid syntax.
ClientArguments.isValidJarUrl("hdfs:") should be (false)
}
-
}
diff --git a/dev/scalastyle b/dev/scalastyle
index 86919227ed..4e03f89ed5 100755
--- a/dev/scalastyle
+++ b/dev/scalastyle
@@ -18,9 +18,10 @@
#
echo -e "q\n" | build/sbt -Phive -Phive-thriftserver scalastyle > scalastyle.txt
+echo -e "q\n" | build/sbt -Phive -Phive-thriftserver test:scalastyle >> scalastyle.txt
# Check style with YARN built too
-echo -e "q\n" | build/sbt -Pyarn -Phadoop-2.2 -Dhadoop.version=2.2.0 scalastyle \
- >> scalastyle.txt
+echo -e "q\n" | build/sbt -Pyarn -Phadoop-2.2 -Dhadoop.version=2.2.0 scalastyle >> scalastyle.txt
+echo -e "q\n" | build/sbt -Pyarn -Phadoop-2.2 -Dhadoop.version=2.2.0 test:scalastyle >> scalastyle.txt
ERRORS=$(cat scalastyle.txt | awk '{if($1~/error/)print}')
rm scalastyle.txt
diff --git a/mllib/src/test/scala/org/apache/spark/ml/feature/VectorIndexerSuite.scala b/mllib/src/test/scala/org/apache/spark/ml/feature/VectorIndexerSuite.scala
index 61c46c85a7..81ef831c42 100644
--- a/mllib/src/test/scala/org/apache/spark/ml/feature/VectorIndexerSuite.scala
+++ b/mllib/src/test/scala/org/apache/spark/ml/feature/VectorIndexerSuite.scala
@@ -150,7 +150,8 @@ class VectorIndexerSuite extends FunSuite with MLlibTestSparkContext {
val vectorIndexer = getIndexer.setMaxCategories(maxCategories)
val model = vectorIndexer.fit(data)
val categoryMaps = model.categoryMaps
- assert(categoryMaps.keys.toSet === categoricalFeatures) // Chose correct categorical features
+ // Chose correct categorical features
+ assert(categoryMaps.keys.toSet === categoricalFeatures)
val transformed = model.transform(data).select("indexed")
val indexedRDD: RDD[Vector] = transformed.map(_.getAs[Vector](0))
val featureAttrs = AttributeGroup.fromStructField(transformed.schema("indexed"))
diff --git a/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/CliSuite.scala b/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/CliSuite.scala
index 75738fa22b..6d1d7c3a4e 100644
--- a/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/CliSuite.scala
+++ b/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/CliSuite.scala
@@ -1,13 +1,12 @@
/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
*
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/HiveThriftServer2Suites.scala b/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/HiveThriftServer2Suites.scala
index bf20acecb1..4cf95e7bdf 100644
--- a/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/HiveThriftServer2Suites.scala
+++ b/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/HiveThriftServer2Suites.scala
@@ -18,6 +18,7 @@
package org.apache.spark.sql.hive.thriftserver
import java.io.File
+import java.net.URL
import java.sql.{Date, DriverManager, Statement}
import scala.collection.mutable.ArrayBuffer
@@ -41,7 +42,7 @@ import org.apache.spark.sql.hive.HiveShim
import org.apache.spark.util.Utils
object TestData {
- def getTestDataFilePath(name: String) = {
+ def getTestDataFilePath(name: String): URL = {
Thread.currentThread().getContextClassLoader.getResource(s"data/files/$name")
}
@@ -50,7 +51,7 @@ object TestData {
}
class HiveThriftBinaryServerSuite extends HiveThriftJdbcTest {
- override def mode = ServerMode.binary
+ override def mode: ServerMode.Value = ServerMode.binary
private def withCLIServiceClient(f: ThriftCLIServiceClient => Unit): Unit = {
// Transport creation logics below mimics HiveConnection.createBinaryTransport
@@ -337,7 +338,7 @@ class HiveThriftBinaryServerSuite extends HiveThriftJdbcTest {
}
class HiveThriftHttpServerSuite extends HiveThriftJdbcTest {
- override def mode = ServerMode.http
+ override def mode: ServerMode.Value = ServerMode.http
test("JDBC query execution") {
withJdbcStatement { statement =>
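A side effect of the explicit result types is visible above: once getTestDataFilePath declares that it returns a URL, that name must be in scope, hence the new java.net.URL import. A standalone sketch of the same pattern (the object name here is hypothetical):

    import java.net.URL

    object TestResources {
      // The compiler previously inferred URL, so no import was needed;
      // writing the type out makes the import mandatory.
      def resource(name: String): URL =
        Thread.currentThread().getContextClassLoader.getResource(name)
    }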
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/QueryPartitionSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/QueryPartitionSuite.scala
index 83f97128c5..a787fa5546 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/QueryPartitionSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/QueryPartitionSuite.scala
@@ -17,15 +17,12 @@
package org.apache.spark.sql.hive
-import java.io.File
-
import com.google.common.io.Files
+
import org.apache.spark.sql.{QueryTest, _}
import org.apache.spark.sql.hive.test.TestHive
-import org.apache.spark.util.Utils
-/* Implicits */
import org.apache.spark.sql.hive.test.TestHive._
-
+import org.apache.spark.util.Utils
class QueryPartitionSuite extends QueryTest {
@@ -37,23 +34,28 @@ class QueryPartitionSuite extends QueryTest {
testData.registerTempTable("testData")
val tmpDir = Files.createTempDir()
- //create the table for test
- sql(s"CREATE TABLE table_with_partition(key int,value string) PARTITIONED by (ds string) location '${tmpDir.toURI.toString}' ")
- sql("INSERT OVERWRITE TABLE table_with_partition partition (ds='1') SELECT key,value FROM testData")
- sql("INSERT OVERWRITE TABLE table_with_partition partition (ds='2') SELECT key,value FROM testData")
- sql("INSERT OVERWRITE TABLE table_with_partition partition (ds='3') SELECT key,value FROM testData")
- sql("INSERT OVERWRITE TABLE table_with_partition partition (ds='4') SELECT key,value FROM testData")
+ // create the table for test
+ sql(s"CREATE TABLE table_with_partition(key int,value string) " +
+ s"PARTITIONED by (ds string) location '${tmpDir.toURI.toString}' ")
+ sql("INSERT OVERWRITE TABLE table_with_partition partition (ds='1') " +
+ "SELECT key,value FROM testData")
+ sql("INSERT OVERWRITE TABLE table_with_partition partition (ds='2') " +
+ "SELECT key,value FROM testData")
+ sql("INSERT OVERWRITE TABLE table_with_partition partition (ds='3') " +
+ "SELECT key,value FROM testData")
+ sql("INSERT OVERWRITE TABLE table_with_partition partition (ds='4') " +
+ "SELECT key,value FROM testData")
- //test for the exist path
+ // test for the exist path
checkAnswer(sql("select key,value from table_with_partition"),
testData.toSchemaRDD.collect ++ testData.toSchemaRDD.collect
++ testData.toSchemaRDD.collect ++ testData.toSchemaRDD.collect)
- //delect the path of one partition
+ // delete the path of one partition
val folders = tmpDir.listFiles.filter(_.isDirectory)
Utils.deleteRecursively(folders(0))
- //test for affter delete the path
+ // test for after delete the path
checkAnswer(sql("select key,value from table_with_partition"),
testData.toSchemaRDD.collect ++ testData.toSchemaRDD.collect
++ testData.toSchemaRDD.collect)
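The QueryPartitionSuite hunk shows the usual remedy for the line-length check: long SQL literals are split with "+" so each source line stays under the limit (100 characters in Spark's scalastyle configuration). A hypothetical helper that would condense the four near-identical statements above:

    // Illustrative only, not part of the patch.
    def insertPartition(ds: Int): String =
      s"INSERT OVERWRITE TABLE table_with_partition partition (ds='$ds') " +
        "SELECT key,value FROM testData"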
diff --git a/streaming/src/test/scala/org/apache/spark/streaming/ui/StreamingJobProgressListenerSuite.scala b/streaming/src/test/scala/org/apache/spark/streaming/ui/StreamingJobProgressListenerSuite.scala
index 2b9d164500..94b1985116 100644
--- a/streaming/src/test/scala/org/apache/spark/streaming/ui/StreamingJobProgressListenerSuite.scala
+++ b/streaming/src/test/scala/org/apache/spark/streaming/ui/StreamingJobProgressListenerSuite.scala
@@ -21,14 +21,14 @@ import org.scalatest.Matchers
import org.apache.spark.streaming.dstream.DStream
import org.apache.spark.streaming.scheduler._
-import org.apache.spark.streaming.{Time, Milliseconds, TestSuiteBase}
+import org.apache.spark.streaming.{Duration, Time, Milliseconds, TestSuiteBase}
class StreamingJobProgressListenerSuite extends TestSuiteBase with Matchers {
val input = (1 to 4).map(Seq(_)).toSeq
val operation = (d: DStream[Int]) => d.map(x => x)
- override def batchDuration = Milliseconds(100)
+ override def batchDuration: Duration = Milliseconds(100)
test("onBatchSubmitted, onBatchStarted, onBatchCompleted, " +
"onReceiverStarted, onReceiverError, onReceiverStopped") {
diff --git a/yarn/src/test/scala/org/apache/spark/deploy/yarn/ClientSuite.scala b/yarn/src/test/scala/org/apache/spark/deploy/yarn/ClientSuite.scala
index 92f04b4b85..c1b94ac9c5 100644
--- a/yarn/src/test/scala/org/apache/spark/deploy/yarn/ClientSuite.scala
+++ b/yarn/src/test/scala/org/apache/spark/deploy/yarn/ClientSuite.scala
@@ -232,19 +232,26 @@ class ClientSuite extends FunSuite with Matchers with BeforeAndAfterAll {
testCode(conf)
}
- def newEnv = MutableHashMap[String, String]()
+ def newEnv: MutableHashMap[String, String] = MutableHashMap[String, String]()
- def classpath(env: MutableHashMap[String, String]) = env(Environment.CLASSPATH.name).split(":|;|<CPS>")
+ def classpath(env: MutableHashMap[String, String]): Array[String] =
+ env(Environment.CLASSPATH.name).split(":|;|<CPS>")
- def flatten(a: Option[Seq[String]], b: Option[Seq[String]]) = (a ++ b).flatten.toArray
+ def flatten(a: Option[Seq[String]], b: Option[Seq[String]]): Array[String] =
+ (a ++ b).flatten.toArray
- def getFieldValue[A, B](clazz: Class[_], field: String, defaults: => B)(mapTo: A => B): B =
- Try(clazz.getField(field)).map(_.get(null).asInstanceOf[A]).toOption.map(mapTo).getOrElse(defaults)
+ def getFieldValue[A, B](clazz: Class[_], field: String, defaults: => B)(mapTo: A => B): B = {
+ Try(clazz.getField(field))
+ .map(_.get(null).asInstanceOf[A])
+ .toOption
+ .map(mapTo)
+ .getOrElse(defaults)
+ }
def getFieldValue2[A: ClassTag, A1: ClassTag, B](
clazz: Class[_],
field: String,
- defaults: => B)(mapTo: A => B)(mapTo1: A1 => B) : B = {
+ defaults: => B)(mapTo: A => B)(mapTo1: A1 => B): B = {
Try(clazz.getField(field)).map(_.get(null)).map {
case v: A => mapTo(v)
case v1: A1 => mapTo1(v1)
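For expressions too long to split as string concatenations, the patch reformats them one call per line, as in getFieldValue above. A self-contained sketch of that chain style (the helper name is illustrative):

    import scala.util.Try

    // Each step of the Try/Option pipeline sits on its own line, which keeps
    // the expression under the length limit without changing behavior.
    def fieldValueOrElse[A, B](clazz: Class[_], field: String, default: => B)(f: A => B): B =
      Try(clazz.getField(field))
        .map(_.get(null).asInstanceOf[A])
        .toOption
        .map(f)
        .getOrElse(default)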
diff --git a/yarn/src/test/scala/org/apache/spark/deploy/yarn/YarnAllocatorSuite.scala b/yarn/src/test/scala/org/apache/spark/deploy/yarn/YarnAllocatorSuite.scala
index c09b01bafc..455f1019d8 100644
--- a/yarn/src/test/scala/org/apache/spark/deploy/yarn/YarnAllocatorSuite.scala
+++ b/yarn/src/test/scala/org/apache/spark/deploy/yarn/YarnAllocatorSuite.scala
@@ -79,7 +79,7 @@ class YarnAllocatorSuite extends FunSuite with Matchers with BeforeAndAfterEach
}
class MockSplitInfo(host: String) extends SplitInfo(null, host, null, 1, null) {
- override def equals(other: Any) = false
+ override def equals(other: Any): Boolean = false
}
def createAllocator(maxExecutors: Int = 5): YarnAllocator = {
@@ -118,7 +118,9 @@ class YarnAllocatorSuite extends FunSuite with Matchers with BeforeAndAfterEach
handler.getNumExecutorsRunning should be (1)
handler.allocatedContainerToHostMap.get(container.getId).get should be ("host1")
handler.allocatedHostToContainersMap.get("host1").get should contain (container.getId)
- rmClient.getMatchingRequests(container.getPriority, "host1", containerResource).size should be (0)
+
+ val size = rmClient.getMatchingRequests(container.getPriority, "host1", containerResource).size
+ size should be (0)
}
test("some containers allocated") {
diff --git a/yarn/src/test/scala/org/apache/spark/deploy/yarn/YarnSparkHadoopUtilSuite.scala b/yarn/src/test/scala/org/apache/spark/deploy/yarn/YarnSparkHadoopUtilSuite.scala
index 4194f36499..9395316b71 100644
--- a/yarn/src/test/scala/org/apache/spark/deploy/yarn/YarnSparkHadoopUtilSuite.scala
+++ b/yarn/src/test/scala/org/apache/spark/deploy/yarn/YarnSparkHadoopUtilSuite.scala
@@ -46,7 +46,7 @@ class YarnSparkHadoopUtilSuite extends FunSuite with Matchers with Logging {
logWarning("Cannot execute bash, skipping bash tests.")
}
- def bashTest(name: String)(fn: => Unit) =
+ def bashTest(name: String)(fn: => Unit): Unit =
if (hasBash) test(name)(fn) else ignore(name)(fn)
bashTest("shell script escaping") {