author     Josh Rosen <joshrosen@databricks.com>  2015-12-05 08:15:30 +0800
committer  Reynold Xin <rxin@databricks.com>      2015-12-05 08:15:30 +0800
commit     b7204e1d41271d2e8443484371770936664350b1 (patch)
tree       3b09d003dce3b482282e3ae21b893fe57e607128
parent     d64806b37373c5cc4fd158a9f5005743bd00bf28 (diff)
[SPARK-12112][BUILD] Upgrade to SBT 0.13.9
We should upgrade to SBT 0.13.9, since this is a requirement in order to use SBT's new Maven-style resolution features (which will be done in a separate patch, because it's blocked by some binary compatibility issues in the POM reader plugin).

I also upgraded Scalastyle to version 0.8.0, which was necessary in order to fix a Scala 2.10.5 compatibility issue (see https://github.com/scalastyle/scalastyle/issues/156). The newer Scalastyle is slightly stricter about whitespace surrounding tokens, so I fixed the new style violations.

Author: Josh Rosen <joshrosen@databricks.com>

Closes #10112 from JoshRosen/upgrade-to-sbt-0.13.9.
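Most of the churn in this patch is the same one-character fix repeated: the stricter 0.8.0 rules want a space on both sides of tokens such as ->. A minimal before/after sketch of the pattern (the values here are illustrative, not taken from the patch):

    // Flagged by the stricter Scalastyle 0.8.0 whitespace checks:
    val before = Map(0 -> 2, 1-> 2)    // no space around the second "->"
    // Compliant form, as used throughout this patch:
    val after = Map(0 -> 2, 1 -> 2)    // single space on each side of "->"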
-rw-r--r--  core/src/main/scala/org/apache/spark/deploy/JsonProtocol.scala | 2
-rw-r--r--  core/src/test/scala/org/apache/spark/rdd/RDDSuite.scala | 10
-rw-r--r--  core/src/test/scala/org/apache/spark/serializer/KryoSerializerSuite.scala | 8
-rw-r--r--  examples/src/main/scala/org/apache/spark/examples/streaming/clickstream/PageViewGenerator.scala | 4
-rw-r--r--  examples/src/main/scala/org/apache/spark/examples/streaming/clickstream/PageViewStream.scala | 6
-rw-r--r--  external/flume-sink/src/test/scala/org/apache/spark/streaming/flume/sink/SparkSinkSuite.scala | 4
-rw-r--r--  graphx/src/test/scala/org/apache/spark/graphx/lib/TriangleCountSuite.scala | 2
-rw-r--r--  mllib/src/main/scala/org/apache/spark/ml/param/params.scala | 2
-rw-r--r--  mllib/src/main/scala/org/apache/spark/mllib/stat/test/StreamingTestMethod.scala | 4
-rw-r--r--  mllib/src/test/scala/org/apache/spark/ml/classification/DecisionTreeClassifierSuite.scala | 4
-rw-r--r--  mllib/src/test/scala/org/apache/spark/ml/regression/DecisionTreeRegressorSuite.scala | 6
-rw-r--r--  mllib/src/test/scala/org/apache/spark/mllib/tree/DecisionTreeSuite.scala | 14
-rw-r--r--  pom.xml | 2
-rw-r--r--  project/build.properties | 2
-rw-r--r--  project/plugins.sbt | 7
-rw-r--r--  sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/CastSuite.scala | 2
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetRelation.scala | 8
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/ColumnTypeSuite.scala | 2
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/compression/RunLengthEncodingSuite.scala | 4
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/execution/metric/SQLMetricsSuite.scala | 2
20 files changed, 47 insertions(+), 48 deletions(-)
diff --git a/core/src/main/scala/org/apache/spark/deploy/JsonProtocol.scala b/core/src/main/scala/org/apache/spark/deploy/JsonProtocol.scala
index ccffb36652..220b20bf7c 100644
--- a/core/src/main/scala/org/apache/spark/deploy/JsonProtocol.scala
+++ b/core/src/main/scala/org/apache/spark/deploy/JsonProtocol.scala
@@ -45,7 +45,7 @@ private[deploy] object JsonProtocol {
("id" -> obj.id) ~
("name" -> obj.desc.name) ~
("cores" -> obj.desc.maxCores) ~
- ("user" -> obj.desc.user) ~
+ ("user" -> obj.desc.user) ~
("memoryperslave" -> obj.desc.memoryPerExecutorMB) ~
("submitdate" -> obj.submitDate.toString) ~
("state" -> obj.state.toString) ~
diff --git a/core/src/test/scala/org/apache/spark/rdd/RDDSuite.scala b/core/src/test/scala/org/apache/spark/rdd/RDDSuite.scala
index 46ed5c04f4..007a71f87c 100644
--- a/core/src/test/scala/org/apache/spark/rdd/RDDSuite.scala
+++ b/core/src/test/scala/org/apache/spark/rdd/RDDSuite.scala
@@ -101,21 +101,21 @@ class RDDSuite extends SparkFunSuite with SharedSparkContext {
}
test("SparkContext.union creates UnionRDD if at least one RDD has no partitioner") {
- val rddWithPartitioner = sc.parallelize(Seq(1->true)).partitionBy(new HashPartitioner(1))
- val rddWithNoPartitioner = sc.parallelize(Seq(2->true))
+ val rddWithPartitioner = sc.parallelize(Seq(1 -> true)).partitionBy(new HashPartitioner(1))
+ val rddWithNoPartitioner = sc.parallelize(Seq(2 -> true))
val unionRdd = sc.union(rddWithNoPartitioner, rddWithPartitioner)
assert(unionRdd.isInstanceOf[UnionRDD[_]])
}
test("SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners") {
- val rddWithPartitioner = sc.parallelize(Seq(1->true)).partitionBy(new HashPartitioner(1))
+ val rddWithPartitioner = sc.parallelize(Seq(1 -> true)).partitionBy(new HashPartitioner(1))
val unionRdd = sc.union(rddWithPartitioner, rddWithPartitioner)
assert(unionRdd.isInstanceOf[PartitionerAwareUnionRDD[_]])
}
test("PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner") {
- val rddWithPartitioner = sc.parallelize(Seq(1->true)).partitionBy(new HashPartitioner(1))
- val rddWithNoPartitioner = sc.parallelize(Seq(2->true))
+ val rddWithPartitioner = sc.parallelize(Seq(1 -> true)).partitionBy(new HashPartitioner(1))
+ val rddWithNoPartitioner = sc.parallelize(Seq(2 -> true))
intercept[IllegalArgumentException] {
new PartitionerAwareUnionRDD(sc, Seq(rddWithNoPartitioner, rddWithPartitioner))
}
diff --git a/core/src/test/scala/org/apache/spark/serializer/KryoSerializerSuite.scala b/core/src/test/scala/org/apache/spark/serializer/KryoSerializerSuite.scala
index e428414cf6..f81fe31131 100644
--- a/core/src/test/scala/org/apache/spark/serializer/KryoSerializerSuite.scala
+++ b/core/src/test/scala/org/apache/spark/serializer/KryoSerializerSuite.scala
@@ -144,10 +144,10 @@ class KryoSerializerSuite extends SparkFunSuite with SharedSparkContext {
check(mutable.Map("one" -> 1, "two" -> 2))
check(mutable.HashMap(1 -> "one", 2 -> "two"))
check(mutable.HashMap("one" -> 1, "two" -> 2))
- check(List(Some(mutable.HashMap(1->1, 2->2)), None, Some(mutable.HashMap(3->4))))
+ check(List(Some(mutable.HashMap(1 -> 1, 2 -> 2)), None, Some(mutable.HashMap(3 -> 4))))
check(List(
mutable.HashMap("one" -> 1, "two" -> 2),
- mutable.HashMap(1->"one", 2->"two", 3->"three")))
+ mutable.HashMap(1 -> "one", 2 -> "two", 3 -> "three")))
}
test("Bug: SPARK-10251") {
@@ -174,10 +174,10 @@ class KryoSerializerSuite extends SparkFunSuite with SharedSparkContext {
check(mutable.Map("one" -> 1, "two" -> 2))
check(mutable.HashMap(1 -> "one", 2 -> "two"))
check(mutable.HashMap("one" -> 1, "two" -> 2))
- check(List(Some(mutable.HashMap(1->1, 2->2)), None, Some(mutable.HashMap(3->4))))
+ check(List(Some(mutable.HashMap(1 -> 1, 2 -> 2)), None, Some(mutable.HashMap(3 -> 4))))
check(List(
mutable.HashMap("one" -> 1, "two" -> 2),
- mutable.HashMap(1->"one", 2->"two", 3->"three")))
+ mutable.HashMap(1 -> "one", 2 -> "two", 3 -> "three")))
}
test("ranges") {
diff --git a/examples/src/main/scala/org/apache/spark/examples/streaming/clickstream/PageViewGenerator.scala b/examples/src/main/scala/org/apache/spark/examples/streaming/clickstream/PageViewGenerator.scala
index bea7a47cb2..2fcccb22dd 100644
--- a/examples/src/main/scala/org/apache/spark/examples/streaming/clickstream/PageViewGenerator.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/streaming/clickstream/PageViewGenerator.scala
@@ -51,8 +51,8 @@ object PageView extends Serializable {
*/
// scalastyle:on
object PageViewGenerator {
- val pages = Map("http://foo.com/" -> .7,
- "http://foo.com/news" -> 0.2,
+ val pages = Map("http://foo.com/" -> .7,
+ "http://foo.com/news" -> 0.2,
"http://foo.com/contact" -> .1)
val httpStatus = Map(200 -> .95,
404 -> .05)
diff --git a/examples/src/main/scala/org/apache/spark/examples/streaming/clickstream/PageViewStream.scala b/examples/src/main/scala/org/apache/spark/examples/streaming/clickstream/PageViewStream.scala
index 4ef238606f..723616817f 100644
--- a/examples/src/main/scala/org/apache/spark/examples/streaming/clickstream/PageViewStream.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/streaming/clickstream/PageViewStream.scala
@@ -86,8 +86,10 @@ object PageViewStream {
.map("Unique active users: " + _)
// An external dataset we want to join to this stream
- val userList = ssc.sparkContext.parallelize(
- Map(1 -> "Patrick Wendell", 2 -> "Reynold Xin", 3 -> "Matei Zaharia").toSeq)
+ val userList = ssc.sparkContext.parallelize(Seq(
+ 1 -> "Patrick Wendell",
+ 2 -> "Reynold Xin",
+ 3 -> "Matei Zaharia"))
metric match {
case "pageCounts" => pageCounts.print()
diff --git a/external/flume-sink/src/test/scala/org/apache/spark/streaming/flume/sink/SparkSinkSuite.scala b/external/flume-sink/src/test/scala/org/apache/spark/streaming/flume/sink/SparkSinkSuite.scala
index d2654700ea..941fde45cd 100644
--- a/external/flume-sink/src/test/scala/org/apache/spark/streaming/flume/sink/SparkSinkSuite.scala
+++ b/external/flume-sink/src/test/scala/org/apache/spark/streaming/flume/sink/SparkSinkSuite.scala
@@ -36,11 +36,11 @@ import org.jboss.netty.channel.socket.nio.NioClientSocketChannelFactory
// Spark core main, which has too many dependencies to require here manually.
// For this reason, we continue to use FunSuite and ignore the scalastyle checks
// that fail if this is detected.
-//scalastyle:off
+// scalastyle:off
import org.scalatest.FunSuite
class SparkSinkSuite extends FunSuite {
-//scalastyle:on
+// scalastyle:on
val eventsPerBatch = 1000
val channelCapacity = 5000
diff --git a/graphx/src/test/scala/org/apache/spark/graphx/lib/TriangleCountSuite.scala b/graphx/src/test/scala/org/apache/spark/graphx/lib/TriangleCountSuite.scala
index c47552cf3a..608e43cf3f 100644
--- a/graphx/src/test/scala/org/apache/spark/graphx/lib/TriangleCountSuite.scala
+++ b/graphx/src/test/scala/org/apache/spark/graphx/lib/TriangleCountSuite.scala
@@ -26,7 +26,7 @@ class TriangleCountSuite extends SparkFunSuite with LocalSparkContext {
test("Count a single triangle") {
withSpark { sc =>
- val rawEdges = sc.parallelize(Array( 0L->1L, 1L->2L, 2L->0L ), 2)
+ val rawEdges = sc.parallelize(Array( 0L -> 1L, 1L -> 2L, 2L -> 0L ), 2)
val graph = Graph.fromEdgeTuples(rawEdges, true).cache()
val triangleCount = graph.triangleCount()
val verts = triangleCount.vertices
diff --git a/mllib/src/main/scala/org/apache/spark/ml/param/params.scala b/mllib/src/main/scala/org/apache/spark/ml/param/params.scala
index d182b0a988..ee7e89edd8 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/param/params.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/param/params.scala
@@ -82,7 +82,9 @@ class Param[T](val parent: String, val name: String, val doc: String, val isVali
def w(value: T): ParamPair[T] = this -> value
/** Creates a param pair with the given value (for Scala). */
+ // scalastyle:off
def ->(value: T): ParamPair[T] = ParamPair(this, value)
+ // scalastyle:on
/** Encodes a param value into JSON, which can be decoded by [[jsonDecode()]]. */
def jsonEncode(value: T): String = {
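The params.scala hunk above is the one place where the new rule cannot be satisfied by adding spaces: the method is itself named ->, so its definition line inevitably puts the token flush against the parameter list, and the patch brackets it with scalastyle:off/on instead. A simplified sketch of the same pattern (hypothetical class, not Spark's API):

    // Hypothetical, simplified illustration of the suppression pattern used
    // in params.scala: a method literally named "->" would trip the stricter
    // token-whitespace check on its own definition, so the check is disabled
    // just around that line.
    class Setting[T](val name: String) {
      // scalastyle:off
      def ->(value: T): (Setting[T], T) = (this, value)
      // scalastyle:on
    }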
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/stat/test/StreamingTestMethod.scala b/mllib/src/main/scala/org/apache/spark/mllib/stat/test/StreamingTestMethod.scala
index a7eaed51b4..911b4b9237 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/stat/test/StreamingTestMethod.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/stat/test/StreamingTestMethod.scala
@@ -152,8 +152,8 @@ private[stat] object StudentTTest extends StreamingTestMethod with Logging {
private[stat] object StreamingTestMethod {
// Note: after new `StreamingTestMethod`s are implemented, please update this map.
private final val TEST_NAME_TO_OBJECT: Map[String, StreamingTestMethod] = Map(
- "welch"->WelchTTest,
- "student"->StudentTTest)
+ "welch" -> WelchTTest,
+ "student" -> StudentTTest)
def getTestMethodFromName(method: String): StreamingTestMethod =
TEST_NAME_TO_OBJECT.get(method) match {
diff --git a/mllib/src/test/scala/org/apache/spark/ml/classification/DecisionTreeClassifierSuite.scala b/mllib/src/test/scala/org/apache/spark/ml/classification/DecisionTreeClassifierSuite.scala
index 92b8f84144..fda2711fed 100644
--- a/mllib/src/test/scala/org/apache/spark/ml/classification/DecisionTreeClassifierSuite.scala
+++ b/mllib/src/test/scala/org/apache/spark/ml/classification/DecisionTreeClassifierSuite.scala
@@ -73,7 +73,7 @@ class DecisionTreeClassifierSuite extends SparkFunSuite with MLlibTestSparkConte
.setMaxDepth(2)
.setMaxBins(100)
.setSeed(1)
- val categoricalFeatures = Map(0 -> 3, 1-> 3)
+ val categoricalFeatures = Map(0 -> 3, 1 -> 3)
val numClasses = 2
compareAPIs(categoricalDataPointsRDD, dt, categoricalFeatures, numClasses)
}
@@ -214,7 +214,7 @@ class DecisionTreeClassifierSuite extends SparkFunSuite with MLlibTestSparkConte
.setMaxBins(2)
.setMaxDepth(2)
.setMinInstancesPerNode(2)
- val categoricalFeatures = Map(0 -> 2, 1-> 2)
+ val categoricalFeatures = Map(0 -> 2, 1 -> 2)
val numClasses = 2
compareAPIs(rdd, dt, categoricalFeatures, numClasses)
}
diff --git a/mllib/src/test/scala/org/apache/spark/ml/regression/DecisionTreeRegressorSuite.scala b/mllib/src/test/scala/org/apache/spark/ml/regression/DecisionTreeRegressorSuite.scala
index e0d5afa7a7..6999a910c3 100644
--- a/mllib/src/test/scala/org/apache/spark/ml/regression/DecisionTreeRegressorSuite.scala
+++ b/mllib/src/test/scala/org/apache/spark/ml/regression/DecisionTreeRegressorSuite.scala
@@ -50,7 +50,7 @@ class DecisionTreeRegressorSuite extends SparkFunSuite with MLlibTestSparkContex
.setMaxDepth(2)
.setMaxBins(100)
.setSeed(1)
- val categoricalFeatures = Map(0 -> 3, 1-> 3)
+ val categoricalFeatures = Map(0 -> 3, 1 -> 3)
compareAPIs(categoricalDataPointsRDD, dt, categoricalFeatures)
}
@@ -59,12 +59,12 @@ class DecisionTreeRegressorSuite extends SparkFunSuite with MLlibTestSparkContex
.setImpurity("variance")
.setMaxDepth(2)
.setMaxBins(100)
- val categoricalFeatures = Map(0 -> 2, 1-> 2)
+ val categoricalFeatures = Map(0 -> 2, 1 -> 2)
compareAPIs(categoricalDataPointsRDD, dt, categoricalFeatures)
}
test("copied model must have the same parent") {
- val categoricalFeatures = Map(0 -> 2, 1-> 2)
+ val categoricalFeatures = Map(0 -> 2, 1 -> 2)
val df = TreeTests.setMetadata(categoricalDataPointsRDD, categoricalFeatures, numClasses = 0)
val model = new DecisionTreeRegressor()
.setImpurity("variance")
diff --git a/mllib/src/test/scala/org/apache/spark/mllib/tree/DecisionTreeSuite.scala b/mllib/src/test/scala/org/apache/spark/mllib/tree/DecisionTreeSuite.scala
index 1a4299db4e..bf8fe1acac 100644
--- a/mllib/src/test/scala/org/apache/spark/mllib/tree/DecisionTreeSuite.scala
+++ b/mllib/src/test/scala/org/apache/spark/mllib/tree/DecisionTreeSuite.scala
@@ -64,7 +64,7 @@ class DecisionTreeSuite extends SparkFunSuite with MLlibTestSparkContext {
maxDepth = 2,
numClasses = 2,
maxBins = 100,
- categoricalFeaturesInfo = Map(0 -> 2, 1-> 2))
+ categoricalFeaturesInfo = Map(0 -> 2, 1 -> 2))
val metadata = DecisionTreeMetadata.buildMetadata(rdd, strategy)
val (splits, bins) = DecisionTree.findSplitsBins(rdd, metadata)
@@ -178,7 +178,7 @@ class DecisionTreeSuite extends SparkFunSuite with MLlibTestSparkContext {
maxDepth = 2,
numClasses = 100,
maxBins = 100,
- categoricalFeaturesInfo = Map(0 -> 3, 1-> 3))
+ categoricalFeaturesInfo = Map(0 -> 3, 1 -> 3))
val metadata = DecisionTreeMetadata.buildMetadata(rdd, strategy)
assert(metadata.isUnordered(featureIndex = 0))
@@ -237,7 +237,7 @@ class DecisionTreeSuite extends SparkFunSuite with MLlibTestSparkContext {
maxDepth = 2,
numClasses = 100,
maxBins = 100,
- categoricalFeaturesInfo = Map(0 -> 10, 1-> 10))
+ categoricalFeaturesInfo = Map(0 -> 10, 1 -> 10))
// 2^(10-1) - 1 > 100, so categorical features will be ordered
val metadata = DecisionTreeMetadata.buildMetadata(rdd, strategy)
@@ -421,7 +421,7 @@ class DecisionTreeSuite extends SparkFunSuite with MLlibTestSparkContext {
numClasses = 2,
maxDepth = 2,
maxBins = 100,
- categoricalFeaturesInfo = Map(0 -> 3, 1-> 3))
+ categoricalFeaturesInfo = Map(0 -> 3, 1 -> 3))
val metadata = DecisionTreeMetadata.buildMetadata(rdd, strategy)
assert(!metadata.isUnordered(featureIndex = 0))
@@ -455,7 +455,7 @@ class DecisionTreeSuite extends SparkFunSuite with MLlibTestSparkContext {
Variance,
maxDepth = 2,
maxBins = 100,
- categoricalFeaturesInfo = Map(0 -> 3, 1-> 3))
+ categoricalFeaturesInfo = Map(0 -> 3, 1 -> 3))
val metadata = DecisionTreeMetadata.buildMetadata(rdd, strategy)
assert(!metadata.isUnordered(featureIndex = 0))
@@ -484,7 +484,7 @@ class DecisionTreeSuite extends SparkFunSuite with MLlibTestSparkContext {
Variance,
maxDepth = 2,
maxBins = 100,
- categoricalFeaturesInfo = Map(0 -> 2, 1-> 2))
+ categoricalFeaturesInfo = Map(0 -> 2, 1 -> 2))
val metadata = DecisionTreeMetadata.buildMetadata(rdd, strategy)
assert(!metadata.isUnordered(featureIndex = 0))
assert(!metadata.isUnordered(featureIndex = 1))
@@ -788,7 +788,7 @@ class DecisionTreeSuite extends SparkFunSuite with MLlibTestSparkContext {
val rdd = sc.parallelize(arr)
val strategy = new Strategy(algo = Classification, impurity = Gini,
- maxBins = 2, maxDepth = 2, categoricalFeaturesInfo = Map(0 -> 2, 1-> 2),
+ maxBins = 2, maxDepth = 2, categoricalFeaturesInfo = Map(0 -> 2, 1 -> 2),
numClasses = 2, minInstancesPerNode = 2)
val rootNode = DecisionTree.train(rdd, strategy).topNode
diff --git a/pom.xml b/pom.xml
index 16e656d119..ae2ff8878b 100644
--- a/pom.xml
+++ b/pom.xml
@@ -2235,7 +2235,7 @@
<plugin>
<groupId>org.scalastyle</groupId>
<artifactId>scalastyle-maven-plugin</artifactId>
- <version>0.7.0</version>
+ <version>0.8.0</version>
<configuration>
<verbose>false</verbose>
<failOnViolation>true</failOnViolation>
diff --git a/project/build.properties b/project/build.properties
index 064ec843da..86ca875582 100644
--- a/project/build.properties
+++ b/project/build.properties
@@ -14,4 +14,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-sbt.version=0.13.7
+sbt.version=0.13.9
diff --git a/project/plugins.sbt b/project/plugins.sbt
index c06687d8f1..5e23224cf8 100644
--- a/project/plugins.sbt
+++ b/project/plugins.sbt
@@ -10,14 +10,9 @@ addSbtPlugin("com.typesafe.sbteclipse" % "sbteclipse-plugin" % "2.2.0")
addSbtPlugin("com.github.mpeltonen" % "sbt-idea" % "1.6.0")
-// For Sonatype publishing
-//resolvers += Resolver.url("sbt-plugin-releases", new URL("http://scalasbt.artifactoryonline.com/scalasbt/sbt-plugin-releases/"))(Resolver.ivyStylePatterns)
-
-//addSbtPlugin("com.jsuereth" % "xsbt-gpg-plugin" % "0.6")
-
addSbtPlugin("net.virtual-void" % "sbt-dependency-graph" % "0.7.4")
-addSbtPlugin("org.scalastyle" %% "scalastyle-sbt-plugin" % "0.7.0")
+addSbtPlugin("org.scalastyle" %% "scalastyle-sbt-plugin" % "0.8.0")
addSbtPlugin("com.typesafe" % "sbt-mima-plugin" % "0.1.6")
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/CastSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/CastSuite.scala
index ab77a76448..a98e16c253 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/CastSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/CastSuite.scala
@@ -734,7 +734,7 @@ class CastSuite extends SparkFunSuite with ExpressionEvalHelper {
val complex = Literal.create(
Row(
Seq("123", "true", "f"),
- Map("a" ->"123", "b" -> "true", "c" -> "f"),
+ Map("a" -> "123", "b" -> "true", "c" -> "f"),
Row(0)),
StructType(Seq(
StructField("a",
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetRelation.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetRelation.scala
index fdd745f48e..bb3e278697 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetRelation.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetRelation.scala
@@ -862,9 +862,9 @@ private[sql] object ParquetRelation extends Logging {
// The parquet compression short names
val shortParquetCompressionCodecNames = Map(
- "NONE" -> CompressionCodecName.UNCOMPRESSED,
+ "NONE" -> CompressionCodecName.UNCOMPRESSED,
"UNCOMPRESSED" -> CompressionCodecName.UNCOMPRESSED,
- "SNAPPY" -> CompressionCodecName.SNAPPY,
- "GZIP" -> CompressionCodecName.GZIP,
- "LZO" -> CompressionCodecName.LZO)
+ "SNAPPY" -> CompressionCodecName.SNAPPY,
+ "GZIP" -> CompressionCodecName.GZIP,
+ "LZO" -> CompressionCodecName.LZO)
}
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/ColumnTypeSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/ColumnTypeSuite.scala
index 34dd96929e..706ff1f998 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/ColumnTypeSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/ColumnTypeSuite.scala
@@ -35,7 +35,7 @@ class ColumnTypeSuite extends SparkFunSuite with Logging {
test("defaultSize") {
val checks = Map(
- NULL-> 0, BOOLEAN -> 1, BYTE -> 1, SHORT -> 2, INT -> 4, LONG -> 8,
+ NULL -> 0, BOOLEAN -> 1, BYTE -> 1, SHORT -> 2, INT -> 4, LONG -> 8,
FLOAT -> 4, DOUBLE -> 8, COMPACT_DECIMAL(15, 10) -> 8, LARGE_DECIMAL(20, 10) -> 12,
STRING -> 8, BINARY -> 16, STRUCT_TYPE -> 20, ARRAY_TYPE -> 16, MAP_TYPE -> 32)
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/compression/RunLengthEncodingSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/compression/RunLengthEncodingSuite.scala
index ce3affba55..95642e93ae 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/compression/RunLengthEncodingSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/compression/RunLengthEncodingSuite.scala
@@ -100,11 +100,11 @@ class RunLengthEncodingSuite extends SparkFunSuite {
}
test(s"$RunLengthEncoding with $typeName: simple case") {
- skeleton(2, Seq(0 -> 2, 1 ->2))
+ skeleton(2, Seq(0 -> 2, 1 -> 2))
}
test(s"$RunLengthEncoding with $typeName: run length == 1") {
- skeleton(2, Seq(0 -> 1, 1 ->1))
+ skeleton(2, Seq(0 -> 1, 1 -> 1))
}
test(s"$RunLengthEncoding with $typeName: single long run") {
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/metric/SQLMetricsSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/metric/SQLMetricsSuite.scala
index 4f2cad19bf..4339f7260d 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/metric/SQLMetricsSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/metric/SQLMetricsSuite.scala
@@ -116,7 +116,7 @@ class SQLMetricsSuite extends SparkFunSuite with SharedSQLContext {
// PhysicalRDD(nodeId = 1) -> Project(nodeId = 0)
val df = person.select('name)
testSparkPlanMetrics(df, 1, Map(
- 0L ->("Project", Map(
+ 0L -> ("Project", Map(
"number of rows" -> 2L)))
)
}