author    Dongjoon Hyun <dongjoon@apache.org>  2016-04-12 00:43:28 -0700
committer Reynold Xin <rxin@databricks.com>    2016-04-12 00:43:28 -0700
commit    b0f5497e9520575e5082fa8ce8be5569f43abe74 (patch)
tree      efd349be7227cf20616712fc7376b7c2f11f6614 /examples
parent    678b96e77bf77a64b8df14b19db5a3bb18febfe3 (diff)
[SPARK-14508][BUILD] Add a new ScalaStyle Rule `OmitBracesInCase`
## What changes were proposed in this pull request?

According to the [Spark Code Style Guide](https://cwiki.apache.org/confluence/display/SPARK/Spark+Code+Style+Guide) and the [Scala Style Guide](http://docs.scala-lang.org/style/control-structures.html#curlybraces), we should enforce the following rule:

```
case: Always omit braces in case clauses.
```

This PR adds a new ScalaStyle rule, `OmitBracesInCase`, and applies it to the codebase.

## How was this patch tested?

Pass the Jenkins tests (including Scala style checking).

Author: Dongjoon Hyun <dongjoon@apache.org>

Closes #12280 from dongjoon-hyun/SPARK-14508.
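For illustration, a minimal before/after sketch of the rule (the sample data and the names `before`, `after`, and `weighted` are hypothetical, not taken from the patch):

```scala
// Before: redundant braces around the case body -- the form the new rule flags.
val before = Seq(("a", 1), ("bb", 2)).map {
  case (word, count) => {
    val weighted = word.length * count
    (word, weighted)
  }
}

// After: braces omitted. `=>` already opens a block that extends to the
// next `case` or to the brace closing the pattern-match literal.
val after = Seq(("a", 1), ("bb", 2)).map {
  case (word, count) =>
    val weighted = word.length * count
    (word, weighted)
}
```

Both forms evaluate to the same result; the rule is purely stylistic, which is why the diffs below only drop the brace lines around each case body.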
Diffstat (limited to 'examples')
-rw-r--r--  examples/src/main/scala/org/apache/spark/examples/CassandraCQLTest.scala          | 6
-rw-r--r--  examples/src/main/scala/org/apache/spark/examples/CassandraTest.scala             | 6
-rw-r--r--  examples/src/main/scala/org/apache/spark/examples/LocalALS.scala                  | 6
-rw-r--r--  examples/src/main/scala/org/apache/spark/examples/ml/OneVsRestExample.scala       | 6
-rw-r--r--  examples/src/main/scala/org/apache/spark/examples/mllib/DecisionTreeRunner.scala  | 6
5 files changed, 10 insertions(+), 20 deletions(-)
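The per-file diffs below apply the rule mechanically. The rule itself is enforced through ScalaStyle; the checker configuration is not part of this diff, but a line-level regex is one plausible way such a check can work. The snippet below is a hypothetical sketch only (the regex, `flagged`, and the sample input are assumptions, not the actual rule definition):

```scala
// Hypothetical sketch: flag any line whose case arrow is followed by `{`.
val omitBracesInCase = """case[^\n]*=>\s*\{\s*$""".r

def flagged(source: String): Seq[String] =
  source.split("\n").toSeq.filter(line => omitBracesInCase.findFirstIn(line).isDefined)

// The first line would be flagged; the second is the accepted form.
flagged("case (key, value) => {\ncase (key, value) =>").foreach(println)
```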
diff --git a/examples/src/main/scala/org/apache/spark/examples/CassandraCQLTest.scala b/examples/src/main/scala/org/apache/spark/examples/CassandraCQLTest.scala
index 973b005f91..ca4eea2356 100644
--- a/examples/src/main/scala/org/apache/spark/examples/CassandraCQLTest.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/CassandraCQLTest.scala
@@ -106,9 +106,8 @@ object CassandraCQLTest {
println("Count: " + casRdd.count)
val productSaleRDD = casRdd.map {
- case (key, value) => {
+ case (key, value) =>
(ByteBufferUtil.string(value.get("prod_id")), ByteBufferUtil.toInt(value.get("quantity")))
- }
}
val aggregatedRDD = productSaleRDD.reduceByKey(_ + _)
aggregatedRDD.collect().foreach {
@@ -116,11 +115,10 @@ object CassandraCQLTest {
}
val casoutputCF = aggregatedRDD.map {
- case (productId, saleCount) => {
+ case (productId, saleCount) =>
val outKey = Collections.singletonMap("prod_id", ByteBufferUtil.bytes(productId))
val outVal = Collections.singletonList(ByteBufferUtil.bytes(saleCount))
(outKey, outVal)
- }
}
casoutputCF.saveAsNewAPIHadoopFile(
diff --git a/examples/src/main/scala/org/apache/spark/examples/CassandraTest.scala b/examples/src/main/scala/org/apache/spark/examples/CassandraTest.scala
index 6a8f73ad00..eff840d36e 100644
--- a/examples/src/main/scala/org/apache/spark/examples/CassandraTest.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/CassandraTest.scala
@@ -90,9 +90,8 @@ object CassandraTest {
// Let us first get all the paragraphs from the retrieved rows
val paraRdd = casRdd.map {
- case (key, value) => {
+ case (key, value) =>
ByteBufferUtil.string(value.get(ByteBufferUtil.bytes("para")).value())
- }
}
// Lets get the word count in paras
@@ -103,7 +102,7 @@ object CassandraTest {
}
counts.map {
- case (word, count) => {
+ case (word, count) =>
val colWord = new org.apache.cassandra.thrift.Column()
colWord.setName(ByteBufferUtil.bytes("word"))
colWord.setValue(ByteBufferUtil.bytes(word))
@@ -122,7 +121,6 @@ object CassandraTest {
mutations.get(1).setColumn_or_supercolumn(new ColumnOrSuperColumn())
mutations.get(1).column_or_supercolumn.setColumn(colCount)
(outputkey, mutations)
- }
}.saveAsNewAPIHadoopFile("casDemo", classOf[ByteBuffer], classOf[List[Mutation]],
classOf[ColumnFamilyOutputFormat], job.getConfiguration)
diff --git a/examples/src/main/scala/org/apache/spark/examples/LocalALS.scala b/examples/src/main/scala/org/apache/spark/examples/LocalALS.scala
index af5f216f28..fa10101955 100644
--- a/examples/src/main/scala/org/apache/spark/examples/LocalALS.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/LocalALS.scala
@@ -104,16 +104,14 @@ object LocalALS {
def main(args: Array[String]) {
args match {
- case Array(m, u, f, iters) => {
+ case Array(m, u, f, iters) =>
M = m.toInt
U = u.toInt
F = f.toInt
ITERATIONS = iters.toInt
- }
- case _ => {
+ case _ =>
System.err.println("Usage: LocalALS <M> <U> <F> <iters>")
System.exit(1)
- }
}
showWarning()
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/OneVsRestExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/OneVsRestExample.scala
index a0bb5dabf4..0b5d31c0ff 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/OneVsRestExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/OneVsRestExample.scala
@@ -118,17 +118,15 @@ object OneVsRestExample {
val inputData = sqlContext.read.format("libsvm").load(params.input)
// compute the train/test split: if testInput is not provided use part of input.
val data = params.testInput match {
- case Some(t) => {
+ case Some(t) =>
// compute the number of features in the training set.
val numFeatures = inputData.first().getAs[Vector](1).size
val testData = sqlContext.read.option("numFeatures", numFeatures.toString)
.format("libsvm").load(t)
Array[DataFrame](inputData, testData)
- }
- case None => {
+ case None =>
val f = params.fracTest
inputData.randomSplit(Array(1 - f, f), seed = 12345)
- }
}
val Array(train, test) = data.map(_.cache())
diff --git a/examples/src/main/scala/org/apache/spark/examples/mllib/DecisionTreeRunner.scala b/examples/src/main/scala/org/apache/spark/examples/mllib/DecisionTreeRunner.scala
index c263f4f595..ee811d3aa1 100644
--- a/examples/src/main/scala/org/apache/spark/examples/mllib/DecisionTreeRunner.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/mllib/DecisionTreeRunner.scala
@@ -180,7 +180,7 @@ object DecisionTreeRunner {
}
// For classification, re-index classes if needed.
val (examples, classIndexMap, numClasses) = algo match {
- case Classification => {
+ case Classification =>
// classCounts: class --> # examples in class
val classCounts = origExamples.map(_.label).countByValue()
val sortedClasses = classCounts.keys.toList.sorted
@@ -209,7 +209,6 @@ object DecisionTreeRunner {
println(s"$c\t$frac\t${classCounts(c)}")
}
(examples, classIndexMap, numClasses)
- }
case Regression =>
(origExamples, null, 0)
case _ =>
@@ -225,7 +224,7 @@ object DecisionTreeRunner {
case "libsvm" => MLUtils.loadLibSVMFile(sc, testInput, numFeatures)
}
algo match {
- case Classification => {
+ case Classification =>
// classCounts: class --> # examples in class
val testExamples = {
if (classIndexMap.isEmpty) {
@@ -235,7 +234,6 @@ object DecisionTreeRunner {
}
}
Array(examples, testExamples)
- }
case Regression =>
Array(examples, origTestExamples)
}