From 9f678e97549b19d6d979b22fa4079094ce9fb2c0 Mon Sep 17 00:00:00 2001
From: hyukjinkwon
Date: Sat, 16 Apr 2016 14:56:23 +0100
Subject: [MINOR] Remove inappropriate type notation and extra anonymous
 closure within functional transformations

## What changes were proposed in this pull request?

This PR removes:

- Inappropriate type notation. For example, from

  ```scala
  words.foreachRDD { (rdd: RDD[String], time: Time) =>
    ...
  ```

  to

  ```scala
  words.foreachRDD { (rdd, time) =>
    ...
  ```

- Extra anonymous closures within functional transformations. For example,

  ```scala
  .map(item => {
    ...
  })
  ```

  which can simply be written as below:

  ```scala
  .map { item =>
    ...
  }
  ```

It also corrects some obvious style nits.

## How was this patch tested?

This was tested after adding rules to `scalastyle-config.xml`, though the rules ended up not catching every case perfectly. The rules applied were as below:

- For the first correction,

  ```xml
  (?m)\.[a-zA-Z_][a-zA-Z0-9]*\(\s*[^,]+s*=>\s*\{[^\}]+\}\s*\)
  ```

  ```xml
  \.[a-zA-Z_][a-zA-Z0-9]*\s*[\{|\(]([^\n>,]+=>)?\s*\{([^()]|(?R))*\}^[,]
  ```

- For the second correction,

  ```xml
  \.[a-zA-Z_][a-zA-Z0-9]*\s*[\{|\(]\s*\([^):]*:R))*\}^[,]
  ```

**Those rules were not added to `scalastyle-config.xml`.**

Author: hyukjinkwon

Closes #12413 from HyukjinKwon/SPARK-style.
---
 .../org/apache/spark/mllib/api/python/Word2VecModelWrapper.scala    | 4 +++-
 .../apache/spark/mllib/evaluation/BinaryClassificationMetrics.scala | 3 +--
 2 files changed, 4 insertions(+), 3 deletions(-)

(limited to 'mllib')

diff --git a/mllib/src/main/scala/org/apache/spark/mllib/api/python/Word2VecModelWrapper.scala b/mllib/src/main/scala/org/apache/spark/mllib/api/python/Word2VecModelWrapper.scala
index 05273c3434..4b4ed2291d 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/api/python/Word2VecModelWrapper.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/api/python/Word2VecModelWrapper.scala
@@ -56,7 +56,9 @@ private[python] class Word2VecModelWrapper(model: Word2VecModel) {
   }
 
   def getVectors: JMap[String, JList[Float]] = {
-    model.getVectors.map({case (k, v) => (k, v.toList.asJava)}).asJava
+    model.getVectors.map { case (k, v) =>
+      (k, v.toList.asJava)
+    }.asJava
   }
 
   def save(sc: SparkContext, path: String): Unit = model.save(sc, path)
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/evaluation/BinaryClassificationMetrics.scala b/mllib/src/main/scala/org/apache/spark/mllib/evaluation/BinaryClassificationMetrics.scala
index 0a7a45b4f4..92cd7f22dc 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/evaluation/BinaryClassificationMetrics.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/evaluation/BinaryClassificationMetrics.scala
@@ -189,8 +189,7 @@ class BinaryClassificationMetrics @Since("1.3.0") (
       Iterator(agg)
     }.collect()
     val partitionwiseCumulativeCounts =
-      agg.scanLeft(new BinaryLabelCounter())(
-        (agg: BinaryLabelCounter, c: BinaryLabelCounter) => agg.clone() += c)
+      agg.scanLeft(new BinaryLabelCounter())((agg, c) => agg.clone() += c)
     val totalCount = partitionwiseCumulativeCounts.last
     logInfo(s"Total counts: $totalCount")
     val cumulativeCounts = binnedCounts.mapPartitionsWithIndex(
--
cgit v1.2.3
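
For a runnable illustration of the two corrections above, here is a minimal, self-contained Scala sketch; the object and value names are hypothetical and not taken from the patch:

```scala
// Hypothetical example; only the closure styles mirror the patch.
object ClosureStyleExample {
  def main(args: Array[String]): Unit = {
    val words = Seq("spark", "mllib", "python")

    // Discouraged: an extra anonymous closure wrapped in parentheses and braces.
    val upperBefore = words.map(w => { w.toUpperCase })

    // Preferred: pass the lambda directly inside a brace block.
    val upperAfter = words.map { w => w.toUpperCase }

    // Discouraged: a parameter type annotation the compiler can already infer.
    val lengthsBefore = words.map((w: String) => w.length)

    // Preferred: let type inference do the work.
    val lengthsAfter = words.map(w => w.length)

    // Each pair produces identical results; the difference is purely stylistic.
    assert(upperBefore == upperAfter)
    assert(lengthsBefore == lengthsAfter)
    println(upperAfter.mkString(", "))
  }
}
```

Both forms in each pair behave identically at runtime; the change is purely stylistic, which is why the PR is tagged [MINOR].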