aboutsummaryrefslogtreecommitdiff
path: root/mllib
diff options
context:
space:
mode:
authorXin Ren <iamshrek@126.com>2016-08-30 11:24:55 +0100
committerSean Owen <sowen@cloudera.com>2016-08-30 11:24:55 +0100
commit27209252f09ff73c58e60c6df8aaba73b308088c (patch)
tree23eb0a48d34fab230d8a48a0f0303299ace47c0d /mllib
parentd4eee9932edf1a489d7fe9120a0f003150834df6 (diff)
downloadspark-27209252f09ff73c58e60c6df8aaba73b308088c.tar.gz
spark-27209252f09ff73c58e60c6df8aaba73b308088c.tar.bz2
spark-27209252f09ff73c58e60c6df8aaba73b308088c.zip
[MINOR][MLLIB][SQL] Clean up unused variables and unused import
## What changes were proposed in this pull request? Clean up unused variables and unused import statements, unnecessary `return` and `toArray`, and some more style improvements, when I walk through the code examples. ## How was this patch tested? Tested manually on local laptop. Author: Xin Ren <iamshrek@126.com> Closes #14836 from keypointt/codeWalkThroughML.
Diffstat (limited to 'mllib')
-rw-r--r--mllib/src/main/scala/org/apache/spark/ml/feature/Interaction.scala2
-rw-r--r--mllib/src/main/scala/org/apache/spark/ml/r/IsotonicRegressionWrapper.scala2
-rw-r--r--mllib/src/main/scala/org/apache/spark/ml/util/stopwatches.scala2
-rw-r--r--mllib/src/main/scala/org/apache/spark/mllib/feature/ChiSqSelector.scala2
-rw-r--r--mllib/src/main/scala/org/apache/spark/mllib/random/RandomRDDs.scala8
5 files changed, 8 insertions, 8 deletions
diff --git a/mllib/src/main/scala/org/apache/spark/ml/feature/Interaction.scala b/mllib/src/main/scala/org/apache/spark/ml/feature/Interaction.scala
index 96d0bdee9e..902f84f862 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/feature/Interaction.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/feature/Interaction.scala
@@ -136,7 +136,7 @@ class Interaction @Since("1.6.0") (@Since("1.6.0") override val uid: String) ext
case _: VectorUDT =>
val attrs = AttributeGroup.fromStructField(f).attributes.getOrElse(
throw new SparkException("Vector attributes must be defined for interaction."))
- attrs.map(getNumFeatures).toArray
+ attrs.map(getNumFeatures)
}
new FeatureEncoder(numFeatures)
}.toArray
diff --git a/mllib/src/main/scala/org/apache/spark/ml/r/IsotonicRegressionWrapper.scala b/mllib/src/main/scala/org/apache/spark/ml/r/IsotonicRegressionWrapper.scala
index 1ea80cb46a..a7992debe6 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/r/IsotonicRegressionWrapper.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/r/IsotonicRegressionWrapper.scala
@@ -23,7 +23,7 @@ import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods._
import org.apache.spark.ml.{Pipeline, PipelineModel}
-import org.apache.spark.ml.attribute.{AttributeGroup}
+import org.apache.spark.ml.attribute.AttributeGroup
import org.apache.spark.ml.feature.RFormula
import org.apache.spark.ml.regression.{IsotonicRegression, IsotonicRegressionModel}
import org.apache.spark.ml.util._
diff --git a/mllib/src/main/scala/org/apache/spark/ml/util/stopwatches.scala b/mllib/src/main/scala/org/apache/spark/ml/util/stopwatches.scala
index e79b1f3164..e539deca4b 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/util/stopwatches.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/util/stopwatches.scala
@@ -20,7 +20,7 @@ package org.apache.spark.ml.util
import scala.collection.mutable
import org.apache.spark.SparkContext
-import org.apache.spark.util.LongAccumulator;
+import org.apache.spark.util.LongAccumulator
/**
* Abstract class for stopwatches.
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/feature/ChiSqSelector.scala b/mllib/src/main/scala/org/apache/spark/mllib/feature/ChiSqSelector.scala
index 56fb2d33c2..33a1f18bcc 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/feature/ChiSqSelector.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/feature/ChiSqSelector.scala
@@ -164,7 +164,7 @@ object ChiSqSelectorModel extends Loader[ChiSqSelectorModel] {
case Row(feature: Int) => (feature)
}.collect()
- return new ChiSqSelectorModel(features)
+ new ChiSqSelectorModel(features)
}
}
}
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/random/RandomRDDs.scala b/mllib/src/main/scala/org/apache/spark/mllib/random/RandomRDDs.scala
index c2bc1f17cc..6d60136ddc 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/random/RandomRDDs.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/random/RandomRDDs.scala
@@ -438,10 +438,10 @@ object RandomRDDs {
@DeveloperApi
@Since("1.6.0")
def randomJavaRDD[T](
- jsc: JavaSparkContext,
- generator: RandomDataGenerator[T],
- size: Long): JavaRDD[T] = {
- randomJavaRDD(jsc, generator, size, 0);
+ jsc: JavaSparkContext,
+ generator: RandomDataGenerator[T],
+ size: Long): JavaRDD[T] = {
+ randomJavaRDD(jsc, generator, size, 0)
}
// TODO Generate RDD[Vector] from multivariate distributions.