diff options
author | Reynold Xin <rxin@apache.org> | 2014-01-15 20:15:29 -0800 |
---|---|---|
committer | Reynold Xin <rxin@apache.org> | 2014-01-15 20:15:29 -0800 |
commit | 84595ea3e25d2f9578b3de34704da14eb02330fa (patch) | |
tree | 8a6e692d13748a345f645cf5a39cbaad37ff29aa /mllib/src/test | |
parent | 0675ca50f3d66afaa12ea2ec4159930f101413d5 (diff) | |
parent | 57fcfc75b3583eb99564fc0d1bb5f49aea53f684 (diff) | |
download | spark-84595ea3e25d2f9578b3de34704da14eb02330fa.tar.gz spark-84595ea3e25d2f9578b3de34704da14eb02330fa.tar.bz2 spark-84595ea3e25d2f9578b3de34704da14eb02330fa.zip |
Merge pull request #414 from soulmachine/code-style
Code clean up for mllib
* Removed unnecessary parentheses
* Removed unused imports
* Simplified `filter...size()` to `count...`
* Removed obsolete parameter comments
Diffstat (limited to 'mllib/src/test')
7 files changed, 11 insertions, 22 deletions
diff --git a/mllib/src/test/scala/org/apache/spark/mllib/classification/LogisticRegressionSuite.scala b/mllib/src/test/scala/org/apache/spark/mllib/classification/LogisticRegressionSuite.scala index 34c67294e9..02ede71137 100644 --- a/mllib/src/test/scala/org/apache/spark/mllib/classification/LogisticRegressionSuite.scala +++ b/mllib/src/test/scala/org/apache/spark/mllib/classification/LogisticRegressionSuite.scala @@ -80,9 +80,9 @@ class LogisticRegressionSuite extends FunSuite with BeforeAndAfterAll with Shoul } def validatePrediction(predictions: Seq[Double], input: Seq[LabeledPoint]) { - val numOffPredictions = predictions.zip(input).filter { case (prediction, expected) => - (prediction != expected.label) - }.size + val numOffPredictions = predictions.zip(input).count { case (prediction, expected) => + prediction != expected.label + } // At least 83% of the predictions should be on. ((input.length - numOffPredictions).toDouble / input.length) should be > 0.83 } diff --git a/mllib/src/test/scala/org/apache/spark/mllib/classification/SVMSuite.scala b/mllib/src/test/scala/org/apache/spark/mllib/classification/SVMSuite.scala index 6a957e3ddc..3357b86f9b 100644 --- a/mllib/src/test/scala/org/apache/spark/mllib/classification/SVMSuite.scala +++ b/mllib/src/test/scala/org/apache/spark/mllib/classification/SVMSuite.scala @@ -18,7 +18,6 @@ package org.apache.spark.mllib.classification import scala.util.Random -import scala.math.signum import scala.collection.JavaConversions._ import org.scalatest.BeforeAndAfterAll @@ -50,7 +49,7 @@ object SVMSuite { val x = Array.fill[Array[Double]](nPoints)( Array.fill[Double](weights.length)(rnd.nextDouble() * 2.0 - 1.0)) val y = x.map { xi => - val yD = (new DoubleMatrix(1, xi.length, xi:_*)).dot(weightsMat) + + val yD = new DoubleMatrix(1, xi.length, xi: _*).dot(weightsMat) + intercept + 0.01 * rnd.nextGaussian() if (yD < 0) 0.0 else 1.0 } @@ -72,9 +71,9 @@ class SVMSuite extends FunSuite with BeforeAndAfterAll { } def 
validatePrediction(predictions: Seq[Double], input: Seq[LabeledPoint]) { - val numOffPredictions = predictions.zip(input).filter { case (prediction, expected) => - (prediction != expected.label) - }.size + val numOffPredictions = predictions.zip(input).count { case (prediction, expected) => + prediction != expected.label + } // At least 80% of the predictions should be on. assert(numOffPredictions < input.length / 5) } diff --git a/mllib/src/test/scala/org/apache/spark/mllib/clustering/KMeansSuite.scala b/mllib/src/test/scala/org/apache/spark/mllib/clustering/KMeansSuite.scala index 94245f6027..73657cac89 100644 --- a/mllib/src/test/scala/org/apache/spark/mllib/clustering/KMeansSuite.scala +++ b/mllib/src/test/scala/org/apache/spark/mllib/clustering/KMeansSuite.scala @@ -17,15 +17,12 @@ package org.apache.spark.mllib.clustering -import scala.util.Random import org.scalatest.BeforeAndAfterAll import org.scalatest.FunSuite import org.apache.spark.SparkContext -import org.apache.spark.SparkContext._ -import org.jblas._ class KMeansSuite extends FunSuite with BeforeAndAfterAll { @transient private var sc: SparkContext = _ diff --git a/mllib/src/test/scala/org/apache/spark/mllib/recommendation/ALSSuite.scala b/mllib/src/test/scala/org/apache/spark/mllib/recommendation/ALSSuite.scala index e683a90f57..4e8dbde658 100644 --- a/mllib/src/test/scala/org/apache/spark/mllib/recommendation/ALSSuite.scala +++ b/mllib/src/test/scala/org/apache/spark/mllib/recommendation/ALSSuite.scala @@ -24,7 +24,6 @@ import org.scalatest.BeforeAndAfterAll import org.scalatest.FunSuite import org.apache.spark.SparkContext -import org.apache.spark.SparkContext._ import org.jblas._ diff --git a/mllib/src/test/scala/org/apache/spark/mllib/regression/LassoSuite.scala b/mllib/src/test/scala/org/apache/spark/mllib/regression/LassoSuite.scala index db980c7bae..b2c8df97a8 100644 --- a/mllib/src/test/scala/org/apache/spark/mllib/regression/LassoSuite.scala +++ 
b/mllib/src/test/scala/org/apache/spark/mllib/regression/LassoSuite.scala @@ -17,8 +17,6 @@ package org.apache.spark.mllib.regression -import scala.collection.JavaConversions._ -import scala.util.Random import org.scalatest.BeforeAndAfterAll import org.scalatest.FunSuite @@ -41,10 +39,10 @@ class LassoSuite extends FunSuite with BeforeAndAfterAll { } def validatePrediction(predictions: Seq[Double], input: Seq[LabeledPoint]) { - val numOffPredictions = predictions.zip(input).filter { case (prediction, expected) => + val numOffPredictions = predictions.zip(input).count { case (prediction, expected) => // A prediction is off if the prediction is more than 0.5 away from expected value. math.abs(prediction - expected.label) > 0.5 - }.size + } // At least 80% of the predictions should be on. assert(numOffPredictions < input.length / 5) } diff --git a/mllib/src/test/scala/org/apache/spark/mllib/regression/LinearRegressionSuite.scala b/mllib/src/test/scala/org/apache/spark/mllib/regression/LinearRegressionSuite.scala index ef500c704c..406afbaa3e 100644 --- a/mllib/src/test/scala/org/apache/spark/mllib/regression/LinearRegressionSuite.scala +++ b/mllib/src/test/scala/org/apache/spark/mllib/regression/LinearRegressionSuite.scala @@ -21,7 +21,6 @@ import org.scalatest.BeforeAndAfterAll import org.scalatest.FunSuite import org.apache.spark.SparkContext -import org.apache.spark.SparkContext._ import org.apache.spark.mllib.util.LinearDataGenerator class LinearRegressionSuite extends FunSuite with BeforeAndAfterAll { @@ -37,10 +36,10 @@ class LinearRegressionSuite extends FunSuite with BeforeAndAfterAll { } def validatePrediction(predictions: Seq[Double], input: Seq[LabeledPoint]) { - val numOffPredictions = predictions.zip(input).filter { case (prediction, expected) => + val numOffPredictions = predictions.zip(input).count { case (prediction, expected) => // A prediction is off if the prediction is more than 0.5 away from expected value. 
math.abs(prediction - expected.label) > 0.5 - }.size + } // At least 80% of the predictions should be on. assert(numOffPredictions < input.length / 5) } diff --git a/mllib/src/test/scala/org/apache/spark/mllib/regression/RidgeRegressionSuite.scala b/mllib/src/test/scala/org/apache/spark/mllib/regression/RidgeRegressionSuite.scala index c18092d804..1d6a10b66e 100644 --- a/mllib/src/test/scala/org/apache/spark/mllib/regression/RidgeRegressionSuite.scala +++ b/mllib/src/test/scala/org/apache/spark/mllib/regression/RidgeRegressionSuite.scala @@ -17,15 +17,12 @@ package org.apache.spark.mllib.regression -import scala.collection.JavaConversions._ -import scala.util.Random import org.jblas.DoubleMatrix import org.scalatest.BeforeAndAfterAll import org.scalatest.FunSuite import org.apache.spark.SparkContext -import org.apache.spark.SparkContext._ import org.apache.spark.mllib.util.LinearDataGenerator class RidgeRegressionSuite extends FunSuite with BeforeAndAfterAll { |