author     Sean Owen <sowen@cloudera.com>    2015-08-25 12:33:13 +0100
committer  Sean Owen <sowen@cloudera.com>    2015-08-25 12:33:13 +0100
commit     69c9c177160e32a2fbc9b36ecc52156077fca6fc (patch)
tree       57345aaf19c3149038bfca5c4ddccf33d41bdd5b /mllib
parent     7f1e507bf7e82bff323c5dec3c1ee044687c4173 (diff)
[SPARK-9613] [CORE] Ban use of JavaConversions and migrate all existing uses to JavaConverters
Replace `JavaConversions` implicits with `JavaConverters`. Most occurrences I've seen so far are necessary conversions; a few have been avoidable. None are in critical code as far as I can see, yet.

Author: Sean Owen <sowen@cloudera.com>

Closes #8033 from srowen/SPARK-9613.
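The change applied across these files follows one pattern: drop the blanket `import scala.collection.JavaConversions._` (which converts collections between Java and Scala implicitly) and instead import `scala.collection.JavaConverters._` and call `.asJava` / `.asScala` explicitly. A minimal, self-contained sketch of that pattern (illustrative only, not code from this patch; the object name is made up):

    // Old style (what the patch removes): implicit, invisible conversions.
    //   import scala.collection.JavaConversions._
    //   val javaList: java.util.List[Int] = Seq(1, 2, 3)   // converted implicitly

    // New style (what the patch adopts): explicit conversions via JavaConverters.
    import scala.collection.JavaConverters._

    object ConvertersSketch {
      def main(args: Array[String]): Unit = {
        val scalaSeq = Seq(1, 2, 3)
        val javaList: java.util.List[Int] = scalaSeq.asJava   // Scala Seq -> java.util.List
        val backToScala = javaList.asScala                    // java.util.List -> Scala Buffer
        println(javaList)          // [1, 2, 3]
        println(backToScala.sum)   // 6
      }
    }

Explicit `.asJava` / `.asScala` calls keep each Java/Scala boundary crossing visible at the call site, which is the point of banning the implicit `JavaConversions` variants.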
Diffstat (limited to 'mllib')
-rw-r--r--  mllib/src/main/scala/org/apache/spark/mllib/util/LinearDataGenerator.scala                | 4
-rw-r--r--  mllib/src/test/java/org/apache/spark/ml/classification/JavaOneVsRestSuite.java             | 7
-rw-r--r--  mllib/src/test/scala/org/apache/spark/mllib/classification/LogisticRegressionSuite.scala   | 4
-rw-r--r--  mllib/src/test/scala/org/apache/spark/mllib/classification/SVMSuite.scala                  | 4
-rw-r--r--  mllib/src/test/scala/org/apache/spark/mllib/optimization/GradientDescentSuite.scala        | 4
-rw-r--r--  mllib/src/test/scala/org/apache/spark/mllib/recommendation/ALSSuite.scala                  | 4
6 files changed, 14 insertions(+), 13 deletions(-)
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/util/LinearDataGenerator.scala b/mllib/src/main/scala/org/apache/spark/mllib/util/LinearDataGenerator.scala
index 87eeb5db05..7a1c779606 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/util/LinearDataGenerator.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/util/LinearDataGenerator.scala
@@ -17,7 +17,7 @@
package org.apache.spark.mllib.util
-import scala.collection.JavaConversions._
+import scala.collection.JavaConverters._
import scala.util.Random
import com.github.fommil.netlib.BLAS.{getInstance => blas}
@@ -52,7 +52,7 @@ object LinearDataGenerator {
nPoints: Int,
seed: Int,
eps: Double): java.util.List[LabeledPoint] = {
- seqAsJavaList(generateLinearInput(intercept, weights, nPoints, seed, eps))
+ generateLinearInput(intercept, weights, nPoints, seed, eps).asJava
}
/**
diff --git a/mllib/src/test/java/org/apache/spark/ml/classification/JavaOneVsRestSuite.java b/mllib/src/test/java/org/apache/spark/ml/classification/JavaOneVsRestSuite.java
index a1ee554152..2744e020e9 100644
--- a/mllib/src/test/java/org/apache/spark/ml/classification/JavaOneVsRestSuite.java
+++ b/mllib/src/test/java/org/apache/spark/ml/classification/JavaOneVsRestSuite.java
@@ -20,7 +20,7 @@ package org.apache.spark.ml.classification;
import java.io.Serializable;
import java.util.List;
-import static scala.collection.JavaConversions.seqAsJavaList;
+import scala.collection.JavaConverters;
import org.junit.After;
import org.junit.Assert;
@@ -55,8 +55,9 @@ public class JavaOneVsRestSuite implements Serializable {
double[] xMean = {5.843, 3.057, 3.758, 1.199};
double[] xVariance = {0.6856, 0.1899, 3.116, 0.581};
- List<LabeledPoint> points = seqAsJavaList(generateMultinomialLogisticInput(
- weights, xMean, xVariance, true, nPoints, 42));
+ List<LabeledPoint> points = JavaConverters.asJavaListConverter(
+ generateMultinomialLogisticInput(weights, xMean, xVariance, true, nPoints, 42)
+ ).asJava();
datasetRDD = jsc.parallelize(points, 2);
dataset = jsql.createDataFrame(datasetRDD, LabeledPoint.class);
}
diff --git a/mllib/src/test/scala/org/apache/spark/mllib/classification/LogisticRegressionSuite.scala b/mllib/src/test/scala/org/apache/spark/mllib/classification/LogisticRegressionSuite.scala
index 2473510e13..8d14bb6572 100644
--- a/mllib/src/test/scala/org/apache/spark/mllib/classification/LogisticRegressionSuite.scala
+++ b/mllib/src/test/scala/org/apache/spark/mllib/classification/LogisticRegressionSuite.scala
@@ -17,7 +17,7 @@
package org.apache.spark.mllib.classification
-import scala.collection.JavaConversions._
+import scala.collection.JavaConverters._
import scala.util.Random
import scala.util.control.Breaks._
@@ -38,7 +38,7 @@ object LogisticRegressionSuite {
scale: Double,
nPoints: Int,
seed: Int): java.util.List[LabeledPoint] = {
- seqAsJavaList(generateLogisticInput(offset, scale, nPoints, seed))
+ generateLogisticInput(offset, scale, nPoints, seed).asJava
}
// Generate input of the form Y = logistic(offset + scale*X)
diff --git a/mllib/src/test/scala/org/apache/spark/mllib/classification/SVMSuite.scala b/mllib/src/test/scala/org/apache/spark/mllib/classification/SVMSuite.scala
index b1d78cba9e..ee3c85d09a 100644
--- a/mllib/src/test/scala/org/apache/spark/mllib/classification/SVMSuite.scala
+++ b/mllib/src/test/scala/org/apache/spark/mllib/classification/SVMSuite.scala
@@ -17,7 +17,7 @@
package org.apache.spark.mllib.classification
-import scala.collection.JavaConversions._
+import scala.collection.JavaConverters._
import scala.util.Random
import org.jblas.DoubleMatrix
@@ -35,7 +35,7 @@ object SVMSuite {
weights: Array[Double],
nPoints: Int,
seed: Int): java.util.List[LabeledPoint] = {
- seqAsJavaList(generateSVMInput(intercept, weights, nPoints, seed))
+ generateSVMInput(intercept, weights, nPoints, seed).asJava
}
// Generate noisy input of the form Y = signum(x.dot(weights) + intercept + noise)
diff --git a/mllib/src/test/scala/org/apache/spark/mllib/optimization/GradientDescentSuite.scala b/mllib/src/test/scala/org/apache/spark/mllib/optimization/GradientDescentSuite.scala
index 13b754a039..36ac7d2672 100644
--- a/mllib/src/test/scala/org/apache/spark/mllib/optimization/GradientDescentSuite.scala
+++ b/mllib/src/test/scala/org/apache/spark/mllib/optimization/GradientDescentSuite.scala
@@ -17,7 +17,7 @@
package org.apache.spark.mllib.optimization
-import scala.collection.JavaConversions._
+import scala.collection.JavaConverters._
import scala.util.Random
import org.scalatest.Matchers
@@ -35,7 +35,7 @@ object GradientDescentSuite {
scale: Double,
nPoints: Int,
seed: Int): java.util.List[LabeledPoint] = {
- seqAsJavaList(generateGDInput(offset, scale, nPoints, seed))
+ generateGDInput(offset, scale, nPoints, seed).asJava
}
// Generate input of the form Y = logistic(offset + scale * X)
diff --git a/mllib/src/test/scala/org/apache/spark/mllib/recommendation/ALSSuite.scala b/mllib/src/test/scala/org/apache/spark/mllib/recommendation/ALSSuite.scala
index 05b87728d6..045135f7f8 100644
--- a/mllib/src/test/scala/org/apache/spark/mllib/recommendation/ALSSuite.scala
+++ b/mllib/src/test/scala/org/apache/spark/mllib/recommendation/ALSSuite.scala
@@ -17,7 +17,7 @@
package org.apache.spark.mllib.recommendation
-import scala.collection.JavaConversions._
+import scala.collection.JavaConverters._
import scala.math.abs
import scala.util.Random
@@ -38,7 +38,7 @@ object ALSSuite {
negativeWeights: Boolean): (java.util.List[Rating], DoubleMatrix, DoubleMatrix) = {
val (sampledRatings, trueRatings, truePrefs) =
generateRatings(users, products, features, samplingRate, implicitPrefs)
- (seqAsJavaList(sampledRatings), trueRatings, truePrefs)
+ (sampledRatings.asJava, trueRatings, truePrefs)
}
def generateRatings(