| author | Dongjoon Hyun <dongjoon@apache.org> | 2016-03-09 10:31:26 +0000 |
|---|---|---|
| committer | Sean Owen <sowen@cloudera.com> | 2016-03-09 10:31:26 +0000 |
| commit | c3689bc24e03a9471cd6e8169da61963c4528252 | |
| tree | 5d1ee90afa2087ede8e4dbc4dd666d699578c230 /mllib/src/test | |
| parent | cbff2803ef117d7cffe6f05fc1bbd395a1e9c587 | |
[SPARK-13702][CORE][SQL][MLLIB] Use diamond operator for generic instance creation in Java code.
## What changes were proposed in this pull request?
To make `docs/examples` (and other related code) simpler, more readable, and more user-friendly, this PR replaces code like the following with the `diamond` operator.
```
- final ArrayList<Product2<Object, Object>> dataToWrite =
- new ArrayList<Product2<Object, Object>>();
+ final ArrayList<Product2<Object, Object>> dataToWrite = new ArrayList<>();
```
Java 7 and higher support the **diamond** operator, which replaces the type arguments required to invoke the constructor of a generic class with an empty set of type parameters (`<>`). Currently, Spark's Java code mixes both styles.
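For readers unfamiliar with the feature, here is a minimal, self-contained sketch of the inference at work (illustrative only and not part of this patch; the class name `DiamondExample` is hypothetical):
```
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class DiamondExample {
  public static void main(String[] args) {
    // Pre-Java-7 style: type arguments spelled out on both sides.
    Map<Integer, List<String>> verbose = new HashMap<Integer, List<String>>();
    verbose.put(1, new ArrayList<String>());

    // Java 7+ diamond: the empty <> makes the compiler infer
    // <Integer, List<String>> from the declared type on the left.
    Map<Integer, List<String>> concise = new HashMap<>();
    concise.put(1, new ArrayList<>()); // inference also works in argument position

    System.out.println(verbose.equals(concise)); // true: both maps hold {1=[]}
  }
}
```
The benefit grows with nested generics such as `Tuple2<Long, Vector>` in the diffs below, where repeating the type arguments at every `new` adds noise without adding information.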
## How was this patch tested?
Manual.
Passes the existing tests.
Author: Dongjoon Hyun <dongjoon@apache.org>
Closes #11541 from dongjoon-hyun/SPARK-13702.
Diffstat (limited to 'mllib/src/test')
8 files changed, 12 insertions(+), 12 deletions(-)
```
diff --git a/mllib/src/test/java/org/apache/spark/ml/classification/JavaDecisionTreeClassifierSuite.java b/mllib/src/test/java/org/apache/spark/ml/classification/JavaDecisionTreeClassifierSuite.java
index 60f25e5cce..40b9c35adc 100644
--- a/mllib/src/test/java/org/apache/spark/ml/classification/JavaDecisionTreeClassifierSuite.java
+++ b/mllib/src/test/java/org/apache/spark/ml/classification/JavaDecisionTreeClassifierSuite.java
@@ -56,7 +56,7 @@ public class JavaDecisionTreeClassifierSuite implements Serializable {
 
     JavaRDD<LabeledPoint> data = sc.parallelize(
       LogisticRegressionSuite.generateLogisticInputAsList(A, B, nPoints, 42), 2).cache();
-    Map<Integer, Integer> categoricalFeatures = new HashMap<Integer, Integer>();
+    Map<Integer, Integer> categoricalFeatures = new HashMap<>();
     DataFrame dataFrame = TreeTests.setMetadata(data, categoricalFeatures, 2);
 
     // This tests setters. Training with various options is tested in Scala.
diff --git a/mllib/src/test/java/org/apache/spark/ml/classification/JavaGBTClassifierSuite.java b/mllib/src/test/java/org/apache/spark/ml/classification/JavaGBTClassifierSuite.java
index 3c69467fa1..59b6fba7a9 100644
--- a/mllib/src/test/java/org/apache/spark/ml/classification/JavaGBTClassifierSuite.java
+++ b/mllib/src/test/java/org/apache/spark/ml/classification/JavaGBTClassifierSuite.java
@@ -56,7 +56,7 @@ public class JavaGBTClassifierSuite implements Serializable {
 
     JavaRDD<LabeledPoint> data = sc.parallelize(
       LogisticRegressionSuite.generateLogisticInputAsList(A, B, nPoints, 42), 2).cache();
-    Map<Integer, Integer> categoricalFeatures = new HashMap<Integer, Integer>();
+    Map<Integer, Integer> categoricalFeatures = new HashMap<>();
     DataFrame dataFrame = TreeTests.setMetadata(data, categoricalFeatures, 2);
 
     // This tests setters. Training with various options is tested in Scala.
diff --git a/mllib/src/test/java/org/apache/spark/ml/classification/JavaRandomForestClassifierSuite.java b/mllib/src/test/java/org/apache/spark/ml/classification/JavaRandomForestClassifierSuite.java
index a66a1e1292..5485fcbf01 100644
--- a/mllib/src/test/java/org/apache/spark/ml/classification/JavaRandomForestClassifierSuite.java
+++ b/mllib/src/test/java/org/apache/spark/ml/classification/JavaRandomForestClassifierSuite.java
@@ -57,7 +57,7 @@ public class JavaRandomForestClassifierSuite implements Serializable {
 
     JavaRDD<LabeledPoint> data = sc.parallelize(
       LogisticRegressionSuite.generateLogisticInputAsList(A, B, nPoints, 42), 2).cache();
-    Map<Integer, Integer> categoricalFeatures = new HashMap<Integer, Integer>();
+    Map<Integer, Integer> categoricalFeatures = new HashMap<>();
     DataFrame dataFrame = TreeTests.setMetadata(data, categoricalFeatures, 2);
 
     // This tests setters. Training with various options is tested in Scala.
```
```
diff --git a/mllib/src/test/java/org/apache/spark/ml/regression/JavaDecisionTreeRegressorSuite.java b/mllib/src/test/java/org/apache/spark/ml/regression/JavaDecisionTreeRegressorSuite.java
index ebe800e749..d5c9d120c5 100644
--- a/mllib/src/test/java/org/apache/spark/ml/regression/JavaDecisionTreeRegressorSuite.java
+++ b/mllib/src/test/java/org/apache/spark/ml/regression/JavaDecisionTreeRegressorSuite.java
@@ -56,7 +56,7 @@ public class JavaDecisionTreeRegressorSuite implements Serializable {
 
     JavaRDD<LabeledPoint> data = sc.parallelize(
       LogisticRegressionSuite.generateLogisticInputAsList(A, B, nPoints, 42), 2).cache();
-    Map<Integer, Integer> categoricalFeatures = new HashMap<Integer, Integer>();
+    Map<Integer, Integer> categoricalFeatures = new HashMap<>();
     DataFrame dataFrame = TreeTests.setMetadata(data, categoricalFeatures, 0);
 
     // This tests setters. Training with various options is tested in Scala.
diff --git a/mllib/src/test/java/org/apache/spark/ml/regression/JavaGBTRegressorSuite.java b/mllib/src/test/java/org/apache/spark/ml/regression/JavaGBTRegressorSuite.java
index fc8c13db07..38d15dc2b7 100644
--- a/mllib/src/test/java/org/apache/spark/ml/regression/JavaGBTRegressorSuite.java
+++ b/mllib/src/test/java/org/apache/spark/ml/regression/JavaGBTRegressorSuite.java
@@ -56,7 +56,7 @@ public class JavaGBTRegressorSuite implements Serializable {
 
     JavaRDD<LabeledPoint> data = sc.parallelize(
       LogisticRegressionSuite.generateLogisticInputAsList(A, B, nPoints, 42), 2).cache();
-    Map<Integer, Integer> categoricalFeatures = new HashMap<Integer, Integer>();
+    Map<Integer, Integer> categoricalFeatures = new HashMap<>();
     DataFrame dataFrame = TreeTests.setMetadata(data, categoricalFeatures, 0);
 
     GBTRegressor rf = new GBTRegressor()
diff --git a/mllib/src/test/java/org/apache/spark/ml/regression/JavaRandomForestRegressorSuite.java b/mllib/src/test/java/org/apache/spark/ml/regression/JavaRandomForestRegressorSuite.java
index a00ce5e249..31be8880c2 100644
--- a/mllib/src/test/java/org/apache/spark/ml/regression/JavaRandomForestRegressorSuite.java
+++ b/mllib/src/test/java/org/apache/spark/ml/regression/JavaRandomForestRegressorSuite.java
@@ -57,7 +57,7 @@ public class JavaRandomForestRegressorSuite implements Serializable {
 
     JavaRDD<LabeledPoint> data = sc.parallelize(
       LogisticRegressionSuite.generateLogisticInputAsList(A, B, nPoints, 42), 2).cache();
-    Map<Integer, Integer> categoricalFeatures = new HashMap<Integer, Integer>();
+    Map<Integer, Integer> categoricalFeatures = new HashMap<>();
     DataFrame dataFrame = TreeTests.setMetadata(data, categoricalFeatures, 0);
 
     // This tests setters. Training with various options is tested in Scala.
```
```
diff --git a/mllib/src/test/java/org/apache/spark/mllib/clustering/JavaLDASuite.java b/mllib/src/test/java/org/apache/spark/mllib/clustering/JavaLDASuite.java
index 225a216270..db19b309f6 100644
--- a/mllib/src/test/java/org/apache/spark/mllib/clustering/JavaLDASuite.java
+++ b/mllib/src/test/java/org/apache/spark/mllib/clustering/JavaLDASuite.java
@@ -45,9 +45,9 @@ public class JavaLDASuite implements Serializable {
   @Before
   public void setUp() {
     sc = new JavaSparkContext("local", "JavaLDA");
-    ArrayList<Tuple2<Long, Vector>> tinyCorpus = new ArrayList<Tuple2<Long, Vector>>();
+    ArrayList<Tuple2<Long, Vector>> tinyCorpus = new ArrayList<>();
     for (int i = 0; i < LDASuite.tinyCorpus().length; i++) {
-      tinyCorpus.add(new Tuple2<Long, Vector>((Long)LDASuite.tinyCorpus()[i]._1(),
+      tinyCorpus.add(new Tuple2<>((Long)LDASuite.tinyCorpus()[i]._1(),
         LDASuite.tinyCorpus()[i]._2()));
     }
     JavaRDD<Tuple2<Long, Vector>> tmpCorpus = sc.parallelize(tinyCorpus, 2);
@@ -189,8 +189,8 @@ public class JavaLDASuite implements Serializable {
     double logPerplexity = toyModel.logPerplexity(pairedDocs);
 
     // check: logLikelihood.
-    ArrayList<Tuple2<Long, Vector>> docsSingleWord = new ArrayList<Tuple2<Long, Vector>>();
-    docsSingleWord.add(new Tuple2<Long, Vector>(0L, Vectors.dense(1.0, 0.0, 0.0)));
+    ArrayList<Tuple2<Long, Vector>> docsSingleWord = new ArrayList<>();
+    docsSingleWord.add(new Tuple2<>(0L, Vectors.dense(1.0, 0.0, 0.0)));
     JavaPairRDD<Long, Vector> single = JavaPairRDD.fromJavaRDD(sc.parallelize(docsSingleWord));
     double logLikelihood = toyModel.logLikelihood(single);
   }
diff --git a/mllib/src/test/java/org/apache/spark/mllib/tree/JavaDecisionTreeSuite.java b/mllib/src/test/java/org/apache/spark/mllib/tree/JavaDecisionTreeSuite.java
index 9925aae441..8dd29061da 100644
--- a/mllib/src/test/java/org/apache/spark/mllib/tree/JavaDecisionTreeSuite.java
+++ b/mllib/src/test/java/org/apache/spark/mllib/tree/JavaDecisionTreeSuite.java
@@ -64,7 +64,7 @@ public class JavaDecisionTreeSuite implements Serializable {
   public void runDTUsingConstructor() {
     List<LabeledPoint> arr = DecisionTreeSuite.generateCategoricalDataPointsAsJavaList();
     JavaRDD<LabeledPoint> rdd = sc.parallelize(arr);
-    HashMap<Integer, Integer> categoricalFeaturesInfo = new HashMap<Integer, Integer>();
+    HashMap<Integer, Integer> categoricalFeaturesInfo = new HashMap<>();
     categoricalFeaturesInfo.put(1, 2); // feature 1 has 2 categories
 
     int maxDepth = 4;
@@ -84,7 +84,7 @@ public class JavaDecisionTreeSuite implements Serializable {
   public void runDTUsingStaticMethods() {
     List<LabeledPoint> arr = DecisionTreeSuite.generateCategoricalDataPointsAsJavaList();
     JavaRDD<LabeledPoint> rdd = sc.parallelize(arr);
-    HashMap<Integer, Integer> categoricalFeaturesInfo = new HashMap<Integer, Integer>();
+    HashMap<Integer, Integer> categoricalFeaturesInfo = new HashMap<>();
     categoricalFeaturesInfo.put(1, 2); // feature 1 has 2 categories
 
     int maxDepth = 4;
```