about summary refs log tree commit diff
path: root/examples/src
diff options
context:
space:
mode:
Diffstat (limited to 'examples/src')
-rw-r--r--  examples/src/main/java/org/apache/spark/examples/JavaLogQuery.java                                        |  4
-rw-r--r--  examples/src/main/java/org/apache/spark/examples/mllib/JavaMultiLabelClassificationMetricsExample.java   | 14
-rw-r--r--  examples/src/main/java/org/apache/spark/examples/mllib/JavaPowerIterationClusteringExample.java          | 10
-rw-r--r--  examples/src/main/java/org/apache/spark/examples/mllib/JavaStratifiedSamplingExample.java                |  2
-rw-r--r--  examples/src/main/java/org/apache/spark/examples/streaming/JavaFlumeEventCount.java                      |  4
5 files changed, 17 insertions, 17 deletions
diff --git a/examples/src/main/java/org/apache/spark/examples/JavaLogQuery.java b/examples/src/main/java/org/apache/spark/examples/JavaLogQuery.java
index 8abc03e73d..ebb0687b14 100644
--- a/examples/src/main/java/org/apache/spark/examples/JavaLogQuery.java
+++ b/examples/src/main/java/org/apache/spark/examples/JavaLogQuery.java
@@ -82,10 +82,10 @@ public final class JavaLogQuery {
String user = m.group(3);
String query = m.group(5);
if (!user.equalsIgnoreCase("-")) {
- return new Tuple3<String, String, String>(ip, user, query);
+ return new Tuple3<>(ip, user, query);
}
}
- return new Tuple3<String, String, String>(null, null, null);
+ return new Tuple3<>(null, null, null);
}
public static Stats extractStats(String line) {
diff --git a/examples/src/main/java/org/apache/spark/examples/mllib/JavaMultiLabelClassificationMetricsExample.java b/examples/src/main/java/org/apache/spark/examples/mllib/JavaMultiLabelClassificationMetricsExample.java
index 5904260e2d..bc99dc023f 100644
--- a/examples/src/main/java/org/apache/spark/examples/mllib/JavaMultiLabelClassificationMetricsExample.java
+++ b/examples/src/main/java/org/apache/spark/examples/mllib/JavaMultiLabelClassificationMetricsExample.java
@@ -34,13 +34,13 @@ public class JavaMultiLabelClassificationMetricsExample {
JavaSparkContext sc = new JavaSparkContext(conf);
// $example on$
List<Tuple2<double[], double[]>> data = Arrays.asList(
- new Tuple2<double[], double[]>(new double[]{0.0, 1.0}, new double[]{0.0, 2.0}),
- new Tuple2<double[], double[]>(new double[]{0.0, 2.0}, new double[]{0.0, 1.0}),
- new Tuple2<double[], double[]>(new double[]{}, new double[]{0.0}),
- new Tuple2<double[], double[]>(new double[]{2.0}, new double[]{2.0}),
- new Tuple2<double[], double[]>(new double[]{2.0, 0.0}, new double[]{2.0, 0.0}),
- new Tuple2<double[], double[]>(new double[]{0.0, 1.0, 2.0}, new double[]{0.0, 1.0}),
- new Tuple2<double[], double[]>(new double[]{1.0}, new double[]{1.0, 2.0})
+ new Tuple2<>(new double[]{0.0, 1.0}, new double[]{0.0, 2.0}),
+ new Tuple2<>(new double[]{0.0, 2.0}, new double[]{0.0, 1.0}),
+ new Tuple2<>(new double[]{}, new double[]{0.0}),
+ new Tuple2<>(new double[]{2.0}, new double[]{2.0}),
+ new Tuple2<>(new double[]{2.0, 0.0}, new double[]{2.0, 0.0}),
+ new Tuple2<>(new double[]{0.0, 1.0, 2.0}, new double[]{0.0, 1.0}),
+ new Tuple2<>(new double[]{1.0}, new double[]{1.0, 2.0})
);
JavaRDD<Tuple2<double[], double[]>> scoreAndLabels = sc.parallelize(data);
diff --git a/examples/src/main/java/org/apache/spark/examples/mllib/JavaPowerIterationClusteringExample.java b/examples/src/main/java/org/apache/spark/examples/mllib/JavaPowerIterationClusteringExample.java
index b62fa90c34..91c3bd72da 100644
--- a/examples/src/main/java/org/apache/spark/examples/mllib/JavaPowerIterationClusteringExample.java
+++ b/examples/src/main/java/org/apache/spark/examples/mllib/JavaPowerIterationClusteringExample.java
@@ -40,11 +40,11 @@ public class JavaPowerIterationClusteringExample {
@SuppressWarnings("unchecked")
// $example on$
JavaRDD<Tuple3<Long, Long, Double>> similarities = sc.parallelize(Lists.newArrayList(
- new Tuple3<Long, Long, Double>(0L, 1L, 0.9),
- new Tuple3<Long, Long, Double>(1L, 2L, 0.9),
- new Tuple3<Long, Long, Double>(2L, 3L, 0.9),
- new Tuple3<Long, Long, Double>(3L, 4L, 0.1),
- new Tuple3<Long, Long, Double>(4L, 5L, 0.9)));
+ new Tuple3<>(0L, 1L, 0.9),
+ new Tuple3<>(1L, 2L, 0.9),
+ new Tuple3<>(2L, 3L, 0.9),
+ new Tuple3<>(3L, 4L, 0.1),
+ new Tuple3<>(4L, 5L, 0.9)));
PowerIterationClustering pic = new PowerIterationClustering()
.setK(2)
diff --git a/examples/src/main/java/org/apache/spark/examples/mllib/JavaStratifiedSamplingExample.java b/examples/src/main/java/org/apache/spark/examples/mllib/JavaStratifiedSamplingExample.java
index c27fba2783..86c389e11c 100644
--- a/examples/src/main/java/org/apache/spark/examples/mllib/JavaStratifiedSamplingExample.java
+++ b/examples/src/main/java/org/apache/spark/examples/mllib/JavaStratifiedSamplingExample.java
@@ -36,7 +36,7 @@ public class JavaStratifiedSamplingExample {
JavaSparkContext jsc = new JavaSparkContext(conf);
// $example on$
- List<Tuple2<Integer, Character>> list = new ArrayList<Tuple2<Integer, Character>>(
+ List<Tuple2<Integer, Character>> list = new ArrayList<>(
Arrays.<Tuple2<Integer, Character>>asList(
new Tuple2(1, 'a'),
new Tuple2(1, 'b'),
diff --git a/examples/src/main/java/org/apache/spark/examples/streaming/JavaFlumeEventCount.java b/examples/src/main/java/org/apache/spark/examples/streaming/JavaFlumeEventCount.java
index da56637fe8..bae4b78ac2 100644
--- a/examples/src/main/java/org/apache/spark/examples/streaming/JavaFlumeEventCount.java
+++ b/examples/src/main/java/org/apache/spark/examples/streaming/JavaFlumeEventCount.java
@@ -19,7 +19,6 @@ package org.apache.spark.examples.streaming;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.function.Function;
-import org.apache.spark.examples.streaming.StreamingExamples;
import org.apache.spark.streaming.*;
import org.apache.spark.streaming.api.java.*;
import org.apache.spark.streaming.flume.FlumeUtils;
@@ -58,7 +57,8 @@ public final class JavaFlumeEventCount {
Duration batchInterval = new Duration(2000);
SparkConf sparkConf = new SparkConf().setAppName("JavaFlumeEventCount");
JavaStreamingContext ssc = new JavaStreamingContext(sparkConf, batchInterval);
- JavaReceiverInputDStream<SparkFlumeEvent> flumeStream = FlumeUtils.createStream(ssc, host, port);
+ JavaReceiverInputDStream<SparkFlumeEvent> flumeStream =
+ FlumeUtils.createStream(ssc, host, port);
flumeStream.count();