From 20fd254101553cb5a4c932c8d03064899112bee6 Mon Sep 17 00:00:00 2001
From: Dongjoon Hyun
Date: Mon, 21 Mar 2016 07:58:57 +0000
Subject: [SPARK-14011][CORE][SQL] Enable `LineLength` Java checkstyle rule

## What changes were proposed in this pull request?

The [Spark Coding Style Guide](https://cwiki.apache.org/confluence/display/SPARK/Spark+Code+Style+Guide) has a 100-character limit on lines, but the rule has been disabled for Java since 11/09/15. This PR enables the **LineLength** checkstyle rule again. To help with that, it also introduces **RedundantImport** and **RedundantModifier**. The following is the diff on `checkstyle.xml`.

```xml
-        <!--
         <module name="LineLength">
             <property name="max" value="100"/>
         </module>
-        -->
@@ -167,5 +164,7 @@
+        <module name="RedundantImport"/>
+        <module name="RedundantModifier"/>
```

## How was this patch tested?

Currently, `lint-java` is disabled in Jenkins, so this needs a manual test. After the Jenkins tests pass, `dev/lint-java` should pass locally.

Author: Dongjoon Hyun

Closes #11831 from dongjoon-hyun/SPARK-14011.
---
 .../org/apache/spark/examples/JavaPageRank.java    | 15 +++++++------
 .../org/apache/spark/examples/JavaWordCount.java   | 26 ++++++++++++----------
 .../ml/JavaDecisionTreeClassificationExample.java  |  5 ++++-
 .../spark/examples/ml/JavaDeveloperApiExample.java |  6 ++---
 .../JavaGradientBoostedTreeClassifierExample.java  |  3 ++-
 .../JavaBinaryClassificationMetricsExample.java    |  3 ++-
 .../mllib/JavaIsotonicRegressionExample.java       |  6 +++--
 .../examples/mllib/JavaStreamingTestExample.java   |  1 -
 .../streaming/JavaDirectKafkaWordCount.java        |  3 ++-
 .../spark/examples/streaming/JavaQueueStream.java  |  1 -
 .../JavaRecoverableNetworkWordCount.java           |  9 +++++---
 .../streaming/JavaStatefulNetworkWordCount.java    |  3 ++-
 12 files changed, 47 insertions(+), 34 deletions(-)

diff --git a/examples/src/main/java/org/apache/spark/examples/JavaPageRank.java b/examples/src/main/java/org/apache/spark/examples/JavaPageRank.java
index c3ef93c5b6..229d123441 100644
--- a/examples/src/main/java/org/apache/spark/examples/JavaPageRank.java
+++ b/examples/src/main/java/org/apache/spark/examples/JavaPageRank.java
@@ -84,13 +84,14 @@ public final class JavaPageRank {
     JavaRDD<String> lines = ctx.textFile(args[0], 1);
 
     // Loads all URLs from input file and initialize their neighbors.
-    JavaPairRDD<String, Iterable<String>> links = lines.mapToPair(new PairFunction<String, String, String>() {
-      @Override
-      public Tuple2<String, String> call(String s) {
-        String[] parts = SPACES.split(s);
-        return new Tuple2<>(parts[0], parts[1]);
-      }
-    }).distinct().groupByKey().cache();
+    JavaPairRDD<String, Iterable<String>> links = lines.mapToPair(
+      new PairFunction<String, String, String>() {
+        @Override
+        public Tuple2<String, String> call(String s) {
+          String[] parts = SPACES.split(s);
+          return new Tuple2<>(parts[0], parts[1]);
+        }
+      }).distinct().groupByKey().cache();
 
     // Loads all URLs with other URL(s) link to from input file and initialize ranks of them to one.
     JavaPairRDD<String, Double> ranks = links.mapValues(new Function<Iterable<String>, Double>() {
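A side note on the `JavaPageRank` hunk above: these examples predate Java 8, which is why the anonymous `PairFunction` has to be wrapped to fit the limit. As a sketch that is not part of this patch, the same pipeline written as a Java 8 lambda stays under 100 characters per line on its own (`lines` and `SPACES` are the locals/fields from `JavaPageRank`):

```java
// Hypothetical Java 8 rewrite of the links pipeline shown above; the lambda
// replaces the anonymous PairFunction, so no wrapping is needed for LineLength.
JavaPairRDD<String, Iterable<String>> links = lines
  .mapToPair(s -> {
    String[] parts = SPACES.split(s);
    return new Tuple2<>(parts[0], parts[1]);
  })
  .distinct()
  .groupByKey()
  .cache();
```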
diff --git a/examples/src/main/java/org/apache/spark/examples/JavaWordCount.java b/examples/src/main/java/org/apache/spark/examples/JavaWordCount.java
index 84dbea5caa..3ff5412b93 100644
--- a/examples/src/main/java/org/apache/spark/examples/JavaWordCount.java
+++ b/examples/src/main/java/org/apache/spark/examples/JavaWordCount.java
@@ -52,19 +52,21 @@ public final class JavaWordCount {
       }
     });
 
-    JavaPairRDD<String, Integer> ones = words.mapToPair(new PairFunction<String, String, Integer>() {
-      @Override
-      public Tuple2<String, Integer> call(String s) {
-        return new Tuple2<>(s, 1);
-      }
-    });
+    JavaPairRDD<String, Integer> ones = words.mapToPair(
+      new PairFunction<String, String, Integer>() {
+        @Override
+        public Tuple2<String, Integer> call(String s) {
+          return new Tuple2<>(s, 1);
+        }
+      });
 
-    JavaPairRDD<String, Integer> counts = ones.reduceByKey(new Function2<Integer, Integer, Integer>() {
-      @Override
-      public Integer call(Integer i1, Integer i2) {
-        return i1 + i2;
-      }
-    });
+    JavaPairRDD<String, Integer> counts = ones.reduceByKey(
+      new Function2<Integer, Integer, Integer>() {
+        @Override
+        public Integer call(Integer i1, Integer i2) {
+          return i1 + i2;
+        }
+      });
 
     List<Tuple2<String, Integer>> output = counts.collect();
     for (Tuple2<?,?> tuple : output) {
diff --git a/examples/src/main/java/org/apache/spark/examples/ml/JavaDecisionTreeClassificationExample.java b/examples/src/main/java/org/apache/spark/examples/ml/JavaDecisionTreeClassificationExample.java
index 5bd61fe508..8214952f80 100644
--- a/examples/src/main/java/org/apache/spark/examples/ml/JavaDecisionTreeClassificationExample.java
+++ b/examples/src/main/java/org/apache/spark/examples/ml/JavaDecisionTreeClassificationExample.java
@@ -39,7 +39,10 @@ public class JavaDecisionTreeClassificationExample {
 
     // $example on$
     // Load the data stored in LIBSVM format as a DataFrame.
-    Dataset<Row> data = sqlContext.read().format("libsvm").load("data/mllib/sample_libsvm_data.txt");
+    Dataset<Row> data = sqlContext
+      .read()
+      .format("libsvm")
+      .load("data/mllib/sample_libsvm_data.txt");
 
     // Index labels, adding metadata to the label column.
     // Fit on whole dataset to include all labels in index.
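Back in `JavaWordCount`, the two anonymous classes likewise exist only for pre-Java-8 compatibility. A sketch, again not part of this patch and assuming the example's `words` RDD, where each step becomes a single short line:

```java
// Hypothetical lambda version of the ones/counts steps from JavaWordCount.
JavaPairRDD<String, Integer> ones = words.mapToPair(s -> new Tuple2<>(s, 1));
JavaPairRDD<String, Integer> counts = ones.reduceByKey((i1, i2) -> i1 + i2);
```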
diff --git a/examples/src/main/java/org/apache/spark/examples/ml/JavaDeveloperApiExample.java b/examples/src/main/java/org/apache/spark/examples/ml/JavaDeveloperApiExample.java
index 8a10dd48aa..fbd8817669 100644
--- a/examples/src/main/java/org/apache/spark/examples/ml/JavaDeveloperApiExample.java
+++ b/examples/src/main/java/org/apache/spark/examples/ml/JavaDeveloperApiExample.java
@@ -107,11 +107,11 @@ public class JavaDeveloperApiExample {
 class MyJavaLogisticRegression
   extends Classifier<Vector, MyJavaLogisticRegression, MyJavaLogisticRegressionModel> {
 
-  public MyJavaLogisticRegression() {
+  MyJavaLogisticRegression() {
     init();
   }
 
-  public MyJavaLogisticRegression(String uid) {
+  MyJavaLogisticRegression(String uid) {
     this.uid_ = uid;
     init();
   }
@@ -177,7 +177,7 @@ class MyJavaLogisticRegressionModel
   private Vector coefficients_;
   public Vector coefficients() { return coefficients_; }
 
-  public MyJavaLogisticRegressionModel(String uid, Vector coefficients) {
+  MyJavaLogisticRegressionModel(String uid, Vector coefficients) {
     this.uid_ = uid;
     this.coefficients_ = coefficients;
   }
diff --git a/examples/src/main/java/org/apache/spark/examples/ml/JavaGradientBoostedTreeClassifierExample.java b/examples/src/main/java/org/apache/spark/examples/ml/JavaGradientBoostedTreeClassifierExample.java
index c2cb955385..553070dace 100644
--- a/examples/src/main/java/org/apache/spark/examples/ml/JavaGradientBoostedTreeClassifierExample.java
+++ b/examples/src/main/java/org/apache/spark/examples/ml/JavaGradientBoostedTreeClassifierExample.java
@@ -40,7 +40,8 @@ public class JavaGradientBoostedTreeClassifierExample {
 
     // $example on$
     // Load and parse the data file, converting it to a DataFrame.
-    Dataset<Row> data = sqlContext.read().format("libsvm").load("data/mllib/sample_libsvm_data.txt");
+    Dataset<Row> data = sqlContext.read().format("libsvm")
+      .load("data/mllib/sample_libsvm_data.txt");
 
     // Index labels, adding metadata to the label column.
     // Fit on whole dataset to include all labels in index.
diff --git a/examples/src/main/java/org/apache/spark/examples/mllib/JavaBinaryClassificationMetricsExample.java b/examples/src/main/java/org/apache/spark/examples/mllib/JavaBinaryClassificationMetricsExample.java
index 3d8babba04..7561a1f653 100644
--- a/examples/src/main/java/org/apache/spark/examples/mllib/JavaBinaryClassificationMetricsExample.java
+++ b/examples/src/main/java/org/apache/spark/examples/mllib/JavaBinaryClassificationMetricsExample.java
@@ -65,7 +65,8 @@ public class JavaBinaryClassificationMetricsExample {
     );
 
     // Get evaluation metrics.
-    BinaryClassificationMetrics metrics = new BinaryClassificationMetrics(predictionAndLabels.rdd());
+    BinaryClassificationMetrics metrics =
+      new BinaryClassificationMetrics(predictionAndLabels.rdd());
 
     // Precision by threshold
     JavaRDD<Tuple2<Object, Object>> precision = metrics.precisionByThreshold().toJavaRDD();
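The constructor changes in `JavaDeveloperApiExample` above are the pattern the new **RedundantModifier** rule reports: `MyJavaLogisticRegression` and `MyJavaLogisticRegressionModel` are package-private classes, so `public` on their constructors grants no access beyond the package. A minimal sketch with a hypothetical class:

```java
// Package-private class: invisible outside its own package, so a public
// constructor cannot widen access and the modifier is redundant.
class PackagePrivateThing {
  PackagePrivateThing() {}  // checkstyle-clean: modifier omitted

  // public PackagePrivateThing() {}  // RedundantModifier would flag 'public'
}
```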
diff --git a/examples/src/main/java/org/apache/spark/examples/mllib/JavaIsotonicRegressionExample.java b/examples/src/main/java/org/apache/spark/examples/mllib/JavaIsotonicRegressionExample.java
index 0e15f75508..c6361a3729 100644
--- a/examples/src/main/java/org/apache/spark/examples/mllib/JavaIsotonicRegressionExample.java
+++ b/examples/src/main/java/org/apache/spark/examples/mllib/JavaIsotonicRegressionExample.java
@@ -48,7 +48,8 @@ public class JavaIsotonicRegressionExample {
     );
 
     // Split data into training (60%) and test (40%) sets.
-    JavaRDD<Tuple3<Double, Double, Double>>[] splits = parsedData.randomSplit(new double[]{0.6, 0.4}, 11L);
+    JavaRDD<Tuple3<Double, Double, Double>>[] splits =
+      parsedData.randomSplit(new double[]{0.6, 0.4}, 11L);
     JavaRDD<Tuple3<Double, Double, Double>> training = splits[0];
     JavaRDD<Tuple3<Double, Double, Double>> test = splits[1];
@@ -80,7 +81,8 @@ public class JavaIsotonicRegressionExample {
 
     // Save and load model
     model.save(jsc.sc(), "target/tmp/myIsotonicRegressionModel");
-    IsotonicRegressionModel sameModel = IsotonicRegressionModel.load(jsc.sc(), "target/tmp/myIsotonicRegressionModel");
+    IsotonicRegressionModel sameModel =
+      IsotonicRegressionModel.load(jsc.sc(), "target/tmp/myIsotonicRegressionModel");
     // $example off$
 
     jsc.stop();
diff --git a/examples/src/main/java/org/apache/spark/examples/mllib/JavaStreamingTestExample.java b/examples/src/main/java/org/apache/spark/examples/mllib/JavaStreamingTestExample.java
index 4c8755916c..984909cb94 100644
--- a/examples/src/main/java/org/apache/spark/examples/mllib/JavaStreamingTestExample.java
+++ b/examples/src/main/java/org/apache/spark/examples/mllib/JavaStreamingTestExample.java
@@ -18,7 +18,6 @@
 
 package org.apache.spark.examples.mllib;
 
-import org.apache.spark.Accumulator;
 import org.apache.spark.api.java.function.VoidFunction;
 import org.apache.spark.api.java.JavaRDD;
 import org.apache.spark.api.java.function.Function;
diff --git a/examples/src/main/java/org/apache/spark/examples/streaming/JavaDirectKafkaWordCount.java b/examples/src/main/java/org/apache/spark/examples/streaming/JavaDirectKafkaWordCount.java
index bfbad91e4f..769b21cecf 100644
--- a/examples/src/main/java/org/apache/spark/examples/streaming/JavaDirectKafkaWordCount.java
+++ b/examples/src/main/java/org/apache/spark/examples/streaming/JavaDirectKafkaWordCount.java
@@ -40,7 +40,8 @@ import org.apache.spark.streaming.Durations;
  *   <topics> is a list of one or more kafka topics to consume from
  *
  * Example:
- *    $ bin/run-example streaming.JavaDirectKafkaWordCount broker1-host:port,broker2-host:port topic1,topic2
+ *    $ bin/run-example streaming.JavaDirectKafkaWordCount broker1-host:port,broker2-host:port \
+ *    topic1,topic2
  */
 
 public final class JavaDirectKafkaWordCount {
diff --git a/examples/src/main/java/org/apache/spark/examples/streaming/JavaQueueStream.java b/examples/src/main/java/org/apache/spark/examples/streaming/JavaQueueStream.java
index 426eaa5f0a..62413b4606 100644
--- a/examples/src/main/java/org/apache/spark/examples/streaming/JavaQueueStream.java
+++ b/examples/src/main/java/org/apache/spark/examples/streaming/JavaQueueStream.java
@@ -30,7 +30,6 @@ import org.apache.spark.SparkConf;
 import org.apache.spark.api.java.JavaRDD;
 import org.apache.spark.api.java.function.Function2;
 import org.apache.spark.api.java.function.PairFunction;
-import org.apache.spark.examples.streaming.StreamingExamples;
 import org.apache.spark.streaming.Duration;
 import org.apache.spark.streaming.api.java.JavaDStream;
 import org.apache.spark.streaming.api.java.JavaPairDStream;
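The `StreamingExamples` import removed from `JavaQueueStream` above is what the new **RedundantImport** rule catches: the file lives in `org.apache.spark.examples.streaming`, so importing from its own package is redundant, as are duplicate imports and imports from `java.lang`. A sketch with hypothetical names:

```java
package com.example.app;

// Each commented import below would be flagged by RedundantImport:
// import com.example.app.Helper;  // same package as this file
// import java.lang.String;        // java.lang is imported implicitly
// import java.util.List;          // duplicate of the import kept below
import java.util.List;

class ImportDemo {
  List<String> names;  // the one import actually needed
}
```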
diff --git a/examples/src/main/java/org/apache/spark/examples/streaming/JavaRecoverableNetworkWordCount.java b/examples/src/main/java/org/apache/spark/examples/streaming/JavaRecoverableNetworkWordCount.java
index a597ecbc5b..e5fb2bfbfa 100644
--- a/examples/src/main/java/org/apache/spark/examples/streaming/JavaRecoverableNetworkWordCount.java
+++ b/examples/src/main/java/org/apache/spark/examples/streaming/JavaRecoverableNetworkWordCount.java
@@ -155,9 +155,11 @@ public final class JavaRecoverableNetworkWordCount {
       @Override
       public void call(JavaPairRDD<String, Integer> rdd, Time time) throws IOException {
         // Get or register the blacklist Broadcast
-        final Broadcast<List<String>> blacklist = JavaWordBlacklist.getInstance(new JavaSparkContext(rdd.context()));
+        final Broadcast<List<String>> blacklist =
+          JavaWordBlacklist.getInstance(new JavaSparkContext(rdd.context()));
         // Get or register the droppedWordsCounter Accumulator
-        final Accumulator<Integer> droppedWordsCounter = JavaDroppedWordsCounter.getInstance(new JavaSparkContext(rdd.context()));
+        final Accumulator<Integer> droppedWordsCounter =
+          JavaDroppedWordsCounter.getInstance(new JavaSparkContext(rdd.context()));
         // Use blacklist to drop words and use droppedWordsCounter to count them
         String counts = rdd.filter(new Function<Tuple2<String, Integer>, Boolean>() {
           @Override
@@ -210,7 +212,8 @@ public final class JavaRecoverableNetworkWordCount {
       }
     };
 
-    JavaStreamingContext ssc = JavaStreamingContext.getOrCreate(checkpointDirectory, createContextFunc);
+    JavaStreamingContext ssc =
+      JavaStreamingContext.getOrCreate(checkpointDirectory, createContextFunc);
     ssc.start();
     ssc.awaitTermination();
   }
diff --git a/examples/src/main/java/org/apache/spark/examples/streaming/JavaStatefulNetworkWordCount.java b/examples/src/main/java/org/apache/spark/examples/streaming/JavaStatefulNetworkWordCount.java
index 6beab90f08..4230dab52e 100644
--- a/examples/src/main/java/org/apache/spark/examples/streaming/JavaStatefulNetworkWordCount.java
+++ b/examples/src/main/java/org/apache/spark/examples/streaming/JavaStatefulNetworkWordCount.java
@@ -91,7 +91,8 @@ public class JavaStatefulNetworkWordCount {
     Function3<String, Optional<Integer>, State<Integer>, Tuple2<String, Integer>> mappingFunc =
         new Function3<String, Optional<Integer>, State<Integer>, Tuple2<String, Integer>>() {
           @Override
-          public Tuple2<String, Integer> call(String word, Optional<Integer> one, State<Integer> state) {
+          public Tuple2<String, Integer> call(String word, Optional<Integer> one,
+              State<Integer> state) {
            int sum = one.orElse(0) + (state.exists() ? state.get() : 0);
            Tuple2<String, Integer> output = new Tuple2<>(word, sum);
            state.update(sum);
--
cgit v1.2.3