about | summary | refs | log | tree | commit | diff
path: root/streaming
diff options
context:
space:
mode:
authorDmitry Erastov <derastov@gmail.com>2015-12-04 12:03:45 -0800
committerJosh Rosen <joshrosen@databricks.com>2015-12-04 12:03:45 -0800
commitd0d82227785dcd6c49a986431c476c7838a9541c (patch)
tree2700b82eeb7d72228aeff2b2ad865b6c1461517a /streaming
parent95296d9b1ad1d9e9396d7dfd0015ef27ce1cf341 (diff)
downloadspark-d0d82227785dcd6c49a986431c476c7838a9541c.tar.gz
spark-d0d82227785dcd6c49a986431c476c7838a9541c.tar.bz2
spark-d0d82227785dcd6c49a986431c476c7838a9541c.zip
[SPARK-6990][BUILD] Add Java linting script; fix minor warnings
This replaces https://github.com/apache/spark/pull/9696 Invoke Checkstyle and print any errors to the console, failing the step. Use Google's style rules modified according to https://cwiki.apache.org/confluence/display/SPARK/Spark+Code+Style+Guide Some important checks are disabled (see TODOs in `checkstyle.xml`) due to multiple violations being present in the codebase. Suggest fixing those TODOs in separate PR(s). More on Checkstyle can be found on the [official website](http://checkstyle.sourceforge.net/). Sample output (from [build 46345](https://amplab.cs.berkeley.edu/jenkins/job/SparkPullRequestBuilder/46345/consoleFull)) (duplicated because I ran the build twice with different profiles): > Checkstyle checks failed at following occurrences: [ERROR] src/main/java/org/apache/spark/sql/execution/datasources/parquet/UnsafeRowParquetRecordReader.java:[217,7] (coding) MissingSwitchDefault: switch without "default" clause. > [ERROR] src/main/java/org/apache/spark/sql/execution/datasources/parquet/SpecificParquetRecordReaderBase.java:[198,10] (modifier) ModifierOrder: 'protected' modifier out of order with the JLS suggestions. > [ERROR] src/main/java/org/apache/spark/sql/execution/datasources/parquet/UnsafeRowParquetRecordReader.java:[217,7] (coding) MissingSwitchDefault: switch without "default" clause. > [ERROR] src/main/java/org/apache/spark/sql/execution/datasources/parquet/SpecificParquetRecordReaderBase.java:[198,10] (modifier) ModifierOrder: 'protected' modifier out of order with the JLS suggestions. > [error] running /home/jenkins/workspace/SparkPullRequestBuilder2/dev/lint-java ; received return code 1 Also fix some of the minor violations that didn't require sweeping changes. Apologies for the previous botched PRs - I finally figured out the issue. cr: JoshRosen, pwendell > I state that the contribution is my original work, and I license the work to the project under the project's open source license. 
Author: Dmitry Erastov <derastov@gmail.com> Closes #9867 from dskrvk/master.
Diffstat (limited to 'streaming')
-rw-r--r--streaming/src/main/java/org/apache/spark/streaming/util/WriteAheadLog.java10
-rw-r--r--streaming/src/test/java/org/apache/spark/streaming/JavaAPISuite.java4
-rw-r--r--streaming/src/test/java/org/apache/spark/streaming/JavaTrackStateByKeySuite.java2
3 files changed, 8 insertions, 8 deletions
diff --git a/streaming/src/main/java/org/apache/spark/streaming/util/WriteAheadLog.java b/streaming/src/main/java/org/apache/spark/streaming/util/WriteAheadLog.java
index 3738fc1a23..2803cad809 100644
--- a/streaming/src/main/java/org/apache/spark/streaming/util/WriteAheadLog.java
+++ b/streaming/src/main/java/org/apache/spark/streaming/util/WriteAheadLog.java
@@ -37,26 +37,26 @@ public abstract class WriteAheadLog {
* ensure that the written data is durable and readable (using the record handle) by the
* time this function returns.
*/
- abstract public WriteAheadLogRecordHandle write(ByteBuffer record, long time);
+ public abstract WriteAheadLogRecordHandle write(ByteBuffer record, long time);
/**
* Read a written record based on the given record handle.
*/
- abstract public ByteBuffer read(WriteAheadLogRecordHandle handle);
+ public abstract ByteBuffer read(WriteAheadLogRecordHandle handle);
/**
* Read and return an iterator of all the records that have been written but not yet cleaned up.
*/
- abstract public Iterator<ByteBuffer> readAll();
+ public abstract Iterator<ByteBuffer> readAll();
/**
* Clean all the records that are older than the threshold time. It can wait for
* the completion of the deletion.
*/
- abstract public void clean(long threshTime, boolean waitForCompletion);
+ public abstract void clean(long threshTime, boolean waitForCompletion);
/**
* Close this log and release any resources.
*/
- abstract public void close();
+ public abstract void close();
}
diff --git a/streaming/src/test/java/org/apache/spark/streaming/JavaAPISuite.java b/streaming/src/test/java/org/apache/spark/streaming/JavaAPISuite.java
index 609bb4413b..9722c60bba 100644
--- a/streaming/src/test/java/org/apache/spark/streaming/JavaAPISuite.java
+++ b/streaming/src/test/java/org/apache/spark/streaming/JavaAPISuite.java
@@ -1332,12 +1332,12 @@ public class JavaAPISuite extends LocalJavaStreamingContext implements Serializa
public void testUpdateStateByKeyWithInitial() {
List<List<Tuple2<String, Integer>>> inputData = stringIntKVStream;
- List<Tuple2<String, Integer>> initial = Arrays.asList (
+ List<Tuple2<String, Integer>> initial = Arrays.asList(
new Tuple2<>("california", 1),
new Tuple2<>("new york", 2));
JavaRDD<Tuple2<String, Integer>> tmpRDD = ssc.sparkContext().parallelize(initial);
- JavaPairRDD<String, Integer> initialRDD = JavaPairRDD.fromJavaRDD (tmpRDD);
+ JavaPairRDD<String, Integer> initialRDD = JavaPairRDD.fromJavaRDD(tmpRDD);
List<List<Tuple2<String, Integer>>> expected = Arrays.asList(
Arrays.asList(new Tuple2<>("california", 5),
diff --git a/streaming/src/test/java/org/apache/spark/streaming/JavaTrackStateByKeySuite.java b/streaming/src/test/java/org/apache/spark/streaming/JavaTrackStateByKeySuite.java
index eac4cdd14a..89d0bb7b61 100644
--- a/streaming/src/test/java/org/apache/spark/streaming/JavaTrackStateByKeySuite.java
+++ b/streaming/src/test/java/org/apache/spark/streaming/JavaTrackStateByKeySuite.java
@@ -95,7 +95,7 @@ public class JavaTrackStateByKeySuite extends LocalJavaStreamingContext implemen
JavaTrackStateDStream<String, Integer, Boolean, Double> stateDstream2 =
wordsDstream.trackStateByKey(
- StateSpec.<String, Integer, Boolean, Double> function(trackStateFunc2)
+ StateSpec.<String, Integer, Boolean, Double>function(trackStateFunc2)
.initialState(initialRDD)
.numPartitions(10)
.partitioner(new HashPartitioner(10))