diff options
author | Dongjoon Hyun <dongjoon@apache.org> | 2016-03-09 10:31:26 +0000 |
---|---|---|
committer | Sean Owen <sowen@cloudera.com> | 2016-03-09 10:31:26 +0000 |
commit | c3689bc24e03a9471cd6e8169da61963c4528252 (patch) | |
tree | 5d1ee90afa2087ede8e4dbc4dd666d699578c230 /sql/core/src | |
parent | cbff2803ef117d7cffe6f05fc1bbd395a1e9c587 (diff) | |
download | spark-c3689bc24e03a9471cd6e8169da61963c4528252.tar.gz spark-c3689bc24e03a9471cd6e8169da61963c4528252.tar.bz2 spark-c3689bc24e03a9471cd6e8169da61963c4528252.zip |
[SPARK-13702][CORE][SQL][MLLIB] Use diamond operator for generic instance creation in Java code.
## What changes were proposed in this pull request?
In order to make `docs/examples` (and other related code) more simple/readable/user-friendly, this PR replaces existing code like the following by using the `diamond` operator.
```
- final ArrayList<Product2<Object, Object>> dataToWrite =
- new ArrayList<Product2<Object, Object>>();
+ final ArrayList<Product2<Object, Object>> dataToWrite = new ArrayList<>();
```
Java 7 or higher supports the **diamond** operator, which replaces the type arguments required to invoke the constructor of a generic class with an empty set of type parameters (<>). Currently, Spark Java code uses this inconsistently.
## How was this patch tested?
Manual.
Pass the existing tests.
Author: Dongjoon Hyun <dongjoon@apache.org>
Closes #11541 from dongjoon-hyun/SPARK-13702.
Diffstat (limited to 'sql/core/src')
3 files changed, 3 insertions, 3 deletions
diff --git a/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/SpecificParquetRecordReaderBase.java b/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/SpecificParquetRecordReaderBase.java index 6bcd155ccd..5c257bc260 100644 --- a/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/SpecificParquetRecordReaderBase.java +++ b/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/SpecificParquetRecordReaderBase.java @@ -149,7 +149,7 @@ public abstract class SpecificParquetRecordReaderBase<T> extends RecordReader<Vo * by MapReduce. */ public static List<String> listDirectory(File path) throws IOException { - List<String> result = new ArrayList<String>(); + List<String> result = new ArrayList<>(); if (path.isDirectory()) { for (File f: path.listFiles()) { result.addAll(listDirectory(f)); diff --git a/sql/core/src/test/java/test/org/apache/spark/sql/JavaApplySchemaSuite.java b/sql/core/src/test/java/test/org/apache/spark/sql/JavaApplySchemaSuite.java index 640efcc737..51f987fda9 100644 --- a/sql/core/src/test/java/test/org/apache/spark/sql/JavaApplySchemaSuite.java +++ b/sql/core/src/test/java/test/org/apache/spark/sql/JavaApplySchemaSuite.java @@ -111,7 +111,7 @@ public class JavaApplySchemaSuite implements Serializable { df.registerTempTable("people"); Row[] actual = sqlContext.sql("SELECT * FROM people").collect(); - List<Row> expected = new ArrayList<Row>(2); + List<Row> expected = new ArrayList<>(2); expected.add(RowFactory.create("Michael", 29)); expected.add(RowFactory.create("Yin", 28)); diff --git a/sql/core/src/test/java/test/org/apache/spark/sql/JavaDatasetSuite.java b/sql/core/src/test/java/test/org/apache/spark/sql/JavaDatasetSuite.java index 9b624f318c..b054b1095b 100644 --- a/sql/core/src/test/java/test/org/apache/spark/sql/JavaDatasetSuite.java +++ b/sql/core/src/test/java/test/org/apache/spark/sql/JavaDatasetSuite.java @@ -67,7 +67,7 @@ public class JavaDatasetSuite 
implements Serializable { } private <T1, T2> Tuple2<T1, T2> tuple2(T1 t1, T2 t2) { - return new Tuple2<T1, T2>(t1, t2); + return new Tuple2<>(t1, t2); } @Test |