From c3689bc24e03a9471cd6e8169da61963c4528252 Mon Sep 17 00:00:00 2001
From: Dongjoon Hyun
Date: Wed, 9 Mar 2016 10:31:26 +0000
Subject: [SPARK-13702][CORE][SQL][MLLIB] Use diamond operator for generic instance creation in Java code.

## What changes were proposed in this pull request?

In order to make `docs/examples` (and other related code) simpler, more readable, and more user-friendly, this PR replaces code like the following with the `diamond` operator.

```
-    final ArrayList<Product2<Object, Object>> dataToWrite =
-      new ArrayList<Product2<Object, Object>>();
+    final ArrayList<Product2<Object, Object>> dataToWrite = new ArrayList<>();
```

Java 7 and higher support the **diamond** operator, which replaces the type arguments required to invoke the constructor of a generic class with an empty set of type parameters (`<>`). Currently, Spark's Java code uses a mix of both styles.

## How was this patch tested?

Manual. Passes the existing tests.

Author: Dongjoon Hyun

Closes #11541 from dongjoon-hyun/SPARK-13702.
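As a quick illustration for readers, here is a small, self-contained sketch (an editorial example, not part of the commit; the class and variable names are invented) of what the diamond operator changes: the compiler infers the constructor's type arguments from the declared type, so both declarations below produce identically typed maps.

```java
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class DiamondDemo {
  public static void main(String[] args) {
    // Pre-diamond style: the type arguments are repeated on the constructor.
    Map<String, List<Integer>> verbose = new HashMap<String, List<Integer>>();

    // Java 7+ diamond style: the compiler infers <String, List<Integer>>
    // from the declared type on the left-hand side.
    Map<String, List<Integer>> concise = new HashMap<>();

    verbose.put("a", new ArrayList<Integer>());
    concise.put("a", new ArrayList<>()); // element type inferred as Integer
    System.out.println(verbose.equals(concise)); // true: identical contents
  }
}
```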
---
 .../spark/shuffle/sort/ShuffleExternalSorter.java     |  4 ++--
 .../org/apache/spark/status/api/v1/TaskSorting.java   |  2 +-
 .../org/apache/spark/launcher/SparkLauncherSuite.java |  2 +-
 .../spark/shuffle/sort/UnsafeShuffleWriterSuite.java  | 19 ++++++++-----------
 .../unsafe/map/AbstractBytesToBytesMapSuite.java      |  6 +++---
 .../org/apache/spark/util/collection/TestTimSort.java |  2 +-
 .../unsafe/sort/UnsafeExternalSorterSuite.java        |  2 +-
 7 files changed, 17 insertions(+), 20 deletions(-)

(limited to 'core')

diff --git a/core/src/main/java/org/apache/spark/shuffle/sort/ShuffleExternalSorter.java b/core/src/main/java/org/apache/spark/shuffle/sort/ShuffleExternalSorter.java
index f97e76d7ed..7a114df2d6 100644
--- a/core/src/main/java/org/apache/spark/shuffle/sort/ShuffleExternalSorter.java
+++ b/core/src/main/java/org/apache/spark/shuffle/sort/ShuffleExternalSorter.java
@@ -84,9 +84,9 @@ final class ShuffleExternalSorter extends MemoryConsumer {
    * this might not be necessary if we maintained a pool of re-usable pages in the TaskMemoryManager
    * itself).
    */
-  private final LinkedList<MemoryBlock> allocatedPages = new LinkedList<MemoryBlock>();
+  private final LinkedList<MemoryBlock> allocatedPages = new LinkedList<>();
 
-  private final LinkedList<SpillInfo> spills = new LinkedList<SpillInfo>();
+  private final LinkedList<SpillInfo> spills = new LinkedList<>();
 
   /** Peak memory used by this sorter so far, in bytes. **/
   private long peakMemoryUsedBytes;
diff --git a/core/src/main/java/org/apache/spark/status/api/v1/TaskSorting.java b/core/src/main/java/org/apache/spark/status/api/v1/TaskSorting.java
index f19ed01d5a..0cf84d5f9b 100644
--- a/core/src/main/java/org/apache/spark/status/api/v1/TaskSorting.java
+++ b/core/src/main/java/org/apache/spark/status/api/v1/TaskSorting.java
@@ -29,7 +29,7 @@ public enum TaskSorting {
   private final Set<String> alternateNames;
 
   private TaskSorting(String... names) {
-    alternateNames = new HashSet<String>();
+    alternateNames = new HashSet<>();
     for (String n: names) {
       alternateNames.add(n);
     }
diff --git a/core/src/test/java/org/apache/spark/launcher/SparkLauncherSuite.java b/core/src/test/java/org/apache/spark/launcher/SparkLauncherSuite.java
index 1692df7d30..3e47bfc274 100644
--- a/core/src/test/java/org/apache/spark/launcher/SparkLauncherSuite.java
+++ b/core/src/test/java/org/apache/spark/launcher/SparkLauncherSuite.java
@@ -88,7 +88,7 @@ public class SparkLauncherSuite {
   @Test
   public void testChildProcLauncher() throws Exception {
     SparkSubmitOptionParser opts = new SparkSubmitOptionParser();
-    Map<String, String> env = new HashMap<String, String>();
+    Map<String, String> env = new HashMap<>();
     env.put("SPARK_PRINT_LAUNCH_COMMAND", "1");
 
     SparkLauncher launcher = new SparkLauncher(env)
diff --git a/core/src/test/java/org/apache/spark/shuffle/sort/UnsafeShuffleWriterSuite.java b/core/src/test/java/org/apache/spark/shuffle/sort/UnsafeShuffleWriterSuite.java
index add9d937d3..ddea6f5a69 100644
--- a/core/src/test/java/org/apache/spark/shuffle/sort/UnsafeShuffleWriterSuite.java
+++ b/core/src/test/java/org/apache/spark/shuffle/sort/UnsafeShuffleWriterSuite.java
@@ -67,7 +67,7 @@ public class UnsafeShuffleWriterSuite {
   File mergedOutputFile;
   File tempDir;
   long[] partitionSizesInMergedFile;
-  final LinkedList<File> spillFilesCreated = new LinkedList<File>();
+  final LinkedList<File> spillFilesCreated = new LinkedList<>();
   SparkConf conf;
   final Serializer serializer = new KryoSerializer(new SparkConf());
   TaskMetrics taskMetrics;
@@ -217,7 +217,7 @@
   }
 
   private List<Tuple2<Object, Object>> readRecordsFromFile() throws IOException {
-    final ArrayList<Tuple2<Object, Object>> recordsList = new ArrayList<Tuple2<Object, Object>>();
+    final ArrayList<Tuple2<Object, Object>> recordsList = new ArrayList<>();
     long startOffset = 0;
     for (int i = 0; i < NUM_PARTITITONS; i++) {
       final long partitionSize = partitionSizesInMergedFile[i];
@@ -286,8 +286,7 @@
   @Test
   public void writeWithoutSpilling() throws Exception {
     // In this example, each partition should have exactly one record:
-    final ArrayList<Product2<Object, Object>> dataToWrite =
-      new ArrayList<Product2<Object, Object>>();
+    final ArrayList<Product2<Object, Object>> dataToWrite = new ArrayList<>();
     for (int i = 0; i < NUM_PARTITITONS; i++) {
       dataToWrite.add(new Tuple2<Object, Object>(i, i));
     }
@@ -325,8 +324,7 @@
       conf.set("spark.shuffle.compress", "false");
     }
     final UnsafeShuffleWriter<Object, Object> writer = createWriter(transferToEnabled);
-    final ArrayList<Product2<Object, Object>> dataToWrite =
-      new ArrayList<Product2<Object, Object>>();
+    final ArrayList<Product2<Object, Object>> dataToWrite = new ArrayList<>();
     for (int i : new int[] { 1, 2, 3, 4, 4, 2 }) {
       dataToWrite.add(new Tuple2<Object, Object>(i, i));
     }
@@ -403,7 +401,7 @@
   public void writeEnoughDataToTriggerSpill() throws Exception {
     memoryManager.limit(PackedRecordPointer.MAXIMUM_PAGE_SIZE_BYTES);
     final UnsafeShuffleWriter<Object, Object> writer = createWriter(false);
-    final ArrayList<Product2<Object, Object>> dataToWrite = new ArrayList<Product2<Object, Object>>();
+    final ArrayList<Product2<Object, Object>> dataToWrite = new ArrayList<>();
     final byte[] bigByteArray = new byte[PackedRecordPointer.MAXIMUM_PAGE_SIZE_BYTES / 10];
     for (int i = 0; i < 10 + 1; i++) {
       dataToWrite.add(new Tuple2<Object, Object>(i, bigByteArray));
@@ -445,8 +443,7 @@
   @Test
   public void writeRecordsThatAreBiggerThanDiskWriteBufferSize() throws Exception {
     final UnsafeShuffleWriter<Object, Object> writer = createWriter(false);
-    final ArrayList<Product2<Object, Object>> dataToWrite =
-      new ArrayList<Product2<Object, Object>>();
+    final ArrayList<Product2<Object, Object>> dataToWrite = new ArrayList<>();
     final byte[] bytes = new byte[(int) (ShuffleExternalSorter.DISK_WRITE_BUFFER_SIZE * 2.5)];
     new Random(42).nextBytes(bytes);
     dataToWrite.add(new Tuple2<Object, Object>(1, ByteBuffer.wrap(bytes)));
@@ -461,7 +458,7 @@
   @Test
   public void writeRecordsThatAreBiggerThanMaxRecordSize() throws Exception {
     final UnsafeShuffleWriter<Object, Object> writer = createWriter(false);
-    final ArrayList<Product2<Object, Object>> dataToWrite = new ArrayList<Product2<Object, Object>>();
+    final ArrayList<Product2<Object, Object>> dataToWrite = new ArrayList<>();
     dataToWrite.add(new Tuple2<Object, Object>(1, ByteBuffer.wrap(new byte[1])));
     // We should be able to write a record that's right _at_ the max record size
     final byte[] atMaxRecordSize = new byte[(int) taskMemoryManager.pageSizeBytes() - 4];
@@ -498,7 +495,7 @@
     taskMemoryManager = spy(taskMemoryManager);
     when(taskMemoryManager.pageSizeBytes()).thenReturn(pageSizeBytes);
     final UnsafeShuffleWriter<Object, Object> writer =
-      new UnsafeShuffleWriter<Object, Object>(
+      new UnsafeShuffleWriter<>(
         blockManager,
         shuffleBlockResolver,
         taskMemoryManager,
diff --git a/core/src/test/java/org/apache/spark/unsafe/map/AbstractBytesToBytesMapSuite.java b/core/src/test/java/org/apache/spark/unsafe/map/AbstractBytesToBytesMapSuite.java
index 61b94b736d..9aab2265c9 100644
--- a/core/src/test/java/org/apache/spark/unsafe/map/AbstractBytesToBytesMapSuite.java
+++ b/core/src/test/java/org/apache/spark/unsafe/map/AbstractBytesToBytesMapSuite.java
@@ -66,7 +66,7 @@ public abstract class AbstractBytesToBytesMapSuite {
   private TaskMemoryManager taskMemoryManager;
   private static final long PAGE_SIZE_BYTES = 1L << 26; // 64 megabytes
 
-  final LinkedList<File> spillFilesCreated = new LinkedList<File>();
+  final LinkedList<File> spillFilesCreated = new LinkedList<>();
   File tempDir;
 
   @Mock(answer = RETURNS_SMART_NULLS) BlockManager blockManager;
@@ -397,7 +397,7 @@
     final int size = 65536;
     // Java arrays' hashCodes() aren't based on the arrays' contents, so we need to wrap arrays
     // into ByteBuffers in order to use them as keys here.
-    final Map<ByteBuffer, byte[]> expected = new HashMap<ByteBuffer, byte[]>();
+    final Map<ByteBuffer, byte[]> expected = new HashMap<>();
     final BytesToBytesMap map = new BytesToBytesMap(taskMemoryManager, size, PAGE_SIZE_BYTES);
     try {
       // Fill the map to 90% full so that we can trigger probing
@@ -453,7 +453,7 @@
     final BytesToBytesMap map = new BytesToBytesMap(taskMemoryManager, 64, pageSizeBytes);
     // Java arrays' hashCodes() aren't based on the arrays' contents, so we need to wrap arrays
     // into ByteBuffers in order to use them as keys here.
-    final Map<ByteBuffer, byte[]> expected = new HashMap<ByteBuffer, byte[]>();
+    final Map<ByteBuffer, byte[]> expected = new HashMap<>();
     try {
       for (int i = 0; i < 1000; i++) {
         final byte[] key = getRandomByteArray(rand.nextInt(128));
diff --git a/core/src/test/java/org/apache/spark/util/collection/TestTimSort.java b/core/src/test/java/org/apache/spark/util/collection/TestTimSort.java
index 45772b6d3c..e884b1bc12 100644
--- a/core/src/test/java/org/apache/spark/util/collection/TestTimSort.java
+++ b/core/src/test/java/org/apache/spark/util/collection/TestTimSort.java
@@ -76,7 +76,7 @@ public class TestTimSort {
    * @param length The sum of all run lengths that will be added to runs.
    */
   private static List<Long> runsJDKWorstCase(int minRun, int length) {
-    List<Long> runs = new ArrayList<Long>();
+    List<Long> runs = new ArrayList<>();
 
     long runningTotal = 0, Y = minRun + 4, X = minRun;
 
diff --git a/core/src/test/java/org/apache/spark/util/collection/unsafe/sort/UnsafeExternalSorterSuite.java b/core/src/test/java/org/apache/spark/util/collection/unsafe/sort/UnsafeExternalSorterSuite.java
index 492fe49ba4..b757ddc3b3 100644
--- a/core/src/test/java/org/apache/spark/util/collection/unsafe/sort/UnsafeExternalSorterSuite.java
+++ b/core/src/test/java/org/apache/spark/util/collection/unsafe/sort/UnsafeExternalSorterSuite.java
@@ -56,7 +56,7 @@ import static org.mockito.Mockito.*;
 
 public class UnsafeExternalSorterSuite {
 
-  final LinkedList<File> spillFilesCreated = new LinkedList<File>();
+  final LinkedList<File> spillFilesCreated = new LinkedList<>();
   final TestMemoryManager memoryManager =
     new TestMemoryManager(new SparkConf().set("spark.memory.offHeap.enabled", "false"));
   final TaskMemoryManager taskMemoryManager = new TaskMemoryManager(memoryManager, 0);
-- 
cgit v1.2.3
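Editorial postscript (not part of the patch above): one caveat when applying this kind of cleanup is that, until Java 9, the diamond cannot be combined with an anonymous class body. The sketch below (invented names, for illustration only) shows the pattern that must keep its explicit type arguments on Java 7/8.

```java
import java.util.Comparator;

public class DiamondAnonymousClass {
  public static void main(String[] args) {
    // Legal on Java 7/8: explicit type argument with an anonymous class.
    // Writing "new Comparator<>() { ... }" here only compiles on Java 9+.
    Comparator<String> byLength = new Comparator<String>() {
      @Override
      public int compare(String a, String b) {
        return Integer.compare(a.length(), b.length());
      }
    };
    System.out.println(byLength.compare("sql", "spark")); // negative: "sql" is shorter
  }
}
```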