author    Dongjoon Hyun <dongjoon@apache.org>    2016-04-24 20:40:03 -0700
committer Reynold Xin <rxin@databricks.com>      2016-04-24 20:40:03 -0700
commit    d34d6503786bbe429c10ddb1879519cc9bd709b6 (patch)
tree      885752ae5137e5a2f87dd8802c2e789ea271ee77 /core/src/main/java
parent    d0ca5797a8fc55a3046cdfad8860ba3a29f72b51 (diff)
[SPARK-14868][BUILD] Enable NewLineAtEofChecker in checkstyle and fix lint-java errors
## What changes were proposed in this pull request?

Spark enforces the `NewLineAtEofChecker` rule on Scala code via ScalaStyle, and most Java code already complies with it. This PR explicitly enforces the equivalent rule, `NewlineAtEndOfFile`, via CheckStyle. It also fixes the lint-java errors introduced since SPARK-14465. The changes are:

- Add a newline at the end of 19 files
- Fix 25 lint-java errors (12 RedundantModifier, 6 **ArrayTypeStyle**, 2 LineLength, 2 UnusedImports, 2 RegexpSingleline, 1 ModifierOrder); two of these rule types are illustrated in the sketch below

## How was this patch tested?

After the Jenkins test succeeds, `dev/lint-java` should pass. (Currently, Jenkins does not run lint-java.)

```bash
$ dev/lint-java
Using `mvn` from path: /usr/local/bin/mvn
Checkstyle checks passed.
```

Author: Dongjoon Hyun <dongjoon@apache.org>

Closes #12632 from dongjoon-hyun/SPARK-14868.
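As a hedged illustration of two of the fixed violation categories, here is a minimal hypothetical Java snippet (not taken from the Spark codebase; all names are invented) showing what the ModifierOrder and ArrayTypeStyle rules flag:

```java
// Hypothetical example; class and member names are illustrative only.
public class LintExamples {

  // ModifierOrder: Checkstyle expects modifiers in the JLS-recommended
  // order, so "public static abstract" must become "public abstract static".
  public abstract static class Base {
    public abstract int value();
  }

  // ArrayTypeStyle: the Java-style "int[] counts" is required rather than
  // the C-style "int counts[]".
  private final int[] counts = new int[16];

  public int firstCount() {
    return counts[0];
  }
}
```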
Diffstat (limited to 'core/src/main/java')
-rw-r--r--  core/src/main/java/org/apache/spark/api/java/function/package-info.java                   |  2
-rw-r--r--  core/src/main/java/org/apache/spark/shuffle/sort/ShuffleExternalSorter.java               |  4
-rw-r--r--  core/src/main/java/org/apache/spark/util/collection/unsafe/sort/PrefixComparators.java    | 27
-rw-r--r--  core/src/main/java/org/apache/spark/util/collection/unsafe/sort/RadixSort.java            |  4
-rw-r--r--  core/src/main/java/org/apache/spark/util/collection/unsafe/sort/UnsafeExternalSorter.java |  4
5 files changed, 20 insertions(+), 21 deletions(-)
diff --git a/core/src/main/java/org/apache/spark/api/java/function/package-info.java b/core/src/main/java/org/apache/spark/api/java/function/package-info.java
index 463a42f233..eefb29aca9 100644
--- a/core/src/main/java/org/apache/spark/api/java/function/package-info.java
+++ b/core/src/main/java/org/apache/spark/api/java/function/package-info.java
@@ -20,4 +20,4 @@
* these interfaces to pass functions to various Java API methods for Spark. Please visit Spark's
* Java programming guide for more details.
*/
-package org.apache.spark.api.java.function;
\ No newline at end of file
+package org.apache.spark.api.java.function;
diff --git a/core/src/main/java/org/apache/spark/shuffle/sort/ShuffleExternalSorter.java b/core/src/main/java/org/apache/spark/shuffle/sort/ShuffleExternalSorter.java
index c4041a97e8..2be5a16b2d 100644
--- a/core/src/main/java/org/apache/spark/shuffle/sort/ShuffleExternalSorter.java
+++ b/core/src/main/java/org/apache/spark/shuffle/sort/ShuffleExternalSorter.java
@@ -256,8 +256,8 @@ final class ShuffleExternalSorter extends MemoryConsumer {
final long spillSize = freeMemory();
inMemSorter.reset();
// Reset the in-memory sorter's pointer array only after freeing up the memory pages holding the
- // records. Otherwise, if the task is over allocated memory, then without freeing the memory pages,
- // we might not be able to get memory for the pointer array.
+ // records. Otherwise, if the task is over allocated memory, then without freeing the memory
+ // pages, we might not be able to get memory for the pointer array.
taskContext.taskMetrics().incMemoryBytesSpilled(spillSize);
return spillSize;
}
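The rewrapped comment in this hunk captures an ordering constraint worth spelling out: the sorter must release its memory pages before reallocating the pointer array, because a task that has already over-allocated memory may otherwise be unable to acquire space for the new array. Below is a minimal sketch of that pattern, not Spark's actual implementation; `freeMemory()` and `reset()` mirror the diff, while every other name is invented:

```java
// Illustrative sketch only; not Spark's actual implementation.
final class SpillOrderingSketch {
  private final InMemorySorter inMemSorter;
  private final TaskMetrics metrics;

  SpillOrderingSketch(InMemorySorter inMemSorter, TaskMetrics metrics) {
    this.inMemSorter = inMemSorter;
    this.metrics = metrics;
  }

  long spill() {
    // Step 1: write the buffered records out and free the pages holding them.
    long spillSize = freeMemory();
    // Step 2: only now rebuild the pointer array. With the pages released,
    // even a task that was over its memory budget has headroom to allocate it.
    inMemSorter.reset();
    metrics.incMemoryBytesSpilled(spillSize);
    return spillSize;
  }

  // Stand-in for the real page-releasing logic; returns the bytes freed.
  private long freeMemory() {
    return 0L;
  }

  interface InMemorySorter { void reset(); }
  interface TaskMetrics { void incMemoryBytesSpilled(long bytes); }
}
```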
diff --git a/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/PrefixComparators.java b/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/PrefixComparators.java
index 21f2fde79d..c44630fbbc 100644
--- a/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/PrefixComparators.java
+++ b/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/PrefixComparators.java
@@ -22,7 +22,6 @@ import com.google.common.primitives.UnsignedLongs;
import org.apache.spark.annotation.Private;
import org.apache.spark.unsafe.types.ByteArray;
import org.apache.spark.unsafe.types.UTF8String;
-import org.apache.spark.util.Utils;
@Private
public class PrefixComparators {
@@ -69,7 +68,7 @@ public class PrefixComparators {
* Provides radix sort parameters. Comparators implementing this also are indicating that the
* ordering they define is compatible with radix sort.
*/
- public static abstract class RadixSortSupport extends PrefixComparator {
+ public abstract static class RadixSortSupport extends PrefixComparator {
/** @return Whether the sort should be descending in binary sort order. */
public abstract boolean sortDescending();
@@ -82,37 +81,37 @@ public class PrefixComparators {
//
public static final class UnsignedPrefixComparator extends RadixSortSupport {
- @Override public final boolean sortDescending() { return false; }
- @Override public final boolean sortSigned() { return false; }
+ @Override public boolean sortDescending() { return false; }
+ @Override public boolean sortSigned() { return false; }
@Override
- public final int compare(long aPrefix, long bPrefix) {
+ public int compare(long aPrefix, long bPrefix) {
return UnsignedLongs.compare(aPrefix, bPrefix);
}
}
public static final class UnsignedPrefixComparatorDesc extends RadixSortSupport {
- @Override public final boolean sortDescending() { return true; }
- @Override public final boolean sortSigned() { return false; }
+ @Override public boolean sortDescending() { return true; }
+ @Override public boolean sortSigned() { return false; }
@Override
- public final int compare(long bPrefix, long aPrefix) {
+ public int compare(long bPrefix, long aPrefix) {
return UnsignedLongs.compare(aPrefix, bPrefix);
}
}
public static final class SignedPrefixComparator extends RadixSortSupport {
- @Override public final boolean sortDescending() { return false; }
- @Override public final boolean sortSigned() { return true; }
+ @Override public boolean sortDescending() { return false; }
+ @Override public boolean sortSigned() { return true; }
@Override
- public final int compare(long a, long b) {
+ public int compare(long a, long b) {
return (a < b) ? -1 : (a > b) ? 1 : 0;
}
}
public static final class SignedPrefixComparatorDesc extends RadixSortSupport {
- @Override public final boolean sortDescending() { return true; }
- @Override public final boolean sortSigned() { return true; }
+ @Override public boolean sortDescending() { return true; }
+ @Override public boolean sortSigned() { return true; }
@Override
- public final int compare(long b, long a) {
+ public int compare(long b, long a) {
return (a < b) ? -1 : (a > b) ? 1 : 0;
}
}
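The `final` removals in this hunk are behavior-preserving because each comparator class is itself declared `final`: a final class admits no subclasses, so its methods cannot be overridden whether or not they carry `final`. Checkstyle's RedundantModifier rule flags the extra keyword as noise. A tiny hypothetical example:

```java
// Hypothetical class; only the redundancy rule itself is being demonstrated.
public final class AscendingLongComparator {
  // Writing "public final int compare(...)" would compile as well, but the
  // "final" adds nothing: no subclass of a final class can ever exist.
  public int compare(long a, long b) {
    return (a < b) ? -1 : (a > b) ? 1 : 0;
  }
}
```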
diff --git a/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/RadixSort.java b/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/RadixSort.java
index 3357b8e474..4f3f0de7b8 100644
--- a/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/RadixSort.java
+++ b/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/RadixSort.java
@@ -16,7 +16,7 @@
*/
package org.apache.spark.util.collection.unsafe.sort;
-
+
import org.apache.spark.unsafe.Platform;
import org.apache.spark.unsafe.array.LongArray;
@@ -227,7 +227,7 @@ public class RadixSort {
}
return counts;
}
-
+
/**
* Specialization of sortAtByte() for key-prefix arrays.
*/
diff --git a/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/UnsafeExternalSorter.java b/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/UnsafeExternalSorter.java
index 3c1cd39dc2..8b6c96a4c4 100644
--- a/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/UnsafeExternalSorter.java
+++ b/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/UnsafeExternalSorter.java
@@ -212,8 +212,8 @@ public final class UnsafeExternalSorter extends MemoryConsumer {
// written to disk. This also counts the space needed to store the sorter's pointer array.
inMemSorter.reset();
// Reset the in-memory sorter's pointer array only after freeing up the memory pages holding the
- // records. Otherwise, if the task is over allocated memory, then without freeing the memory pages,
- // we might not be able to get memory for the pointer array.
+ // records. Otherwise, if the task is over allocated memory, then without freeing the memory
+ // pages, we might not be able to get memory for the pointer array.
taskContext.taskMetrics().incMemoryBytesSpilled(spillSize);
totalSpillBytes += spillSize;