author     Dongjoon Hyun <dongjoon@apache.org>    2016-03-26 11:55:49 +0000
committer  Sean Owen <sowen@cloudera.com>          2016-03-26 11:55:49 +0000
commit     1808465855a8e2ce554b024ad5ff530457584aed (patch)
tree       376671c3150edecfb4b91e131302aa51242f45a8 /sql
parent     d23ad7c1c92a2344ec03bb4c600b766686faf439 (diff)
[MINOR] Fix newly added java-lint errors
## What changes were proposed in this pull request?

This PR fixes some newly added java-lint errors (unused-imports, line-length).

## How was this patch tested?

Pass the Jenkins tests.

Author: Dongjoon Hyun <dongjoon@apache.org>

Closes #11968 from dongjoon-hyun/SPARK-14167.
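For context, the two checkstyle rules involved are easy to illustrate. A minimal sketch using a hypothetical class (not from this commit; Spark's Java style limits lines to 100 characters):

// UnusedImports: an import that nothing in the file references is flagged,
// just like the SystemClock and Parquet schema imports removed below.
import java.util.List;  // flagged: List never appears in the class body

public class LintExample {
  // LineLength: a declaration running past the limit is flagged; the fix,
  // as in the JavaDatasetSuite hunks below, is to wrap the arguments onto
  // their own continuation lines.
  public static int add(
      int first,
      int second) {
    return first + second;
  }
}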
Diffstat (limited to 'sql')
-rw-r--r--  sql/catalyst/src/main/java/org/apache/spark/sql/catalyst/expressions/XXH64.java  |  2
-rw-r--r--  sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedParquetRecordReader.java  |  2
-rw-r--r--  sql/core/src/test/java/test/org/apache/spark/sql/JavaDatasetSuite.java  |  28
3 files changed, 16 insertions(+), 16 deletions(-)
diff --git a/sql/catalyst/src/main/java/org/apache/spark/sql/catalyst/expressions/XXH64.java b/sql/catalyst/src/main/java/org/apache/spark/sql/catalyst/expressions/XXH64.java
index 5f2de266b5..f37ef83ad9 100644
--- a/sql/catalyst/src/main/java/org/apache/spark/sql/catalyst/expressions/XXH64.java
+++ b/sql/catalyst/src/main/java/org/apache/spark/sql/catalyst/expressions/XXH64.java
@@ -17,10 +17,8 @@
package org.apache.spark.sql.catalyst.expressions;
import org.apache.spark.unsafe.Platform;
-import org.apache.spark.util.SystemClock;
// scalastyle: off
-
/**
* xxHash64. A high quality and fast 64 bit hash code by Yann Collet and Mathias Westerdahl. The
* class below is modelled like its Murmur3_x86_32 cousin.
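The removed SystemClock import was never referenced; hashing needs no clock. For readers unfamiliar with the class, a minimal sketch of calling its static helpers (method names taken from this file; treat the exact signatures as an assumption):

import org.apache.spark.sql.catalyst.expressions.XXH64;

public class XXH64Example {
  public static void main(String[] args) {
    long seed = 42L;
    // Equal inputs hashed with equal seeds always produce the same 64-bit value.
    long intHash = XXH64.hashInt(123, seed);
    long longHash = XXH64.hashLong(456L, seed);
    System.out.println(intHash + " " + longHash);
  }
}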
diff --git a/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedParquetRecordReader.java b/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedParquetRecordReader.java
index c06342c3d4..5bfde55c3b 100644
--- a/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedParquetRecordReader.java
+++ b/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedParquetRecordReader.java
@@ -24,8 +24,6 @@ import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.parquet.column.ColumnDescriptor;
import org.apache.parquet.column.page.PageReadStore;
-import org.apache.parquet.schema.OriginalType;
-import org.apache.parquet.schema.PrimitiveType;
import org.apache.parquet.schema.Type;
import org.apache.spark.memory.MemoryMode;
diff --git a/sql/core/src/test/java/test/org/apache/spark/sql/JavaDatasetSuite.java b/sql/core/src/test/java/test/org/apache/spark/sql/JavaDatasetSuite.java
index 86db8df4c0..a6c819373b 100644
--- a/sql/core/src/test/java/test/org/apache/spark/sql/JavaDatasetSuite.java
+++ b/sql/core/src/test/java/test/org/apache/spark/sql/JavaDatasetSuite.java
@@ -169,12 +169,14 @@ public class JavaDatasetSuite implements Serializable {
public void testGroupBy() {
List<String> data = Arrays.asList("a", "foo", "bar");
Dataset<String> ds = context.createDataset(data, Encoders.STRING());
- KeyValueGroupedDataset<Integer, String> grouped = ds.groupByKey(new MapFunction<String, Integer>() {
- @Override
- public Integer call(String v) throws Exception {
- return v.length();
- }
- }, Encoders.INT());
+ KeyValueGroupedDataset<Integer, String> grouped = ds.groupByKey(
+ new MapFunction<String, Integer>() {
+ @Override
+ public Integer call(String v) throws Exception {
+ return v.length();
+ }
+ },
+ Encoders.INT());
Dataset<String> mapped = grouped.mapGroups(new MapGroupsFunction<Integer, String, String>() {
@Override
@@ -217,12 +219,14 @@ public class JavaDatasetSuite implements Serializable {
List<Integer> data2 = Arrays.asList(2, 6, 10);
Dataset<Integer> ds2 = context.createDataset(data2, Encoders.INT());
- KeyValueGroupedDataset<Integer, Integer> grouped2 = ds2.groupByKey(new MapFunction<Integer, Integer>() {
- @Override
- public Integer call(Integer v) throws Exception {
- return v / 2;
- }
- }, Encoders.INT());
+ KeyValueGroupedDataset<Integer, Integer> grouped2 = ds2.groupByKey(
+ new MapFunction<Integer, Integer>() {
+ @Override
+ public Integer call(Integer v) throws Exception {
+ return v / 2;
+ }
+ },
+ Encoders.INT());
Dataset<String> cogrouped = grouped.cogroup(
grouped2,
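Both JavaDatasetSuite hunks cure the same over-long line by splitting the groupByKey call across several lines. A Java 8 lambda would sidestep the limit altogether; a hypothetical sketch, not part of this commit (Spark still supported Java 7 at the time, so the tests kept anonymous classes; the cast selects the MapFunction overload over the Scala-function one):

KeyValueGroupedDataset<Integer, String> grouped = ds.groupByKey(
    (MapFunction<String, Integer>) v -> v.length(),
    Encoders.INT());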