diff options
author | Dongjoon Hyun <dongjoon@apache.org> | 2016-03-26 11:55:49 +0000 |
---|---|---|
committer | Sean Owen <sowen@cloudera.com> | 2016-03-26 11:55:49 +0000 |
commit | 1808465855a8e2ce554b024ad5ff530457584aed (patch) | |
tree | 376671c3150edecfb4b91e131302aa51242f45a8 /sql/core | |
parent | d23ad7c1c92a2344ec03bb4c600b766686faf439 (diff) | |
download | spark-1808465855a8e2ce554b024ad5ff530457584aed.tar.gz spark-1808465855a8e2ce554b024ad5ff530457584aed.tar.bz2 spark-1808465855a8e2ce554b024ad5ff530457584aed.zip |
[MINOR] Fix newly added java-lint errors
## What changes were proposed in this pull request?
This PR fixes some newly added java-lint errors (unused-imports, line-length).
## How was this patch tested?
Pass the Jenkins tests.
Author: Dongjoon Hyun <dongjoon@apache.org>
Closes #11968 from dongjoon-hyun/SPARK-14167.
Diffstat (limited to 'sql/core')
2 files changed, 16 insertions, 14 deletions
diff --git a/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedParquetRecordReader.java b/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedParquetRecordReader.java index c06342c3d4..5bfde55c3b 100644 --- a/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedParquetRecordReader.java +++ b/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedParquetRecordReader.java @@ -24,8 +24,6 @@ import org.apache.hadoop.mapreduce.InputSplit; import org.apache.hadoop.mapreduce.TaskAttemptContext; import org.apache.parquet.column.ColumnDescriptor; import org.apache.parquet.column.page.PageReadStore; -import org.apache.parquet.schema.OriginalType; -import org.apache.parquet.schema.PrimitiveType; import org.apache.parquet.schema.Type; import org.apache.spark.memory.MemoryMode; diff --git a/sql/core/src/test/java/test/org/apache/spark/sql/JavaDatasetSuite.java b/sql/core/src/test/java/test/org/apache/spark/sql/JavaDatasetSuite.java index 86db8df4c0..a6c819373b 100644 --- a/sql/core/src/test/java/test/org/apache/spark/sql/JavaDatasetSuite.java +++ b/sql/core/src/test/java/test/org/apache/spark/sql/JavaDatasetSuite.java @@ -169,12 +169,14 @@ public class JavaDatasetSuite implements Serializable { public void testGroupBy() { List<String> data = Arrays.asList("a", "foo", "bar"); Dataset<String> ds = context.createDataset(data, Encoders.STRING()); - KeyValueGroupedDataset<Integer, String> grouped = ds.groupByKey(new MapFunction<String, Integer>() { - @Override - public Integer call(String v) throws Exception { - return v.length(); - } - }, Encoders.INT()); + KeyValueGroupedDataset<Integer, String> grouped = ds.groupByKey( + new MapFunction<String, Integer>() { + @Override + public Integer call(String v) throws Exception { + return v.length(); + } + }, + Encoders.INT()); Dataset<String> mapped = grouped.mapGroups(new MapGroupsFunction<Integer, String, 
String>() { @Override @@ -217,12 +219,14 @@ public class JavaDatasetSuite implements Serializable { List<Integer> data2 = Arrays.asList(2, 6, 10); Dataset<Integer> ds2 = context.createDataset(data2, Encoders.INT()); - KeyValueGroupedDataset<Integer, Integer> grouped2 = ds2.groupByKey(new MapFunction<Integer, Integer>() { - @Override - public Integer call(Integer v) throws Exception { - return v / 2; - } - }, Encoders.INT()); + KeyValueGroupedDataset<Integer, Integer> grouped2 = ds2.groupByKey( + new MapFunction<Integer, Integer>() { + @Override + public Integer call(Integer v) throws Exception { + return v / 2; + } + }, + Encoders.INT()); Dataset<String> cogrouped = grouped.cogroup( grouped2, |