author     Dongjoon Hyun <dongjoon@apache.org>  2016-03-26 11:55:49 +0000
committer  Sean Owen <sowen@cloudera.com>       2016-03-26 11:55:49 +0000
commit     1808465855a8e2ce554b024ad5ff530457584aed (patch)
tree       376671c3150edecfb4b91e131302aa51242f45a8
parent     d23ad7c1c92a2344ec03bb4c600b766686faf439 (diff)
[MINOR] Fix newly added java-lint errors
## What changes were proposed in this pull request?
This PR fixes some newly added java-lint errors (unused imports, line length).
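For illustration (this snippet is hypothetical, not code from the patch), the two lint categories look like this; the fix for an unused import is to delete it, and the fix for a line running past the 100-character limit used by Spark's style checks is to wrap it:

```java
import java.util.List;      // used below, so the linter accepts it
import java.util.ArrayList; // unused import: the lint fix is simply to delete this line

public class LintExample {
  // A comment stretching past the 100-character limit is wrapped onto two
  // lines instead, exactly as done in JavaCorrelationsExample.java below.
  public int size(List<String> names) {
    return names.size();
  }
}
```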
## How was this patch tested?
Passes the Jenkins tests.
Author: Dongjoon Hyun <dongjoon@apache.org>
Closes #11968 from dongjoon-hyun/SPARK-14167.
7 files changed, 20 insertions, 22 deletions
diff --git a/core/src/main/scala/org/apache/spark/ui/JettyUtils.scala b/core/src/main/scala/org/apache/spark/ui/JettyUtils.scala
index d9fecc5e30..c3c59f857d 100644
--- a/core/src/main/scala/org/apache/spark/ui/JettyUtils.scala
+++ b/core/src/main/scala/org/apache/spark/ui/JettyUtils.scala
@@ -25,7 +25,7 @@ import scala.collection.mutable.ArrayBuffer
 import scala.language.implicitConversions
 import scala.xml.Node
 
-import org.eclipse.jetty.server.{AbstractConnector, Connector, Request, Server}
+import org.eclipse.jetty.server.{Connector, Request, Server}
 import org.eclipse.jetty.server.handler._
 import org.eclipse.jetty.server.nio.SelectChannelConnector
 import org.eclipse.jetty.server.ssl.SslSelectChannelConnector
diff --git a/examples/src/main/java/org/apache/spark/examples/mllib/JavaCorrelationsExample.java b/examples/src/main/java/org/apache/spark/examples/mllib/JavaCorrelationsExample.java
index fd19b43504..c0fa0b3cac 100644
--- a/examples/src/main/java/org/apache/spark/examples/mllib/JavaCorrelationsExample.java
+++ b/examples/src/main/java/org/apache/spark/examples/mllib/JavaCorrelationsExample.java
@@ -58,7 +58,8 @@ public class JavaCorrelationsExample {
       )
     );
 
-    // calculate the correlation matrix using Pearson's method. Use "spearman" for Spearman's method.
+    // calculate the correlation matrix using Pearson's method.
+    // Use "spearman" for Spearman's method.
     // If a method is not specified, Pearson's method will be used by default.
     Matrix correlMatrix = Statistics.corr(data.rdd(), "pearson");
     System.out.println(correlMatrix.toString());
diff --git a/examples/src/main/java/org/apache/spark/examples/mllib/JavaStratifiedSamplingExample.java b/examples/src/main/java/org/apache/spark/examples/mllib/JavaStratifiedSamplingExample.java
index f5a451019b..c27fba2783 100644
--- a/examples/src/main/java/org/apache/spark/examples/mllib/JavaStratifiedSamplingExample.java
+++ b/examples/src/main/java/org/apache/spark/examples/mllib/JavaStratifiedSamplingExample.java
@@ -27,7 +27,6 @@ import java.util.*;
 import scala.Tuple2;
 
 import org.apache.spark.api.java.JavaPairRDD;
-import org.apache.spark.api.java.function.VoidFunction;
 // $example off$
 
 public class JavaStratifiedSamplingExample {
diff --git a/graphx/src/main/scala/org/apache/spark/graphx/impl/GraphImpl.scala b/graphx/src/main/scala/org/apache/spark/graphx/impl/GraphImpl.scala
index 7903caa312..e18831382d 100644
--- a/graphx/src/main/scala/org/apache/spark/graphx/impl/GraphImpl.scala
+++ b/graphx/src/main/scala/org/apache/spark/graphx/impl/GraphImpl.scala
@@ -20,11 +20,9 @@ package org.apache.spark.graphx.impl
 import scala.reflect.{classTag, ClassTag}
 
 import org.apache.spark.HashPartitioner
-import org.apache.spark.SparkContext._
 import org.apache.spark.graphx._
-import org.apache.spark.graphx.impl.GraphImpl._
 import org.apache.spark.graphx.util.BytecodeUtils
-import org.apache.spark.rdd.{RDD, ShuffledRDD}
+import org.apache.spark.rdd.RDD
 import org.apache.spark.storage.StorageLevel
 
 /**
diff --git a/sql/catalyst/src/main/java/org/apache/spark/sql/catalyst/expressions/XXH64.java b/sql/catalyst/src/main/java/org/apache/spark/sql/catalyst/expressions/XXH64.java
index 5f2de266b5..f37ef83ad9 100644
--- a/sql/catalyst/src/main/java/org/apache/spark/sql/catalyst/expressions/XXH64.java
+++ b/sql/catalyst/src/main/java/org/apache/spark/sql/catalyst/expressions/XXH64.java
@@ -17,10 +17,8 @@
 package org.apache.spark.sql.catalyst.expressions;
 
 import org.apache.spark.unsafe.Platform;
-import org.apache.spark.util.SystemClock;
 
 // scalastyle: off
-
 /**
  * xxHash64. A high quality and fast 64 bit hash code by Yann Colet and Mathias Westerdahl. The
  * class below is modelled like its Murmur3_x86_32 cousin.
diff --git a/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedParquetRecordReader.java b/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedParquetRecordReader.java
index c06342c3d4..5bfde55c3b 100644
--- a/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedParquetRecordReader.java
+++ b/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedParquetRecordReader.java
@@ -24,8 +24,6 @@ import org.apache.hadoop.mapreduce.InputSplit;
 import org.apache.hadoop.mapreduce.TaskAttemptContext;
 import org.apache.parquet.column.ColumnDescriptor;
 import org.apache.parquet.column.page.PageReadStore;
-import org.apache.parquet.schema.OriginalType;
-import org.apache.parquet.schema.PrimitiveType;
 import org.apache.parquet.schema.Type;
 
 import org.apache.spark.memory.MemoryMode;
diff --git a/sql/core/src/test/java/test/org/apache/spark/sql/JavaDatasetSuite.java b/sql/core/src/test/java/test/org/apache/spark/sql/JavaDatasetSuite.java
index 86db8df4c0..a6c819373b 100644
--- a/sql/core/src/test/java/test/org/apache/spark/sql/JavaDatasetSuite.java
+++ b/sql/core/src/test/java/test/org/apache/spark/sql/JavaDatasetSuite.java
@@ -169,12 +169,14 @@ public class JavaDatasetSuite implements Serializable {
   public void testGroupBy() {
     List<String> data = Arrays.asList("a", "foo", "bar");
     Dataset<String> ds = context.createDataset(data, Encoders.STRING());
-    KeyValueGroupedDataset<Integer, String> grouped = ds.groupByKey(new MapFunction<String, Integer>() {
-      @Override
-      public Integer call(String v) throws Exception {
-        return v.length();
-      }
-    }, Encoders.INT());
+    KeyValueGroupedDataset<Integer, String> grouped = ds.groupByKey(
+      new MapFunction<String, Integer>() {
+        @Override
+        public Integer call(String v) throws Exception {
+          return v.length();
+        }
+      },
+      Encoders.INT());
 
     Dataset<String> mapped = grouped.mapGroups(new MapGroupsFunction<Integer, String, String>() {
       @Override
@@ -217,12 +219,14 @@ public class JavaDatasetSuite implements Serializable {
     List<Integer> data2 = Arrays.asList(2, 6, 10);
     Dataset<Integer> ds2 = context.createDataset(data2, Encoders.INT());
 
-    KeyValueGroupedDataset<Integer, Integer> grouped2 = ds2.groupByKey(new MapFunction<Integer, Integer>() {
-      @Override
-      public Integer call(Integer v) throws Exception {
-        return v / 2;
-      }
-    }, Encoders.INT());
+    KeyValueGroupedDataset<Integer, Integer> grouped2 = ds2.groupByKey(
+      new MapFunction<Integer, Integer>() {
+        @Override
+        public Integer call(Integer v) throws Exception {
+          return v / 2;
+        }
+      },
+      Encoders.INT());
 
     Dataset<String> cogrouped = grouped.cogroup(
       grouped2,
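As a side note (not part of this patch): under Java 8 the anonymous `MapFunction` classes reformatted above can be collapsed into lambdas, which avoids the line-length problem altogether; Spark still supported Java 7 at the time, which is why the patch keeps the anonymous classes. A minimal sketch, reusing the `ds` and `ds2` Datasets constructed in `testGroupBy()`; the explicit `MapFunction` cast makes the intended `groupByKey` overload unambiguous:

```java
import org.apache.spark.api.java.function.MapFunction;
import org.apache.spark.sql.Encoders;
import org.apache.spark.sql.KeyValueGroupedDataset;

// Lambda equivalents of the two anonymous-class groupings above.
KeyValueGroupedDataset<Integer, String> grouped =
    ds.groupByKey((MapFunction<String, Integer>) v -> v.length(), Encoders.INT());
KeyValueGroupedDataset<Integer, Integer> grouped2 =
    ds2.groupByKey((MapFunction<Integer, Integer>) v -> v / 2, Encoders.INT());
```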