aboutsummaryrefslogtreecommitdiff
path: root/examples
diff options
context:
space:
mode:
authorDongjoon Hyun <dongjoon@apache.org>2016-03-03 10:12:32 +0000
committerSean Owen <sowen@cloudera.com>2016-03-03 10:12:32 +0000
commitb5f02d6743ecb1633b7b13382f76cb8bfc2aa95c (patch)
tree7f3fc1df17d1bc8cd95ca0eaf3f79b6a47c5a845 /examples
parente97fc7f176f8bf501c9b3afd8410014e3b0e1602 (diff)
downloadspark-b5f02d6743ecb1633b7b13382f76cb8bfc2aa95c.tar.gz
spark-b5f02d6743ecb1633b7b13382f76cb8bfc2aa95c.tar.bz2
spark-b5f02d6743ecb1633b7b13382f76cb8bfc2aa95c.zip
[SPARK-13583][CORE][STREAMING] Remove unused imports and add checkstyle rule
## What changes were proposed in this pull request? After SPARK-6990, `dev/lint-java` keeps Java code healthy and helps PR review by saving much time. This issue aims to remove unused imports from Java/Scala code and add an `UnusedImports` checkstyle rule to help developers. ## How was this patch tested? ``` ./dev/lint-java ./build/sbt compile ``` Author: Dongjoon Hyun <dongjoon@apache.org> Closes #11438 from dongjoon-hyun/SPARK-13583.
Diffstat (limited to 'examples')
-rw-r--r--examples/src/main/java/org/apache/spark/examples/JavaLogQuery.java1
-rw-r--r--examples/src/main/java/org/apache/spark/examples/mllib/JavaMultiLabelClassificationMetricsExample.java2
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/DFSReadWriteTest.scala1
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/GroupByTest.scala1
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/LocalKMeans.scala2
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/LocalPi.scala3
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/LogQuery.scala1
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/SimpleSkewedGroupByTest.scala1
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/SkewedGroupByTest.scala1
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/SparkKMeans.scala1
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/SparkPageRank.scala1
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/SparkTC.scala1
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/graphx/SynthBenchmark.scala1
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/ml/LinearRegressionExample.scala4
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/mllib/CosineSimilarity.scala1
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/mllib/MovieLensALS.scala1
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/mllib/SampledRDDs.scala1
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/streaming/ActorWordCount.scala1
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/streaming/CustomReceiver.scala2
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/streaming/FlumePollingEventCount.scala3
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/streaming/SqlNetworkWordCount.scala1
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/streaming/StatefulNetworkWordCount.scala1
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/streaming/TwitterAlgebirdCMS.scala1
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/streaming/TwitterPopularTags.scala1
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/streaming/ZeroMQWordCount.scala3
25 files changed, 3 insertions, 34 deletions
diff --git a/examples/src/main/java/org/apache/spark/examples/JavaLogQuery.java b/examples/src/main/java/org/apache/spark/examples/JavaLogQuery.java
index 0448a1a0c8..1a6caa8cf8 100644
--- a/examples/src/main/java/org/apache/spark/examples/JavaLogQuery.java
+++ b/examples/src/main/java/org/apache/spark/examples/JavaLogQuery.java
@@ -28,7 +28,6 @@ import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.PairFunction;
import java.io.Serializable;
-import java.util.Collections;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
diff --git a/examples/src/main/java/org/apache/spark/examples/mllib/JavaMultiLabelClassificationMetricsExample.java b/examples/src/main/java/org/apache/spark/examples/mllib/JavaMultiLabelClassificationMetricsExample.java
index 5ba01e0d08..4717155438 100644
--- a/examples/src/main/java/org/apache/spark/examples/mllib/JavaMultiLabelClassificationMetricsExample.java
+++ b/examples/src/main/java/org/apache/spark/examples/mllib/JavaMultiLabelClassificationMetricsExample.java
@@ -25,10 +25,8 @@ import scala.Tuple2;
import org.apache.spark.api.java.*;
import org.apache.spark.mllib.evaluation.MultilabelMetrics;
-import org.apache.spark.rdd.RDD;
import org.apache.spark.SparkConf;
// $example off$
-import org.apache.spark.SparkContext;
public class JavaMultiLabelClassificationMetricsExample {
public static void main(String[] args) {
diff --git a/examples/src/main/scala/org/apache/spark/examples/DFSReadWriteTest.scala b/examples/src/main/scala/org/apache/spark/examples/DFSReadWriteTest.scala
index e37a3fa69d..743fc13db7 100644
--- a/examples/src/main/scala/org/apache/spark/examples/DFSReadWriteTest.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/DFSReadWriteTest.scala
@@ -23,7 +23,6 @@ import java.io.File
import scala.io.Source._
import org.apache.spark.{SparkConf, SparkContext}
-import org.apache.spark.SparkContext._
/**
* Simple test for reading and writing to a distributed
diff --git a/examples/src/main/scala/org/apache/spark/examples/GroupByTest.scala b/examples/src/main/scala/org/apache/spark/examples/GroupByTest.scala
index fa4a3afeec..08b6c717d4 100644
--- a/examples/src/main/scala/org/apache/spark/examples/GroupByTest.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/GroupByTest.scala
@@ -21,7 +21,6 @@ package org.apache.spark.examples
import java.util.Random
import org.apache.spark.{SparkConf, SparkContext}
-import org.apache.spark.SparkContext._
/**
* Usage: GroupByTest [numMappers] [numKVPairs] [KeySize] [numReducers]
diff --git a/examples/src/main/scala/org/apache/spark/examples/LocalKMeans.scala b/examples/src/main/scala/org/apache/spark/examples/LocalKMeans.scala
index 407e3e08b9..19bebffcb0 100644
--- a/examples/src/main/scala/org/apache/spark/examples/LocalKMeans.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/LocalKMeans.scala
@@ -25,8 +25,6 @@ import scala.collection.mutable.HashSet
import breeze.linalg.{squaredDistance, DenseVector, Vector}
-import org.apache.spark.SparkContext._
-
/**
* K-means clustering.
*
diff --git a/examples/src/main/scala/org/apache/spark/examples/LocalPi.scala b/examples/src/main/scala/org/apache/spark/examples/LocalPi.scala
index 3d923625f1..720d92fb9d 100644
--- a/examples/src/main/scala/org/apache/spark/examples/LocalPi.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/LocalPi.scala
@@ -20,9 +20,6 @@ package org.apache.spark.examples
import scala.math.random
-import org.apache.spark._
-import org.apache.spark.SparkContext._
-
object LocalPi {
def main(args: Array[String]) {
var count = 0
diff --git a/examples/src/main/scala/org/apache/spark/examples/LogQuery.scala b/examples/src/main/scala/org/apache/spark/examples/LogQuery.scala
index a80de10f46..c55b68e033 100644
--- a/examples/src/main/scala/org/apache/spark/examples/LogQuery.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/LogQuery.scala
@@ -19,7 +19,6 @@
package org.apache.spark.examples
import org.apache.spark.{SparkConf, SparkContext}
-import org.apache.spark.SparkContext._
/**
* Executes a roll up-style query against Apache logs.
diff --git a/examples/src/main/scala/org/apache/spark/examples/SimpleSkewedGroupByTest.scala b/examples/src/main/scala/org/apache/spark/examples/SimpleSkewedGroupByTest.scala
index 3b0b00fe4d..7c09664c2f 100644
--- a/examples/src/main/scala/org/apache/spark/examples/SimpleSkewedGroupByTest.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/SimpleSkewedGroupByTest.scala
@@ -21,7 +21,6 @@ package org.apache.spark.examples
import java.util.Random
import org.apache.spark.{SparkConf, SparkContext}
-import org.apache.spark.SparkContext._
/**
* Usage: SimpleSkewedGroupByTest [numMappers] [numKVPairs] [valSize] [numReducers] [ratio]
diff --git a/examples/src/main/scala/org/apache/spark/examples/SkewedGroupByTest.scala b/examples/src/main/scala/org/apache/spark/examples/SkewedGroupByTest.scala
index 719e2176fe..7796f362bb 100644
--- a/examples/src/main/scala/org/apache/spark/examples/SkewedGroupByTest.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/SkewedGroupByTest.scala
@@ -21,7 +21,6 @@ package org.apache.spark.examples
import java.util.Random
import org.apache.spark.{SparkConf, SparkContext}
-import org.apache.spark.SparkContext._
/**
* Usage: GroupByTest [numMappers] [numKVPairs] [KeySize] [numReducers]
diff --git a/examples/src/main/scala/org/apache/spark/examples/SparkKMeans.scala b/examples/src/main/scala/org/apache/spark/examples/SparkKMeans.scala
index 1ea9121e27..d9f94a42b1 100644
--- a/examples/src/main/scala/org/apache/spark/examples/SparkKMeans.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/SparkKMeans.scala
@@ -21,7 +21,6 @@ package org.apache.spark.examples
import breeze.linalg.{squaredDistance, DenseVector, Vector}
import org.apache.spark.{SparkConf, SparkContext}
-import org.apache.spark.SparkContext._
/**
* K-means clustering.
diff --git a/examples/src/main/scala/org/apache/spark/examples/SparkPageRank.scala b/examples/src/main/scala/org/apache/spark/examples/SparkPageRank.scala
index 018bdf6d31..2664ddbb87 100644
--- a/examples/src/main/scala/org/apache/spark/examples/SparkPageRank.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/SparkPageRank.scala
@@ -19,7 +19,6 @@
package org.apache.spark.examples
import org.apache.spark.{SparkConf, SparkContext}
-import org.apache.spark.SparkContext._
/**
* Computes the PageRank of URLs from an input file. Input file should
diff --git a/examples/src/main/scala/org/apache/spark/examples/SparkTC.scala b/examples/src/main/scala/org/apache/spark/examples/SparkTC.scala
index b92740f1fb..fc7a1f859f 100644
--- a/examples/src/main/scala/org/apache/spark/examples/SparkTC.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/SparkTC.scala
@@ -22,7 +22,6 @@ import scala.collection.mutable
import scala.util.Random
import org.apache.spark.{SparkConf, SparkContext}
-import org.apache.spark.SparkContext._
/**
* Transitive closure on a graph.
diff --git a/examples/src/main/scala/org/apache/spark/examples/graphx/SynthBenchmark.scala b/examples/src/main/scala/org/apache/spark/examples/graphx/SynthBenchmark.scala
index 41ca5cbb9f..6d2228c874 100644
--- a/examples/src/main/scala/org/apache/spark/examples/graphx/SynthBenchmark.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/graphx/SynthBenchmark.scala
@@ -21,7 +21,6 @@ package org.apache.spark.examples.graphx
import java.io.{FileOutputStream, PrintWriter}
import org.apache.spark.{SparkConf, SparkContext}
-import org.apache.spark.SparkContext._
import org.apache.spark.graphx.{GraphXUtils, PartitionStrategy}
import org.apache.spark.graphx.util.GraphGenerators
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/LinearRegressionExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/LinearRegressionExample.scala
index 50998c94de..25be87811d 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/LinearRegressionExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/LinearRegressionExample.scala
@@ -18,15 +18,13 @@
// scalastyle:off println
package org.apache.spark.examples.ml
-import scala.collection.mutable
import scala.language.reflectiveCalls
import scopt.OptionParser
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.examples.mllib.AbstractParams
-import org.apache.spark.ml.{Pipeline, PipelineStage}
-import org.apache.spark.ml.regression.{LinearRegression, LinearRegressionModel}
+import org.apache.spark.ml.regression.LinearRegression
import org.apache.spark.sql.DataFrame
/**
diff --git a/examples/src/main/scala/org/apache/spark/examples/mllib/CosineSimilarity.scala b/examples/src/main/scala/org/apache/spark/examples/mllib/CosineSimilarity.scala
index eda211b5a8..5ff3d36242 100644
--- a/examples/src/main/scala/org/apache/spark/examples/mllib/CosineSimilarity.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/mllib/CosineSimilarity.scala
@@ -21,7 +21,6 @@ package org.apache.spark.examples.mllib
import scopt.OptionParser
import org.apache.spark.{SparkConf, SparkContext}
-import org.apache.spark.SparkContext._
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.linalg.distributed.{MatrixEntry, RowMatrix}
diff --git a/examples/src/main/scala/org/apache/spark/examples/mllib/MovieLensALS.scala b/examples/src/main/scala/org/apache/spark/examples/mllib/MovieLensALS.scala
index 69691ae297..09750e53cb 100644
--- a/examples/src/main/scala/org/apache/spark/examples/mllib/MovieLensALS.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/mllib/MovieLensALS.scala
@@ -24,7 +24,6 @@ import org.apache.log4j.{Level, Logger}
import scopt.OptionParser
import org.apache.spark.{SparkConf, SparkContext}
-import org.apache.spark.SparkContext._
import org.apache.spark.mllib.recommendation.{ALS, MatrixFactorizationModel, Rating}
import org.apache.spark.rdd.RDD
diff --git a/examples/src/main/scala/org/apache/spark/examples/mllib/SampledRDDs.scala b/examples/src/main/scala/org/apache/spark/examples/mllib/SampledRDDs.scala
index 011db4fd0c..0da4005977 100644
--- a/examples/src/main/scala/org/apache/spark/examples/mllib/SampledRDDs.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/mllib/SampledRDDs.scala
@@ -21,7 +21,6 @@ package org.apache.spark.examples.mllib
import scopt.OptionParser
import org.apache.spark.{SparkConf, SparkContext}
-import org.apache.spark.SparkContext._
import org.apache.spark.mllib.util.MLUtils
/**
diff --git a/examples/src/main/scala/org/apache/spark/examples/streaming/ActorWordCount.scala b/examples/src/main/scala/org/apache/spark/examples/streaming/ActorWordCount.scala
index 9f7c7d50e5..2770b8af1c 100644
--- a/examples/src/main/scala/org/apache/spark/examples/streaming/ActorWordCount.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/streaming/ActorWordCount.scala
@@ -19,7 +19,6 @@
package org.apache.spark.examples.streaming
import scala.collection.mutable.LinkedHashSet
-import scala.reflect.ClassTag
import scala.util.Random
import akka.actor._
diff --git a/examples/src/main/scala/org/apache/spark/examples/streaming/CustomReceiver.scala b/examples/src/main/scala/org/apache/spark/examples/streaming/CustomReceiver.scala
index ad13d437dd..5ce5778e42 100644
--- a/examples/src/main/scala/org/apache/spark/examples/streaming/CustomReceiver.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/streaming/CustomReceiver.scala
@@ -18,7 +18,7 @@
// scalastyle:off println
package org.apache.spark.examples.streaming
-import java.io.{BufferedReader, InputStream, InputStreamReader}
+import java.io.{BufferedReader, InputStreamReader}
import java.net.Socket
import org.apache.spark.{Logging, SparkConf}
diff --git a/examples/src/main/scala/org/apache/spark/examples/streaming/FlumePollingEventCount.scala b/examples/src/main/scala/org/apache/spark/examples/streaming/FlumePollingEventCount.scala
index fe3b79ed5d..dd725d72c2 100644
--- a/examples/src/main/scala/org/apache/spark/examples/streaming/FlumePollingEventCount.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/streaming/FlumePollingEventCount.scala
@@ -18,10 +18,7 @@
// scalastyle:off println
package org.apache.spark.examples.streaming
-import java.net.InetSocketAddress
-
import org.apache.spark.SparkConf
-import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming._
import org.apache.spark.streaming.flume._
import org.apache.spark.util.IntParam
diff --git a/examples/src/main/scala/org/apache/spark/examples/streaming/SqlNetworkWordCount.scala b/examples/src/main/scala/org/apache/spark/examples/streaming/SqlNetworkWordCount.scala
index 9aa0f54312..3727f8fe6a 100644
--- a/examples/src/main/scala/org/apache/spark/examples/streaming/SqlNetworkWordCount.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/streaming/SqlNetworkWordCount.scala
@@ -24,7 +24,6 @@ import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SQLContext
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.{Seconds, StreamingContext, Time}
-import org.apache.spark.util.IntParam
/**
* Use DataFrames and SQL to count words in UTF8 encoded, '\n' delimited text received from the
diff --git a/examples/src/main/scala/org/apache/spark/examples/streaming/StatefulNetworkWordCount.scala b/examples/src/main/scala/org/apache/spark/examples/streaming/StatefulNetworkWordCount.scala
index c85d6843dc..2811e67009 100644
--- a/examples/src/main/scala/org/apache/spark/examples/streaming/StatefulNetworkWordCount.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/streaming/StatefulNetworkWordCount.scala
@@ -18,7 +18,6 @@
// scalastyle:off println
package org.apache.spark.examples.streaming
-import org.apache.spark.HashPartitioner
import org.apache.spark.SparkConf
import org.apache.spark.streaming._
diff --git a/examples/src/main/scala/org/apache/spark/examples/streaming/TwitterAlgebirdCMS.scala b/examples/src/main/scala/org/apache/spark/examples/streaming/TwitterAlgebirdCMS.scala
index 825c671a92..5af82e161a 100644
--- a/examples/src/main/scala/org/apache/spark/examples/streaming/TwitterAlgebirdCMS.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/streaming/TwitterAlgebirdCMS.scala
@@ -22,7 +22,6 @@ import com.twitter.algebird._
import com.twitter.algebird.CMSHasherImplicits._
import org.apache.spark.SparkConf
-import org.apache.spark.SparkContext._
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.streaming.twitter._
diff --git a/examples/src/main/scala/org/apache/spark/examples/streaming/TwitterPopularTags.scala b/examples/src/main/scala/org/apache/spark/examples/streaming/TwitterPopularTags.scala
index 49cee1b43c..c386e39d52 100644
--- a/examples/src/main/scala/org/apache/spark/examples/streaming/TwitterPopularTags.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/streaming/TwitterPopularTags.scala
@@ -19,7 +19,6 @@
package org.apache.spark.examples.streaming
import org.apache.spark.streaming.{Seconds, StreamingContext}
-import org.apache.spark.SparkContext._
import org.apache.spark.streaming.twitter._
import org.apache.spark.SparkConf
diff --git a/examples/src/main/scala/org/apache/spark/examples/streaming/ZeroMQWordCount.scala b/examples/src/main/scala/org/apache/spark/examples/streaming/ZeroMQWordCount.scala
index f612e508eb..99b561750b 100644
--- a/examples/src/main/scala/org/apache/spark/examples/streaming/ZeroMQWordCount.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/streaming/ZeroMQWordCount.scala
@@ -25,9 +25,8 @@ import akka.actor.actorRef2Scala
import akka.util.ByteString
import akka.zeromq._
import akka.zeromq.Subscribe
-import com.typesafe.config.ConfigFactory
-import org.apache.spark.{SparkConf, TaskContext}
+import org.apache.spark.SparkConf
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.streaming.zeromq._