author    Jonathan Alter <jonalter@users.noreply.github.com>    2015-07-10 11:34:01 +0100
committer Sean Owen <sowen@cloudera.com>                        2015-07-10 11:34:01 +0100
commit    e14b545d2dcbc4587688b4c46718d3680b0a2f67 (patch)
tree      8ae5fe8258bce4fd1cb2d55c7031de8b1dcc3963
parent    d538919cc4fd3ab940d478c62dce1bae0270cfeb (diff)
[SPARK-7977] [BUILD] Disallowing println
Author: Jonathan Alter <jonalter@users.noreply.github.com>

Closes #7093 from jonalter/SPARK-7977 and squashes the following commits:

ccd44cc [Jonathan Alter] Changed println to log in ThreadingSuite
7fcac3e [Jonathan Alter] Reverting to println in ThreadingSuite
10724b6 [Jonathan Alter] Changing some printlns to logs in tests
eeec1e7 [Jonathan Alter] Merge branch 'master' of github.com:apache/spark into SPARK-7977
0b1dcb4 [Jonathan Alter] More println cleanup
aedaf80 [Jonathan Alter] Merge branch 'master' of github.com:apache/spark into SPARK-7977
925fd98 [Jonathan Alter] Merge branch 'master' of github.com:apache/spark into SPARK-7977
0c16fa3 [Jonathan Alter] Replacing some printlns with logs
45c7e05 [Jonathan Alter] Merge branch 'master' of github.com:apache/spark into SPARK-7977
5c8e283 [Jonathan Alter] Allowing println in audit-release examples
5b50da1 [Jonathan Alter] Allowing printlns in example files
ca4b477 [Jonathan Alter] Merge branch 'master' of github.com:apache/spark into SPARK-7977
83ab635 [Jonathan Alter] Fixing new printlns
54b131f [Jonathan Alter] Merge branch 'master' of github.com:apache/spark into SPARK-7977
1cd8a81 [Jonathan Alter] Removing some unnecessary comments and printlns
b837c3a [Jonathan Alter] Disallowing println
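The commit enforces the ban through scalastyle (scalastyle-config.xml is among the files changed below, 12 lines) and then either replaces println with logInfo/logError/logDebug calls or, where console output is intentional (usage messages, example programs, tests of console behaviour), wraps the call in suppression comments. The wrapped form is the pattern repeated throughout the diff; the sketch below restates it in a self-contained snippet, where the enclosing object, usage string, and helper name are illustrative and not part of this commit:

object UsageExample {
  // Illustrative helper, not from this commit: console output is intentional
  // here, so the println check is switched off for exactly this block and
  // switched back on immediately afterwards.
  private def printUsageAndExit(exitCode: Int): Unit = {
    // scalastyle:off println
    System.err.println("Usage: UsageExample [options]")
    // scalastyle:on println
    System.exit(exitCode)
  }

  def main(args: Array[String]): Unit = {
    if (args.isEmpty) printUsageAndExit(1)
  }
}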
-rw-r--r-- core/src/main/scala/org/apache/spark/Logging.scala | 2
-rw-r--r-- core/src/main/scala/org/apache/spark/api/r/RBackend.scala | 2
-rw-r--r-- core/src/main/scala/org/apache/spark/api/r/RRDD.scala | 2
-rw-r--r-- core/src/main/scala/org/apache/spark/deploy/Client.scala | 30
-rw-r--r-- core/src/main/scala/org/apache/spark/deploy/ClientArguments.scala | 4
-rw-r--r-- core/src/main/scala/org/apache/spark/deploy/RRunner.scala | 2
-rw-r--r-- core/src/main/scala/org/apache/spark/deploy/SparkSubmit.scala | 18
-rw-r--r-- core/src/main/scala/org/apache/spark/deploy/SparkSubmitArguments.scala | 4
-rw-r--r-- core/src/main/scala/org/apache/spark/deploy/client/TestExecutor.scala | 2
-rw-r--r-- core/src/main/scala/org/apache/spark/deploy/history/HistoryServerArguments.scala | 2
-rw-r--r-- core/src/main/scala/org/apache/spark/deploy/master/MasterArguments.scala | 2
-rw-r--r-- core/src/main/scala/org/apache/spark/deploy/mesos/MesosClusterDispatcherArguments.scala | 6
-rw-r--r-- core/src/main/scala/org/apache/spark/deploy/worker/DriverWrapper.scala | 2
-rw-r--r-- core/src/main/scala/org/apache/spark/deploy/worker/WorkerArguments.scala | 4
-rw-r--r-- core/src/main/scala/org/apache/spark/executor/CoarseGrainedExecutorBackend.scala | 4
-rw-r--r-- core/src/main/scala/org/apache/spark/input/FixedLengthBinaryInputFormat.scala | 7
-rw-r--r-- core/src/main/scala/org/apache/spark/network/nio/BlockMessage.scala | 22
-rw-r--r-- core/src/main/scala/org/apache/spark/network/nio/BlockMessageArray.scala | 34
-rw-r--r-- core/src/main/scala/org/apache/spark/network/nio/ConnectionManager.scala | 4
-rw-r--r-- core/src/main/scala/org/apache/spark/rdd/PipedRDD.scala | 4
-rw-r--r-- core/src/main/scala/org/apache/spark/scheduler/EventLoggingListener.scala | 2
-rw-r--r-- core/src/main/scala/org/apache/spark/scheduler/JobLogger.scala | 2
-rw-r--r-- core/src/main/scala/org/apache/spark/ui/JettyUtils.scala | 2
-rw-r--r-- core/src/main/scala/org/apache/spark/ui/UIWorkloadGenerator.scala | 6
-rw-r--r-- core/src/main/scala/org/apache/spark/util/Distribution.scala | 6
-rw-r--r-- core/src/main/scala/org/apache/spark/util/random/XORShiftRandom.scala | 2
-rw-r--r-- core/src/test/scala/org/apache/spark/DistributedSuite.scala | 2
-rw-r--r-- core/src/test/scala/org/apache/spark/FailureSuite.scala | 2
-rw-r--r-- core/src/test/scala/org/apache/spark/FileServerSuite.scala | 2
-rw-r--r-- core/src/test/scala/org/apache/spark/ThreadingSuite.scala | 6
-rw-r--r-- core/src/test/scala/org/apache/spark/deploy/SparkSubmitSuite.scala | 4
-rw-r--r-- core/src/test/scala/org/apache/spark/deploy/SparkSubmitUtilsSuite.scala | 2
-rw-r--r-- core/src/test/scala/org/apache/spark/input/WholeTextFileRecordReaderSuite.scala | 8
-rw-r--r-- core/src/test/scala/org/apache/spark/metrics/InputOutputMetricsSuite.scala | 2
-rw-r--r-- core/src/test/scala/org/apache/spark/scheduler/ReplayListenerSuite.scala | 2
-rw-r--r-- core/src/test/scala/org/apache/spark/util/ClosureCleanerSuite.scala | 2
-rw-r--r-- core/src/test/scala/org/apache/spark/util/UtilsSuite.scala | 2
-rw-r--r-- core/src/test/scala/org/apache/spark/util/collection/SizeTrackerSuite.scala | 4
-rw-r--r-- core/src/test/scala/org/apache/spark/util/collection/SorterSuite.scala | 10
-rw-r--r-- dev/audit-release/sbt_app_core/src/main/scala/SparkApp.scala | 2
-rw-r--r-- dev/audit-release/sbt_app_ganglia/src/main/scala/SparkApp.scala | 2
-rw-r--r-- dev/audit-release/sbt_app_graphx/src/main/scala/GraphxApp.scala | 2
-rw-r--r-- dev/audit-release/sbt_app_hive/src/main/scala/HiveApp.scala | 2
-rw-r--r-- dev/audit-release/sbt_app_kinesis/src/main/scala/SparkApp.scala | 2
-rw-r--r-- dev/audit-release/sbt_app_sql/src/main/scala/SqlApp.scala | 2
-rw-r--r-- dev/audit-release/sbt_app_streaming/src/main/scala/StreamingApp.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/BroadcastTest.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/CassandraCQLTest.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/CassandraTest.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/DFSReadWriteTest.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/DriverSubmissionTest.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/GroupByTest.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/HBaseTest.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/HdfsTest.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/LocalALS.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/LocalFileLR.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/LocalKMeans.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/LocalLR.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/LocalPi.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/LogQuery.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/MultiBroadcastTest.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/SimpleSkewedGroupByTest.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/SkewedGroupByTest.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/SparkALS.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/SparkHdfsLR.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/SparkKMeans.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/SparkLR.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/SparkPageRank.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/SparkPi.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/SparkTC.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/SparkTachyonHdfsLR.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/SparkTachyonPi.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/graphx/Analytics.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/graphx/LiveJournalPageRank.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/graphx/SynthBenchmark.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/ml/CrossValidatorExample.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/ml/DecisionTreeExample.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/ml/DeveloperApiExample.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/ml/GBTExample.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/ml/LinearRegressionExample.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/ml/LogisticRegressionExample.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/ml/MovieLensALS.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/ml/OneVsRestExample.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/ml/RandomForestExample.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/ml/SimpleParamsExample.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/ml/SimpleTextClassificationPipeline.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/mllib/BinaryClassification.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/mllib/Correlations.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/mllib/CosineSimilarity.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/mllib/DatasetExample.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/mllib/DecisionTreeRunner.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/mllib/DenseGaussianMixture.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/mllib/DenseKMeans.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/mllib/FPGrowthExample.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/mllib/GradientBoostedTreesRunner.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/mllib/LDAExample.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/mllib/LinearRegression.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/mllib/MovieLensALS.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/mllib/MultivariateSummarizer.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/mllib/PowerIterationClusteringExample.scala | 3
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/mllib/RandomRDDGeneration.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/mllib/SampledRDDs.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/mllib/SparseNaiveBayes.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/mllib/StreamingKMeansExample.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/mllib/StreamingLinearRegression.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/mllib/StreamingLogisticRegression.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/mllib/TallSkinnyPCA.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/mllib/TallSkinnySVD.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/sql/RDDRelation.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/sql/hive/HiveFromSpark.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/streaming/ActorWordCount.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/streaming/CustomReceiver.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/streaming/DirectKafkaWordCount.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/streaming/FlumeEventCount.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/streaming/FlumePollingEventCount.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/streaming/HdfsWordCount.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/streaming/KafkaWordCount.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/streaming/MQTTWordCount.scala | 4
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/streaming/NetworkWordCount.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/streaming/RawNetworkGrep.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/streaming/RecoverableNetworkWordCount.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/streaming/SqlNetworkWordCount.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/streaming/StatefulNetworkWordCount.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/streaming/TwitterAlgebirdCMS.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/streaming/TwitterAlgebirdHLL.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/streaming/TwitterPopularTags.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/streaming/ZeroMQWordCount.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/streaming/clickstream/PageViewGenerator.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/streaming/clickstream/PageViewStream.scala | 2
-rw-r--r-- external/kafka/src/test/scala/org/apache/spark/streaming/kafka/DirectKafkaStreamSuite.scala | 2
-rw-r--r-- extras/kinesis-asl/src/main/scala/org/apache/spark/examples/streaming/KinesisWordCountASL.scala | 2
-rw-r--r-- graphx/src/main/scala/org/apache/spark/graphx/util/BytecodeUtils.scala | 1
-rw-r--r-- graphx/src/main/scala/org/apache/spark/graphx/util/GraphGenerators.scala | 4
-rw-r--r-- graphx/src/test/scala/org/apache/spark/graphx/util/BytecodeUtilsSuite.scala | 2
-rw-r--r-- mllib/src/main/scala/org/apache/spark/mllib/util/KMeansDataGenerator.scala | 2
-rw-r--r-- mllib/src/main/scala/org/apache/spark/mllib/util/LinearDataGenerator.scala | 2
-rw-r--r-- mllib/src/main/scala/org/apache/spark/mllib/util/LogisticRegressionDataGenerator.scala | 2
-rw-r--r-- mllib/src/main/scala/org/apache/spark/mllib/util/MFDataGenerator.scala | 2
-rw-r--r-- mllib/src/main/scala/org/apache/spark/mllib/util/SVMDataGenerator.scala | 2
-rw-r--r-- mllib/src/test/scala/org/apache/spark/ml/feature/VectorIndexerSuite.scala | 10
-rw-r--r-- mllib/src/test/scala/org/apache/spark/mllib/linalg/VectorsSuite.scala | 6
-rw-r--r-- mllib/src/test/scala/org/apache/spark/mllib/stat/CorrelationSuite.scala | 6
-rw-r--r-- mllib/src/test/scala/org/apache/spark/mllib/tree/GradientBoostedTreesSuite.scala | 10
-rw-r--r-- mllib/src/test/scala/org/apache/spark/mllib/util/NumericParserSuite.scala | 2
-rw-r--r-- project/SparkBuild.scala | 4
-rw-r--r-- repl/scala-2.10/src/main/scala/org/apache/spark/repl/SparkCommandLine.scala | 2
-rw-r--r-- repl/scala-2.10/src/main/scala/org/apache/spark/repl/SparkILoop.scala | 2
-rw-r--r-- repl/scala-2.10/src/main/scala/org/apache/spark/repl/SparkILoopInit.scala | 2
-rw-r--r-- repl/scala-2.10/src/main/scala/org/apache/spark/repl/SparkIMain.scala | 2
-rw-r--r-- repl/scala-2.11/src/main/scala/org/apache/spark/repl/SparkILoop.scala | 2
-rw-r--r-- repl/scala-2.11/src/main/scala/org/apache/spark/repl/SparkIMain.scala | 4
-rw-r--r-- repl/scala-2.11/src/main/scala/org/apache/spark/repl/SparkReplReporter.scala | 2
-rw-r--r-- scalastyle-config.xml | 12
-rw-r--r-- sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/package.scala | 2
-rw-r--r-- sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/QueryPlan.scala | 2
-rw-r--r-- sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/package.scala | 2
-rw-r--r-- sql/catalyst/src/main/scala/org/apache/spark/sql/types/StructType.scala | 2
-rw-r--r-- sql/core/src/main/scala/org/apache/spark/sql/Column.scala | 2
-rw-r--r-- sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala | 6
-rw-r--r-- sql/core/src/main/scala/org/apache/spark/sql/execution/debug/package.scala | 16
-rw-r--r-- sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLCLIDriver.scala | 12
-rw-r--r-- sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveContext.scala | 5
-rw-r--r-- sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveQl.scala | 5
-rw-r--r-- sql/hive/src/main/scala/org/apache/spark/sql/hive/client/ClientWrapper.scala | 2
-rw-r--r-- sql/hive/src/test/resources/regression-test-SPARK-8489/Main.scala | 2
-rw-r--r-- sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveMetastoreCatalogSuite.scala | 6
-rw-r--r-- sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveSparkSubmitSuite.scala | 2
-rw-r--r-- sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertIntoHiveTableSuite.scala | 2
-rw-r--r-- sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala | 6
-rw-r--r-- sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveUDFSuite.scala | 1
-rw-r--r-- streaming/src/main/scala/org/apache/spark/streaming/dstream/DStream.scala | 2
-rw-r--r-- streaming/src/main/scala/org/apache/spark/streaming/util/RawTextSender.scala | 2
-rw-r--r-- streaming/src/main/scala/org/apache/spark/streaming/util/RecurringTimer.scala | 4
-rw-r--r-- streaming/src/test/scala/org/apache/spark/streaming/MasterFailureTest.scala | 4
-rw-r--r-- streaming/src/test/scala/org/apache/spark/streaming/scheduler/JobGeneratorSuite.scala | 1
-rw-r--r-- tools/src/main/scala/org/apache/spark/tools/GenerateMIMAIgnore.scala | 8
-rw-r--r-- tools/src/main/scala/org/apache/spark/tools/JavaAPICompletenessChecker.scala | 4
-rw-r--r-- tools/src/main/scala/org/apache/spark/tools/StoragePerfTester.scala | 4
-rw-r--r-- yarn/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMasterArguments.scala | 4
-rw-r--r-- yarn/src/main/scala/org/apache/spark/deploy/yarn/Client.scala | 2
-rw-r--r-- yarn/src/main/scala/org/apache/spark/deploy/yarn/ClientArguments.scala | 4
-rw-r--r-- yarn/src/test/scala/org/apache/spark/deploy/yarn/YarnClusterSuite.scala | 4
182 files changed, 478 insertions(+), 135 deletions(-)
diff --git a/core/src/main/scala/org/apache/spark/Logging.scala b/core/src/main/scala/org/apache/spark/Logging.scala
index 7fcb7830e7..87ab099267 100644
--- a/core/src/main/scala/org/apache/spark/Logging.scala
+++ b/core/src/main/scala/org/apache/spark/Logging.scala
@@ -121,6 +121,7 @@ trait Logging {
if (usingLog4j12) {
val log4j12Initialized = LogManager.getRootLogger.getAllAppenders.hasMoreElements
if (!log4j12Initialized) {
+ // scalastyle:off println
if (Utils.isInInterpreter) {
val replDefaultLogProps = "org/apache/spark/log4j-defaults-repl.properties"
Option(Utils.getSparkClassLoader.getResource(replDefaultLogProps)) match {
@@ -141,6 +142,7 @@ trait Logging {
System.err.println(s"Spark was unable to load $defaultLogProps")
}
}
+ // scalastyle:on println
}
}
Logging.initialized = true
diff --git a/core/src/main/scala/org/apache/spark/api/r/RBackend.scala b/core/src/main/scala/org/apache/spark/api/r/RBackend.scala
index 1a5f2bca26..b7e72d4d0e 100644
--- a/core/src/main/scala/org/apache/spark/api/r/RBackend.scala
+++ b/core/src/main/scala/org/apache/spark/api/r/RBackend.scala
@@ -95,7 +95,9 @@ private[spark] class RBackend {
private[spark] object RBackend extends Logging {
def main(args: Array[String]): Unit = {
if (args.length < 1) {
+ // scalastyle:off println
System.err.println("Usage: RBackend <tempFilePath>")
+ // scalastyle:on println
System.exit(-1)
}
val sparkRBackend = new RBackend()
diff --git a/core/src/main/scala/org/apache/spark/api/r/RRDD.scala b/core/src/main/scala/org/apache/spark/api/r/RRDD.scala
index 524676544d..ff1702f7de 100644
--- a/core/src/main/scala/org/apache/spark/api/r/RRDD.scala
+++ b/core/src/main/scala/org/apache/spark/api/r/RRDD.scala
@@ -161,7 +161,9 @@ private abstract class BaseRRDD[T: ClassTag, U: ClassTag](
dataOut.write(elem.asInstanceOf[Array[Byte]])
} else if (deserializer == SerializationFormats.STRING) {
// write string(for StringRRDD)
+ // scalastyle:off println
printOut.println(elem)
+ // scalastyle:on println
}
}
diff --git a/core/src/main/scala/org/apache/spark/deploy/Client.scala b/core/src/main/scala/org/apache/spark/deploy/Client.scala
index 71f7e21291..f03875a3e8 100644
--- a/core/src/main/scala/org/apache/spark/deploy/Client.scala
+++ b/core/src/main/scala/org/apache/spark/deploy/Client.scala
@@ -118,26 +118,26 @@ private class ClientEndpoint(
def pollAndReportStatus(driverId: String) {
// Since ClientEndpoint is the only RpcEndpoint in the process, blocking the event loop thread
// is fine.
- println("... waiting before polling master for driver state")
+ logInfo("... waiting before polling master for driver state")
Thread.sleep(5000)
- println("... polling master for driver state")
+ logInfo("... polling master for driver state")
val statusResponse =
activeMasterEndpoint.askWithRetry[DriverStatusResponse](RequestDriverStatus(driverId))
statusResponse.found match {
case false =>
- println(s"ERROR: Cluster master did not recognize $driverId")
+ logError(s"ERROR: Cluster master did not recognize $driverId")
System.exit(-1)
case true =>
- println(s"State of $driverId is ${statusResponse.state.get}")
+ logInfo(s"State of $driverId is ${statusResponse.state.get}")
// Worker node, if present
(statusResponse.workerId, statusResponse.workerHostPort, statusResponse.state) match {
case (Some(id), Some(hostPort), Some(DriverState.RUNNING)) =>
- println(s"Driver running on $hostPort ($id)")
+ logInfo(s"Driver running on $hostPort ($id)")
case _ =>
}
// Exception, if present
statusResponse.exception.map { e =>
- println(s"Exception from cluster was: $e")
+ logError(s"Exception from cluster was: $e")
e.printStackTrace()
System.exit(-1)
}
@@ -148,7 +148,7 @@ private class ClientEndpoint(
override def receive: PartialFunction[Any, Unit] = {
case SubmitDriverResponse(master, success, driverId, message) =>
- println(message)
+ logInfo(message)
if (success) {
activeMasterEndpoint = master
pollAndReportStatus(driverId.get)
@@ -158,7 +158,7 @@ private class ClientEndpoint(
case KillDriverResponse(master, driverId, success, message) =>
- println(message)
+ logInfo(message)
if (success) {
activeMasterEndpoint = master
pollAndReportStatus(driverId)
@@ -169,13 +169,13 @@ private class ClientEndpoint(
override def onDisconnected(remoteAddress: RpcAddress): Unit = {
if (!lostMasters.contains(remoteAddress)) {
- println(s"Error connecting to master $remoteAddress.")
+ logError(s"Error connecting to master $remoteAddress.")
lostMasters += remoteAddress
// Note that this heuristic does not account for the fact that a Master can recover within
// the lifetime of this client. Thus, once a Master is lost it is lost to us forever. This
// is not currently a concern, however, because this client does not retry submissions.
if (lostMasters.size >= masterEndpoints.size) {
- println("No master is available, exiting.")
+ logError("No master is available, exiting.")
System.exit(-1)
}
}
@@ -183,18 +183,18 @@ private class ClientEndpoint(
override def onNetworkError(cause: Throwable, remoteAddress: RpcAddress): Unit = {
if (!lostMasters.contains(remoteAddress)) {
- println(s"Error connecting to master ($remoteAddress).")
- println(s"Cause was: $cause")
+ logError(s"Error connecting to master ($remoteAddress).")
+ logError(s"Cause was: $cause")
lostMasters += remoteAddress
if (lostMasters.size >= masterEndpoints.size) {
- println("No master is available, exiting.")
+ logError("No master is available, exiting.")
System.exit(-1)
}
}
}
override def onError(cause: Throwable): Unit = {
- println(s"Error processing messages, exiting.")
+ logError(s"Error processing messages, exiting.")
cause.printStackTrace()
System.exit(-1)
}
@@ -209,10 +209,12 @@ private class ClientEndpoint(
*/
object Client {
def main(args: Array[String]) {
+ // scalastyle:off println
if (!sys.props.contains("SPARK_SUBMIT")) {
println("WARNING: This client is deprecated and will be removed in a future version of Spark")
println("Use ./bin/spark-submit with \"--master spark://host:port\"")
}
+ // scalastyle:on println
val conf = new SparkConf()
val driverArgs = new ClientArguments(args)
diff --git a/core/src/main/scala/org/apache/spark/deploy/ClientArguments.scala b/core/src/main/scala/org/apache/spark/deploy/ClientArguments.scala
index 42d3296062..72cc330a39 100644
--- a/core/src/main/scala/org/apache/spark/deploy/ClientArguments.scala
+++ b/core/src/main/scala/org/apache/spark/deploy/ClientArguments.scala
@@ -72,9 +72,11 @@ private[deploy] class ClientArguments(args: Array[String]) {
cmd = "launch"
if (!ClientArguments.isValidJarUrl(_jarUrl)) {
+ // scalastyle:off println
println(s"Jar url '${_jarUrl}' is not in valid format.")
println(s"Must be a jar file path in URL format " +
"(e.g. hdfs://host:port/XX.jar, file:///XX.jar)")
+ // scalastyle:on println
printUsageAndExit(-1)
}
@@ -110,7 +112,9 @@ private[deploy] class ClientArguments(args: Array[String]) {
| (default: $DEFAULT_SUPERVISE)
| -v, --verbose Print more debugging output
""".stripMargin
+ // scalastyle:off println
System.err.println(usage)
+ // scalastyle:on println
System.exit(exitCode)
}
}
diff --git a/core/src/main/scala/org/apache/spark/deploy/RRunner.scala b/core/src/main/scala/org/apache/spark/deploy/RRunner.scala
index e99779f299..4165740312 100644
--- a/core/src/main/scala/org/apache/spark/deploy/RRunner.scala
+++ b/core/src/main/scala/org/apache/spark/deploy/RRunner.scala
@@ -85,7 +85,9 @@ object RRunner {
}
System.exit(returnCode)
} else {
+ // scalastyle:off println
System.err.println("SparkR backend did not initialize in " + backendTimeout + " seconds")
+ // scalastyle:on println
System.exit(-1)
}
}
diff --git a/core/src/main/scala/org/apache/spark/deploy/SparkSubmit.scala b/core/src/main/scala/org/apache/spark/deploy/SparkSubmit.scala
index b1d6ec209d..4cec9017b8 100644
--- a/core/src/main/scala/org/apache/spark/deploy/SparkSubmit.scala
+++ b/core/src/main/scala/org/apache/spark/deploy/SparkSubmit.scala
@@ -82,6 +82,7 @@ object SparkSubmit {
private val CLASS_NOT_FOUND_EXIT_STATUS = 101
+ // scalastyle:off println
// Exposed for testing
private[spark] var exitFn: Int => Unit = (exitCode: Int) => System.exit(exitCode)
private[spark] var printStream: PrintStream = System.err
@@ -102,11 +103,14 @@ object SparkSubmit {
printStream.println("Type --help for more information.")
exitFn(0)
}
+ // scalastyle:on println
def main(args: Array[String]): Unit = {
val appArgs = new SparkSubmitArguments(args)
if (appArgs.verbose) {
+ // scalastyle:off println
printStream.println(appArgs)
+ // scalastyle:on println
}
appArgs.action match {
case SparkSubmitAction.SUBMIT => submit(appArgs)
@@ -160,7 +164,9 @@ object SparkSubmit {
// makes the message printed to the output by the JVM not very helpful. Instead,
// detect exceptions with empty stack traces here, and treat them differently.
if (e.getStackTrace().length == 0) {
+ // scalastyle:off println
printStream.println(s"ERROR: ${e.getClass().getName()}: ${e.getMessage()}")
+ // scalastyle:on println
exitFn(1)
} else {
throw e
@@ -178,7 +184,9 @@ object SparkSubmit {
// to use the legacy gateway if the master endpoint turns out to be not a REST server.
if (args.isStandaloneCluster && args.useRest) {
try {
+ // scalastyle:off println
printStream.println("Running Spark using the REST application submission protocol.")
+ // scalastyle:on println
doRunMain()
} catch {
// Fail over to use the legacy submission gateway
@@ -558,6 +566,7 @@ object SparkSubmit {
sysProps: Map[String, String],
childMainClass: String,
verbose: Boolean): Unit = {
+ // scalastyle:off println
if (verbose) {
printStream.println(s"Main class:\n$childMainClass")
printStream.println(s"Arguments:\n${childArgs.mkString("\n")}")
@@ -565,6 +574,7 @@ object SparkSubmit {
printStream.println(s"Classpath elements:\n${childClasspath.mkString("\n")}")
printStream.println("\n")
}
+ // scalastyle:on println
val loader =
if (sysProps.getOrElse("spark.driver.userClassPathFirst", "false").toBoolean) {
@@ -592,8 +602,10 @@ object SparkSubmit {
case e: ClassNotFoundException =>
e.printStackTrace(printStream)
if (childMainClass.contains("thriftserver")) {
+ // scalastyle:off println
printStream.println(s"Failed to load main class $childMainClass.")
printStream.println("You need to build Spark with -Phive and -Phive-thriftserver.")
+ // scalastyle:on println
}
System.exit(CLASS_NOT_FOUND_EXIT_STATUS)
}
@@ -766,7 +778,9 @@ private[spark] object SparkSubmitUtils {
brr.setRoot(repo)
brr.setName(s"repo-${i + 1}")
cr.add(brr)
+ // scalastyle:off println
printStream.println(s"$repo added as a remote repository with the name: ${brr.getName}")
+ // scalastyle:on println
}
}
@@ -829,7 +843,9 @@ private[spark] object SparkSubmitUtils {
val ri = ModuleRevisionId.newInstance(mvn.groupId, mvn.artifactId, mvn.version)
val dd = new DefaultDependencyDescriptor(ri, false, false)
dd.addDependencyConfiguration(ivyConfName, ivyConfName)
+ // scalastyle:off println
printStream.println(s"${dd.getDependencyId} added as a dependency")
+ // scalastyle:on println
md.addDependency(dd)
}
}
@@ -896,9 +912,11 @@ private[spark] object SparkSubmitUtils {
ivySettings.setDefaultCache(new File(alternateIvyCache, "cache"))
new File(alternateIvyCache, "jars")
}
+ // scalastyle:off println
printStream.println(
s"Ivy Default Cache set to: ${ivySettings.getDefaultCache.getAbsolutePath}")
printStream.println(s"The jars for the packages stored in: $packagesDirectory")
+ // scalastyle:on println
// create a pattern matcher
ivySettings.addMatcher(new GlobPatternMatcher)
// create the dependency resolvers
diff --git a/core/src/main/scala/org/apache/spark/deploy/SparkSubmitArguments.scala b/core/src/main/scala/org/apache/spark/deploy/SparkSubmitArguments.scala
index 6e3c0b21b3..ebb39c354d 100644
--- a/core/src/main/scala/org/apache/spark/deploy/SparkSubmitArguments.scala
+++ b/core/src/main/scala/org/apache/spark/deploy/SparkSubmitArguments.scala
@@ -79,6 +79,7 @@ private[deploy] class SparkSubmitArguments(args: Seq[String], env: Map[String, S
/** Default properties present in the currently defined defaults file. */
lazy val defaultSparkProperties: HashMap[String, String] = {
val defaultProperties = new HashMap[String, String]()
+ // scalastyle:off println
if (verbose) SparkSubmit.printStream.println(s"Using properties file: $propertiesFile")
Option(propertiesFile).foreach { filename =>
Utils.getPropertiesFromFile(filename).foreach { case (k, v) =>
@@ -86,6 +87,7 @@ private[deploy] class SparkSubmitArguments(args: Seq[String], env: Map[String, S
if (verbose) SparkSubmit.printStream.println(s"Adding default property: $k=$v")
}
}
+ // scalastyle:on println
defaultProperties
}
@@ -452,6 +454,7 @@ private[deploy] class SparkSubmitArguments(args: Seq[String], env: Map[String, S
}
private def printUsageAndExit(exitCode: Int, unknownParam: Any = null): Unit = {
+ // scalastyle:off println
val outStream = SparkSubmit.printStream
if (unknownParam != null) {
outStream.println("Unknown/unsupported param " + unknownParam)
@@ -541,6 +544,7 @@ private[deploy] class SparkSubmitArguments(args: Seq[String], env: Map[String, S
outStream.println("CLI options:")
outStream.println(getSqlShellOptions())
}
+ // scalastyle:on println
SparkSubmit.exitFn(exitCode)
}
diff --git a/core/src/main/scala/org/apache/spark/deploy/client/TestExecutor.scala b/core/src/main/scala/org/apache/spark/deploy/client/TestExecutor.scala
index c5ac45c673..a98b1fa8f8 100644
--- a/core/src/main/scala/org/apache/spark/deploy/client/TestExecutor.scala
+++ b/core/src/main/scala/org/apache/spark/deploy/client/TestExecutor.scala
@@ -19,7 +19,9 @@ package org.apache.spark.deploy.client
private[spark] object TestExecutor {
def main(args: Array[String]) {
+ // scalastyle:off println
println("Hello world!")
+ // scalastyle:on println
while (true) {
Thread.sleep(1000)
}
diff --git a/core/src/main/scala/org/apache/spark/deploy/history/HistoryServerArguments.scala b/core/src/main/scala/org/apache/spark/deploy/history/HistoryServerArguments.scala
index 4692d22651..18265df9fa 100644
--- a/core/src/main/scala/org/apache/spark/deploy/history/HistoryServerArguments.scala
+++ b/core/src/main/scala/org/apache/spark/deploy/history/HistoryServerArguments.scala
@@ -56,6 +56,7 @@ private[history] class HistoryServerArguments(conf: SparkConf, args: Array[Strin
Utils.loadDefaultSparkProperties(conf, propertiesFile)
private def printUsageAndExit(exitCode: Int) {
+ // scalastyle:off println
System.err.println(
"""
|Usage: HistoryServer [options]
@@ -84,6 +85,7 @@ private[history] class HistoryServerArguments(conf: SparkConf, args: Array[Strin
| spark.history.fs.updateInterval How often to reload log data from storage
| (in seconds, default: 10)
|""".stripMargin)
+ // scalastyle:on println
System.exit(exitCode)
}
diff --git a/core/src/main/scala/org/apache/spark/deploy/master/MasterArguments.scala b/core/src/main/scala/org/apache/spark/deploy/master/MasterArguments.scala
index 435b9b12f8..44cefbc77f 100644
--- a/core/src/main/scala/org/apache/spark/deploy/master/MasterArguments.scala
+++ b/core/src/main/scala/org/apache/spark/deploy/master/MasterArguments.scala
@@ -85,6 +85,7 @@ private[master] class MasterArguments(args: Array[String], conf: SparkConf) {
* Print usage and exit JVM with the given exit code.
*/
private def printUsageAndExit(exitCode: Int) {
+ // scalastyle:off println
System.err.println(
"Usage: Master [options]\n" +
"\n" +
@@ -95,6 +96,7 @@ private[master] class MasterArguments(args: Array[String], conf: SparkConf) {
" --webui-port PORT Port for web UI (default: 8080)\n" +
" --properties-file FILE Path to a custom Spark properties file.\n" +
" Default is conf/spark-defaults.conf.")
+ // scalastyle:on println
System.exit(exitCode)
}
}
diff --git a/core/src/main/scala/org/apache/spark/deploy/mesos/MesosClusterDispatcherArguments.scala b/core/src/main/scala/org/apache/spark/deploy/mesos/MesosClusterDispatcherArguments.scala
index 894cb78d85..5accaf78d0 100644
--- a/core/src/main/scala/org/apache/spark/deploy/mesos/MesosClusterDispatcherArguments.scala
+++ b/core/src/main/scala/org/apache/spark/deploy/mesos/MesosClusterDispatcherArguments.scala
@@ -54,7 +54,9 @@ private[mesos] class MesosClusterDispatcherArguments(args: Array[String], conf:
case ("--master" | "-m") :: value :: tail =>
if (!value.startsWith("mesos://")) {
+ // scalastyle:off println
System.err.println("Cluster dispatcher only supports mesos (uri begins with mesos://)")
+ // scalastyle:on println
System.exit(1)
}
masterUrl = value.stripPrefix("mesos://")
@@ -73,7 +75,9 @@ private[mesos] class MesosClusterDispatcherArguments(args: Array[String], conf:
case Nil => {
if (masterUrl == null) {
+ // scalastyle:off println
System.err.println("--master is required")
+ // scalastyle:on println
printUsageAndExit(1)
}
}
@@ -83,6 +87,7 @@ private[mesos] class MesosClusterDispatcherArguments(args: Array[String], conf:
}
private def printUsageAndExit(exitCode: Int): Unit = {
+ // scalastyle:off println
System.err.println(
"Usage: MesosClusterDispatcher [options]\n" +
"\n" +
@@ -96,6 +101,7 @@ private[mesos] class MesosClusterDispatcherArguments(args: Array[String], conf:
" Zookeeper for persistence\n" +
" --properties-file FILE Path to a custom Spark properties file.\n" +
" Default is conf/spark-defaults.conf.")
+ // scalastyle:on println
System.exit(exitCode)
}
}
diff --git a/core/src/main/scala/org/apache/spark/deploy/worker/DriverWrapper.scala b/core/src/main/scala/org/apache/spark/deploy/worker/DriverWrapper.scala
index d1a12b01e7..2d6be3042c 100644
--- a/core/src/main/scala/org/apache/spark/deploy/worker/DriverWrapper.scala
+++ b/core/src/main/scala/org/apache/spark/deploy/worker/DriverWrapper.scala
@@ -60,7 +60,9 @@ object DriverWrapper {
rpcEnv.shutdown()
case _ =>
+ // scalastyle:off println
System.err.println("Usage: DriverWrapper <workerUrl> <userJar> <driverMainClass> [options]")
+ // scalastyle:on println
System.exit(-1)
}
}
diff --git a/core/src/main/scala/org/apache/spark/deploy/worker/WorkerArguments.scala b/core/src/main/scala/org/apache/spark/deploy/worker/WorkerArguments.scala
index 1d2ecab517..e89d076802 100644
--- a/core/src/main/scala/org/apache/spark/deploy/worker/WorkerArguments.scala
+++ b/core/src/main/scala/org/apache/spark/deploy/worker/WorkerArguments.scala
@@ -121,6 +121,7 @@ private[worker] class WorkerArguments(args: Array[String], conf: SparkConf) {
* Print usage and exit JVM with the given exit code.
*/
def printUsageAndExit(exitCode: Int) {
+ // scalastyle:off println
System.err.println(
"Usage: Worker [options] <master>\n" +
"\n" +
@@ -136,6 +137,7 @@ private[worker] class WorkerArguments(args: Array[String], conf: SparkConf) {
" --webui-port PORT Port for web UI (default: 8081)\n" +
" --properties-file FILE Path to a custom Spark properties file.\n" +
" Default is conf/spark-defaults.conf.")
+ // scalastyle:on println
System.exit(exitCode)
}
@@ -160,7 +162,9 @@ private[worker] class WorkerArguments(args: Array[String], conf: SparkConf) {
} catch {
case e: Exception => {
totalMb = 2*1024
+ // scalastyle:off println
System.out.println("Failed to get total physical memory. Using " + totalMb + " MB")
+ // scalastyle:on println
}
}
// Leave out 1 GB for the operating system, but don't return a negative memory size
diff --git a/core/src/main/scala/org/apache/spark/executor/CoarseGrainedExecutorBackend.scala b/core/src/main/scala/org/apache/spark/executor/CoarseGrainedExecutorBackend.scala
index 34d4cfdca7..fcd76ec527 100644
--- a/core/src/main/scala/org/apache/spark/executor/CoarseGrainedExecutorBackend.scala
+++ b/core/src/main/scala/org/apache/spark/executor/CoarseGrainedExecutorBackend.scala
@@ -235,7 +235,9 @@ private[spark] object CoarseGrainedExecutorBackend extends Logging {
argv = tail
case Nil =>
case tail =>
+ // scalastyle:off println
System.err.println(s"Unrecognized options: ${tail.mkString(" ")}")
+ // scalastyle:on println
printUsageAndExit()
}
}
@@ -249,6 +251,7 @@ private[spark] object CoarseGrainedExecutorBackend extends Logging {
}
private def printUsageAndExit() = {
+ // scalastyle:off println
System.err.println(
"""
|"Usage: CoarseGrainedExecutorBackend [options]
@@ -262,6 +265,7 @@ private[spark] object CoarseGrainedExecutorBackend extends Logging {
| --worker-url <workerUrl>
| --user-class-path <url>
|""".stripMargin)
+ // scalastyle:on println
System.exit(1)
}
diff --git a/core/src/main/scala/org/apache/spark/input/FixedLengthBinaryInputFormat.scala b/core/src/main/scala/org/apache/spark/input/FixedLengthBinaryInputFormat.scala
index c219d21fbe..532850dd57 100644
--- a/core/src/main/scala/org/apache/spark/input/FixedLengthBinaryInputFormat.scala
+++ b/core/src/main/scala/org/apache/spark/input/FixedLengthBinaryInputFormat.scala
@@ -21,6 +21,8 @@ import org.apache.hadoop.fs.Path
import org.apache.hadoop.io.{BytesWritable, LongWritable}
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat
import org.apache.hadoop.mapreduce.{InputSplit, JobContext, RecordReader, TaskAttemptContext}
+
+import org.apache.spark.Logging
import org.apache.spark.deploy.SparkHadoopUtil
/**
@@ -39,7 +41,8 @@ private[spark] object FixedLengthBinaryInputFormat {
}
private[spark] class FixedLengthBinaryInputFormat
- extends FileInputFormat[LongWritable, BytesWritable] {
+ extends FileInputFormat[LongWritable, BytesWritable]
+ with Logging {
private var recordLength = -1
@@ -51,7 +54,7 @@ private[spark] class FixedLengthBinaryInputFormat
recordLength = FixedLengthBinaryInputFormat.getRecordLength(context)
}
if (recordLength <= 0) {
- println("record length is less than 0, file cannot be split")
+ logDebug("record length is less than 0, file cannot be split")
false
} else {
true
diff --git a/core/src/main/scala/org/apache/spark/network/nio/BlockMessage.scala b/core/src/main/scala/org/apache/spark/network/nio/BlockMessage.scala
index 67a3761029..79cb0640c8 100644
--- a/core/src/main/scala/org/apache/spark/network/nio/BlockMessage.scala
+++ b/core/src/main/scala/org/apache/spark/network/nio/BlockMessage.scala
@@ -57,16 +57,6 @@ private[nio] class BlockMessage() {
}
def set(buffer: ByteBuffer) {
- /*
- println()
- println("BlockMessage: ")
- while(buffer.remaining > 0) {
- print(buffer.get())
- }
- buffer.rewind()
- println()
- println()
- */
typ = buffer.getInt()
val idLength = buffer.getInt()
val idBuilder = new StringBuilder(idLength)
@@ -138,18 +128,6 @@ private[nio] class BlockMessage() {
buffers += data
}
- /*
- println()
- println("BlockMessage: ")
- buffers.foreach(b => {
- while(b.remaining > 0) {
- print(b.get())
- }
- b.rewind()
- })
- println()
- println()
- */
Message.createBufferMessage(buffers)
}
diff --git a/core/src/main/scala/org/apache/spark/network/nio/BlockMessageArray.scala b/core/src/main/scala/org/apache/spark/network/nio/BlockMessageArray.scala
index 7d0806f0c2..f1c9ea8b64 100644
--- a/core/src/main/scala/org/apache/spark/network/nio/BlockMessageArray.scala
+++ b/core/src/main/scala/org/apache/spark/network/nio/BlockMessageArray.scala
@@ -43,16 +43,6 @@ class BlockMessageArray(var blockMessages: Seq[BlockMessage])
val newBlockMessages = new ArrayBuffer[BlockMessage]()
val buffer = bufferMessage.buffers(0)
buffer.clear()
- /*
- println()
- println("BlockMessageArray: ")
- while(buffer.remaining > 0) {
- print(buffer.get())
- }
- buffer.rewind()
- println()
- println()
- */
while (buffer.remaining() > 0) {
val size = buffer.getInt()
logDebug("Creating block message of size " + size + " bytes")
@@ -86,23 +76,11 @@ class BlockMessageArray(var blockMessages: Seq[BlockMessage])
logDebug("Buffer list:")
buffers.foreach((x: ByteBuffer) => logDebug("" + x))
- /*
- println()
- println("BlockMessageArray: ")
- buffers.foreach(b => {
- while(b.remaining > 0) {
- print(b.get())
- }
- b.rewind()
- })
- println()
- println()
- */
Message.createBufferMessage(buffers)
}
}
-private[nio] object BlockMessageArray {
+private[nio] object BlockMessageArray extends Logging {
def fromBufferMessage(bufferMessage: BufferMessage): BlockMessageArray = {
val newBlockMessageArray = new BlockMessageArray()
@@ -123,10 +101,10 @@ private[nio] object BlockMessageArray {
}
}
val blockMessageArray = new BlockMessageArray(blockMessages)
- println("Block message array created")
+ logDebug("Block message array created")
val bufferMessage = blockMessageArray.toBufferMessage
- println("Converted to buffer message")
+ logDebug("Converted to buffer message")
val totalSize = bufferMessage.size
val newBuffer = ByteBuffer.allocate(totalSize)
@@ -138,10 +116,11 @@ private[nio] object BlockMessageArray {
})
newBuffer.flip
val newBufferMessage = Message.createBufferMessage(newBuffer)
- println("Copied to new buffer message, size = " + newBufferMessage.size)
+ logDebug("Copied to new buffer message, size = " + newBufferMessage.size)
val newBlockMessageArray = BlockMessageArray.fromBufferMessage(newBufferMessage)
- println("Converted back to block message array")
+ logDebug("Converted back to block message array")
+ // scalastyle:off println
newBlockMessageArray.foreach(blockMessage => {
blockMessage.getType match {
case BlockMessage.TYPE_PUT_BLOCK => {
@@ -154,6 +133,7 @@ private[nio] object BlockMessageArray {
}
}
})
+ // scalastyle:on println
}
}
diff --git a/core/src/main/scala/org/apache/spark/network/nio/ConnectionManager.scala b/core/src/main/scala/org/apache/spark/network/nio/ConnectionManager.scala
index c0bca2c4bc..9143918790 100644
--- a/core/src/main/scala/org/apache/spark/network/nio/ConnectionManager.scala
+++ b/core/src/main/scala/org/apache/spark/network/nio/ConnectionManager.scala
@@ -1016,7 +1016,9 @@ private[spark] object ConnectionManager {
val conf = new SparkConf
val manager = new ConnectionManager(9999, conf, new SecurityManager(conf))
manager.onReceiveMessage((msg: Message, id: ConnectionManagerId) => {
+ // scalastyle:off println
println("Received [" + msg + "] from [" + id + "]")
+ // scalastyle:on println
None
})
@@ -1033,6 +1035,7 @@ private[spark] object ConnectionManager {
System.gc()
}
+ // scalastyle:off println
def testSequentialSending(manager: ConnectionManager) {
println("--------------------------")
println("Sequential Sending")
@@ -1150,4 +1153,5 @@ private[spark] object ConnectionManager {
println()
}
}
+ // scalastyle:on println
}
diff --git a/core/src/main/scala/org/apache/spark/rdd/PipedRDD.scala b/core/src/main/scala/org/apache/spark/rdd/PipedRDD.scala
index dc60d48927..defdabf95a 100644
--- a/core/src/main/scala/org/apache/spark/rdd/PipedRDD.scala
+++ b/core/src/main/scala/org/apache/spark/rdd/PipedRDD.scala
@@ -123,7 +123,9 @@ private[spark] class PipedRDD[T: ClassTag](
new Thread("stderr reader for " + command) {
override def run() {
for (line <- Source.fromInputStream(proc.getErrorStream).getLines) {
+ // scalastyle:off println
System.err.println(line)
+ // scalastyle:on println
}
}
}.start()
@@ -133,6 +135,7 @@ private[spark] class PipedRDD[T: ClassTag](
override def run() {
val out = new PrintWriter(proc.getOutputStream)
+ // scalastyle:off println
// input the pipe context firstly
if (printPipeContext != null) {
printPipeContext(out.println(_))
@@ -144,6 +147,7 @@ private[spark] class PipedRDD[T: ClassTag](
out.println(elem)
}
}
+ // scalastyle:on println
out.close()
}
}.start()
diff --git a/core/src/main/scala/org/apache/spark/scheduler/EventLoggingListener.scala b/core/src/main/scala/org/apache/spark/scheduler/EventLoggingListener.scala
index 529a5b2bf1..62b05033a9 100644
--- a/core/src/main/scala/org/apache/spark/scheduler/EventLoggingListener.scala
+++ b/core/src/main/scala/org/apache/spark/scheduler/EventLoggingListener.scala
@@ -140,7 +140,9 @@ private[spark] class EventLoggingListener(
/** Log the event as JSON. */
private def logEvent(event: SparkListenerEvent, flushLogger: Boolean = false) {
val eventJson = JsonProtocol.sparkEventToJson(event)
+ // scalastyle:off println
writer.foreach(_.println(compact(render(eventJson))))
+ // scalastyle:on println
if (flushLogger) {
writer.foreach(_.flush())
hadoopDataStream.foreach(hadoopFlushMethod.invoke(_))
diff --git a/core/src/main/scala/org/apache/spark/scheduler/JobLogger.scala b/core/src/main/scala/org/apache/spark/scheduler/JobLogger.scala
index e55b76c36c..f96eb8ca0a 100644
--- a/core/src/main/scala/org/apache/spark/scheduler/JobLogger.scala
+++ b/core/src/main/scala/org/apache/spark/scheduler/JobLogger.scala
@@ -125,7 +125,9 @@ class JobLogger(val user: String, val logDirName: String) extends SparkListener
val date = new Date(System.currentTimeMillis())
writeInfo = dateFormat.get.format(date) + ": " + info
}
+ // scalastyle:off println
jobIdToPrintWriter.get(jobId).foreach(_.println(writeInfo))
+ // scalastyle:on println
}
/**
diff --git a/core/src/main/scala/org/apache/spark/ui/JettyUtils.scala b/core/src/main/scala/org/apache/spark/ui/JettyUtils.scala
index f413c1d37f..c8356467fa 100644
--- a/core/src/main/scala/org/apache/spark/ui/JettyUtils.scala
+++ b/core/src/main/scala/org/apache/spark/ui/JettyUtils.scala
@@ -68,7 +68,9 @@ private[spark] object JettyUtils extends Logging {
response.setStatus(HttpServletResponse.SC_OK)
val result = servletParams.responder(request)
response.setHeader("Cache-Control", "no-cache, no-store, must-revalidate")
+ // scalastyle:off println
response.getWriter.println(servletParams.extractFn(result))
+ // scalastyle:on println
} else {
response.setStatus(HttpServletResponse.SC_UNAUTHORIZED)
response.setHeader("Cache-Control", "no-cache, no-store, must-revalidate")
diff --git a/core/src/main/scala/org/apache/spark/ui/UIWorkloadGenerator.scala b/core/src/main/scala/org/apache/spark/ui/UIWorkloadGenerator.scala
index ba03acdb38..5a8c291431 100644
--- a/core/src/main/scala/org/apache/spark/ui/UIWorkloadGenerator.scala
+++ b/core/src/main/scala/org/apache/spark/ui/UIWorkloadGenerator.scala
@@ -38,9 +38,11 @@ private[spark] object UIWorkloadGenerator {
def main(args: Array[String]) {
if (args.length < 3) {
+ // scalastyle:off println
println(
- "usage: ./bin/spark-class org.apache.spark.ui.UIWorkloadGenerator " +
+ "Usage: ./bin/spark-class org.apache.spark.ui.UIWorkloadGenerator " +
"[master] [FIFO|FAIR] [#job set (4 jobs per set)]")
+ // scalastyle:on println
System.exit(1)
}
@@ -96,6 +98,7 @@ private[spark] object UIWorkloadGenerator {
for ((desc, job) <- jobs) {
new Thread {
override def run() {
+ // scalastyle:off println
try {
setProperties(desc)
job()
@@ -106,6 +109,7 @@ private[spark] object UIWorkloadGenerator {
} finally {
barrier.release()
}
+ // scalastyle:on println
}
}.start
Thread.sleep(INTER_JOB_WAIT_MS)
diff --git a/core/src/main/scala/org/apache/spark/util/Distribution.scala b/core/src/main/scala/org/apache/spark/util/Distribution.scala
index 1bab707235..950b69f7db 100644
--- a/core/src/main/scala/org/apache/spark/util/Distribution.scala
+++ b/core/src/main/scala/org/apache/spark/util/Distribution.scala
@@ -52,9 +52,11 @@ private[spark] class Distribution(val data: Array[Double], val startIdx: Int, va
}
def showQuantiles(out: PrintStream = System.out): Unit = {
+ // scalastyle:off println
out.println("min\t25%\t50%\t75%\tmax")
getQuantiles(defaultProbabilities).foreach{q => out.print(q + "\t")}
out.println
+ // scalastyle:on println
}
def statCounter: StatCounter = StatCounter(data.slice(startIdx, endIdx))
@@ -64,8 +66,10 @@ private[spark] class Distribution(val data: Array[Double], val startIdx: Int, va
* @param out
*/
def summary(out: PrintStream = System.out) {
+ // scalastyle:off println
out.println(statCounter)
showQuantiles(out)
+ // scalastyle:on println
}
}
@@ -80,8 +84,10 @@ private[spark] object Distribution {
}
def showQuantiles(out: PrintStream = System.out, quantiles: Traversable[Double]) {
+ // scalastyle:off println
out.println("min\t25%\t50%\t75%\tmax")
quantiles.foreach{q => out.print(q + "\t")}
out.println
+ // scalastyle:on println
}
}
diff --git a/core/src/main/scala/org/apache/spark/util/random/XORShiftRandom.scala b/core/src/main/scala/org/apache/spark/util/random/XORShiftRandom.scala
index c4a7b4441c..85fb923cd9 100644
--- a/core/src/main/scala/org/apache/spark/util/random/XORShiftRandom.scala
+++ b/core/src/main/scala/org/apache/spark/util/random/XORShiftRandom.scala
@@ -70,12 +70,14 @@ private[spark] object XORShiftRandom {
* @param args takes one argument - the number of random numbers to generate
*/
def main(args: Array[String]): Unit = {
+ // scalastyle:off println
if (args.length != 1) {
println("Benchmark of XORShiftRandom vis-a-vis java.util.Random")
println("Usage: XORShiftRandom number_of_random_numbers_to_generate")
System.exit(1)
}
println(benchmark(args(0).toInt))
+ // scalastyle:on println
}
/**
diff --git a/core/src/test/scala/org/apache/spark/DistributedSuite.scala b/core/src/test/scala/org/apache/spark/DistributedSuite.scala
index 9c191ed522..2300bcff4f 100644
--- a/core/src/test/scala/org/apache/spark/DistributedSuite.scala
+++ b/core/src/test/scala/org/apache/spark/DistributedSuite.scala
@@ -107,7 +107,9 @@ class DistributedSuite extends SparkFunSuite with Matchers with LocalSparkContex
sc = new SparkContext(clusterUrl, "test")
val accum = sc.accumulator(0)
val thrown = intercept[SparkException] {
+ // scalastyle:off println
sc.parallelize(1 to 10, 10).foreach(x => println(x / 0))
+ // scalastyle:on println
}
assert(thrown.getClass === classOf[SparkException])
assert(thrown.getMessage.contains("failed 4 times"))
diff --git a/core/src/test/scala/org/apache/spark/FailureSuite.scala b/core/src/test/scala/org/apache/spark/FailureSuite.scala
index a8c8c6f73f..b099cd3fb7 100644
--- a/core/src/test/scala/org/apache/spark/FailureSuite.scala
+++ b/core/src/test/scala/org/apache/spark/FailureSuite.scala
@@ -130,7 +130,9 @@ class FailureSuite extends SparkFunSuite with LocalSparkContext {
// Non-serializable closure in foreach function
val thrown2 = intercept[SparkException] {
+ // scalastyle:off println
sc.parallelize(1 to 10, 2).foreach(x => println(a))
+ // scalastyle:on println
}
assert(thrown2.getClass === classOf[SparkException])
assert(thrown2.getMessage.contains("NotSerializableException") ||
diff --git a/core/src/test/scala/org/apache/spark/FileServerSuite.scala b/core/src/test/scala/org/apache/spark/FileServerSuite.scala
index 6e65b0a8f6..876418aa13 100644
--- a/core/src/test/scala/org/apache/spark/FileServerSuite.scala
+++ b/core/src/test/scala/org/apache/spark/FileServerSuite.scala
@@ -51,7 +51,9 @@ class FileServerSuite extends SparkFunSuite with LocalSparkContext {
val textFile = new File(testTempDir, "FileServerSuite.txt")
val pw = new PrintWriter(textFile)
+ // scalastyle:off println
pw.println("100")
+ // scalastyle:on println
pw.close()
val jarFile = new File(testTempDir, "test.jar")
diff --git a/core/src/test/scala/org/apache/spark/ThreadingSuite.scala b/core/src/test/scala/org/apache/spark/ThreadingSuite.scala
index 6580139df6..48509f0759 100644
--- a/core/src/test/scala/org/apache/spark/ThreadingSuite.scala
+++ b/core/src/test/scala/org/apache/spark/ThreadingSuite.scala
@@ -36,7 +36,7 @@ object ThreadingSuiteState {
}
}
-class ThreadingSuite extends SparkFunSuite with LocalSparkContext {
+class ThreadingSuite extends SparkFunSuite with LocalSparkContext with Logging {
test("accessing SparkContext form a different thread") {
sc = new SparkContext("local", "test")
@@ -130,8 +130,6 @@ class ThreadingSuite extends SparkFunSuite with LocalSparkContext {
Thread.sleep(100)
}
if (running.get() != 4) {
- println("Waited 1 second without seeing runningThreads = 4 (it was " +
- running.get() + "); failing test")
ThreadingSuiteState.failed.set(true)
}
number
@@ -143,6 +141,8 @@ class ThreadingSuite extends SparkFunSuite with LocalSparkContext {
}
sem.acquire(2)
if (ThreadingSuiteState.failed.get()) {
+ logError("Waited 1 second without seeing runningThreads = 4 (it was " +
+ ThreadingSuiteState.runningThreads.get() + "); failing test")
fail("One or more threads didn't see runningThreads = 4")
}
}
diff --git a/core/src/test/scala/org/apache/spark/deploy/SparkSubmitSuite.scala b/core/src/test/scala/org/apache/spark/deploy/SparkSubmitSuite.scala
index 2e05dec99b..1b64c329b5 100644
--- a/core/src/test/scala/org/apache/spark/deploy/SparkSubmitSuite.scala
+++ b/core/src/test/scala/org/apache/spark/deploy/SparkSubmitSuite.scala
@@ -51,9 +51,11 @@ class SparkSubmitSuite
/** Simple PrintStream that reads data into a buffer */
private class BufferPrintStream extends PrintStream(noOpOutputStream) {
var lineBuffer = ArrayBuffer[String]()
+ // scalastyle:off println
override def println(line: String) {
lineBuffer += line
}
+ // scalastyle:on println
}
/** Returns true if the script exits and the given search string is printed. */
@@ -81,6 +83,7 @@ class SparkSubmitSuite
}
}
+ // scalastyle:off println
test("prints usage on empty input") {
testPrematureExit(Array[String](), "Usage: spark-submit")
}
@@ -491,6 +494,7 @@ class SparkSubmitSuite
appArgs.executorMemory should be ("2.3g")
}
}
+ // scalastyle:on println
// NOTE: This is an expensive operation in terms of time (10 seconds+). Use sparingly.
private def runSparkSubmit(args: Seq[String]): Unit = {
diff --git a/core/src/test/scala/org/apache/spark/deploy/SparkSubmitUtilsSuite.scala b/core/src/test/scala/org/apache/spark/deploy/SparkSubmitUtilsSuite.scala
index c9b435a922..01ece1a10f 100644
--- a/core/src/test/scala/org/apache/spark/deploy/SparkSubmitUtilsSuite.scala
+++ b/core/src/test/scala/org/apache/spark/deploy/SparkSubmitUtilsSuite.scala
@@ -41,9 +41,11 @@ class SparkSubmitUtilsSuite extends SparkFunSuite with BeforeAndAfterAll {
/** Simple PrintStream that reads data into a buffer */
private class BufferPrintStream extends PrintStream(noOpOutputStream) {
var lineBuffer = ArrayBuffer[String]()
+ // scalastyle:off println
override def println(line: String) {
lineBuffer += line
}
+ // scalastyle:on println
}
override def beforeAll() {
diff --git a/core/src/test/scala/org/apache/spark/input/WholeTextFileRecordReaderSuite.scala b/core/src/test/scala/org/apache/spark/input/WholeTextFileRecordReaderSuite.scala
index 63947df3d4..8a199459c1 100644
--- a/core/src/test/scala/org/apache/spark/input/WholeTextFileRecordReaderSuite.scala
+++ b/core/src/test/scala/org/apache/spark/input/WholeTextFileRecordReaderSuite.scala
@@ -27,7 +27,7 @@ import org.scalatest.BeforeAndAfterAll
import org.apache.hadoop.io.Text
-import org.apache.spark.{SparkConf, SparkContext, SparkFunSuite}
+import org.apache.spark.{Logging, SparkConf, SparkContext, SparkFunSuite}
import org.apache.spark.util.Utils
import org.apache.hadoop.io.compress.{DefaultCodec, CompressionCodecFactory, GzipCodec}
@@ -36,7 +36,7 @@ import org.apache.hadoop.io.compress.{DefaultCodec, CompressionCodecFactory, Gzi
* [[org.apache.spark.input.WholeTextFileRecordReader WholeTextFileRecordReader]]. A temporary
* directory is created as fake input. Temporal storage would be deleted in the end.
*/
-class WholeTextFileRecordReaderSuite extends SparkFunSuite with BeforeAndAfterAll {
+class WholeTextFileRecordReaderSuite extends SparkFunSuite with BeforeAndAfterAll with Logging {
private var sc: SparkContext = _
private var factory: CompressionCodecFactory = _
@@ -85,7 +85,7 @@ class WholeTextFileRecordReaderSuite extends SparkFunSuite with BeforeAndAfterAl
*/
test("Correctness of WholeTextFileRecordReader.") {
val dir = Utils.createTempDir()
- println(s"Local disk address is ${dir.toString}.")
+ logInfo(s"Local disk address is ${dir.toString}.")
WholeTextFileRecordReaderSuite.files.foreach { case (filename, contents) =>
createNativeFile(dir, filename, contents, false)
@@ -109,7 +109,7 @@ class WholeTextFileRecordReaderSuite extends SparkFunSuite with BeforeAndAfterAl
test("Correctness of WholeTextFileRecordReader with GzipCodec.") {
val dir = Utils.createTempDir()
- println(s"Local disk address is ${dir.toString}.")
+ logInfo(s"Local disk address is ${dir.toString}.")
WholeTextFileRecordReaderSuite.files.foreach { case (filename, contents) =>
createNativeFile(dir, filename, contents, true)
diff --git a/core/src/test/scala/org/apache/spark/metrics/InputOutputMetricsSuite.scala b/core/src/test/scala/org/apache/spark/metrics/InputOutputMetricsSuite.scala
index 9e4d34fb7d..d3218a548e 100644
--- a/core/src/test/scala/org/apache/spark/metrics/InputOutputMetricsSuite.scala
+++ b/core/src/test/scala/org/apache/spark/metrics/InputOutputMetricsSuite.scala
@@ -60,7 +60,9 @@ class InputOutputMetricsSuite extends SparkFunSuite with SharedSparkContext
tmpFile = new File(testTempDir, getClass.getSimpleName + ".txt")
val pw = new PrintWriter(new FileWriter(tmpFile))
for (x <- 1 to numRecords) {
+ // scalastyle:off println
pw.println(RandomUtils.nextInt(0, numBuckets))
+ // scalastyle:on println
}
pw.close()
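
Where a test legitimately needs println, such as the PrintWriter filling the temporary input file above, the check is silenced only around the call. A hedged standalone sketch of that shape (the helper name and the use of scala.util.Random in place of commons-lang RandomUtils are assumptions of this sketch):

import java.io.{File, FileWriter, PrintWriter}
import scala.util.Random

object TestInputWriter {
  // Writes numRecords random bucket ids, one per line, into a temp file.
  def writeTestRecords(numRecords: Int, numBuckets: Int): File = {
    val tmpFile = File.createTempFile("metrics-input", ".txt")
    val pw = new PrintWriter(new FileWriter(tmpFile))
    try {
      for (_ <- 1 to numRecords) {
        // scalastyle:off println
        pw.println(Random.nextInt(numBuckets))
        // scalastyle:on println
      }
    } finally {
      pw.close()
    }
    tmpFile
  }
}
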
diff --git a/core/src/test/scala/org/apache/spark/scheduler/ReplayListenerSuite.scala b/core/src/test/scala/org/apache/spark/scheduler/ReplayListenerSuite.scala
index ff3fa95ec3..4e3defb43a 100644
--- a/core/src/test/scala/org/apache/spark/scheduler/ReplayListenerSuite.scala
+++ b/core/src/test/scala/org/apache/spark/scheduler/ReplayListenerSuite.scala
@@ -52,8 +52,10 @@ class ReplayListenerSuite extends SparkFunSuite with BeforeAndAfter {
val applicationStart = SparkListenerApplicationStart("Greatest App (N)ever", None,
125L, "Mickey", None)
val applicationEnd = SparkListenerApplicationEnd(1000L)
+ // scalastyle:off println
writer.println(compact(render(JsonProtocol.sparkEventToJson(applicationStart))))
writer.println(compact(render(JsonProtocol.sparkEventToJson(applicationEnd))))
+ // scalastyle:on println
writer.close()
val conf = EventLoggingListenerSuite.getLoggingConf(logFilePath)
diff --git a/core/src/test/scala/org/apache/spark/util/ClosureCleanerSuite.scala b/core/src/test/scala/org/apache/spark/util/ClosureCleanerSuite.scala
index 1053c6caf7..480722a5ac 100644
--- a/core/src/test/scala/org/apache/spark/util/ClosureCleanerSuite.scala
+++ b/core/src/test/scala/org/apache/spark/util/ClosureCleanerSuite.scala
@@ -375,6 +375,7 @@ class TestCreateNullValue {
// parameters of the closure constructor. This allows us to test whether
// null values are created correctly for each type.
val nestedClosure = () => {
+ // scalastyle:off println
if (s.toString == "123") { // Don't really output them to avoid noisy
println(bo)
println(c)
@@ -389,6 +390,7 @@ class TestCreateNullValue {
val closure = () => {
println(getX)
}
+ // scalastyle:on println
ClosureCleaner.clean(closure)
}
nestedClosure()
diff --git a/core/src/test/scala/org/apache/spark/util/UtilsSuite.scala b/core/src/test/scala/org/apache/spark/util/UtilsSuite.scala
index 251a797dc2..c7638507c8 100644
--- a/core/src/test/scala/org/apache/spark/util/UtilsSuite.scala
+++ b/core/src/test/scala/org/apache/spark/util/UtilsSuite.scala
@@ -684,7 +684,9 @@ class UtilsSuite extends SparkFunSuite with ResetSystemProperties with Logging {
val buffer = new CircularBuffer(25)
val stream = new java.io.PrintStream(buffer, true, "UTF-8")
+ // scalastyle:off println
stream.println("test circular test circular test circular test circular test circular")
+ // scalastyle:on println
assert(buffer.toString === "t circular test circular\n")
}
}
diff --git a/core/src/test/scala/org/apache/spark/util/collection/SizeTrackerSuite.scala b/core/src/test/scala/org/apache/spark/util/collection/SizeTrackerSuite.scala
index 5a5919fca2..4f382414a8 100644
--- a/core/src/test/scala/org/apache/spark/util/collection/SizeTrackerSuite.scala
+++ b/core/src/test/scala/org/apache/spark/util/collection/SizeTrackerSuite.scala
@@ -103,7 +103,9 @@ private object SizeTrackerSuite {
*/
def main(args: Array[String]): Unit = {
if (args.size < 1) {
+ // scalastyle:off println
println("Usage: SizeTrackerSuite [num elements]")
+ // scalastyle:on println
System.exit(1)
}
val numElements = args(0).toInt
@@ -180,11 +182,13 @@ private object SizeTrackerSuite {
baseTimes: Seq[Long],
sampledTimes: Seq[Long],
unsampledTimes: Seq[Long]): Unit = {
+ // scalastyle:off println
println(s"Average times for $testName (ms):")
println(" Base - " + averageTime(baseTimes))
println(" SizeTracker (sampled) - " + averageTime(sampledTimes))
println(" SizeEstimator (unsampled) - " + averageTime(unsampledTimes))
println()
+ // scalastyle:on println
}
def time(f: => Unit): Long = {
diff --git a/core/src/test/scala/org/apache/spark/util/collection/SorterSuite.scala b/core/src/test/scala/org/apache/spark/util/collection/SorterSuite.scala
index b2f5d9009e..fefa5165db 100644
--- a/core/src/test/scala/org/apache/spark/util/collection/SorterSuite.scala
+++ b/core/src/test/scala/org/apache/spark/util/collection/SorterSuite.scala
@@ -20,10 +20,10 @@ package org.apache.spark.util.collection
import java.lang.{Float => JFloat, Integer => JInteger}
import java.util.{Arrays, Comparator}
-import org.apache.spark.SparkFunSuite
+import org.apache.spark.{Logging, SparkFunSuite}
import org.apache.spark.util.random.XORShiftRandom
-class SorterSuite extends SparkFunSuite {
+class SorterSuite extends SparkFunSuite with Logging {
test("equivalent to Arrays.sort") {
val rand = new XORShiftRandom(123)
@@ -74,7 +74,7 @@ class SorterSuite extends SparkFunSuite {
/** Runs an experiment several times. */
def runExperiment(name: String, skip: Boolean = false)(f: => Unit, prepare: () => Unit): Unit = {
if (skip) {
- println(s"Skipped experiment $name.")
+ logInfo(s"Skipped experiment $name.")
return
}
@@ -86,11 +86,11 @@ class SorterSuite extends SparkFunSuite {
while (i < 10) {
val time = org.apache.spark.util.Utils.timeIt(1)(f, Some(prepare))
next10 += time
- println(s"$name: Took $time ms")
+ logInfo(s"$name: Took $time ms")
i += 1
}
- println(s"$name: ($firstTry ms first try, ${next10 / 10} ms average)")
+ logInfo(s"$name: ($firstTry ms first try, ${next10 / 10} ms average)")
}
/**
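
The benchmark-style suites above take a different route: instead of suppressing the check they mix in Spark's Logging trait and report timings through logInfo, so output goes to the configured logger rather than stdout. A rough standalone sketch of the same idea, using a stand-in trait backed by java.util.logging rather than org.apache.spark.Logging:

import java.util.logging.Logger
import scala.util.Random

// Stand-in for Spark's Logging trait, only to illustrate the pattern.
trait SimpleLogging {
  private lazy val log = Logger.getLogger(getClass.getName)
  def logInfo(msg: => String): Unit = log.info(msg)
}

class SorterBenchmark extends SimpleLogging {
  // Timings are reported through the logger instead of println.
  def runExperiment(name: String)(body: => Unit): Unit = {
    val start = System.nanoTime()
    body
    val elapsedMs = (System.nanoTime() - start) / 1000000
    logInfo(s"$name: took $elapsedMs ms")
  }
}

object SorterBenchmarkDemo {
  def main(args: Array[String]): Unit = {
    new SorterBenchmark().runExperiment("sort 100k ints") {
      Array.fill(100000)(Random.nextInt()).sorted
    }
  }
}
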
diff --git a/dev/audit-release/sbt_app_core/src/main/scala/SparkApp.scala b/dev/audit-release/sbt_app_core/src/main/scala/SparkApp.scala
index fc03fec986..61d91c70e9 100644
--- a/dev/audit-release/sbt_app_core/src/main/scala/SparkApp.scala
+++ b/dev/audit-release/sbt_app_core/src/main/scala/SparkApp.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package main.scala
import scala.util.Try
@@ -59,3 +60,4 @@ object SimpleApp {
}
}
}
+// scalastyle:on println
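
For the audit-release apps here and the examples further down, println is the intended user-facing output, so each file is bracketed as a whole: the off comment sits above the package clause and the on comment closes the file. A minimal sketch of such a wrapped file (the object name and message are illustrative only, not one of the real apps):

// scalastyle:off println
package main.scala

object WrappedExampleApp {
  def main(args: Array[String]): Unit = {
    // Inside the wrapped file, println is exempt from the style check.
    println("example finished successfully")
  }
}
// scalastyle:on println
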
diff --git a/dev/audit-release/sbt_app_ganglia/src/main/scala/SparkApp.scala b/dev/audit-release/sbt_app_ganglia/src/main/scala/SparkApp.scala
index 0be8e64fbf..9f7ae75d0b 100644
--- a/dev/audit-release/sbt_app_ganglia/src/main/scala/SparkApp.scala
+++ b/dev/audit-release/sbt_app_ganglia/src/main/scala/SparkApp.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package main.scala
import scala.util.Try
@@ -37,3 +38,4 @@ object SimpleApp {
}
}
}
+// scalastyle:on println
diff --git a/dev/audit-release/sbt_app_graphx/src/main/scala/GraphxApp.scala b/dev/audit-release/sbt_app_graphx/src/main/scala/GraphxApp.scala
index 24c7f8d667..2f0b6ef9a5 100644
--- a/dev/audit-release/sbt_app_graphx/src/main/scala/GraphxApp.scala
+++ b/dev/audit-release/sbt_app_graphx/src/main/scala/GraphxApp.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package main.scala
import org.apache.spark.{SparkContext, SparkConf}
@@ -51,3 +52,4 @@ object GraphXApp {
println("Test succeeded")
}
}
+// scalastyle:on println
diff --git a/dev/audit-release/sbt_app_hive/src/main/scala/HiveApp.scala b/dev/audit-release/sbt_app_hive/src/main/scala/HiveApp.scala
index 5111bc0adb..4a980ec071 100644
--- a/dev/audit-release/sbt_app_hive/src/main/scala/HiveApp.scala
+++ b/dev/audit-release/sbt_app_hive/src/main/scala/HiveApp.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package main.scala
import scala.collection.mutable.{ListBuffer, Queue}
@@ -55,3 +56,4 @@ object SparkSqlExample {
sc.stop()
}
}
+// scalastyle:on println
diff --git a/dev/audit-release/sbt_app_kinesis/src/main/scala/SparkApp.scala b/dev/audit-release/sbt_app_kinesis/src/main/scala/SparkApp.scala
index 9f85066501..adc25b57d6 100644
--- a/dev/audit-release/sbt_app_kinesis/src/main/scala/SparkApp.scala
+++ b/dev/audit-release/sbt_app_kinesis/src/main/scala/SparkApp.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package main.scala
import scala.util.Try
@@ -31,3 +32,4 @@ object SimpleApp {
}
}
}
+// scalastyle:on println
diff --git a/dev/audit-release/sbt_app_sql/src/main/scala/SqlApp.scala b/dev/audit-release/sbt_app_sql/src/main/scala/SqlApp.scala
index cc86ef4585..69c1154dc0 100644
--- a/dev/audit-release/sbt_app_sql/src/main/scala/SqlApp.scala
+++ b/dev/audit-release/sbt_app_sql/src/main/scala/SqlApp.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package main.scala
import scala.collection.mutable.{ListBuffer, Queue}
@@ -57,3 +58,4 @@ object SparkSqlExample {
sc.stop()
}
}
+// scalastyle:on println
diff --git a/dev/audit-release/sbt_app_streaming/src/main/scala/StreamingApp.scala b/dev/audit-release/sbt_app_streaming/src/main/scala/StreamingApp.scala
index 58a662bd9b..d6a074687f 100644
--- a/dev/audit-release/sbt_app_streaming/src/main/scala/StreamingApp.scala
+++ b/dev/audit-release/sbt_app_streaming/src/main/scala/StreamingApp.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package main.scala
import scala.collection.mutable.{ListBuffer, Queue}
@@ -61,3 +62,4 @@ object SparkStreamingExample {
ssc.stop()
}
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/BroadcastTest.scala b/examples/src/main/scala/org/apache/spark/examples/BroadcastTest.scala
index 4c129dbe2d..d812262fd8 100644
--- a/examples/src/main/scala/org/apache/spark/examples/BroadcastTest.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/BroadcastTest.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples
import org.apache.spark.{SparkConf, SparkContext}
@@ -52,3 +53,4 @@ object BroadcastTest {
sc.stop()
}
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/CassandraCQLTest.scala b/examples/src/main/scala/org/apache/spark/examples/CassandraCQLTest.scala
index 023bb3ee2d..36832f51d2 100644
--- a/examples/src/main/scala/org/apache/spark/examples/CassandraCQLTest.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/CassandraCQLTest.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples
import java.nio.ByteBuffer
@@ -140,3 +141,4 @@ object CassandraCQLTest {
sc.stop()
}
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/CassandraTest.scala b/examples/src/main/scala/org/apache/spark/examples/CassandraTest.scala
index ec689474ae..96ef3e198e 100644
--- a/examples/src/main/scala/org/apache/spark/examples/CassandraTest.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/CassandraTest.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples
import java.nio.ByteBuffer
@@ -130,6 +131,7 @@ object CassandraTest {
sc.stop()
}
}
+// scalastyle:on println
/*
create keyspace casDemo;
diff --git a/examples/src/main/scala/org/apache/spark/examples/DFSReadWriteTest.scala b/examples/src/main/scala/org/apache/spark/examples/DFSReadWriteTest.scala
index 1f12034ce0..d651fe4d6e 100644
--- a/examples/src/main/scala/org/apache/spark/examples/DFSReadWriteTest.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/DFSReadWriteTest.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples
import java.io.File
@@ -136,3 +137,4 @@ object DFSReadWriteTest {
}
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/DriverSubmissionTest.scala b/examples/src/main/scala/org/apache/spark/examples/DriverSubmissionTest.scala
index e757283823..c42df2b884 100644
--- a/examples/src/main/scala/org/apache/spark/examples/DriverSubmissionTest.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/DriverSubmissionTest.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples
import scala.collection.JavaConversions._
@@ -46,3 +47,4 @@ object DriverSubmissionTest {
}
}
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/GroupByTest.scala b/examples/src/main/scala/org/apache/spark/examples/GroupByTest.scala
index 15f6678648..fa4a3afeec 100644
--- a/examples/src/main/scala/org/apache/spark/examples/GroupByTest.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/GroupByTest.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples
import java.util.Random
@@ -53,3 +54,4 @@ object GroupByTest {
sc.stop()
}
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/HBaseTest.scala b/examples/src/main/scala/org/apache/spark/examples/HBaseTest.scala
index 95c96111c9..244742327a 100644
--- a/examples/src/main/scala/org/apache/spark/examples/HBaseTest.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/HBaseTest.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples
import org.apache.hadoop.hbase.client.HBaseAdmin
@@ -62,3 +63,4 @@ object HBaseTest {
admin.close()
}
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/HdfsTest.scala b/examples/src/main/scala/org/apache/spark/examples/HdfsTest.scala
index ed2b38e2ca..124dc9af63 100644
--- a/examples/src/main/scala/org/apache/spark/examples/HdfsTest.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/HdfsTest.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples
import org.apache.spark._
@@ -41,3 +42,4 @@ object HdfsTest {
sc.stop()
}
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/LocalALS.scala b/examples/src/main/scala/org/apache/spark/examples/LocalALS.scala
index 3d52594630..af5f216f28 100644
--- a/examples/src/main/scala/org/apache/spark/examples/LocalALS.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/LocalALS.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples
import org.apache.commons.math3.linear._
@@ -142,3 +143,4 @@ object LocalALS {
new Array2DRowRealMatrix(Array.fill(rows, cols)(math.random))
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/LocalFileLR.scala b/examples/src/main/scala/org/apache/spark/examples/LocalFileLR.scala
index ac2ea35bbd..9c8aae53cf 100644
--- a/examples/src/main/scala/org/apache/spark/examples/LocalFileLR.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/LocalFileLR.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples
import java.util.Random
@@ -73,3 +74,4 @@ object LocalFileLR {
println("Final w: " + w)
}
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/LocalKMeans.scala b/examples/src/main/scala/org/apache/spark/examples/LocalKMeans.scala
index 04fc0a0330..e7b28d38bd 100644
--- a/examples/src/main/scala/org/apache/spark/examples/LocalKMeans.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/LocalKMeans.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples
import java.util.Random
@@ -119,3 +120,4 @@ object LocalKMeans {
println("Final centers: " + kPoints)
}
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/LocalLR.scala b/examples/src/main/scala/org/apache/spark/examples/LocalLR.scala
index c3fc74a116..4f6b092a59 100644
--- a/examples/src/main/scala/org/apache/spark/examples/LocalLR.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/LocalLR.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples
import java.util.Random
@@ -77,3 +78,4 @@ object LocalLR {
println("Final w: " + w)
}
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/LocalPi.scala b/examples/src/main/scala/org/apache/spark/examples/LocalPi.scala
index ee6b3ee34a..3d923625f1 100644
--- a/examples/src/main/scala/org/apache/spark/examples/LocalPi.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/LocalPi.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples
import scala.math.random
@@ -33,3 +34,4 @@ object LocalPi {
println("Pi is roughly " + 4 * count / 100000.0)
}
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/LogQuery.scala b/examples/src/main/scala/org/apache/spark/examples/LogQuery.scala
index 75c82117cb..a80de10f46 100644
--- a/examples/src/main/scala/org/apache/spark/examples/LogQuery.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/LogQuery.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples
import org.apache.spark.{SparkConf, SparkContext}
@@ -83,3 +84,4 @@ object LogQuery {
sc.stop()
}
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/MultiBroadcastTest.scala b/examples/src/main/scala/org/apache/spark/examples/MultiBroadcastTest.scala
index 2a5c0c0def..61ce9db914 100644
--- a/examples/src/main/scala/org/apache/spark/examples/MultiBroadcastTest.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/MultiBroadcastTest.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples
import org.apache.spark.rdd.RDD
@@ -53,3 +54,4 @@ object MultiBroadcastTest {
sc.stop()
}
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/SimpleSkewedGroupByTest.scala b/examples/src/main/scala/org/apache/spark/examples/SimpleSkewedGroupByTest.scala
index 5291ab81f4..3b0b00fe4d 100644
--- a/examples/src/main/scala/org/apache/spark/examples/SimpleSkewedGroupByTest.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/SimpleSkewedGroupByTest.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples
import java.util.Random
@@ -67,3 +68,4 @@ object SimpleSkewedGroupByTest {
sc.stop()
}
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/SkewedGroupByTest.scala b/examples/src/main/scala/org/apache/spark/examples/SkewedGroupByTest.scala
index 017d4e1e5c..719e2176fe 100644
--- a/examples/src/main/scala/org/apache/spark/examples/SkewedGroupByTest.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/SkewedGroupByTest.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples
import java.util.Random
@@ -57,3 +58,4 @@ object SkewedGroupByTest {
sc.stop()
}
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/SparkALS.scala b/examples/src/main/scala/org/apache/spark/examples/SparkALS.scala
index 30c4261551..69799b7c2b 100644
--- a/examples/src/main/scala/org/apache/spark/examples/SparkALS.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/SparkALS.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples
import org.apache.commons.math3.linear._
@@ -144,3 +145,4 @@ object SparkALS {
new Array2DRowRealMatrix(Array.fill(rows, cols)(math.random))
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/SparkHdfsLR.scala b/examples/src/main/scala/org/apache/spark/examples/SparkHdfsLR.scala
index 9099c2fcc9..505ea5a4c7 100644
--- a/examples/src/main/scala/org/apache/spark/examples/SparkHdfsLR.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/SparkHdfsLR.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples
import java.util.Random
@@ -97,3 +98,4 @@ object SparkHdfsLR {
sc.stop()
}
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/SparkKMeans.scala b/examples/src/main/scala/org/apache/spark/examples/SparkKMeans.scala
index b514d9123f..c56e1124ad 100644
--- a/examples/src/main/scala/org/apache/spark/examples/SparkKMeans.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/SparkKMeans.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples
import breeze.linalg.{Vector, DenseVector, squaredDistance}
@@ -100,3 +101,4 @@ object SparkKMeans {
sc.stop()
}
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/SparkLR.scala b/examples/src/main/scala/org/apache/spark/examples/SparkLR.scala
index 1e6b4fb0c7..d265c227f4 100644
--- a/examples/src/main/scala/org/apache/spark/examples/SparkLR.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/SparkLR.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples
import java.util.Random
@@ -86,3 +87,4 @@ object SparkLR {
sc.stop()
}
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/SparkPageRank.scala b/examples/src/main/scala/org/apache/spark/examples/SparkPageRank.scala
index bd7894f184..0fd79660dd 100644
--- a/examples/src/main/scala/org/apache/spark/examples/SparkPageRank.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/SparkPageRank.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples
import org.apache.spark.SparkContext._
@@ -74,3 +75,4 @@ object SparkPageRank {
ctx.stop()
}
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/SparkPi.scala b/examples/src/main/scala/org/apache/spark/examples/SparkPi.scala
index 35b8dd6c29..818d4f2b81 100644
--- a/examples/src/main/scala/org/apache/spark/examples/SparkPi.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/SparkPi.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples
import scala.math.random
@@ -37,3 +38,4 @@ object SparkPi {
spark.stop()
}
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/SparkTC.scala b/examples/src/main/scala/org/apache/spark/examples/SparkTC.scala
index 772cd897f5..95072071cc 100644
--- a/examples/src/main/scala/org/apache/spark/examples/SparkTC.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/SparkTC.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples
import scala.util.Random
@@ -70,3 +71,4 @@ object SparkTC {
spark.stop()
}
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/SparkTachyonHdfsLR.scala b/examples/src/main/scala/org/apache/spark/examples/SparkTachyonHdfsLR.scala
index 4393b99e63..cfbdae0221 100644
--- a/examples/src/main/scala/org/apache/spark/examples/SparkTachyonHdfsLR.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/SparkTachyonHdfsLR.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples
import java.util.Random
@@ -94,3 +95,4 @@ object SparkTachyonHdfsLR {
sc.stop()
}
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/SparkTachyonPi.scala b/examples/src/main/scala/org/apache/spark/examples/SparkTachyonPi.scala
index 7743f7968b..e46ac655be 100644
--- a/examples/src/main/scala/org/apache/spark/examples/SparkTachyonPi.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/SparkTachyonPi.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples
import scala.math.random
@@ -46,3 +47,4 @@ object SparkTachyonPi {
spark.stop()
}
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/graphx/Analytics.scala b/examples/src/main/scala/org/apache/spark/examples/graphx/Analytics.scala
index 409721b01c..8dd6c9706e 100644
--- a/examples/src/main/scala/org/apache/spark/examples/graphx/Analytics.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/graphx/Analytics.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples.graphx
import scala.collection.mutable
@@ -151,3 +152,4 @@ object Analytics extends Logging {
}
}
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/graphx/LiveJournalPageRank.scala b/examples/src/main/scala/org/apache/spark/examples/graphx/LiveJournalPageRank.scala
index f6f8d9f90c..da3ffca1a6 100644
--- a/examples/src/main/scala/org/apache/spark/examples/graphx/LiveJournalPageRank.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/graphx/LiveJournalPageRank.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples.graphx
/**
@@ -42,3 +43,4 @@ object LiveJournalPageRank {
Analytics.main(args.patch(0, List("pagerank"), 0))
}
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/graphx/SynthBenchmark.scala b/examples/src/main/scala/org/apache/spark/examples/graphx/SynthBenchmark.scala
index 3ec20d594b..46e52aacd9 100644
--- a/examples/src/main/scala/org/apache/spark/examples/graphx/SynthBenchmark.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/graphx/SynthBenchmark.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples.graphx
import org.apache.spark.SparkContext._
@@ -128,3 +129,4 @@ object SynthBenchmark {
sc.stop()
}
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/CrossValidatorExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/CrossValidatorExample.scala
index 6c0af20461..14b358d46f 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/CrossValidatorExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/CrossValidatorExample.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples.ml
import org.apache.spark.{SparkConf, SparkContext}
@@ -110,3 +111,4 @@ object CrossValidatorExample {
sc.stop()
}
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/DecisionTreeExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/DecisionTreeExample.scala
index 54e4073941..f28671f786 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/DecisionTreeExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/DecisionTreeExample.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples.ml
import scala.collection.mutable
@@ -355,3 +356,4 @@ object DecisionTreeExample {
println(s" Root mean squared error (RMSE): $RMSE")
}
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/DeveloperApiExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/DeveloperApiExample.scala
index 7b8cc21ed8..78f31b4ffe 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/DeveloperApiExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/DeveloperApiExample.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples.ml
import org.apache.spark.{SparkConf, SparkContext}
@@ -181,3 +182,4 @@ private class MyLogisticRegressionModel(
copyValues(new MyLogisticRegressionModel(uid, weights), extra)
}
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/GBTExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/GBTExample.scala
index 33905277c7..f4a15f806e 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/GBTExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/GBTExample.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples.ml
import scala.collection.mutable
@@ -236,3 +237,4 @@ object GBTExample {
sc.stop()
}
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/LinearRegressionExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/LinearRegressionExample.scala
index b54466fd48..b73299fb12 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/LinearRegressionExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/LinearRegressionExample.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples.ml
import scala.collection.mutable
@@ -140,3 +141,4 @@ object LinearRegressionExample {
sc.stop()
}
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/LogisticRegressionExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/LogisticRegressionExample.scala
index 3cf193f353..7682557127 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/LogisticRegressionExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/LogisticRegressionExample.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples.ml
import scala.collection.mutable
@@ -157,3 +158,4 @@ object LogisticRegressionExample {
sc.stop()
}
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/MovieLensALS.scala b/examples/src/main/scala/org/apache/spark/examples/ml/MovieLensALS.scala
index 25f21113bf..cd411397a4 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/MovieLensALS.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/MovieLensALS.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples.ml
import scopt.OptionParser
@@ -178,3 +179,4 @@ object MovieLensALS {
sc.stop()
}
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/OneVsRestExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/OneVsRestExample.scala
index 6927eb8f27..bab31f585b 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/OneVsRestExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/OneVsRestExample.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples.ml
import java.util.concurrent.TimeUnit.{NANOSECONDS => NANO}
@@ -183,3 +184,4 @@ object OneVsRestExample {
(NANO.toSeconds(t1 - t0), result)
}
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/RandomForestExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/RandomForestExample.scala
index 9f7cad68a4..109178f413 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/RandomForestExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/RandomForestExample.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples.ml
import scala.collection.mutable
@@ -244,3 +245,4 @@ object RandomForestExample {
sc.stop()
}
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/SimpleParamsExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/SimpleParamsExample.scala
index a0561e2573..58d7b67674 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/SimpleParamsExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/SimpleParamsExample.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples.ml
import org.apache.spark.{SparkConf, SparkContext}
@@ -100,3 +101,4 @@ object SimpleParamsExample {
sc.stop()
}
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/SimpleTextClassificationPipeline.scala b/examples/src/main/scala/org/apache/spark/examples/ml/SimpleTextClassificationPipeline.scala
index 1324b066c3..960280137c 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/SimpleTextClassificationPipeline.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/SimpleTextClassificationPipeline.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples.ml
import scala.beans.BeanInfo
@@ -89,3 +90,4 @@ object SimpleTextClassificationPipeline {
sc.stop()
}
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/mllib/BinaryClassification.scala b/examples/src/main/scala/org/apache/spark/examples/mllib/BinaryClassification.scala
index a113653810..1a4016f76c 100644
--- a/examples/src/main/scala/org/apache/spark/examples/mllib/BinaryClassification.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/mllib/BinaryClassification.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples.mllib
import org.apache.log4j.{Level, Logger}
@@ -153,3 +154,4 @@ object BinaryClassification {
sc.stop()
}
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/mllib/Correlations.scala b/examples/src/main/scala/org/apache/spark/examples/mllib/Correlations.scala
index e49129c4e7..026d4ecc6d 100644
--- a/examples/src/main/scala/org/apache/spark/examples/mllib/Correlations.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/mllib/Correlations.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples.mllib
import scopt.OptionParser
@@ -91,3 +92,4 @@ object Correlations {
sc.stop()
}
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/mllib/CosineSimilarity.scala b/examples/src/main/scala/org/apache/spark/examples/mllib/CosineSimilarity.scala
index cb1abbd18f..69988cc1b9 100644
--- a/examples/src/main/scala/org/apache/spark/examples/mllib/CosineSimilarity.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/mllib/CosineSimilarity.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples.mllib
import scopt.OptionParser
@@ -106,3 +107,4 @@ object CosineSimilarity {
sc.stop()
}
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/mllib/DatasetExample.scala b/examples/src/main/scala/org/apache/spark/examples/mllib/DatasetExample.scala
index 520893b26d..dc13f82488 100644
--- a/examples/src/main/scala/org/apache/spark/examples/mllib/DatasetExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/mllib/DatasetExample.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples.mllib
import java.io.File
@@ -119,3 +120,4 @@ object DatasetExample {
}
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/mllib/DecisionTreeRunner.scala b/examples/src/main/scala/org/apache/spark/examples/mllib/DecisionTreeRunner.scala
index 3381941673..57ffe3dd25 100644
--- a/examples/src/main/scala/org/apache/spark/examples/mllib/DecisionTreeRunner.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/mllib/DecisionTreeRunner.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples.mllib
import scala.language.reflectiveCalls
@@ -368,3 +369,4 @@ object DecisionTreeRunner {
}
// scalastyle:on structural.type
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/mllib/DenseGaussianMixture.scala b/examples/src/main/scala/org/apache/spark/examples/mllib/DenseGaussianMixture.scala
index f8c71ccabc..1fce4ba7ef 100644
--- a/examples/src/main/scala/org/apache/spark/examples/mllib/DenseGaussianMixture.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/mllib/DenseGaussianMixture.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples.mllib
import org.apache.spark.{SparkConf, SparkContext}
@@ -65,3 +66,4 @@ object DenseGaussianMixture {
println()
}
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/mllib/DenseKMeans.scala b/examples/src/main/scala/org/apache/spark/examples/mllib/DenseKMeans.scala
index 14cc5cbb67..380d85d60e 100644
--- a/examples/src/main/scala/org/apache/spark/examples/mllib/DenseKMeans.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/mllib/DenseKMeans.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples.mllib
import org.apache.log4j.{Level, Logger}
@@ -107,3 +108,4 @@ object DenseKMeans {
sc.stop()
}
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/mllib/FPGrowthExample.scala b/examples/src/main/scala/org/apache/spark/examples/mllib/FPGrowthExample.scala
index 13f24a1e59..14b930550d 100644
--- a/examples/src/main/scala/org/apache/spark/examples/mllib/FPGrowthExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/mllib/FPGrowthExample.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples.mllib
import scopt.OptionParser
@@ -80,3 +81,4 @@ object FPGrowthExample {
sc.stop()
}
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/mllib/GradientBoostedTreesRunner.scala b/examples/src/main/scala/org/apache/spark/examples/mllib/GradientBoostedTreesRunner.scala
index 7416fb5a40..e16a6bf033 100644
--- a/examples/src/main/scala/org/apache/spark/examples/mllib/GradientBoostedTreesRunner.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/mllib/GradientBoostedTreesRunner.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples.mllib
import scopt.OptionParser
@@ -145,3 +146,4 @@ object GradientBoostedTreesRunner {
sc.stop()
}
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/mllib/LDAExample.scala b/examples/src/main/scala/org/apache/spark/examples/mllib/LDAExample.scala
index 31d629f853..75b0f69cf9 100644
--- a/examples/src/main/scala/org/apache/spark/examples/mllib/LDAExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/mllib/LDAExample.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples.mllib
import java.text.BreakIterator
@@ -302,3 +303,4 @@ private class SimpleTokenizer(sc: SparkContext, stopwordFile: String) extends Se
}
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/mllib/LinearRegression.scala b/examples/src/main/scala/org/apache/spark/examples/mllib/LinearRegression.scala
index 6a456ba7ec..8878061a09 100644
--- a/examples/src/main/scala/org/apache/spark/examples/mllib/LinearRegression.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/mllib/LinearRegression.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples.mllib
import org.apache.log4j.{Level, Logger}
@@ -134,3 +135,4 @@ object LinearRegression {
sc.stop()
}
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/mllib/MovieLensALS.scala b/examples/src/main/scala/org/apache/spark/examples/mllib/MovieLensALS.scala
index 99588b0984..e43a6f2864 100644
--- a/examples/src/main/scala/org/apache/spark/examples/mllib/MovieLensALS.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/mllib/MovieLensALS.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples.mllib
import scala.collection.mutable
@@ -189,3 +190,4 @@ object MovieLensALS {
math.sqrt(predictionsAndRatings.map(x => (x._1 - x._2) * (x._1 - x._2)).mean())
}
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/mllib/MultivariateSummarizer.scala b/examples/src/main/scala/org/apache/spark/examples/mllib/MultivariateSummarizer.scala
index 6e4e2d07f2..5f839c75dd 100644
--- a/examples/src/main/scala/org/apache/spark/examples/mllib/MultivariateSummarizer.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/mllib/MultivariateSummarizer.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples.mllib
import scopt.OptionParser
@@ -97,3 +98,4 @@ object MultivariateSummarizer {
sc.stop()
}
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/mllib/PowerIterationClusteringExample.scala b/examples/src/main/scala/org/apache/spark/examples/mllib/PowerIterationClusteringExample.scala
index 6d8b806569..0723223954 100644
--- a/examples/src/main/scala/org/apache/spark/examples/mllib/PowerIterationClusteringExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/mllib/PowerIterationClusteringExample.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples.mllib
import org.apache.log4j.{Level, Logger}
@@ -154,4 +155,4 @@ object PowerIterationClusteringExample {
coeff * math.exp(expCoeff * ssquares)
}
}
-
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/mllib/RandomRDDGeneration.scala b/examples/src/main/scala/org/apache/spark/examples/mllib/RandomRDDGeneration.scala
index 924b586e3a..bee85ba0f9 100644
--- a/examples/src/main/scala/org/apache/spark/examples/mllib/RandomRDDGeneration.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/mllib/RandomRDDGeneration.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples.mllib
import org.apache.spark.mllib.random.RandomRDDs
@@ -58,3 +59,4 @@ object RandomRDDGeneration {
}
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/mllib/SampledRDDs.scala b/examples/src/main/scala/org/apache/spark/examples/mllib/SampledRDDs.scala
index 663c12734a..6963f43e08 100644
--- a/examples/src/main/scala/org/apache/spark/examples/mllib/SampledRDDs.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/mllib/SampledRDDs.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples.mllib
import org.apache.spark.mllib.util.MLUtils
@@ -125,3 +126,4 @@ object SampledRDDs {
sc.stop()
}
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/mllib/SparseNaiveBayes.scala b/examples/src/main/scala/org/apache/spark/examples/mllib/SparseNaiveBayes.scala
index f1ff4e6911..f81fc292a3 100644
--- a/examples/src/main/scala/org/apache/spark/examples/mllib/SparseNaiveBayes.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/mllib/SparseNaiveBayes.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples.mllib
import org.apache.log4j.{Level, Logger}
@@ -100,3 +101,4 @@ object SparseNaiveBayes {
sc.stop()
}
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/mllib/StreamingKMeansExample.scala b/examples/src/main/scala/org/apache/spark/examples/mllib/StreamingKMeansExample.scala
index 8bb12d2ee9..af03724a8a 100644
--- a/examples/src/main/scala/org/apache/spark/examples/mllib/StreamingKMeansExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/mllib/StreamingKMeansExample.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples.mllib
import org.apache.spark.SparkConf
@@ -75,3 +76,4 @@ object StreamingKMeansExample {
ssc.awaitTermination()
}
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/mllib/StreamingLinearRegression.scala b/examples/src/main/scala/org/apache/spark/examples/mllib/StreamingLinearRegression.scala
index 1a95048bbf..b4a5dca031 100644
--- a/examples/src/main/scala/org/apache/spark/examples/mllib/StreamingLinearRegression.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/mllib/StreamingLinearRegression.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples.mllib
import org.apache.spark.mllib.linalg.Vectors
@@ -69,3 +70,4 @@ object StreamingLinearRegression {
}
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/mllib/StreamingLogisticRegression.scala b/examples/src/main/scala/org/apache/spark/examples/mllib/StreamingLogisticRegression.scala
index e1998099c2..b42f4cb5f9 100644
--- a/examples/src/main/scala/org/apache/spark/examples/mllib/StreamingLogisticRegression.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/mllib/StreamingLogisticRegression.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples.mllib
import org.apache.spark.mllib.linalg.Vectors
@@ -71,3 +72,4 @@ object StreamingLogisticRegression {
}
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/mllib/TallSkinnyPCA.scala b/examples/src/main/scala/org/apache/spark/examples/mllib/TallSkinnyPCA.scala
index 3cd9cb743e..464fbd385a 100644
--- a/examples/src/main/scala/org/apache/spark/examples/mllib/TallSkinnyPCA.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/mllib/TallSkinnyPCA.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples.mllib
import org.apache.spark.{SparkConf, SparkContext}
@@ -58,3 +59,4 @@ object TallSkinnyPCA {
sc.stop()
}
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/mllib/TallSkinnySVD.scala b/examples/src/main/scala/org/apache/spark/examples/mllib/TallSkinnySVD.scala
index 4d66903186..65b4bc46f0 100644
--- a/examples/src/main/scala/org/apache/spark/examples/mllib/TallSkinnySVD.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/mllib/TallSkinnySVD.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples.mllib
import org.apache.spark.{SparkConf, SparkContext}
@@ -58,3 +59,4 @@ object TallSkinnySVD {
sc.stop()
}
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/sql/RDDRelation.scala b/examples/src/main/scala/org/apache/spark/examples/sql/RDDRelation.scala
index b11e32047d..2cc56f04e5 100644
--- a/examples/src/main/scala/org/apache/spark/examples/sql/RDDRelation.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/sql/RDDRelation.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples.sql
import org.apache.spark.{SparkConf, SparkContext}
@@ -73,3 +74,4 @@ object RDDRelation {
sc.stop()
}
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/sql/hive/HiveFromSpark.scala b/examples/src/main/scala/org/apache/spark/examples/sql/hive/HiveFromSpark.scala
index b7ba60ec28..bf40bd1ef1 100644
--- a/examples/src/main/scala/org/apache/spark/examples/sql/hive/HiveFromSpark.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/sql/hive/HiveFromSpark.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples.sql.hive
import com.google.common.io.{ByteStreams, Files}
@@ -77,3 +78,4 @@ object HiveFromSpark {
sc.stop()
}
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/streaming/ActorWordCount.scala b/examples/src/main/scala/org/apache/spark/examples/streaming/ActorWordCount.scala
index 016de4c63d..e9c9907198 100644
--- a/examples/src/main/scala/org/apache/spark/examples/streaming/ActorWordCount.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/streaming/ActorWordCount.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples.streaming
import scala.collection.mutable.LinkedList
@@ -170,3 +171,4 @@ object ActorWordCount {
ssc.awaitTermination()
}
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/streaming/CustomReceiver.scala b/examples/src/main/scala/org/apache/spark/examples/streaming/CustomReceiver.scala
index 30269a7cca..28e9bf520e 100644
--- a/examples/src/main/scala/org/apache/spark/examples/streaming/CustomReceiver.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/streaming/CustomReceiver.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples.streaming
import java.io.{InputStreamReader, BufferedReader, InputStream}
@@ -100,3 +101,4 @@ class CustomReceiver(host: String, port: Int)
}
}
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/streaming/DirectKafkaWordCount.scala b/examples/src/main/scala/org/apache/spark/examples/streaming/DirectKafkaWordCount.scala
index fbe394de4a..bd78526f8c 100644
--- a/examples/src/main/scala/org/apache/spark/examples/streaming/DirectKafkaWordCount.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/streaming/DirectKafkaWordCount.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples.streaming
import kafka.serializer.StringDecoder
@@ -70,3 +71,4 @@ object DirectKafkaWordCount {
ssc.awaitTermination()
}
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/streaming/FlumeEventCount.scala b/examples/src/main/scala/org/apache/spark/examples/streaming/FlumeEventCount.scala
index 20e7df7c45..91e52e4eff 100644
--- a/examples/src/main/scala/org/apache/spark/examples/streaming/FlumeEventCount.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/streaming/FlumeEventCount.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples.streaming
import org.apache.spark.SparkConf
@@ -66,3 +67,4 @@ object FlumeEventCount {
ssc.awaitTermination()
}
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/streaming/FlumePollingEventCount.scala b/examples/src/main/scala/org/apache/spark/examples/streaming/FlumePollingEventCount.scala
index 1cc8c8d5c2..2bdbc37e2a 100644
--- a/examples/src/main/scala/org/apache/spark/examples/streaming/FlumePollingEventCount.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/streaming/FlumePollingEventCount.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples.streaming
import org.apache.spark.SparkConf
@@ -65,3 +66,4 @@ object FlumePollingEventCount {
ssc.awaitTermination()
}
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/streaming/HdfsWordCount.scala b/examples/src/main/scala/org/apache/spark/examples/streaming/HdfsWordCount.scala
index 4b4667fec4..1f282d437d 100644
--- a/examples/src/main/scala/org/apache/spark/examples/streaming/HdfsWordCount.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/streaming/HdfsWordCount.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples.streaming
import org.apache.spark.SparkConf
@@ -53,3 +54,4 @@ object HdfsWordCount {
ssc.awaitTermination()
}
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/streaming/KafkaWordCount.scala b/examples/src/main/scala/org/apache/spark/examples/streaming/KafkaWordCount.scala
index 60416ee343..b40d17e9c2 100644
--- a/examples/src/main/scala/org/apache/spark/examples/streaming/KafkaWordCount.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/streaming/KafkaWordCount.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples.streaming
import java.util.HashMap
@@ -101,3 +102,4 @@ object KafkaWordCountProducer {
}
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/streaming/MQTTWordCount.scala b/examples/src/main/scala/org/apache/spark/examples/streaming/MQTTWordCount.scala
index 813c8554f5..d772ae309f 100644
--- a/examples/src/main/scala/org/apache/spark/examples/streaming/MQTTWordCount.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/streaming/MQTTWordCount.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples.streaming
import org.eclipse.paho.client.mqttv3._
@@ -96,8 +97,10 @@ object MQTTWordCount {
def main(args: Array[String]) {
if (args.length < 2) {
+ // scalastyle:off println
System.err.println(
"Usage: MQTTWordCount <MqttbrokerUrl> <topic>")
+ // scalastyle:on println
System.exit(1)
}
@@ -113,3 +116,4 @@ object MQTTWordCount {
ssc.awaitTermination()
}
}
+// scalastyle:on println
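
Several of the streaming examples, like MQTTWordCount above, also guard their argument parsing: a usage message still goes straight to stderr before exiting. A small sketch of that guard on its own, using only the local suppression (the object name and usage string are illustrative):

object UsageGuardExample {
  def main(args: Array[String]): Unit = {
    if (args.length < 2) {
      // scalastyle:off println
      System.err.println("Usage: UsageGuardExample <brokerUrl> <topic>")
      // scalastyle:on println
      System.exit(1)
    }
    val Array(brokerUrl, topic) = args.take(2)
    // ... connect to brokerUrl and consume messages from topic here ...
  }
}
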
diff --git a/examples/src/main/scala/org/apache/spark/examples/streaming/NetworkWordCount.scala b/examples/src/main/scala/org/apache/spark/examples/streaming/NetworkWordCount.scala
index 2cd8073dad..9a57fe286d 100644
--- a/examples/src/main/scala/org/apache/spark/examples/streaming/NetworkWordCount.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/streaming/NetworkWordCount.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples.streaming
import org.apache.spark.SparkConf
@@ -57,3 +58,4 @@ object NetworkWordCount {
ssc.awaitTermination()
}
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/streaming/RawNetworkGrep.scala b/examples/src/main/scala/org/apache/spark/examples/streaming/RawNetworkGrep.scala
index a9aaa445bc..5322929d17 100644
--- a/examples/src/main/scala/org/apache/spark/examples/streaming/RawNetworkGrep.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/streaming/RawNetworkGrep.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples.streaming
import org.apache.spark.SparkConf
@@ -58,3 +59,4 @@ object RawNetworkGrep {
ssc.awaitTermination()
}
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/streaming/RecoverableNetworkWordCount.scala b/examples/src/main/scala/org/apache/spark/examples/streaming/RecoverableNetworkWordCount.scala
index 751b30ea15..9916882e4f 100644
--- a/examples/src/main/scala/org/apache/spark/examples/streaming/RecoverableNetworkWordCount.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/streaming/RecoverableNetworkWordCount.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples.streaming
import java.io.File
@@ -108,3 +109,4 @@ object RecoverableNetworkWordCount {
ssc.awaitTermination()
}
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/streaming/SqlNetworkWordCount.scala b/examples/src/main/scala/org/apache/spark/examples/streaming/SqlNetworkWordCount.scala
index 5a6b9216a3..ed617754cb 100644
--- a/examples/src/main/scala/org/apache/spark/examples/streaming/SqlNetworkWordCount.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/streaming/SqlNetworkWordCount.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples.streaming
import org.apache.spark.SparkConf
@@ -99,3 +100,4 @@ object SQLContextSingleton {
instance
}
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/streaming/StatefulNetworkWordCount.scala b/examples/src/main/scala/org/apache/spark/examples/streaming/StatefulNetworkWordCount.scala
index 345d0bc441..02ba1c2eed 100644
--- a/examples/src/main/scala/org/apache/spark/examples/streaming/StatefulNetworkWordCount.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/streaming/StatefulNetworkWordCount.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples.streaming
import org.apache.spark.SparkConf
@@ -78,3 +79,4 @@ object StatefulNetworkWordCount {
ssc.awaitTermination()
}
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/streaming/TwitterAlgebirdCMS.scala b/examples/src/main/scala/org/apache/spark/examples/streaming/TwitterAlgebirdCMS.scala
index c10de84a80..825c671a92 100644
--- a/examples/src/main/scala/org/apache/spark/examples/streaming/TwitterAlgebirdCMS.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/streaming/TwitterAlgebirdCMS.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples.streaming
import com.twitter.algebird._
@@ -113,3 +114,4 @@ object TwitterAlgebirdCMS {
ssc.awaitTermination()
}
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/streaming/TwitterAlgebirdHLL.scala b/examples/src/main/scala/org/apache/spark/examples/streaming/TwitterAlgebirdHLL.scala
index 62db5e663b..49826ede70 100644
--- a/examples/src/main/scala/org/apache/spark/examples/streaming/TwitterAlgebirdHLL.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/streaming/TwitterAlgebirdHLL.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples.streaming
import com.twitter.algebird.HyperLogLogMonoid
@@ -90,3 +91,4 @@ object TwitterAlgebirdHLL {
ssc.awaitTermination()
}
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/streaming/TwitterPopularTags.scala b/examples/src/main/scala/org/apache/spark/examples/streaming/TwitterPopularTags.scala
index f253d75b27..49cee1b43c 100644
--- a/examples/src/main/scala/org/apache/spark/examples/streaming/TwitterPopularTags.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/streaming/TwitterPopularTags.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples.streaming
import org.apache.spark.streaming.{Seconds, StreamingContext}
@@ -82,3 +83,4 @@ object TwitterPopularTags {
ssc.awaitTermination()
}
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/streaming/ZeroMQWordCount.scala b/examples/src/main/scala/org/apache/spark/examples/streaming/ZeroMQWordCount.scala
index e99d1baa72..6ac9a72c37 100644
--- a/examples/src/main/scala/org/apache/spark/examples/streaming/ZeroMQWordCount.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/streaming/ZeroMQWordCount.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples.streaming
import akka.actor.ActorSystem
@@ -97,3 +98,4 @@ object ZeroMQWordCount {
ssc.awaitTermination()
}
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/streaming/clickstream/PageViewGenerator.scala b/examples/src/main/scala/org/apache/spark/examples/streaming/clickstream/PageViewGenerator.scala
index 889f052c70..bea7a47cb2 100644
--- a/examples/src/main/scala/org/apache/spark/examples/streaming/clickstream/PageViewGenerator.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/streaming/clickstream/PageViewGenerator.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples.streaming.clickstream
import java.net.ServerSocket
@@ -108,3 +109,4 @@ object PageViewGenerator {
}
}
}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/streaming/clickstream/PageViewStream.scala b/examples/src/main/scala/org/apache/spark/examples/streaming/clickstream/PageViewStream.scala
index fbacaee986..ec7d39da8b 100644
--- a/examples/src/main/scala/org/apache/spark/examples/streaming/clickstream/PageViewStream.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/streaming/clickstream/PageViewStream.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples.streaming.clickstream
import org.apache.spark.SparkContext._
@@ -107,3 +108,4 @@ object PageViewStream {
ssc.start()
}
}
+// scalastyle:on println
diff --git a/external/kafka/src/test/scala/org/apache/spark/streaming/kafka/DirectKafkaStreamSuite.scala b/external/kafka/src/test/scala/org/apache/spark/streaming/kafka/DirectKafkaStreamSuite.scala
index 8e1715f6db..5b3c79444a 100644
--- a/external/kafka/src/test/scala/org/apache/spark/streaming/kafka/DirectKafkaStreamSuite.scala
+++ b/external/kafka/src/test/scala/org/apache/spark/streaming/kafka/DirectKafkaStreamSuite.scala
@@ -111,7 +111,7 @@ class DirectKafkaStreamSuite
rdd
}.foreachRDD { rdd =>
for (o <- offsetRanges) {
- println(s"${o.topic} ${o.partition} ${o.fromOffset} ${o.untilOffset}")
+ logInfo(s"${o.topic} ${o.partition} ${o.fromOffset} ${o.untilOffset}")
}
val collected = rdd.mapPartitionsWithIndex { (i, iter) =>
// For each partition, get size of the range in the partition,
diff --git a/extras/kinesis-asl/src/main/scala/org/apache/spark/examples/streaming/KinesisWordCountASL.scala b/extras/kinesis-asl/src/main/scala/org/apache/spark/examples/streaming/KinesisWordCountASL.scala
index be8b62d3cc..de749626ec 100644
--- a/extras/kinesis-asl/src/main/scala/org/apache/spark/examples/streaming/KinesisWordCountASL.scala
+++ b/extras/kinesis-asl/src/main/scala/org/apache/spark/examples/streaming/KinesisWordCountASL.scala
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// scalastyle:off println
package org.apache.spark.examples.streaming
import java.nio.ByteBuffer
@@ -272,3 +273,4 @@ private[streaming] object StreamingExamples extends Logging {
}
}
}
+// scalastyle:on println
diff --git a/graphx/src/main/scala/org/apache/spark/graphx/util/BytecodeUtils.scala b/graphx/src/main/scala/org/apache/spark/graphx/util/BytecodeUtils.scala
index be6b9047d9..5c07b415cd 100644
--- a/graphx/src/main/scala/org/apache/spark/graphx/util/BytecodeUtils.scala
+++ b/graphx/src/main/scala/org/apache/spark/graphx/util/BytecodeUtils.scala
@@ -66,7 +66,6 @@ private[graphx] object BytecodeUtils {
val finder = new MethodInvocationFinder(c.getName, m)
getClassReader(c).accept(finder, 0)
for (classMethod <- finder.methodsInvoked) {
- // println(classMethod)
if (classMethod._1 == targetClass && classMethod._2 == targetMethod) {
return true
} else if (!seen.contains(classMethod)) {
diff --git a/graphx/src/main/scala/org/apache/spark/graphx/util/GraphGenerators.scala b/graphx/src/main/scala/org/apache/spark/graphx/util/GraphGenerators.scala
index 9591c4e9b8..989e226305 100644
--- a/graphx/src/main/scala/org/apache/spark/graphx/util/GraphGenerators.scala
+++ b/graphx/src/main/scala/org/apache/spark/graphx/util/GraphGenerators.scala
@@ -33,7 +33,7 @@ import org.apache.spark.graphx.Edge
import org.apache.spark.graphx.impl.GraphImpl
/** A collection of graph generating functions. */
-object GraphGenerators {
+object GraphGenerators extends Logging {
val RMATa = 0.45
val RMATb = 0.15
@@ -142,7 +142,7 @@ object GraphGenerators {
var edges: Set[Edge[Int]] = Set()
while (edges.size < numEdges) {
if (edges.size % 100 == 0) {
- println(edges.size + " edges")
+ logDebug(edges.size + " edges")
}
edges += addEdge(numVertices)
}
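Where the console output was diagnostic rather than user-facing, the patch takes the other route shown in the GraphGenerators hunk above: mix in the Logging trait and replace println with a call at the appropriate log level. A minimal sketch of that conversion, assuming org.apache.spark.Logging is on the classpath and using a hypothetical object name:

    import org.apache.spark.Logging

    object EdgeProgressReporter extends Logging {
      def report(edgeCount: Int): Unit = {
        // previously: println(edgeCount + " edges")
        logDebug(s"$edgeCount edges")
      }
    }
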
diff --git a/graphx/src/test/scala/org/apache/spark/graphx/util/BytecodeUtilsSuite.scala b/graphx/src/test/scala/org/apache/spark/graphx/util/BytecodeUtilsSuite.scala
index 186d0cc2a9..61e44dcab5 100644
--- a/graphx/src/test/scala/org/apache/spark/graphx/util/BytecodeUtilsSuite.scala
+++ b/graphx/src/test/scala/org/apache/spark/graphx/util/BytecodeUtilsSuite.scala
@@ -20,6 +20,7 @@ package org.apache.spark.graphx.util
import org.apache.spark.SparkFunSuite
+// scalastyle:off println
class BytecodeUtilsSuite extends SparkFunSuite {
import BytecodeUtilsSuite.TestClass
@@ -102,6 +103,7 @@ class BytecodeUtilsSuite extends SparkFunSuite {
private val c = {e: TestClass => println(e.baz)}
}
+// scalastyle:on println
object BytecodeUtilsSuite {
class TestClass(val foo: Int, val bar: Long) {
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/util/KMeansDataGenerator.scala b/mllib/src/main/scala/org/apache/spark/mllib/util/KMeansDataGenerator.scala
index 6eaebaf7db..e6bcff48b0 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/util/KMeansDataGenerator.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/util/KMeansDataGenerator.scala
@@ -64,8 +64,10 @@ object KMeansDataGenerator {
def main(args: Array[String]) {
if (args.length < 6) {
+ // scalastyle:off println
println("Usage: KMeansGenerator " +
"<master> <output_dir> <num_points> <k> <d> <r> [<num_partitions>]")
+ // scalastyle:on println
System.exit(1)
}
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/util/LinearDataGenerator.scala b/mllib/src/main/scala/org/apache/spark/mllib/util/LinearDataGenerator.scala
index b4e33c98ba..87eeb5db05 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/util/LinearDataGenerator.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/util/LinearDataGenerator.scala
@@ -153,8 +153,10 @@ object LinearDataGenerator {
def main(args: Array[String]) {
if (args.length < 2) {
+ // scalastyle:off println
println("Usage: LinearDataGenerator " +
"<master> <output_dir> [num_examples] [num_features] [num_partitions]")
+ // scalastyle:on println
System.exit(1)
}
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/util/LogisticRegressionDataGenerator.scala b/mllib/src/main/scala/org/apache/spark/mllib/util/LogisticRegressionDataGenerator.scala
index 9d802678c4..c09cbe69bb 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/util/LogisticRegressionDataGenerator.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/util/LogisticRegressionDataGenerator.scala
@@ -64,8 +64,10 @@ object LogisticRegressionDataGenerator {
def main(args: Array[String]) {
if (args.length != 5) {
+ // scalastyle:off println
println("Usage: LogisticRegressionGenerator " +
"<master> <output_dir> <num_examples> <num_features> <num_partitions>")
+ // scalastyle:on println
System.exit(1)
}
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/util/MFDataGenerator.scala b/mllib/src/main/scala/org/apache/spark/mllib/util/MFDataGenerator.scala
index bd73a866c8..16f430599a 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/util/MFDataGenerator.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/util/MFDataGenerator.scala
@@ -55,8 +55,10 @@ import org.apache.spark.rdd.RDD
object MFDataGenerator {
def main(args: Array[String]) {
if (args.length < 2) {
+ // scalastyle:off println
println("Usage: MFDataGenerator " +
"<master> <outputDir> [m] [n] [rank] [trainSampFact] [noise] [sigma] [test] [testSampFact]")
+ // scalastyle:on println
System.exit(1)
}
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/util/SVMDataGenerator.scala b/mllib/src/main/scala/org/apache/spark/mllib/util/SVMDataGenerator.scala
index a8e30cc9d7..ad20b7694a 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/util/SVMDataGenerator.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/util/SVMDataGenerator.scala
@@ -37,8 +37,10 @@ object SVMDataGenerator {
def main(args: Array[String]) {
if (args.length < 2) {
+ // scalastyle:off println
println("Usage: SVMGenerator " +
"<master> <output_dir> [num_examples] [num_features] [num_partitions]")
+ // scalastyle:on println
System.exit(1)
}
diff --git a/mllib/src/test/scala/org/apache/spark/ml/feature/VectorIndexerSuite.scala b/mllib/src/test/scala/org/apache/spark/ml/feature/VectorIndexerSuite.scala
index 8c85c96d5c..03120c828c 100644
--- a/mllib/src/test/scala/org/apache/spark/ml/feature/VectorIndexerSuite.scala
+++ b/mllib/src/test/scala/org/apache/spark/ml/feature/VectorIndexerSuite.scala
@@ -19,7 +19,7 @@ package org.apache.spark.ml.feature
import scala.beans.{BeanInfo, BeanProperty}
-import org.apache.spark.{SparkException, SparkFunSuite}
+import org.apache.spark.{Logging, SparkException, SparkFunSuite}
import org.apache.spark.ml.attribute._
import org.apache.spark.ml.param.ParamsSuite
import org.apache.spark.mllib.linalg.{SparseVector, Vector, Vectors}
@@ -27,7 +27,7 @@ import org.apache.spark.mllib.util.MLlibTestSparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.DataFrame
-class VectorIndexerSuite extends SparkFunSuite with MLlibTestSparkContext {
+class VectorIndexerSuite extends SparkFunSuite with MLlibTestSparkContext with Logging {
import VectorIndexerSuite.FeatureData
@@ -113,11 +113,11 @@ class VectorIndexerSuite extends SparkFunSuite with MLlibTestSparkContext {
model.transform(sparsePoints1) // should work
intercept[SparkException] {
model.transform(densePoints2).collect()
- println("Did not throw error when fit, transform were called on vectors of different lengths")
+ logInfo("Did not throw error when fit, transform were called on vectors of different lengths")
}
intercept[SparkException] {
vectorIndexer.fit(badPoints)
- println("Did not throw error when fitting vectors of different lengths in same RDD.")
+ logInfo("Did not throw error when fitting vectors of different lengths in same RDD.")
}
}
@@ -196,7 +196,7 @@ class VectorIndexerSuite extends SparkFunSuite with MLlibTestSparkContext {
}
} catch {
case e: org.scalatest.exceptions.TestFailedException =>
- println(errMsg)
+ logError(errMsg)
throw e
}
}
diff --git a/mllib/src/test/scala/org/apache/spark/mllib/linalg/VectorsSuite.scala b/mllib/src/test/scala/org/apache/spark/mllib/linalg/VectorsSuite.scala
index c4ae0a16f7..178d95a7b9 100644
--- a/mllib/src/test/scala/org/apache/spark/mllib/linalg/VectorsSuite.scala
+++ b/mllib/src/test/scala/org/apache/spark/mllib/linalg/VectorsSuite.scala
@@ -21,10 +21,10 @@ import scala.util.Random
import breeze.linalg.{DenseMatrix => BDM, squaredDistance => breezeSquaredDistance}
-import org.apache.spark.{SparkException, SparkFunSuite}
+import org.apache.spark.{Logging, SparkException, SparkFunSuite}
import org.apache.spark.mllib.util.TestingUtils._
-class VectorsSuite extends SparkFunSuite {
+class VectorsSuite extends SparkFunSuite with Logging {
val arr = Array(0.1, 0.0, 0.3, 0.4)
val n = 4
@@ -142,7 +142,7 @@ class VectorsSuite extends SparkFunSuite {
malformatted.foreach { s =>
intercept[SparkException] {
Vectors.parse(s)
- println(s"Didn't detect malformatted string $s.")
+ logInfo(s"Didn't detect malformatted string $s.")
}
}
}
diff --git a/mllib/src/test/scala/org/apache/spark/mllib/stat/CorrelationSuite.scala b/mllib/src/test/scala/org/apache/spark/mllib/stat/CorrelationSuite.scala
index c292ced75e..c3eeda0125 100644
--- a/mllib/src/test/scala/org/apache/spark/mllib/stat/CorrelationSuite.scala
+++ b/mllib/src/test/scala/org/apache/spark/mllib/stat/CorrelationSuite.scala
@@ -19,13 +19,13 @@ package org.apache.spark.mllib.stat
import breeze.linalg.{DenseMatrix => BDM, Matrix => BM}
-import org.apache.spark.SparkFunSuite
+import org.apache.spark.{Logging, SparkFunSuite}
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.stat.correlation.{Correlations, PearsonCorrelation,
SpearmanCorrelation}
import org.apache.spark.mllib.util.MLlibTestSparkContext
-class CorrelationSuite extends SparkFunSuite with MLlibTestSparkContext {
+class CorrelationSuite extends SparkFunSuite with MLlibTestSparkContext with Logging {
// test input data
val xData = Array(1.0, 0.0, -2.0)
@@ -146,7 +146,7 @@ class CorrelationSuite extends SparkFunSuite with MLlibTestSparkContext {
def matrixApproxEqual(A: BM[Double], B: BM[Double], threshold: Double = 1e-6): Boolean = {
for (i <- 0 until A.rows; j <- 0 until A.cols) {
if (!approxEqual(A(i, j), B(i, j), threshold)) {
- println("i, j = " + i + ", " + j + " actual: " + A(i, j) + " expected:" + B(i, j))
+ logInfo("i, j = " + i + ", " + j + " actual: " + A(i, j) + " expected:" + B(i, j))
return false
}
}
diff --git a/mllib/src/test/scala/org/apache/spark/mllib/tree/GradientBoostedTreesSuite.scala b/mllib/src/test/scala/org/apache/spark/mllib/tree/GradientBoostedTreesSuite.scala
index 84dd3b342d..2521b33421 100644
--- a/mllib/src/test/scala/org/apache/spark/mllib/tree/GradientBoostedTreesSuite.scala
+++ b/mllib/src/test/scala/org/apache/spark/mllib/tree/GradientBoostedTreesSuite.scala
@@ -17,7 +17,7 @@
package org.apache.spark.mllib.tree
-import org.apache.spark.SparkFunSuite
+import org.apache.spark.{Logging, SparkFunSuite}
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.mllib.tree.configuration.Algo._
import org.apache.spark.mllib.tree.configuration.{BoostingStrategy, Strategy}
@@ -31,7 +31,7 @@ import org.apache.spark.util.Utils
/**
* Test suite for [[GradientBoostedTrees]].
*/
-class GradientBoostedTreesSuite extends SparkFunSuite with MLlibTestSparkContext {
+class GradientBoostedTreesSuite extends SparkFunSuite with MLlibTestSparkContext with Logging {
test("Regression with continuous features: SquaredError") {
GradientBoostedTreesSuite.testCombinations.foreach {
@@ -50,7 +50,7 @@ class GradientBoostedTreesSuite extends SparkFunSuite with MLlibTestSparkContext
EnsembleTestHelper.validateRegressor(gbt, GradientBoostedTreesSuite.data, 0.06)
} catch {
case e: java.lang.AssertionError =>
- println(s"FAILED for numIterations=$numIterations, learningRate=$learningRate," +
+ logError(s"FAILED for numIterations=$numIterations, learningRate=$learningRate," +
s" subsamplingRate=$subsamplingRate")
throw e
}
@@ -80,7 +80,7 @@ class GradientBoostedTreesSuite extends SparkFunSuite with MLlibTestSparkContext
EnsembleTestHelper.validateRegressor(gbt, GradientBoostedTreesSuite.data, 0.85, "mae")
} catch {
case e: java.lang.AssertionError =>
- println(s"FAILED for numIterations=$numIterations, learningRate=$learningRate," +
+ logError(s"FAILED for numIterations=$numIterations, learningRate=$learningRate," +
s" subsamplingRate=$subsamplingRate")
throw e
}
@@ -111,7 +111,7 @@ class GradientBoostedTreesSuite extends SparkFunSuite with MLlibTestSparkContext
EnsembleTestHelper.validateClassifier(gbt, GradientBoostedTreesSuite.data, 0.9)
} catch {
case e: java.lang.AssertionError =>
- println(s"FAILED for numIterations=$numIterations, learningRate=$learningRate," +
+ logError(s"FAILED for numIterations=$numIterations, learningRate=$learningRate," +
s" subsamplingRate=$subsamplingRate")
throw e
}
diff --git a/mllib/src/test/scala/org/apache/spark/mllib/util/NumericParserSuite.scala b/mllib/src/test/scala/org/apache/spark/mllib/util/NumericParserSuite.scala
index fa4f74d71b..16d7c3ab39 100644
--- a/mllib/src/test/scala/org/apache/spark/mllib/util/NumericParserSuite.scala
+++ b/mllib/src/test/scala/org/apache/spark/mllib/util/NumericParserSuite.scala
@@ -33,7 +33,7 @@ class NumericParserSuite extends SparkFunSuite {
malformatted.foreach { s =>
intercept[SparkException] {
NumericParser.parse(s)
- println(s"Didn't detect malformatted string $s.")
+ throw new RuntimeException(s"Didn't detect malformatted string $s.")
}
}
}
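The NumericParserSuite change differs slightly from the logging conversions in the other test suites. A println inside intercept[SparkException] runs only when the expected exception was not thrown, and intercept already fails the test in that case, so the println was pure console noise. Replacing it with a thrown RuntimeException still trips intercept (wrong exception type) but carries the offending input string into the test failure instead of printing it to stdout. A sketch of the resulting shape, mirroring the hunk above:

    malformatted.foreach { s =>
      intercept[SparkException] {
        NumericParser.parse(s)
        // reached only if parse did not throw; this unexpected exception fails intercept
        throw new RuntimeException(s"Didn't detect malformatted string $s.")
      }
    }
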
diff --git a/project/SparkBuild.scala b/project/SparkBuild.scala
index 3408c6d51e..4291b0be2a 100644
--- a/project/SparkBuild.scala
+++ b/project/SparkBuild.scala
@@ -69,6 +69,7 @@ object SparkBuild extends PomBuild {
import scala.collection.mutable
var isAlphaYarn = false
var profiles: mutable.Seq[String] = mutable.Seq("sbt")
+ // scalastyle:off println
if (Properties.envOrNone("SPARK_GANGLIA_LGPL").isDefined) {
println("NOTE: SPARK_GANGLIA_LGPL is deprecated, please use -Pspark-ganglia-lgpl flag.")
profiles ++= Seq("spark-ganglia-lgpl")
@@ -88,6 +89,7 @@ object SparkBuild extends PomBuild {
println("NOTE: SPARK_YARN is deprecated, please use -Pyarn flag.")
profiles ++= Seq("yarn")
}
+ // scalastyle:on println
profiles
}
@@ -96,8 +98,10 @@ object SparkBuild extends PomBuild {
case None => backwardCompatibility
case Some(v) =>
if (backwardCompatibility.nonEmpty)
+ // scalastyle:off println
println("Note: We ignore environment variables, when use of profile is detected in " +
"conjunction with environment variable.")
+ // scalastyle:on println
v.split("(\\s+|,)").filterNot(_.isEmpty).map(_.trim.replaceAll("-P", "")).toSeq
}
diff --git a/repl/scala-2.10/src/main/scala/org/apache/spark/repl/SparkCommandLine.scala b/repl/scala-2.10/src/main/scala/org/apache/spark/repl/SparkCommandLine.scala
index 6480e2d24e..24fbbc12c0 100644
--- a/repl/scala-2.10/src/main/scala/org/apache/spark/repl/SparkCommandLine.scala
+++ b/repl/scala-2.10/src/main/scala/org/apache/spark/repl/SparkCommandLine.scala
@@ -39,6 +39,8 @@ class SparkCommandLine(args: List[String], override val settings: Settings)
}
def this(args: List[String]) {
+ // scalastyle:off println
this(args, str => Console.println("Error: " + str))
+ // scalastyle:on println
}
}
diff --git a/repl/scala-2.10/src/main/scala/org/apache/spark/repl/SparkILoop.scala b/repl/scala-2.10/src/main/scala/org/apache/spark/repl/SparkILoop.scala
index 2b23552525..8f7f9074d3 100644
--- a/repl/scala-2.10/src/main/scala/org/apache/spark/repl/SparkILoop.scala
+++ b/repl/scala-2.10/src/main/scala/org/apache/spark/repl/SparkILoop.scala
@@ -1101,7 +1101,9 @@ object SparkILoop extends Logging {
val s = super.readLine()
// helping out by printing the line being interpreted.
if (s != null)
+ // scalastyle:off println
output.println(s)
+ // scalastyle:on println
s
}
}
diff --git a/repl/scala-2.10/src/main/scala/org/apache/spark/repl/SparkILoopInit.scala b/repl/scala-2.10/src/main/scala/org/apache/spark/repl/SparkILoopInit.scala
index 05faef8786..bd3314d94e 100644
--- a/repl/scala-2.10/src/main/scala/org/apache/spark/repl/SparkILoopInit.scala
+++ b/repl/scala-2.10/src/main/scala/org/apache/spark/repl/SparkILoopInit.scala
@@ -80,11 +80,13 @@ private[repl] trait SparkILoopInit {
if (!initIsComplete)
withLock { while (!initIsComplete) initLoopCondition.await() }
if (initError != null) {
+ // scalastyle:off println
println("""
|Failed to initialize the REPL due to an unexpected error.
|This is a bug, please, report it along with the error diagnostics printed below.
|%s.""".stripMargin.format(initError)
)
+ // scalastyle:on println
false
} else true
}
diff --git a/repl/scala-2.10/src/main/scala/org/apache/spark/repl/SparkIMain.scala b/repl/scala-2.10/src/main/scala/org/apache/spark/repl/SparkIMain.scala
index 35fb625645..8791618bd3 100644
--- a/repl/scala-2.10/src/main/scala/org/apache/spark/repl/SparkIMain.scala
+++ b/repl/scala-2.10/src/main/scala/org/apache/spark/repl/SparkIMain.scala
@@ -1761,7 +1761,9 @@ object SparkIMain {
if (intp.totalSilence) ()
else super.printMessage(msg)
}
+ // scalastyle:off println
else Console.println(msg)
+ // scalastyle:on println
}
}
}
diff --git a/repl/scala-2.11/src/main/scala/org/apache/spark/repl/SparkILoop.scala b/repl/scala-2.11/src/main/scala/org/apache/spark/repl/SparkILoop.scala
index 7a5e94da5c..3c90287249 100644
--- a/repl/scala-2.11/src/main/scala/org/apache/spark/repl/SparkILoop.scala
+++ b/repl/scala-2.11/src/main/scala/org/apache/spark/repl/SparkILoop.scala
@@ -943,7 +943,9 @@ object SparkILoop {
val s = super.readLine()
// helping out by printing the line being interpreted.
if (s != null)
+ // scalastyle:off println
output.println(s)
+ // scalastyle:on println
s
}
}
diff --git a/repl/scala-2.11/src/main/scala/org/apache/spark/repl/SparkIMain.scala b/repl/scala-2.11/src/main/scala/org/apache/spark/repl/SparkIMain.scala
index 1cb910f376..56c009a4e3 100644
--- a/repl/scala-2.11/src/main/scala/org/apache/spark/repl/SparkIMain.scala
+++ b/repl/scala-2.11/src/main/scala/org/apache/spark/repl/SparkIMain.scala
@@ -129,7 +129,9 @@ class SparkIMain(@BeanProperty val factory: ScriptEngineFactory, initialSettings
}
private def tquoted(s: String) = "\"\"\"" + s + "\"\"\""
private val logScope = scala.sys.props contains "scala.repl.scope"
+ // scalastyle:off println
private def scopelog(msg: String) = if (logScope) Console.err.println(msg)
+ // scalastyle:on println
// argument is a thunk to execute after init is done
def initialize(postInitSignal: => Unit) {
@@ -1297,8 +1299,10 @@ class SparkISettings(intp: SparkIMain) {
def deprecation_=(x: Boolean) = {
val old = intp.settings.deprecation.value
intp.settings.deprecation.value = x
+ // scalastyle:off println
if (!old && x) println("Enabled -deprecation output.")
else if (old && !x) println("Disabled -deprecation output.")
+ // scalastyle:on println
}
def deprecation: Boolean = intp.settings.deprecation.value
diff --git a/repl/scala-2.11/src/main/scala/org/apache/spark/repl/SparkReplReporter.scala b/repl/scala-2.11/src/main/scala/org/apache/spark/repl/SparkReplReporter.scala
index 0711ed4871..272f81eca9 100644
--- a/repl/scala-2.11/src/main/scala/org/apache/spark/repl/SparkReplReporter.scala
+++ b/repl/scala-2.11/src/main/scala/org/apache/spark/repl/SparkReplReporter.scala
@@ -42,7 +42,9 @@ class SparkReplReporter(intp: SparkIMain) extends ConsoleReporter(intp.settings,
}
else super.printMessage(msg)
}
+ // scalastyle:off println
else Console.println("[init] " + msg)
+ // scalastyle:on println
}
override def displayPrompt() {
diff --git a/scalastyle-config.xml b/scalastyle-config.xml
index d6f927b6fa..4961170379 100644
--- a/scalastyle-config.xml
+++ b/scalastyle-config.xml
@@ -141,12 +141,8 @@ This file is divided into 3 sections:
<customMessage>Tests must extend org.apache.spark.SparkFunSuite instead.</customMessage>
</check>
- <!-- ================================================================================ -->
- <!-- rules we'd like to enforce, but haven't cleaned up the codebase yet -->
- <!-- ================================================================================ -->
-
- <!-- SPARK-7977 We should turn this on, but we'd need to add whitelist to files that are using it first. -->
- <check customId="println" level="error" class="org.scalastyle.scalariform.TokenChecker" enabled="false">
+ <!-- As of SPARK-7977 all printlns need to be wrapped in '// scalastyle:off/on println' -->
+ <check customId="println" level="error" class="org.scalastyle.scalariform.TokenChecker" enabled="true">
<parameters><parameter name="regex">^println$</parameter></parameters>
<customMessage><![CDATA[Are you sure you want to println? If yes, wrap the code block with
// scalastyle:off println
@@ -154,6 +150,10 @@ This file is divided into 3 sections:
// scalastyle:on println]]></customMessage>
</check>
+ <!-- ================================================================================ -->
+ <!-- rules we'd like to enforce, but haven't cleaned up the codebase yet -->
+ <!-- ================================================================================ -->
+
<!-- We cannot turn the following two on, because it'd fail a lot of string interpolation use cases. -->
<!-- Ideally the following two rules should be configurable to rule out string interpolation. -->
<check level="error" class="org.scalastyle.scalariform.NoWhitespaceBeforeLeftBracketChecker" enabled="false"></check>
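The scalastyle-config.xml hunk is what makes all of the whitelisting above necessary: the println TokenChecker moves out of the "rules we'd like to enforce" section and flips from enabled="false" to enabled="true", so any bare println now fails the style check with the customMessage shown. From here on, new code either goes through Logging or, where console output is intentional, wraps the call exactly as the message asks (illustrative snippet, not from this patch):

    // scalastyle:off println
    println("intentional console output")
    // scalastyle:on println
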
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/package.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/package.scala
index 7f1b12cdd5..606fecbe06 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/package.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/package.scala
@@ -67,8 +67,10 @@ package object codegen {
outfile.write(generatedBytes)
outfile.close()
+ // scalastyle:off println
println(
s"javap -p -v -classpath ${dumpDirectory.getCanonicalPath} ${generatedClass.getName}".!!)
+ // scalastyle:on println
}
}
}
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/QueryPlan.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/QueryPlan.scala
index 2f545bb432..b89e3382f0 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/QueryPlan.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/QueryPlan.scala
@@ -154,7 +154,9 @@ abstract class QueryPlan[PlanType <: TreeNode[PlanType]] extends TreeNode[PlanTy
def schemaString: String = schema.treeString
/** Prints out the schema in the tree format */
+ // scalastyle:off println
def printSchema(): Unit = println(schemaString)
+ // scalastyle:on println
/**
* A prefix string used when printing the plan.
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/package.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/package.scala
index 07054166a5..71293475ca 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/package.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/package.scala
@@ -124,7 +124,9 @@ package object util {
val startTime = System.nanoTime()
val ret = f
val endTime = System.nanoTime()
+ // scalastyle:off println
println(s"${(endTime - startTime).toDouble / 1000000}ms")
+ // scalastyle:on println
ret
}
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/types/StructType.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/types/StructType.scala
index e0b8ff9178..b8097403ec 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/types/StructType.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/types/StructType.scala
@@ -250,7 +250,9 @@ case class StructType(fields: Array[StructField]) extends DataType with Seq[Stru
builder.toString()
}
+ // scalastyle:off println
def printTreeString(): Unit = println(treeString)
+ // scalastyle:on println
private[sql] def buildFormattedString(prefix: String, builder: StringBuilder): Unit = {
fields.foreach(field => field.buildFormattedString(prefix, builder))
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/Column.scala b/sql/core/src/main/scala/org/apache/spark/sql/Column.scala
index f201c8ea8a..1025026462 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/Column.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/Column.scala
@@ -860,11 +860,13 @@ class Column(protected[sql] val expr: Expression) extends Logging {
* @since 1.3.0
*/
def explain(extended: Boolean): Unit = {
+ // scalastyle:off println
if (extended) {
println(expr)
} else {
println(expr.prettyString)
}
+ // scalastyle:on println
}
/**
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala b/sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala
index d7966651b1..830fba35bb 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala
@@ -308,7 +308,9 @@ class DataFrame private[sql](
* @group basic
* @since 1.3.0
*/
+ // scalastyle:off println
def printSchema(): Unit = println(schema.treeString)
+ // scalastyle:on println
/**
* Prints the plans (logical and physical) to the console for debugging purposes.
@@ -319,7 +321,9 @@ class DataFrame private[sql](
ExplainCommand(
queryExecution.logical,
extended = extended).queryExecution.executedPlan.executeCollect().map {
+ // scalastyle:off println
r => println(r.getString(0))
+ // scalastyle:on println
}
}
@@ -392,7 +396,9 @@ class DataFrame private[sql](
* @group action
* @since 1.5.0
*/
+ // scalastyle:off println
def show(numRows: Int, truncate: Boolean): Unit = println(showString(numRows, truncate))
+ // scalastyle:on println
/**
* Returns a [[DataFrameNaFunctions]] for working with missing data.
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/debug/package.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/debug/package.scala
index 2964edac1a..e6081cb05b 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/debug/package.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/debug/package.scala
@@ -24,7 +24,7 @@ import org.apache.spark.unsafe.types.UTF8String
import scala.collection.mutable.HashSet
-import org.apache.spark.{AccumulatorParam, Accumulator}
+import org.apache.spark.{AccumulatorParam, Accumulator, Logging}
import org.apache.spark.annotation.DeveloperApi
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.trees.TreeNodeRef
@@ -57,7 +57,7 @@ package object debug {
* Augments [[DataFrame]]s with debug methods.
*/
@DeveloperApi
- implicit class DebugQuery(query: DataFrame) {
+ implicit class DebugQuery(query: DataFrame) extends Logging {
def debug(): Unit = {
val plan = query.queryExecution.executedPlan
val visited = new collection.mutable.HashSet[TreeNodeRef]()
@@ -66,7 +66,7 @@ package object debug {
visited += new TreeNodeRef(s)
DebugNode(s)
}
- println(s"Results returned: ${debugPlan.execute().count()}")
+ logDebug(s"Results returned: ${debugPlan.execute().count()}")
debugPlan.foreach {
case d: DebugNode => d.dumpStats()
case _ =>
@@ -82,11 +82,11 @@ package object debug {
TypeCheck(s)
}
try {
- println(s"Results returned: ${debugPlan.execute().count()}")
+ logDebug(s"Results returned: ${debugPlan.execute().count()}")
} catch {
case e: Exception =>
def unwrap(e: Throwable): Throwable = if (e.getCause == null) e else unwrap(e.getCause)
- println(s"Deepest Error: ${unwrap(e)}")
+ logDebug(s"Deepest Error: ${unwrap(e)}")
}
}
}
@@ -119,11 +119,11 @@ package object debug {
val columnStats: Array[ColumnMetrics] = Array.fill(child.output.size)(new ColumnMetrics())
def dumpStats(): Unit = {
- println(s"== ${child.simpleString} ==")
- println(s"Tuples output: ${tupleCount.value}")
+ logDebug(s"== ${child.simpleString} ==")
+ logDebug(s"Tuples output: ${tupleCount.value}")
child.output.zip(columnStats).foreach { case(attr, metric) =>
val actualDataTypes = metric.elementTypes.value.mkString("{", ",", "}")
- println(s" ${attr.name} ${attr.dataType}: $actualDataTypes")
+ logDebug(s" ${attr.name} ${attr.dataType}: $actualDataTypes")
}
}
diff --git a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLCLIDriver.scala b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLCLIDriver.scala
index 039cfa40d2..f66a17b209 100644
--- a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLCLIDriver.scala
+++ b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLCLIDriver.scala
@@ -40,7 +40,7 @@ import org.apache.spark.Logging
import org.apache.spark.sql.hive.HiveContext
import org.apache.spark.util.Utils
-private[hive] object SparkSQLCLIDriver {
+private[hive] object SparkSQLCLIDriver extends Logging {
private var prompt = "spark-sql"
private var continuedPrompt = "".padTo(prompt.length, ' ')
private var transport: TSocket = _
@@ -164,7 +164,7 @@ private[hive] object SparkSQLCLIDriver {
}
} catch {
case e: FileNotFoundException =>
- System.err.println(s"Could not open input file for reading. (${e.getMessage})")
+ logError(s"Could not open input file for reading. (${e.getMessage})")
System.exit(3)
}
@@ -180,14 +180,14 @@ private[hive] object SparkSQLCLIDriver {
val historyFile = historyDirectory + File.separator + ".hivehistory"
reader.setHistory(new History(new File(historyFile)))
} else {
- System.err.println("WARNING: Directory for Hive history file: " + historyDirectory +
+ logWarning("WARNING: Directory for Hive history file: " + historyDirectory +
" does not exist. History will not be available during this session.")
}
} catch {
case e: Exception =>
- System.err.println("WARNING: Encountered an error while trying to initialize Hive's " +
+ logWarning("WARNING: Encountered an error while trying to initialize Hive's " +
"history file. History will not be available during this session.")
- System.err.println(e.getMessage)
+ logWarning(e.getMessage)
}
val clientTransportTSocketField = classOf[CliSessionState].getDeclaredField("transport")
@@ -270,6 +270,7 @@ private[hive] class SparkSQLCLIDriver extends CliDriver with Logging {
val proc: CommandProcessor = CommandProcessorFactory.get(Array(tokens(0)), hconf)
if (proc != null) {
+ // scalastyle:off println
if (proc.isInstanceOf[Driver] || proc.isInstanceOf[SetProcessor] ||
proc.isInstanceOf[AddResourceProcessor]) {
val driver = new SparkSQLDriver
@@ -336,6 +337,7 @@ private[hive] class SparkSQLCLIDriver extends CliDriver with Logging {
}
ret = proc.run(cmd_1).getResponseCode
}
+ // scalastyle:on println
}
ret
}
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveContext.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveContext.scala
index bbc39b892b..4684d48aff 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveContext.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveContext.scala
@@ -34,6 +34,7 @@ import org.apache.hadoop.hive.ql.parse.VariableSubstitution
import org.apache.hadoop.hive.ql.session.SessionState
import org.apache.hadoop.hive.serde2.io.{DateWritable, TimestampWritable}
+import org.apache.spark.Logging
import org.apache.spark.SparkContext
import org.apache.spark.annotation.Experimental
import org.apache.spark.sql._
@@ -65,12 +66,12 @@ private[hive] class HiveQLDialect extends ParserDialect {
*
* @since 1.0.0
*/
-class HiveContext(sc: SparkContext) extends SQLContext(sc) {
+class HiveContext(sc: SparkContext) extends SQLContext(sc) with Logging {
self =>
import HiveContext._
- println("create HiveContext")
+ logDebug("create HiveContext")
/**
* When true, enables an experimental feature where metastore tables that use the parquet SerDe
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveQl.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveQl.scala
index 2de7a99c12..7fc517b646 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveQl.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveQl.scala
@@ -28,6 +28,7 @@ import org.apache.hadoop.hive.ql.parse._
import org.apache.hadoop.hive.ql.plan.PlanUtils
import org.apache.hadoop.hive.ql.session.SessionState
+import org.apache.spark.Logging
import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.catalyst.analysis._
import org.apache.spark.sql.catalyst.expressions._
@@ -73,7 +74,7 @@ private[hive] case class CreateTableAsSelect(
}
/** Provides a mapping from HiveQL statements to catalyst logical plans and expression trees. */
-private[hive] object HiveQl {
+private[hive] object HiveQl extends Logging {
protected val nativeCommands = Seq(
"TOK_ALTERDATABASE_OWNER",
"TOK_ALTERDATABASE_PROPERTIES",
@@ -186,7 +187,7 @@ private[hive] object HiveQl {
.map(ast => Option(ast).map(_.transform(rule)).orNull))
} catch {
case e: Exception =>
- println(dumpTree(n))
+ logError(dumpTree(n).toString)
throw e
}
}
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/ClientWrapper.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/ClientWrapper.scala
index cbd2bf6b5e..9d83ca6c11 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/ClientWrapper.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/ClientWrapper.scala
@@ -360,7 +360,9 @@ private[hive] class ClientWrapper(
case _ =>
if (state.out != null) {
+ // scalastyle:off println
state.out.println(tokens(0) + " " + cmd_1)
+ // scalastyle:on println
}
Seq(proc.run(cmd_1).getResponseCode.toString)
}
diff --git a/sql/hive/src/test/resources/regression-test-SPARK-8489/Main.scala b/sql/hive/src/test/resources/regression-test-SPARK-8489/Main.scala
index 0e428ba1d7..2590040f2e 100644
--- a/sql/hive/src/test/resources/regression-test-SPARK-8489/Main.scala
+++ b/sql/hive/src/test/resources/regression-test-SPARK-8489/Main.scala
@@ -30,6 +30,7 @@ import org.apache.spark.sql.hive.HiveContext
*/
object Main {
def main(args: Array[String]) {
+ // scalastyle:off println
println("Running regression test for SPARK-8489.")
val sc = new SparkContext("local", "testing")
val hc = new HiveContext(sc)
@@ -38,6 +39,7 @@ object Main {
val df = hc.createDataFrame(Seq(MyCoolClass("1", "2", "3")))
df.collect()
println("Regression test for SPARK-8489 success!")
+ // scalastyle:on println
sc.stop()
}
}
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveMetastoreCatalogSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveMetastoreCatalogSuite.scala
index e9bb326679..983c013bcf 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveMetastoreCatalogSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveMetastoreCatalogSuite.scala
@@ -17,13 +17,13 @@
package org.apache.spark.sql.hive
-import org.apache.spark.SparkFunSuite
+import org.apache.spark.{Logging, SparkFunSuite}
import org.apache.spark.sql.hive.test.TestHive
import org.apache.spark.sql.test.ExamplePointUDT
import org.apache.spark.sql.types.StructType
-class HiveMetastoreCatalogSuite extends SparkFunSuite {
+class HiveMetastoreCatalogSuite extends SparkFunSuite with Logging {
test("struct field should accept underscore in sub-column name") {
val metastr = "struct<a: int, b_1: string, c: string>"
@@ -41,7 +41,7 @@ class HiveMetastoreCatalogSuite extends SparkFunSuite {
test("duplicated metastore relations") {
import TestHive.implicits._
val df = TestHive.sql("SELECT * FROM src")
- println(df.queryExecution)
+ logInfo(df.queryExecution.toString)
df.as('a).join(df.as('b), $"a.key" === $"b.key")
}
}
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveSparkSubmitSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveSparkSubmitSuite.scala
index a38ed23b5c..917900e5f4 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveSparkSubmitSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveSparkSubmitSuite.scala
@@ -90,8 +90,10 @@ class HiveSparkSubmitSuite
"SPARK_TESTING" -> "1",
"SPARK_HOME" -> sparkHome
).run(ProcessLogger(
+ // scalastyle:off println
(line: String) => { println(s"out> $line") },
(line: String) => { println(s"err> $line") }
+ // scalastyle:on println
))
try {
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertIntoHiveTableSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertIntoHiveTableSuite.scala
index aa5dbe2db6..508695919e 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertIntoHiveTableSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertIntoHiveTableSuite.scala
@@ -86,8 +86,6 @@ class InsertIntoHiveTableSuite extends QueryTest with BeforeAndAfter {
val message = intercept[QueryExecutionException] {
sql("CREATE TABLE doubleCreateAndInsertTest (key int, value string)")
}.getMessage
-
- println("message!!!!" + message)
}
test("Double create does not fail when allowExisting = true") {
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala
index cc294bc3e8..d910af22c3 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala
@@ -26,6 +26,7 @@ import org.scalatest.BeforeAndAfterAll
import org.apache.hadoop.fs.Path
import org.apache.hadoop.mapred.InvalidInputException
+import org.apache.spark.Logging
import org.apache.spark.sql._
import org.apache.spark.sql.hive.client.{HiveTable, ManagedTable}
import org.apache.spark.sql.hive.test.TestHive
@@ -40,7 +41,8 @@ import org.apache.spark.util.Utils
/**
* Tests for persisting tables created though the data sources API into the metastore.
*/
-class MetastoreDataSourcesSuite extends QueryTest with SQLTestUtils with BeforeAndAfterAll {
+class MetastoreDataSourcesSuite extends QueryTest with SQLTestUtils with BeforeAndAfterAll
+ with Logging {
override val sqlContext = TestHive
var jsonFilePath: String = _
@@ -415,7 +417,7 @@ class MetastoreDataSourcesSuite extends QueryTest with SQLTestUtils with BeforeA
|)
""".stripMargin)
- sql("DROP TABLE jsonTable").collect().foreach(println)
+ sql("DROP TABLE jsonTable").collect().foreach(i => logInfo(i.toString))
}
}
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveUDFSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveUDFSuite.scala
index eaaa88e170..1bde5922b5 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveUDFSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveUDFSuite.scala
@@ -315,7 +315,6 @@ class PairUDF extends GenericUDF {
)
override def evaluate(args: Array[DeferredObject]): AnyRef = {
- println("Type = %s".format(args(0).getClass.getName))
Integer.valueOf(args(0).get.asInstanceOf[TestPair].entry._2)
}
diff --git a/streaming/src/main/scala/org/apache/spark/streaming/dstream/DStream.scala b/streaming/src/main/scala/org/apache/spark/streaming/dstream/DStream.scala
index 192aa6a139..1da0b0a54d 100644
--- a/streaming/src/main/scala/org/apache/spark/streaming/dstream/DStream.scala
+++ b/streaming/src/main/scala/org/apache/spark/streaming/dstream/DStream.scala
@@ -720,12 +720,14 @@ abstract class DStream[T: ClassTag] (
def foreachFunc: (RDD[T], Time) => Unit = {
(rdd: RDD[T], time: Time) => {
val firstNum = rdd.take(num + 1)
+ // scalastyle:off println
println("-------------------------------------------")
println("Time: " + time)
println("-------------------------------------------")
firstNum.take(num).foreach(println)
if (firstNum.length > num) println("...")
println()
+ // scalastyle:on println
}
}
new ForEachDStream(this, context.sparkContext.clean(foreachFunc)).register()
diff --git a/streaming/src/main/scala/org/apache/spark/streaming/util/RawTextSender.scala b/streaming/src/main/scala/org/apache/spark/streaming/util/RawTextSender.scala
index ca2f319f17..6addb96752 100644
--- a/streaming/src/main/scala/org/apache/spark/streaming/util/RawTextSender.scala
+++ b/streaming/src/main/scala/org/apache/spark/streaming/util/RawTextSender.scala
@@ -35,7 +35,9 @@ private[streaming]
object RawTextSender extends Logging {
def main(args: Array[String]) {
if (args.length != 4) {
+ // scalastyle:off println
System.err.println("Usage: RawTextSender <port> <file> <blockSize> <bytesPerSec>")
+ // scalastyle:on println
System.exit(1)
}
// Parse the arguments using a pattern match
diff --git a/streaming/src/main/scala/org/apache/spark/streaming/util/RecurringTimer.scala b/streaming/src/main/scala/org/apache/spark/streaming/util/RecurringTimer.scala
index c8eef833eb..dd32ad5ad8 100644
--- a/streaming/src/main/scala/org/apache/spark/streaming/util/RecurringTimer.scala
+++ b/streaming/src/main/scala/org/apache/spark/streaming/util/RecurringTimer.scala
@@ -106,7 +106,7 @@ class RecurringTimer(clock: Clock, period: Long, callback: (Long) => Unit, name:
}
private[streaming]
-object RecurringTimer {
+object RecurringTimer extends Logging {
def main(args: Array[String]) {
var lastRecurTime = 0L
@@ -114,7 +114,7 @@ object RecurringTimer {
def onRecur(time: Long) {
val currentTime = System.currentTimeMillis()
- println("" + currentTime + ": " + (currentTime - lastRecurTime))
+ logInfo("" + currentTime + ": " + (currentTime - lastRecurTime))
lastRecurTime = currentTime
}
val timer = new RecurringTimer(new SystemClock(), period, onRecur, "Test")
diff --git a/streaming/src/test/scala/org/apache/spark/streaming/MasterFailureTest.scala b/streaming/src/test/scala/org/apache/spark/streaming/MasterFailureTest.scala
index e0f14fd954..6e9d443109 100644
--- a/streaming/src/test/scala/org/apache/spark/streaming/MasterFailureTest.scala
+++ b/streaming/src/test/scala/org/apache/spark/streaming/MasterFailureTest.scala
@@ -43,6 +43,7 @@ object MasterFailureTest extends Logging {
@volatile var setupCalled = false
def main(args: Array[String]) {
+ // scalastyle:off println
if (args.size < 2) {
println(
"Usage: MasterFailureTest <local/HDFS directory> <# batches> " +
@@ -60,6 +61,7 @@ object MasterFailureTest extends Logging {
testUpdateStateByKey(directory, numBatches, batchDuration)
println("\n\nSUCCESS\n\n")
+ // scalastyle:on println
}
def testMap(directory: String, numBatches: Int, batchDuration: Duration) {
@@ -291,10 +293,12 @@ object MasterFailureTest extends Logging {
}
// Log the output
+ // scalastyle:off println
println("Expected output, size = " + expectedOutput.size)
println(expectedOutput.mkString("[", ",", "]"))
println("Output, size = " + output.size)
println(output.mkString("[", ",", "]"))
+ // scalastyle:on println
// Match the output with the expected output
output.foreach(o =>
diff --git a/streaming/src/test/scala/org/apache/spark/streaming/scheduler/JobGeneratorSuite.scala b/streaming/src/test/scala/org/apache/spark/streaming/scheduler/JobGeneratorSuite.scala
index 7865b06c2e..a2dbae149f 100644
--- a/streaming/src/test/scala/org/apache/spark/streaming/scheduler/JobGeneratorSuite.scala
+++ b/streaming/src/test/scala/org/apache/spark/streaming/scheduler/JobGeneratorSuite.scala
@@ -76,7 +76,6 @@ class JobGeneratorSuite extends TestSuiteBase {
if (time.milliseconds == longBatchTime) {
while (waitLatch.getCount() > 0) {
waitLatch.await()
- println("Await over")
}
}
})
diff --git a/tools/src/main/scala/org/apache/spark/tools/GenerateMIMAIgnore.scala b/tools/src/main/scala/org/apache/spark/tools/GenerateMIMAIgnore.scala
index 595ded6ae6..9483d2b692 100644
--- a/tools/src/main/scala/org/apache/spark/tools/GenerateMIMAIgnore.scala
+++ b/tools/src/main/scala/org/apache/spark/tools/GenerateMIMAIgnore.scala
@@ -92,7 +92,9 @@ object GenerateMIMAIgnore {
ignoredMembers ++= getAnnotatedOrPackagePrivateMembers(classSymbol)
} catch {
+ // scalastyle:off println
case _: Throwable => println("Error instrumenting class:" + className)
+ // scalastyle:on println
}
}
(ignoredClasses.flatMap(c => Seq(c, c.replace("$", "#"))).toSet, ignoredMembers.toSet)
@@ -108,7 +110,9 @@ object GenerateMIMAIgnore {
.filter(_.contains("$$")).map(classSymbol.fullName + "." + _)
} catch {
case t: Throwable =>
+ // scalastyle:off println
println("[WARN] Unable to detect inner functions for class:" + classSymbol.fullName)
+ // scalastyle:on println
Seq.empty[String]
}
}
@@ -128,12 +132,14 @@ object GenerateMIMAIgnore {
getOrElse(Iterator.empty).mkString("\n")
File(".generated-mima-class-excludes")
.writeAll(previousContents + privateClasses.mkString("\n"))
+ // scalastyle:off println
println("Created : .generated-mima-class-excludes in current directory.")
val previousMembersContents = Try(File(".generated-mima-member-excludes").lines)
.getOrElse(Iterator.empty).mkString("\n")
File(".generated-mima-member-excludes").writeAll(previousMembersContents +
privateMembers.mkString("\n"))
println("Created : .generated-mima-member-excludes in current directory.")
+ // scalastyle:on println
}
@@ -174,7 +180,9 @@ object GenerateMIMAIgnore {
try {
classes += Class.forName(entry.replace('/', '.').stripSuffix(".class"), false, classLoader)
} catch {
+ // scalastyle:off println
case _: Throwable => println("Unable to load:" + entry)
+ // scalastyle:on println
}
}
classes
diff --git a/tools/src/main/scala/org/apache/spark/tools/JavaAPICompletenessChecker.scala b/tools/src/main/scala/org/apache/spark/tools/JavaAPICompletenessChecker.scala
index 583823c90c..856ea177a9 100644
--- a/tools/src/main/scala/org/apache/spark/tools/JavaAPICompletenessChecker.scala
+++ b/tools/src/main/scala/org/apache/spark/tools/JavaAPICompletenessChecker.scala
@@ -323,11 +323,14 @@ object JavaAPICompletenessChecker {
val missingMethods = javaEquivalents -- javaMethods
for (method <- missingMethods) {
+ // scalastyle:off println
println(method)
+ // scalastyle:on println
}
}
def main(args: Array[String]) {
+ // scalastyle:off println
println("Missing RDD methods")
printMissingMethods(classOf[RDD[_]], classOf[JavaRDD[_]])
println()
@@ -359,5 +362,6 @@ object JavaAPICompletenessChecker {
println("Missing PairDStream methods")
printMissingMethods(classOf[PairDStreamFunctions[_, _]], classOf[JavaPairDStream[_, _]])
println()
+ // scalastyle:on println
}
}
diff --git a/tools/src/main/scala/org/apache/spark/tools/StoragePerfTester.scala b/tools/src/main/scala/org/apache/spark/tools/StoragePerfTester.scala
index baa97616ea..0dc2861253 100644
--- a/tools/src/main/scala/org/apache/spark/tools/StoragePerfTester.scala
+++ b/tools/src/main/scala/org/apache/spark/tools/StoragePerfTester.scala
@@ -85,7 +85,9 @@ object StoragePerfTester {
latch.countDown()
} catch {
case e: Exception =>
+ // scalastyle:off println
println("Exception in child thread: " + e + " " + e.getMessage)
+ // scalastyle:on println
System.exit(1)
}
}
@@ -97,9 +99,11 @@ object StoragePerfTester {
val bytesPerSecond = totalBytes.get() / time
val bytesPerFile = (totalBytes.get() / (numOutputSplits * numMaps.toDouble)).toLong
+ // scalastyle:off println
System.err.println("files_total\t\t%s".format(numMaps * numOutputSplits))
System.err.println("bytes_per_file\t\t%s".format(Utils.bytesToString(bytesPerFile)))
System.err.println("agg_throughput\t\t%s/s".format(Utils.bytesToString(bytesPerSecond.toLong)))
+ // scalastyle:on println
executor.shutdown()
sc.stop()
diff --git a/yarn/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMasterArguments.scala b/yarn/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMasterArguments.scala
index 68e9f6b4db..37f7937633 100644
--- a/yarn/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMasterArguments.scala
+++ b/yarn/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMasterArguments.scala
@@ -85,7 +85,9 @@ class ApplicationMasterArguments(val args: Array[String]) {
}
if (primaryPyFile != null && primaryRFile != null) {
+ // scalastyle:off println
System.err.println("Cannot have primary-py-file and primary-r-file at the same time")
+ // scalastyle:on println
System.exit(-1)
}
@@ -93,6 +95,7 @@ class ApplicationMasterArguments(val args: Array[String]) {
}
def printUsageAndExit(exitCode: Int, unknownParam: Any = null) {
+ // scalastyle:off println
if (unknownParam != null) {
System.err.println("Unknown/unsupported param " + unknownParam)
}
@@ -111,6 +114,7 @@ class ApplicationMasterArguments(val args: Array[String]) {
| --executor-cores NUM Number of cores for the executors (Default: 1)
| --executor-memory MEM Memory per executor (e.g. 1000M, 2G) (Default: 1G)
""".stripMargin)
+ // scalastyle:on println
System.exit(exitCode)
}
}
diff --git a/yarn/src/main/scala/org/apache/spark/deploy/yarn/Client.scala b/yarn/src/main/scala/org/apache/spark/deploy/yarn/Client.scala
index 4d52ae774e..f0af6f875f 100644
--- a/yarn/src/main/scala/org/apache/spark/deploy/yarn/Client.scala
+++ b/yarn/src/main/scala/org/apache/spark/deploy/yarn/Client.scala
@@ -938,7 +938,7 @@ private[spark] class Client(
object Client extends Logging {
def main(argStrings: Array[String]) {
if (!sys.props.contains("SPARK_SUBMIT")) {
- println("WARNING: This client is deprecated and will be removed in a " +
+ logWarning("WARNING: This client is deprecated and will be removed in a " +
"future version of Spark. Use ./bin/spark-submit with \"--master yarn\"")
}
diff --git a/yarn/src/main/scala/org/apache/spark/deploy/yarn/ClientArguments.scala b/yarn/src/main/scala/org/apache/spark/deploy/yarn/ClientArguments.scala
index 19d1bbff99..20d63d40cf 100644
--- a/yarn/src/main/scala/org/apache/spark/deploy/yarn/ClientArguments.scala
+++ b/yarn/src/main/scala/org/apache/spark/deploy/yarn/ClientArguments.scala
@@ -123,6 +123,7 @@ private[spark] class ClientArguments(args: Array[String], sparkConf: SparkConf)
throw new SparkException("Executor cores must not be less than " +
"spark.task.cpus.")
}
+ // scalastyle:off println
if (isClusterMode) {
for (key <- Seq(amMemKey, amMemOverheadKey, amCoresKey)) {
if (sparkConf.contains(key)) {
@@ -144,11 +145,13 @@ private[spark] class ClientArguments(args: Array[String], sparkConf: SparkConf)
.map(_.toInt)
.foreach { cores => amCores = cores }
}
+ // scalastyle:on println
}
private def parseArgs(inputArgs: List[String]): Unit = {
var args = inputArgs
+ // scalastyle:off println
while (!args.isEmpty) {
args match {
case ("--jar") :: value :: tail =>
@@ -253,6 +256,7 @@ private[spark] class ClientArguments(args: Array[String], sparkConf: SparkConf)
throw new IllegalArgumentException(getUsageMessage(args))
}
}
+ // scalastyle:on println
if (primaryPyFile != null && primaryRFile != null) {
throw new IllegalArgumentException("Cannot have primary-py-file and primary-r-file" +
diff --git a/yarn/src/test/scala/org/apache/spark/deploy/yarn/YarnClusterSuite.scala b/yarn/src/test/scala/org/apache/spark/deploy/yarn/YarnClusterSuite.scala
index 335e966519..547863d9a0 100644
--- a/yarn/src/test/scala/org/apache/spark/deploy/yarn/YarnClusterSuite.scala
+++ b/yarn/src/test/scala/org/apache/spark/deploy/yarn/YarnClusterSuite.scala
@@ -328,12 +328,14 @@ private object YarnClusterDriver extends Logging with Matchers {
def main(args: Array[String]): Unit = {
if (args.length != 1) {
+ // scalastyle:off println
System.err.println(
s"""
|Invalid command line: ${args.mkString(" ")}
|
|Usage: YarnClusterDriver [result file]
""".stripMargin)
+ // scalastyle:on println
System.exit(1)
}
@@ -386,12 +388,14 @@ private object YarnClasspathTest {
def main(args: Array[String]): Unit = {
if (args.length != 2) {
+ // scalastyle:off println
System.err.println(
s"""
|Invalid command line: ${args.mkString(" ")}
|
|Usage: YarnClasspathTest [driver result file] [executor result file]
""".stripMargin)
+ // scalastyle:on println
System.exit(1)
}