author     Jonathan Alter <jonalter@users.noreply.github.com>    2015-07-10 11:34:01 +0100
committer  Sean Owen <sowen@cloudera.com>                        2015-07-10 11:34:01 +0100
commit     e14b545d2dcbc4587688b4c46718d3680b0a2f67 (patch)
tree       8ae5fe8258bce4fd1cb2d55c7031de8b1dcc3963 /core
parent     d538919cc4fd3ab940d478c62dce1bae0270cfeb (diff)
[SPARK-7977] [BUILD] Disallowing println
Author: Jonathan Alter <jonalter@users.noreply.github.com>

Closes #7093 from jonalter/SPARK-7977 and squashes the following commits:

ccd44cc [Jonathan Alter] Changed println to log in ThreadingSuite
7fcac3e [Jonathan Alter] Reverting to println in ThreadingSuite
10724b6 [Jonathan Alter] Changing some printlns to logs in tests
eeec1e7 [Jonathan Alter] Merge branch 'master' of github.com:apache/spark into SPARK-7977
0b1dcb4 [Jonathan Alter] More println cleanup
aedaf80 [Jonathan Alter] Merge branch 'master' of github.com:apache/spark into SPARK-7977
925fd98 [Jonathan Alter] Merge branch 'master' of github.com:apache/spark into SPARK-7977
0c16fa3 [Jonathan Alter] Replacing some printlns with logs
45c7e05 [Jonathan Alter] Merge branch 'master' of github.com:apache/spark into SPARK-7977
5c8e283 [Jonathan Alter] Allowing println in audit-release examples
5b50da1 [Jonathan Alter] Allowing printlns in example files
ca4b477 [Jonathan Alter] Merge branch 'master' of github.com:apache/spark into SPARK-7977
83ab635 [Jonathan Alter] Fixing new printlns
54b131f [Jonathan Alter] Merge branch 'master' of github.com:apache/spark into SPARK-7977
1cd8a81 [Jonathan Alter] Removing some unnecessary comments and printlns
b837c3a [Jonathan Alter] Disallowing println
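The patch applies two recurring changes across core/. Where console output is intentional (usage text, benchmark drivers, example entry points), the new style rule is suppressed locally with // scalastyle:off println and // scalastyle:on println instead of rewriting the output. A minimal sketch of that suppression pattern; the object name and message are illustrative and not part of this patch:

    object UsageExample {
      def main(args: Array[String]): Unit = {
        if (args.isEmpty) {
          // Intentional console output: disable the println check for just these lines.
          // scalastyle:off println
          System.err.println("Usage: UsageExample <inputPath>")
          // scalastyle:on println
          System.exit(1)
        }
      }
    }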
Diffstat (limited to 'core')
-rw-r--r--  core/src/main/scala/org/apache/spark/Logging.scala | 2
-rw-r--r--  core/src/main/scala/org/apache/spark/api/r/RBackend.scala | 2
-rw-r--r--  core/src/main/scala/org/apache/spark/api/r/RRDD.scala | 2
-rw-r--r--  core/src/main/scala/org/apache/spark/deploy/Client.scala | 30
-rw-r--r--  core/src/main/scala/org/apache/spark/deploy/ClientArguments.scala | 4
-rw-r--r--  core/src/main/scala/org/apache/spark/deploy/RRunner.scala | 2
-rw-r--r--  core/src/main/scala/org/apache/spark/deploy/SparkSubmit.scala | 18
-rw-r--r--  core/src/main/scala/org/apache/spark/deploy/SparkSubmitArguments.scala | 4
-rw-r--r--  core/src/main/scala/org/apache/spark/deploy/client/TestExecutor.scala | 2
-rw-r--r--  core/src/main/scala/org/apache/spark/deploy/history/HistoryServerArguments.scala | 2
-rw-r--r--  core/src/main/scala/org/apache/spark/deploy/master/MasterArguments.scala | 2
-rw-r--r--  core/src/main/scala/org/apache/spark/deploy/mesos/MesosClusterDispatcherArguments.scala | 6
-rw-r--r--  core/src/main/scala/org/apache/spark/deploy/worker/DriverWrapper.scala | 2
-rw-r--r--  core/src/main/scala/org/apache/spark/deploy/worker/WorkerArguments.scala | 4
-rw-r--r--  core/src/main/scala/org/apache/spark/executor/CoarseGrainedExecutorBackend.scala | 4
-rw-r--r--  core/src/main/scala/org/apache/spark/input/FixedLengthBinaryInputFormat.scala | 7
-rw-r--r--  core/src/main/scala/org/apache/spark/network/nio/BlockMessage.scala | 22
-rw-r--r--  core/src/main/scala/org/apache/spark/network/nio/BlockMessageArray.scala | 34
-rw-r--r--  core/src/main/scala/org/apache/spark/network/nio/ConnectionManager.scala | 4
-rw-r--r--  core/src/main/scala/org/apache/spark/rdd/PipedRDD.scala | 4
-rw-r--r--  core/src/main/scala/org/apache/spark/scheduler/EventLoggingListener.scala | 2
-rw-r--r--  core/src/main/scala/org/apache/spark/scheduler/JobLogger.scala | 2
-rw-r--r--  core/src/main/scala/org/apache/spark/ui/JettyUtils.scala | 2
-rw-r--r--  core/src/main/scala/org/apache/spark/ui/UIWorkloadGenerator.scala | 6
-rw-r--r--  core/src/main/scala/org/apache/spark/util/Distribution.scala | 6
-rw-r--r--  core/src/main/scala/org/apache/spark/util/random/XORShiftRandom.scala | 2
-rw-r--r--  core/src/test/scala/org/apache/spark/DistributedSuite.scala | 2
-rw-r--r--  core/src/test/scala/org/apache/spark/FailureSuite.scala | 2
-rw-r--r--  core/src/test/scala/org/apache/spark/FileServerSuite.scala | 2
-rw-r--r--  core/src/test/scala/org/apache/spark/ThreadingSuite.scala | 6
-rw-r--r--  core/src/test/scala/org/apache/spark/deploy/SparkSubmitSuite.scala | 4
-rw-r--r--  core/src/test/scala/org/apache/spark/deploy/SparkSubmitUtilsSuite.scala | 2
-rw-r--r--  core/src/test/scala/org/apache/spark/input/WholeTextFileRecordReaderSuite.scala | 8
-rw-r--r--  core/src/test/scala/org/apache/spark/metrics/InputOutputMetricsSuite.scala | 2
-rw-r--r--  core/src/test/scala/org/apache/spark/scheduler/ReplayListenerSuite.scala | 2
-rw-r--r--  core/src/test/scala/org/apache/spark/util/ClosureCleanerSuite.scala | 2
-rw-r--r--  core/src/test/scala/org/apache/spark/util/UtilsSuite.scala | 2
-rw-r--r--  core/src/test/scala/org/apache/spark/util/collection/SizeTrackerSuite.scala | 4
-rw-r--r--  core/src/test/scala/org/apache/spark/util/collection/SorterSuite.scala | 10
39 files changed, 147 insertions(+), 78 deletions(-)
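Where the output is purely diagnostic, the hunks below instead replace println with the methods of the org.apache.spark.Logging trait (logInfo, logError, logDebug), which the affected classes mix in. A minimal sketch of that second pattern, assuming code living inside the Spark source tree where the trait is visible; the class name and messages are illustrative only:

    import org.apache.spark.Logging

    private[spark] class StatusReporter extends Logging {
      def report(driverId: String, found: Boolean): Unit = {
        if (found) {
          logInfo(s"State of $driverId is known")                   // was: println(...)
        } else {
          logError(s"Cluster master did not recognize $driverId")   // was: println("ERROR: ...")
        }
      }
    }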
diff --git a/core/src/main/scala/org/apache/spark/Logging.scala b/core/src/main/scala/org/apache/spark/Logging.scala
index 7fcb7830e7..87ab099267 100644
--- a/core/src/main/scala/org/apache/spark/Logging.scala
+++ b/core/src/main/scala/org/apache/spark/Logging.scala
@@ -121,6 +121,7 @@ trait Logging {
if (usingLog4j12) {
val log4j12Initialized = LogManager.getRootLogger.getAllAppenders.hasMoreElements
if (!log4j12Initialized) {
+ // scalastyle:off println
if (Utils.isInInterpreter) {
val replDefaultLogProps = "org/apache/spark/log4j-defaults-repl.properties"
Option(Utils.getSparkClassLoader.getResource(replDefaultLogProps)) match {
@@ -141,6 +142,7 @@ trait Logging {
System.err.println(s"Spark was unable to load $defaultLogProps")
}
}
+ // scalastyle:on println
}
}
Logging.initialized = true
diff --git a/core/src/main/scala/org/apache/spark/api/r/RBackend.scala b/core/src/main/scala/org/apache/spark/api/r/RBackend.scala
index 1a5f2bca26..b7e72d4d0e 100644
--- a/core/src/main/scala/org/apache/spark/api/r/RBackend.scala
+++ b/core/src/main/scala/org/apache/spark/api/r/RBackend.scala
@@ -95,7 +95,9 @@ private[spark] class RBackend {
private[spark] object RBackend extends Logging {
def main(args: Array[String]): Unit = {
if (args.length < 1) {
+ // scalastyle:off println
System.err.println("Usage: RBackend <tempFilePath>")
+ // scalastyle:on println
System.exit(-1)
}
val sparkRBackend = new RBackend()
diff --git a/core/src/main/scala/org/apache/spark/api/r/RRDD.scala b/core/src/main/scala/org/apache/spark/api/r/RRDD.scala
index 524676544d..ff1702f7de 100644
--- a/core/src/main/scala/org/apache/spark/api/r/RRDD.scala
+++ b/core/src/main/scala/org/apache/spark/api/r/RRDD.scala
@@ -161,7 +161,9 @@ private abstract class BaseRRDD[T: ClassTag, U: ClassTag](
dataOut.write(elem.asInstanceOf[Array[Byte]])
} else if (deserializer == SerializationFormats.STRING) {
// write string(for StringRRDD)
+ // scalastyle:off println
printOut.println(elem)
+ // scalastyle:on println
}
}
diff --git a/core/src/main/scala/org/apache/spark/deploy/Client.scala b/core/src/main/scala/org/apache/spark/deploy/Client.scala
index 71f7e21291..f03875a3e8 100644
--- a/core/src/main/scala/org/apache/spark/deploy/Client.scala
+++ b/core/src/main/scala/org/apache/spark/deploy/Client.scala
@@ -118,26 +118,26 @@ private class ClientEndpoint(
def pollAndReportStatus(driverId: String) {
// Since ClientEndpoint is the only RpcEndpoint in the process, blocking the event loop thread
// is fine.
- println("... waiting before polling master for driver state")
+ logInfo("... waiting before polling master for driver state")
Thread.sleep(5000)
- println("... polling master for driver state")
+ logInfo("... polling master for driver state")
val statusResponse =
activeMasterEndpoint.askWithRetry[DriverStatusResponse](RequestDriverStatus(driverId))
statusResponse.found match {
case false =>
- println(s"ERROR: Cluster master did not recognize $driverId")
+ logError(s"ERROR: Cluster master did not recognize $driverId")
System.exit(-1)
case true =>
- println(s"State of $driverId is ${statusResponse.state.get}")
+ logInfo(s"State of $driverId is ${statusResponse.state.get}")
// Worker node, if present
(statusResponse.workerId, statusResponse.workerHostPort, statusResponse.state) match {
case (Some(id), Some(hostPort), Some(DriverState.RUNNING)) =>
- println(s"Driver running on $hostPort ($id)")
+ logInfo(s"Driver running on $hostPort ($id)")
case _ =>
}
// Exception, if present
statusResponse.exception.map { e =>
- println(s"Exception from cluster was: $e")
+ logError(s"Exception from cluster was: $e")
e.printStackTrace()
System.exit(-1)
}
@@ -148,7 +148,7 @@ private class ClientEndpoint(
override def receive: PartialFunction[Any, Unit] = {
case SubmitDriverResponse(master, success, driverId, message) =>
- println(message)
+ logInfo(message)
if (success) {
activeMasterEndpoint = master
pollAndReportStatus(driverId.get)
@@ -158,7 +158,7 @@ private class ClientEndpoint(
case KillDriverResponse(master, driverId, success, message) =>
- println(message)
+ logInfo(message)
if (success) {
activeMasterEndpoint = master
pollAndReportStatus(driverId)
@@ -169,13 +169,13 @@ private class ClientEndpoint(
override def onDisconnected(remoteAddress: RpcAddress): Unit = {
if (!lostMasters.contains(remoteAddress)) {
- println(s"Error connecting to master $remoteAddress.")
+ logError(s"Error connecting to master $remoteAddress.")
lostMasters += remoteAddress
// Note that this heuristic does not account for the fact that a Master can recover within
// the lifetime of this client. Thus, once a Master is lost it is lost to us forever. This
// is not currently a concern, however, because this client does not retry submissions.
if (lostMasters.size >= masterEndpoints.size) {
- println("No master is available, exiting.")
+ logError("No master is available, exiting.")
System.exit(-1)
}
}
@@ -183,18 +183,18 @@ private class ClientEndpoint(
override def onNetworkError(cause: Throwable, remoteAddress: RpcAddress): Unit = {
if (!lostMasters.contains(remoteAddress)) {
- println(s"Error connecting to master ($remoteAddress).")
- println(s"Cause was: $cause")
+ logError(s"Error connecting to master ($remoteAddress).")
+ logError(s"Cause was: $cause")
lostMasters += remoteAddress
if (lostMasters.size >= masterEndpoints.size) {
- println("No master is available, exiting.")
+ logError("No master is available, exiting.")
System.exit(-1)
}
}
}
override def onError(cause: Throwable): Unit = {
- println(s"Error processing messages, exiting.")
+ logError(s"Error processing messages, exiting.")
cause.printStackTrace()
System.exit(-1)
}
@@ -209,10 +209,12 @@ private class ClientEndpoint(
*/
object Client {
def main(args: Array[String]) {
+ // scalastyle:off println
if (!sys.props.contains("SPARK_SUBMIT")) {
println("WARNING: This client is deprecated and will be removed in a future version of Spark")
println("Use ./bin/spark-submit with \"--master spark://host:port\"")
}
+ // scalastyle:on println
val conf = new SparkConf()
val driverArgs = new ClientArguments(args)
diff --git a/core/src/main/scala/org/apache/spark/deploy/ClientArguments.scala b/core/src/main/scala/org/apache/spark/deploy/ClientArguments.scala
index 42d3296062..72cc330a39 100644
--- a/core/src/main/scala/org/apache/spark/deploy/ClientArguments.scala
+++ b/core/src/main/scala/org/apache/spark/deploy/ClientArguments.scala
@@ -72,9 +72,11 @@ private[deploy] class ClientArguments(args: Array[String]) {
cmd = "launch"
if (!ClientArguments.isValidJarUrl(_jarUrl)) {
+ // scalastyle:off println
println(s"Jar url '${_jarUrl}' is not in valid format.")
println(s"Must be a jar file path in URL format " +
"(e.g. hdfs://host:port/XX.jar, file:///XX.jar)")
+ // scalastyle:on println
printUsageAndExit(-1)
}
@@ -110,7 +112,9 @@ private[deploy] class ClientArguments(args: Array[String]) {
| (default: $DEFAULT_SUPERVISE)
| -v, --verbose Print more debugging output
""".stripMargin
+ // scalastyle:off println
System.err.println(usage)
+ // scalastyle:on println
System.exit(exitCode)
}
}
diff --git a/core/src/main/scala/org/apache/spark/deploy/RRunner.scala b/core/src/main/scala/org/apache/spark/deploy/RRunner.scala
index e99779f299..4165740312 100644
--- a/core/src/main/scala/org/apache/spark/deploy/RRunner.scala
+++ b/core/src/main/scala/org/apache/spark/deploy/RRunner.scala
@@ -85,7 +85,9 @@ object RRunner {
}
System.exit(returnCode)
} else {
+ // scalastyle:off println
System.err.println("SparkR backend did not initialize in " + backendTimeout + " seconds")
+ // scalastyle:on println
System.exit(-1)
}
}
diff --git a/core/src/main/scala/org/apache/spark/deploy/SparkSubmit.scala b/core/src/main/scala/org/apache/spark/deploy/SparkSubmit.scala
index b1d6ec209d..4cec9017b8 100644
--- a/core/src/main/scala/org/apache/spark/deploy/SparkSubmit.scala
+++ b/core/src/main/scala/org/apache/spark/deploy/SparkSubmit.scala
@@ -82,6 +82,7 @@ object SparkSubmit {
private val CLASS_NOT_FOUND_EXIT_STATUS = 101
+ // scalastyle:off println
// Exposed for testing
private[spark] var exitFn: Int => Unit = (exitCode: Int) => System.exit(exitCode)
private[spark] var printStream: PrintStream = System.err
@@ -102,11 +103,14 @@ object SparkSubmit {
printStream.println("Type --help for more information.")
exitFn(0)
}
+ // scalastyle:on println
def main(args: Array[String]): Unit = {
val appArgs = new SparkSubmitArguments(args)
if (appArgs.verbose) {
+ // scalastyle:off println
printStream.println(appArgs)
+ // scalastyle:on println
}
appArgs.action match {
case SparkSubmitAction.SUBMIT => submit(appArgs)
@@ -160,7 +164,9 @@ object SparkSubmit {
// makes the message printed to the output by the JVM not very helpful. Instead,
// detect exceptions with empty stack traces here, and treat them differently.
if (e.getStackTrace().length == 0) {
+ // scalastyle:off println
printStream.println(s"ERROR: ${e.getClass().getName()}: ${e.getMessage()}")
+ // scalastyle:on println
exitFn(1)
} else {
throw e
@@ -178,7 +184,9 @@ object SparkSubmit {
// to use the legacy gateway if the master endpoint turns out to be not a REST server.
if (args.isStandaloneCluster && args.useRest) {
try {
+ // scalastyle:off println
printStream.println("Running Spark using the REST application submission protocol.")
+ // scalastyle:on println
doRunMain()
} catch {
// Fail over to use the legacy submission gateway
@@ -558,6 +566,7 @@ object SparkSubmit {
sysProps: Map[String, String],
childMainClass: String,
verbose: Boolean): Unit = {
+ // scalastyle:off println
if (verbose) {
printStream.println(s"Main class:\n$childMainClass")
printStream.println(s"Arguments:\n${childArgs.mkString("\n")}")
@@ -565,6 +574,7 @@ object SparkSubmit {
printStream.println(s"Classpath elements:\n${childClasspath.mkString("\n")}")
printStream.println("\n")
}
+ // scalastyle:on println
val loader =
if (sysProps.getOrElse("spark.driver.userClassPathFirst", "false").toBoolean) {
@@ -592,8 +602,10 @@ object SparkSubmit {
case e: ClassNotFoundException =>
e.printStackTrace(printStream)
if (childMainClass.contains("thriftserver")) {
+ // scalastyle:off println
printStream.println(s"Failed to load main class $childMainClass.")
printStream.println("You need to build Spark with -Phive and -Phive-thriftserver.")
+ // scalastyle:on println
}
System.exit(CLASS_NOT_FOUND_EXIT_STATUS)
}
@@ -766,7 +778,9 @@ private[spark] object SparkSubmitUtils {
brr.setRoot(repo)
brr.setName(s"repo-${i + 1}")
cr.add(brr)
+ // scalastyle:off println
printStream.println(s"$repo added as a remote repository with the name: ${brr.getName}")
+ // scalastyle:on println
}
}
@@ -829,7 +843,9 @@ private[spark] object SparkSubmitUtils {
val ri = ModuleRevisionId.newInstance(mvn.groupId, mvn.artifactId, mvn.version)
val dd = new DefaultDependencyDescriptor(ri, false, false)
dd.addDependencyConfiguration(ivyConfName, ivyConfName)
+ // scalastyle:off println
printStream.println(s"${dd.getDependencyId} added as a dependency")
+ // scalastyle:on println
md.addDependency(dd)
}
}
@@ -896,9 +912,11 @@ private[spark] object SparkSubmitUtils {
ivySettings.setDefaultCache(new File(alternateIvyCache, "cache"))
new File(alternateIvyCache, "jars")
}
+ // scalastyle:off println
printStream.println(
s"Ivy Default Cache set to: ${ivySettings.getDefaultCache.getAbsolutePath}")
printStream.println(s"The jars for the packages stored in: $packagesDirectory")
+ // scalastyle:on println
// create a pattern matcher
ivySettings.addMatcher(new GlobPatternMatcher)
// create the dependency resolvers
diff --git a/core/src/main/scala/org/apache/spark/deploy/SparkSubmitArguments.scala b/core/src/main/scala/org/apache/spark/deploy/SparkSubmitArguments.scala
index 6e3c0b21b3..ebb39c354d 100644
--- a/core/src/main/scala/org/apache/spark/deploy/SparkSubmitArguments.scala
+++ b/core/src/main/scala/org/apache/spark/deploy/SparkSubmitArguments.scala
@@ -79,6 +79,7 @@ private[deploy] class SparkSubmitArguments(args: Seq[String], env: Map[String, S
/** Default properties present in the currently defined defaults file. */
lazy val defaultSparkProperties: HashMap[String, String] = {
val defaultProperties = new HashMap[String, String]()
+ // scalastyle:off println
if (verbose) SparkSubmit.printStream.println(s"Using properties file: $propertiesFile")
Option(propertiesFile).foreach { filename =>
Utils.getPropertiesFromFile(filename).foreach { case (k, v) =>
@@ -86,6 +87,7 @@ private[deploy] class SparkSubmitArguments(args: Seq[String], env: Map[String, S
if (verbose) SparkSubmit.printStream.println(s"Adding default property: $k=$v")
}
}
+ // scalastyle:on println
defaultProperties
}
@@ -452,6 +454,7 @@ private[deploy] class SparkSubmitArguments(args: Seq[String], env: Map[String, S
}
private def printUsageAndExit(exitCode: Int, unknownParam: Any = null): Unit = {
+ // scalastyle:off println
val outStream = SparkSubmit.printStream
if (unknownParam != null) {
outStream.println("Unknown/unsupported param " + unknownParam)
@@ -541,6 +544,7 @@ private[deploy] class SparkSubmitArguments(args: Seq[String], env: Map[String, S
outStream.println("CLI options:")
outStream.println(getSqlShellOptions())
}
+ // scalastyle:on println
SparkSubmit.exitFn(exitCode)
}
diff --git a/core/src/main/scala/org/apache/spark/deploy/client/TestExecutor.scala b/core/src/main/scala/org/apache/spark/deploy/client/TestExecutor.scala
index c5ac45c673..a98b1fa8f8 100644
--- a/core/src/main/scala/org/apache/spark/deploy/client/TestExecutor.scala
+++ b/core/src/main/scala/org/apache/spark/deploy/client/TestExecutor.scala
@@ -19,7 +19,9 @@ package org.apache.spark.deploy.client
private[spark] object TestExecutor {
def main(args: Array[String]) {
+ // scalastyle:off println
println("Hello world!")
+ // scalastyle:on println
while (true) {
Thread.sleep(1000)
}
diff --git a/core/src/main/scala/org/apache/spark/deploy/history/HistoryServerArguments.scala b/core/src/main/scala/org/apache/spark/deploy/history/HistoryServerArguments.scala
index 4692d22651..18265df9fa 100644
--- a/core/src/main/scala/org/apache/spark/deploy/history/HistoryServerArguments.scala
+++ b/core/src/main/scala/org/apache/spark/deploy/history/HistoryServerArguments.scala
@@ -56,6 +56,7 @@ private[history] class HistoryServerArguments(conf: SparkConf, args: Array[Strin
Utils.loadDefaultSparkProperties(conf, propertiesFile)
private def printUsageAndExit(exitCode: Int) {
+ // scalastyle:off println
System.err.println(
"""
|Usage: HistoryServer [options]
@@ -84,6 +85,7 @@ private[history] class HistoryServerArguments(conf: SparkConf, args: Array[Strin
| spark.history.fs.updateInterval How often to reload log data from storage
| (in seconds, default: 10)
|""".stripMargin)
+ // scalastyle:on println
System.exit(exitCode)
}
diff --git a/core/src/main/scala/org/apache/spark/deploy/master/MasterArguments.scala b/core/src/main/scala/org/apache/spark/deploy/master/MasterArguments.scala
index 435b9b12f8..44cefbc77f 100644
--- a/core/src/main/scala/org/apache/spark/deploy/master/MasterArguments.scala
+++ b/core/src/main/scala/org/apache/spark/deploy/master/MasterArguments.scala
@@ -85,6 +85,7 @@ private[master] class MasterArguments(args: Array[String], conf: SparkConf) {
* Print usage and exit JVM with the given exit code.
*/
private def printUsageAndExit(exitCode: Int) {
+ // scalastyle:off println
System.err.println(
"Usage: Master [options]\n" +
"\n" +
@@ -95,6 +96,7 @@ private[master] class MasterArguments(args: Array[String], conf: SparkConf) {
" --webui-port PORT Port for web UI (default: 8080)\n" +
" --properties-file FILE Path to a custom Spark properties file.\n" +
" Default is conf/spark-defaults.conf.")
+ // scalastyle:on println
System.exit(exitCode)
}
}
diff --git a/core/src/main/scala/org/apache/spark/deploy/mesos/MesosClusterDispatcherArguments.scala b/core/src/main/scala/org/apache/spark/deploy/mesos/MesosClusterDispatcherArguments.scala
index 894cb78d85..5accaf78d0 100644
--- a/core/src/main/scala/org/apache/spark/deploy/mesos/MesosClusterDispatcherArguments.scala
+++ b/core/src/main/scala/org/apache/spark/deploy/mesos/MesosClusterDispatcherArguments.scala
@@ -54,7 +54,9 @@ private[mesos] class MesosClusterDispatcherArguments(args: Array[String], conf:
case ("--master" | "-m") :: value :: tail =>
if (!value.startsWith("mesos://")) {
+ // scalastyle:off println
System.err.println("Cluster dispatcher only supports mesos (uri begins with mesos://)")
+ // scalastyle:on println
System.exit(1)
}
masterUrl = value.stripPrefix("mesos://")
@@ -73,7 +75,9 @@ private[mesos] class MesosClusterDispatcherArguments(args: Array[String], conf:
case Nil => {
if (masterUrl == null) {
+ // scalastyle:off println
System.err.println("--master is required")
+ // scalastyle:on println
printUsageAndExit(1)
}
}
@@ -83,6 +87,7 @@ private[mesos] class MesosClusterDispatcherArguments(args: Array[String], conf:
}
private def printUsageAndExit(exitCode: Int): Unit = {
+ // scalastyle:off println
System.err.println(
"Usage: MesosClusterDispatcher [options]\n" +
"\n" +
@@ -96,6 +101,7 @@ private[mesos] class MesosClusterDispatcherArguments(args: Array[String], conf:
" Zookeeper for persistence\n" +
" --properties-file FILE Path to a custom Spark properties file.\n" +
" Default is conf/spark-defaults.conf.")
+ // scalastyle:on println
System.exit(exitCode)
}
}
diff --git a/core/src/main/scala/org/apache/spark/deploy/worker/DriverWrapper.scala b/core/src/main/scala/org/apache/spark/deploy/worker/DriverWrapper.scala
index d1a12b01e7..2d6be3042c 100644
--- a/core/src/main/scala/org/apache/spark/deploy/worker/DriverWrapper.scala
+++ b/core/src/main/scala/org/apache/spark/deploy/worker/DriverWrapper.scala
@@ -60,7 +60,9 @@ object DriverWrapper {
rpcEnv.shutdown()
case _ =>
+ // scalastyle:off println
System.err.println("Usage: DriverWrapper <workerUrl> <userJar> <driverMainClass> [options]")
+ // scalastyle:on println
System.exit(-1)
}
}
diff --git a/core/src/main/scala/org/apache/spark/deploy/worker/WorkerArguments.scala b/core/src/main/scala/org/apache/spark/deploy/worker/WorkerArguments.scala
index 1d2ecab517..e89d076802 100644
--- a/core/src/main/scala/org/apache/spark/deploy/worker/WorkerArguments.scala
+++ b/core/src/main/scala/org/apache/spark/deploy/worker/WorkerArguments.scala
@@ -121,6 +121,7 @@ private[worker] class WorkerArguments(args: Array[String], conf: SparkConf) {
* Print usage and exit JVM with the given exit code.
*/
def printUsageAndExit(exitCode: Int) {
+ // scalastyle:off println
System.err.println(
"Usage: Worker [options] <master>\n" +
"\n" +
@@ -136,6 +137,7 @@ private[worker] class WorkerArguments(args: Array[String], conf: SparkConf) {
" --webui-port PORT Port for web UI (default: 8081)\n" +
" --properties-file FILE Path to a custom Spark properties file.\n" +
" Default is conf/spark-defaults.conf.")
+ // scalastyle:on println
System.exit(exitCode)
}
@@ -160,7 +162,9 @@ private[worker] class WorkerArguments(args: Array[String], conf: SparkConf) {
} catch {
case e: Exception => {
totalMb = 2*1024
+ // scalastyle:off println
System.out.println("Failed to get total physical memory. Using " + totalMb + " MB")
+ // scalastyle:on println
}
}
// Leave out 1 GB for the operating system, but don't return a negative memory size
diff --git a/core/src/main/scala/org/apache/spark/executor/CoarseGrainedExecutorBackend.scala b/core/src/main/scala/org/apache/spark/executor/CoarseGrainedExecutorBackend.scala
index 34d4cfdca7..fcd76ec527 100644
--- a/core/src/main/scala/org/apache/spark/executor/CoarseGrainedExecutorBackend.scala
+++ b/core/src/main/scala/org/apache/spark/executor/CoarseGrainedExecutorBackend.scala
@@ -235,7 +235,9 @@ private[spark] object CoarseGrainedExecutorBackend extends Logging {
argv = tail
case Nil =>
case tail =>
+ // scalastyle:off println
System.err.println(s"Unrecognized options: ${tail.mkString(" ")}")
+ // scalastyle:on println
printUsageAndExit()
}
}
@@ -249,6 +251,7 @@ private[spark] object CoarseGrainedExecutorBackend extends Logging {
}
private def printUsageAndExit() = {
+ // scalastyle:off println
System.err.println(
"""
|"Usage: CoarseGrainedExecutorBackend [options]
@@ -262,6 +265,7 @@ private[spark] object CoarseGrainedExecutorBackend extends Logging {
| --worker-url <workerUrl>
| --user-class-path <url>
|""".stripMargin)
+ // scalastyle:on println
System.exit(1)
}
diff --git a/core/src/main/scala/org/apache/spark/input/FixedLengthBinaryInputFormat.scala b/core/src/main/scala/org/apache/spark/input/FixedLengthBinaryInputFormat.scala
index c219d21fbe..532850dd57 100644
--- a/core/src/main/scala/org/apache/spark/input/FixedLengthBinaryInputFormat.scala
+++ b/core/src/main/scala/org/apache/spark/input/FixedLengthBinaryInputFormat.scala
@@ -21,6 +21,8 @@ import org.apache.hadoop.fs.Path
import org.apache.hadoop.io.{BytesWritable, LongWritable}
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat
import org.apache.hadoop.mapreduce.{InputSplit, JobContext, RecordReader, TaskAttemptContext}
+
+import org.apache.spark.Logging
import org.apache.spark.deploy.SparkHadoopUtil
/**
@@ -39,7 +41,8 @@ private[spark] object FixedLengthBinaryInputFormat {
}
private[spark] class FixedLengthBinaryInputFormat
- extends FileInputFormat[LongWritable, BytesWritable] {
+ extends FileInputFormat[LongWritable, BytesWritable]
+ with Logging {
private var recordLength = -1
@@ -51,7 +54,7 @@ private[spark] class FixedLengthBinaryInputFormat
recordLength = FixedLengthBinaryInputFormat.getRecordLength(context)
}
if (recordLength <= 0) {
- println("record length is less than 0, file cannot be split")
+ logDebug("record length is less than 0, file cannot be split")
false
} else {
true
diff --git a/core/src/main/scala/org/apache/spark/network/nio/BlockMessage.scala b/core/src/main/scala/org/apache/spark/network/nio/BlockMessage.scala
index 67a3761029..79cb0640c8 100644
--- a/core/src/main/scala/org/apache/spark/network/nio/BlockMessage.scala
+++ b/core/src/main/scala/org/apache/spark/network/nio/BlockMessage.scala
@@ -57,16 +57,6 @@ private[nio] class BlockMessage() {
}
def set(buffer: ByteBuffer) {
- /*
- println()
- println("BlockMessage: ")
- while(buffer.remaining > 0) {
- print(buffer.get())
- }
- buffer.rewind()
- println()
- println()
- */
typ = buffer.getInt()
val idLength = buffer.getInt()
val idBuilder = new StringBuilder(idLength)
@@ -138,18 +128,6 @@ private[nio] class BlockMessage() {
buffers += data
}
- /*
- println()
- println("BlockMessage: ")
- buffers.foreach(b => {
- while(b.remaining > 0) {
- print(b.get())
- }
- b.rewind()
- })
- println()
- println()
- */
Message.createBufferMessage(buffers)
}
diff --git a/core/src/main/scala/org/apache/spark/network/nio/BlockMessageArray.scala b/core/src/main/scala/org/apache/spark/network/nio/BlockMessageArray.scala
index 7d0806f0c2..f1c9ea8b64 100644
--- a/core/src/main/scala/org/apache/spark/network/nio/BlockMessageArray.scala
+++ b/core/src/main/scala/org/apache/spark/network/nio/BlockMessageArray.scala
@@ -43,16 +43,6 @@ class BlockMessageArray(var blockMessages: Seq[BlockMessage])
val newBlockMessages = new ArrayBuffer[BlockMessage]()
val buffer = bufferMessage.buffers(0)
buffer.clear()
- /*
- println()
- println("BlockMessageArray: ")
- while(buffer.remaining > 0) {
- print(buffer.get())
- }
- buffer.rewind()
- println()
- println()
- */
while (buffer.remaining() > 0) {
val size = buffer.getInt()
logDebug("Creating block message of size " + size + " bytes")
@@ -86,23 +76,11 @@ class BlockMessageArray(var blockMessages: Seq[BlockMessage])
logDebug("Buffer list:")
buffers.foreach((x: ByteBuffer) => logDebug("" + x))
- /*
- println()
- println("BlockMessageArray: ")
- buffers.foreach(b => {
- while(b.remaining > 0) {
- print(b.get())
- }
- b.rewind()
- })
- println()
- println()
- */
Message.createBufferMessage(buffers)
}
}
-private[nio] object BlockMessageArray {
+private[nio] object BlockMessageArray extends Logging {
def fromBufferMessage(bufferMessage: BufferMessage): BlockMessageArray = {
val newBlockMessageArray = new BlockMessageArray()
@@ -123,10 +101,10 @@ private[nio] object BlockMessageArray {
}
}
val blockMessageArray = new BlockMessageArray(blockMessages)
- println("Block message array created")
+ logDebug("Block message array created")
val bufferMessage = blockMessageArray.toBufferMessage
- println("Converted to buffer message")
+ logDebug("Converted to buffer message")
val totalSize = bufferMessage.size
val newBuffer = ByteBuffer.allocate(totalSize)
@@ -138,10 +116,11 @@ private[nio] object BlockMessageArray {
})
newBuffer.flip
val newBufferMessage = Message.createBufferMessage(newBuffer)
- println("Copied to new buffer message, size = " + newBufferMessage.size)
+ logDebug("Copied to new buffer message, size = " + newBufferMessage.size)
val newBlockMessageArray = BlockMessageArray.fromBufferMessage(newBufferMessage)
- println("Converted back to block message array")
+ logDebug("Converted back to block message array")
+ // scalastyle:off println
newBlockMessageArray.foreach(blockMessage => {
blockMessage.getType match {
case BlockMessage.TYPE_PUT_BLOCK => {
@@ -154,6 +133,7 @@ private[nio] object BlockMessageArray {
}
}
})
+ // scalastyle:on println
}
}
diff --git a/core/src/main/scala/org/apache/spark/network/nio/ConnectionManager.scala b/core/src/main/scala/org/apache/spark/network/nio/ConnectionManager.scala
index c0bca2c4bc..9143918790 100644
--- a/core/src/main/scala/org/apache/spark/network/nio/ConnectionManager.scala
+++ b/core/src/main/scala/org/apache/spark/network/nio/ConnectionManager.scala
@@ -1016,7 +1016,9 @@ private[spark] object ConnectionManager {
val conf = new SparkConf
val manager = new ConnectionManager(9999, conf, new SecurityManager(conf))
manager.onReceiveMessage((msg: Message, id: ConnectionManagerId) => {
+ // scalastyle:off println
println("Received [" + msg + "] from [" + id + "]")
+ // scalastyle:on println
None
})
@@ -1033,6 +1035,7 @@ private[spark] object ConnectionManager {
System.gc()
}
+ // scalastyle:off println
def testSequentialSending(manager: ConnectionManager) {
println("--------------------------")
println("Sequential Sending")
@@ -1150,4 +1153,5 @@ private[spark] object ConnectionManager {
println()
}
}
+ // scalastyle:on println
}
diff --git a/core/src/main/scala/org/apache/spark/rdd/PipedRDD.scala b/core/src/main/scala/org/apache/spark/rdd/PipedRDD.scala
index dc60d48927..defdabf95a 100644
--- a/core/src/main/scala/org/apache/spark/rdd/PipedRDD.scala
+++ b/core/src/main/scala/org/apache/spark/rdd/PipedRDD.scala
@@ -123,7 +123,9 @@ private[spark] class PipedRDD[T: ClassTag](
new Thread("stderr reader for " + command) {
override def run() {
for (line <- Source.fromInputStream(proc.getErrorStream).getLines) {
+ // scalastyle:off println
System.err.println(line)
+ // scalastyle:on println
}
}
}.start()
@@ -133,6 +135,7 @@ private[spark] class PipedRDD[T: ClassTag](
override def run() {
val out = new PrintWriter(proc.getOutputStream)
+ // scalastyle:off println
// input the pipe context firstly
if (printPipeContext != null) {
printPipeContext(out.println(_))
@@ -144,6 +147,7 @@ private[spark] class PipedRDD[T: ClassTag](
out.println(elem)
}
}
+ // scalastyle:on println
out.close()
}
}.start()
diff --git a/core/src/main/scala/org/apache/spark/scheduler/EventLoggingListener.scala b/core/src/main/scala/org/apache/spark/scheduler/EventLoggingListener.scala
index 529a5b2bf1..62b05033a9 100644
--- a/core/src/main/scala/org/apache/spark/scheduler/EventLoggingListener.scala
+++ b/core/src/main/scala/org/apache/spark/scheduler/EventLoggingListener.scala
@@ -140,7 +140,9 @@ private[spark] class EventLoggingListener(
/** Log the event as JSON. */
private def logEvent(event: SparkListenerEvent, flushLogger: Boolean = false) {
val eventJson = JsonProtocol.sparkEventToJson(event)
+ // scalastyle:off println
writer.foreach(_.println(compact(render(eventJson))))
+ // scalastyle:on println
if (flushLogger) {
writer.foreach(_.flush())
hadoopDataStream.foreach(hadoopFlushMethod.invoke(_))
diff --git a/core/src/main/scala/org/apache/spark/scheduler/JobLogger.scala b/core/src/main/scala/org/apache/spark/scheduler/JobLogger.scala
index e55b76c36c..f96eb8ca0a 100644
--- a/core/src/main/scala/org/apache/spark/scheduler/JobLogger.scala
+++ b/core/src/main/scala/org/apache/spark/scheduler/JobLogger.scala
@@ -125,7 +125,9 @@ class JobLogger(val user: String, val logDirName: String) extends SparkListener
val date = new Date(System.currentTimeMillis())
writeInfo = dateFormat.get.format(date) + ": " + info
}
+ // scalastyle:off println
jobIdToPrintWriter.get(jobId).foreach(_.println(writeInfo))
+ // scalastyle:on println
}
/**
diff --git a/core/src/main/scala/org/apache/spark/ui/JettyUtils.scala b/core/src/main/scala/org/apache/spark/ui/JettyUtils.scala
index f413c1d37f..c8356467fa 100644
--- a/core/src/main/scala/org/apache/spark/ui/JettyUtils.scala
+++ b/core/src/main/scala/org/apache/spark/ui/JettyUtils.scala
@@ -68,7 +68,9 @@ private[spark] object JettyUtils extends Logging {
response.setStatus(HttpServletResponse.SC_OK)
val result = servletParams.responder(request)
response.setHeader("Cache-Control", "no-cache, no-store, must-revalidate")
+ // scalastyle:off println
response.getWriter.println(servletParams.extractFn(result))
+ // scalastyle:on println
} else {
response.setStatus(HttpServletResponse.SC_UNAUTHORIZED)
response.setHeader("Cache-Control", "no-cache, no-store, must-revalidate")
diff --git a/core/src/main/scala/org/apache/spark/ui/UIWorkloadGenerator.scala b/core/src/main/scala/org/apache/spark/ui/UIWorkloadGenerator.scala
index ba03acdb38..5a8c291431 100644
--- a/core/src/main/scala/org/apache/spark/ui/UIWorkloadGenerator.scala
+++ b/core/src/main/scala/org/apache/spark/ui/UIWorkloadGenerator.scala
@@ -38,9 +38,11 @@ private[spark] object UIWorkloadGenerator {
def main(args: Array[String]) {
if (args.length < 3) {
+ // scalastyle:off println
println(
- "usage: ./bin/spark-class org.apache.spark.ui.UIWorkloadGenerator " +
+ "Usage: ./bin/spark-class org.apache.spark.ui.UIWorkloadGenerator " +
"[master] [FIFO|FAIR] [#job set (4 jobs per set)]")
+ // scalastyle:on println
System.exit(1)
}
@@ -96,6 +98,7 @@ private[spark] object UIWorkloadGenerator {
for ((desc, job) <- jobs) {
new Thread {
override def run() {
+ // scalastyle:off println
try {
setProperties(desc)
job()
@@ -106,6 +109,7 @@ private[spark] object UIWorkloadGenerator {
} finally {
barrier.release()
}
+ // scalastyle:on println
}
}.start
Thread.sleep(INTER_JOB_WAIT_MS)
diff --git a/core/src/main/scala/org/apache/spark/util/Distribution.scala b/core/src/main/scala/org/apache/spark/util/Distribution.scala
index 1bab707235..950b69f7db 100644
--- a/core/src/main/scala/org/apache/spark/util/Distribution.scala
+++ b/core/src/main/scala/org/apache/spark/util/Distribution.scala
@@ -52,9 +52,11 @@ private[spark] class Distribution(val data: Array[Double], val startIdx: Int, va
}
def showQuantiles(out: PrintStream = System.out): Unit = {
+ // scalastyle:off println
out.println("min\t25%\t50%\t75%\tmax")
getQuantiles(defaultProbabilities).foreach{q => out.print(q + "\t")}
out.println
+ // scalastyle:on println
}
def statCounter: StatCounter = StatCounter(data.slice(startIdx, endIdx))
@@ -64,8 +66,10 @@ private[spark] class Distribution(val data: Array[Double], val startIdx: Int, va
* @param out
*/
def summary(out: PrintStream = System.out) {
+ // scalastyle:off println
out.println(statCounter)
showQuantiles(out)
+ // scalastyle:on println
}
}
@@ -80,8 +84,10 @@ private[spark] object Distribution {
}
def showQuantiles(out: PrintStream = System.out, quantiles: Traversable[Double]) {
+ // scalastyle:off println
out.println("min\t25%\t50%\t75%\tmax")
quantiles.foreach{q => out.print(q + "\t")}
out.println
+ // scalastyle:on println
}
}
diff --git a/core/src/main/scala/org/apache/spark/util/random/XORShiftRandom.scala b/core/src/main/scala/org/apache/spark/util/random/XORShiftRandom.scala
index c4a7b4441c..85fb923cd9 100644
--- a/core/src/main/scala/org/apache/spark/util/random/XORShiftRandom.scala
+++ b/core/src/main/scala/org/apache/spark/util/random/XORShiftRandom.scala
@@ -70,12 +70,14 @@ private[spark] object XORShiftRandom {
* @param args takes one argument - the number of random numbers to generate
*/
def main(args: Array[String]): Unit = {
+ // scalastyle:off println
if (args.length != 1) {
println("Benchmark of XORShiftRandom vis-a-vis java.util.Random")
println("Usage: XORShiftRandom number_of_random_numbers_to_generate")
System.exit(1)
}
println(benchmark(args(0).toInt))
+ // scalastyle:on println
}
/**
diff --git a/core/src/test/scala/org/apache/spark/DistributedSuite.scala b/core/src/test/scala/org/apache/spark/DistributedSuite.scala
index 9c191ed522..2300bcff4f 100644
--- a/core/src/test/scala/org/apache/spark/DistributedSuite.scala
+++ b/core/src/test/scala/org/apache/spark/DistributedSuite.scala
@@ -107,7 +107,9 @@ class DistributedSuite extends SparkFunSuite with Matchers with LocalSparkContex
sc = new SparkContext(clusterUrl, "test")
val accum = sc.accumulator(0)
val thrown = intercept[SparkException] {
+ // scalastyle:off println
sc.parallelize(1 to 10, 10).foreach(x => println(x / 0))
+ // scalastyle:on println
}
assert(thrown.getClass === classOf[SparkException])
assert(thrown.getMessage.contains("failed 4 times"))
diff --git a/core/src/test/scala/org/apache/spark/FailureSuite.scala b/core/src/test/scala/org/apache/spark/FailureSuite.scala
index a8c8c6f73f..b099cd3fb7 100644
--- a/core/src/test/scala/org/apache/spark/FailureSuite.scala
+++ b/core/src/test/scala/org/apache/spark/FailureSuite.scala
@@ -130,7 +130,9 @@ class FailureSuite extends SparkFunSuite with LocalSparkContext {
// Non-serializable closure in foreach function
val thrown2 = intercept[SparkException] {
+ // scalastyle:off println
sc.parallelize(1 to 10, 2).foreach(x => println(a))
+ // scalastyle:on println
}
assert(thrown2.getClass === classOf[SparkException])
assert(thrown2.getMessage.contains("NotSerializableException") ||
diff --git a/core/src/test/scala/org/apache/spark/FileServerSuite.scala b/core/src/test/scala/org/apache/spark/FileServerSuite.scala
index 6e65b0a8f6..876418aa13 100644
--- a/core/src/test/scala/org/apache/spark/FileServerSuite.scala
+++ b/core/src/test/scala/org/apache/spark/FileServerSuite.scala
@@ -51,7 +51,9 @@ class FileServerSuite extends SparkFunSuite with LocalSparkContext {
val textFile = new File(testTempDir, "FileServerSuite.txt")
val pw = new PrintWriter(textFile)
+ // scalastyle:off println
pw.println("100")
+ // scalastyle:on println
pw.close()
val jarFile = new File(testTempDir, "test.jar")
diff --git a/core/src/test/scala/org/apache/spark/ThreadingSuite.scala b/core/src/test/scala/org/apache/spark/ThreadingSuite.scala
index 6580139df6..48509f0759 100644
--- a/core/src/test/scala/org/apache/spark/ThreadingSuite.scala
+++ b/core/src/test/scala/org/apache/spark/ThreadingSuite.scala
@@ -36,7 +36,7 @@ object ThreadingSuiteState {
}
}
-class ThreadingSuite extends SparkFunSuite with LocalSparkContext {
+class ThreadingSuite extends SparkFunSuite with LocalSparkContext with Logging {
test("accessing SparkContext form a different thread") {
sc = new SparkContext("local", "test")
@@ -130,8 +130,6 @@ class ThreadingSuite extends SparkFunSuite with LocalSparkContext {
Thread.sleep(100)
}
if (running.get() != 4) {
- println("Waited 1 second without seeing runningThreads = 4 (it was " +
- running.get() + "); failing test")
ThreadingSuiteState.failed.set(true)
}
number
@@ -143,6 +141,8 @@ class ThreadingSuite extends SparkFunSuite with LocalSparkContext {
}
sem.acquire(2)
if (ThreadingSuiteState.failed.get()) {
+ logError("Waited 1 second without seeing runningThreads = 4 (it was " +
+ ThreadingSuiteState.runningThreads.get() + "); failing test")
fail("One or more threads didn't see runningThreads = 4")
}
}
diff --git a/core/src/test/scala/org/apache/spark/deploy/SparkSubmitSuite.scala b/core/src/test/scala/org/apache/spark/deploy/SparkSubmitSuite.scala
index 2e05dec99b..1b64c329b5 100644
--- a/core/src/test/scala/org/apache/spark/deploy/SparkSubmitSuite.scala
+++ b/core/src/test/scala/org/apache/spark/deploy/SparkSubmitSuite.scala
@@ -51,9 +51,11 @@ class SparkSubmitSuite
/** Simple PrintStream that reads data into a buffer */
private class BufferPrintStream extends PrintStream(noOpOutputStream) {
var lineBuffer = ArrayBuffer[String]()
+ // scalastyle:off println
override def println(line: String) {
lineBuffer += line
}
+ // scalastyle:on println
}
/** Returns true if the script exits and the given search string is printed. */
@@ -81,6 +83,7 @@ class SparkSubmitSuite
}
}
+ // scalastyle:off println
test("prints usage on empty input") {
testPrematureExit(Array[String](), "Usage: spark-submit")
}
@@ -491,6 +494,7 @@ class SparkSubmitSuite
appArgs.executorMemory should be ("2.3g")
}
}
+ // scalastyle:on println
// NOTE: This is an expensive operation in terms of time (10 seconds+). Use sparingly.
private def runSparkSubmit(args: Seq[String]): Unit = {
diff --git a/core/src/test/scala/org/apache/spark/deploy/SparkSubmitUtilsSuite.scala b/core/src/test/scala/org/apache/spark/deploy/SparkSubmitUtilsSuite.scala
index c9b435a922..01ece1a10f 100644
--- a/core/src/test/scala/org/apache/spark/deploy/SparkSubmitUtilsSuite.scala
+++ b/core/src/test/scala/org/apache/spark/deploy/SparkSubmitUtilsSuite.scala
@@ -41,9 +41,11 @@ class SparkSubmitUtilsSuite extends SparkFunSuite with BeforeAndAfterAll {
/** Simple PrintStream that reads data into a buffer */
private class BufferPrintStream extends PrintStream(noOpOutputStream) {
var lineBuffer = ArrayBuffer[String]()
+ // scalastyle:off println
override def println(line: String) {
lineBuffer += line
}
+ // scalastyle:on println
}
override def beforeAll() {
diff --git a/core/src/test/scala/org/apache/spark/input/WholeTextFileRecordReaderSuite.scala b/core/src/test/scala/org/apache/spark/input/WholeTextFileRecordReaderSuite.scala
index 63947df3d4..8a199459c1 100644
--- a/core/src/test/scala/org/apache/spark/input/WholeTextFileRecordReaderSuite.scala
+++ b/core/src/test/scala/org/apache/spark/input/WholeTextFileRecordReaderSuite.scala
@@ -27,7 +27,7 @@ import org.scalatest.BeforeAndAfterAll
import org.apache.hadoop.io.Text
-import org.apache.spark.{SparkConf, SparkContext, SparkFunSuite}
+import org.apache.spark.{Logging, SparkConf, SparkContext, SparkFunSuite}
import org.apache.spark.util.Utils
import org.apache.hadoop.io.compress.{DefaultCodec, CompressionCodecFactory, GzipCodec}
@@ -36,7 +36,7 @@ import org.apache.hadoop.io.compress.{DefaultCodec, CompressionCodecFactory, Gzi
* [[org.apache.spark.input.WholeTextFileRecordReader WholeTextFileRecordReader]]. A temporary
* directory is created as fake input. Temporal storage would be deleted in the end.
*/
-class WholeTextFileRecordReaderSuite extends SparkFunSuite with BeforeAndAfterAll {
+class WholeTextFileRecordReaderSuite extends SparkFunSuite with BeforeAndAfterAll with Logging {
private var sc: SparkContext = _
private var factory: CompressionCodecFactory = _
@@ -85,7 +85,7 @@ class WholeTextFileRecordReaderSuite extends SparkFunSuite with BeforeAndAfterAl
*/
test("Correctness of WholeTextFileRecordReader.") {
val dir = Utils.createTempDir()
- println(s"Local disk address is ${dir.toString}.")
+ logInfo(s"Local disk address is ${dir.toString}.")
WholeTextFileRecordReaderSuite.files.foreach { case (filename, contents) =>
createNativeFile(dir, filename, contents, false)
@@ -109,7 +109,7 @@ class WholeTextFileRecordReaderSuite extends SparkFunSuite with BeforeAndAfterAl
test("Correctness of WholeTextFileRecordReader with GzipCodec.") {
val dir = Utils.createTempDir()
- println(s"Local disk address is ${dir.toString}.")
+ logInfo(s"Local disk address is ${dir.toString}.")
WholeTextFileRecordReaderSuite.files.foreach { case (filename, contents) =>
createNativeFile(dir, filename, contents, true)
diff --git a/core/src/test/scala/org/apache/spark/metrics/InputOutputMetricsSuite.scala b/core/src/test/scala/org/apache/spark/metrics/InputOutputMetricsSuite.scala
index 9e4d34fb7d..d3218a548e 100644
--- a/core/src/test/scala/org/apache/spark/metrics/InputOutputMetricsSuite.scala
+++ b/core/src/test/scala/org/apache/spark/metrics/InputOutputMetricsSuite.scala
@@ -60,7 +60,9 @@ class InputOutputMetricsSuite extends SparkFunSuite with SharedSparkContext
tmpFile = new File(testTempDir, getClass.getSimpleName + ".txt")
val pw = new PrintWriter(new FileWriter(tmpFile))
for (x <- 1 to numRecords) {
+ // scalastyle:off println
pw.println(RandomUtils.nextInt(0, numBuckets))
+ // scalastyle:on println
}
pw.close()
diff --git a/core/src/test/scala/org/apache/spark/scheduler/ReplayListenerSuite.scala b/core/src/test/scala/org/apache/spark/scheduler/ReplayListenerSuite.scala
index ff3fa95ec3..4e3defb43a 100644
--- a/core/src/test/scala/org/apache/spark/scheduler/ReplayListenerSuite.scala
+++ b/core/src/test/scala/org/apache/spark/scheduler/ReplayListenerSuite.scala
@@ -52,8 +52,10 @@ class ReplayListenerSuite extends SparkFunSuite with BeforeAndAfter {
val applicationStart = SparkListenerApplicationStart("Greatest App (N)ever", None,
125L, "Mickey", None)
val applicationEnd = SparkListenerApplicationEnd(1000L)
+ // scalastyle:off println
writer.println(compact(render(JsonProtocol.sparkEventToJson(applicationStart))))
writer.println(compact(render(JsonProtocol.sparkEventToJson(applicationEnd))))
+ // scalastyle:on println
writer.close()
val conf = EventLoggingListenerSuite.getLoggingConf(logFilePath)
diff --git a/core/src/test/scala/org/apache/spark/util/ClosureCleanerSuite.scala b/core/src/test/scala/org/apache/spark/util/ClosureCleanerSuite.scala
index 1053c6caf7..480722a5ac 100644
--- a/core/src/test/scala/org/apache/spark/util/ClosureCleanerSuite.scala
+++ b/core/src/test/scala/org/apache/spark/util/ClosureCleanerSuite.scala
@@ -375,6 +375,7 @@ class TestCreateNullValue {
// parameters of the closure constructor. This allows us to test whether
// null values are created correctly for each type.
val nestedClosure = () => {
+ // scalastyle:off println
if (s.toString == "123") { // Don't really output them to avoid noisy
println(bo)
println(c)
@@ -389,6 +390,7 @@ class TestCreateNullValue {
val closure = () => {
println(getX)
}
+ // scalastyle:on println
ClosureCleaner.clean(closure)
}
nestedClosure()
diff --git a/core/src/test/scala/org/apache/spark/util/UtilsSuite.scala b/core/src/test/scala/org/apache/spark/util/UtilsSuite.scala
index 251a797dc2..c7638507c8 100644
--- a/core/src/test/scala/org/apache/spark/util/UtilsSuite.scala
+++ b/core/src/test/scala/org/apache/spark/util/UtilsSuite.scala
@@ -684,7 +684,9 @@ class UtilsSuite extends SparkFunSuite with ResetSystemProperties with Logging {
val buffer = new CircularBuffer(25)
val stream = new java.io.PrintStream(buffer, true, "UTF-8")
+ // scalastyle:off println
stream.println("test circular test circular test circular test circular test circular")
+ // scalastyle:on println
assert(buffer.toString === "t circular test circular\n")
}
}
diff --git a/core/src/test/scala/org/apache/spark/util/collection/SizeTrackerSuite.scala b/core/src/test/scala/org/apache/spark/util/collection/SizeTrackerSuite.scala
index 5a5919fca2..4f382414a8 100644
--- a/core/src/test/scala/org/apache/spark/util/collection/SizeTrackerSuite.scala
+++ b/core/src/test/scala/org/apache/spark/util/collection/SizeTrackerSuite.scala
@@ -103,7 +103,9 @@ private object SizeTrackerSuite {
*/
def main(args: Array[String]): Unit = {
if (args.size < 1) {
+ // scalastyle:off println
println("Usage: SizeTrackerSuite [num elements]")
+ // scalastyle:on println
System.exit(1)
}
val numElements = args(0).toInt
@@ -180,11 +182,13 @@ private object SizeTrackerSuite {
baseTimes: Seq[Long],
sampledTimes: Seq[Long],
unsampledTimes: Seq[Long]): Unit = {
+ // scalastyle:off println
println(s"Average times for $testName (ms):")
println(" Base - " + averageTime(baseTimes))
println(" SizeTracker (sampled) - " + averageTime(sampledTimes))
println(" SizeEstimator (unsampled) - " + averageTime(unsampledTimes))
println()
+ // scalastyle:on println
}
def time(f: => Unit): Long = {
diff --git a/core/src/test/scala/org/apache/spark/util/collection/SorterSuite.scala b/core/src/test/scala/org/apache/spark/util/collection/SorterSuite.scala
index b2f5d9009e..fefa5165db 100644
--- a/core/src/test/scala/org/apache/spark/util/collection/SorterSuite.scala
+++ b/core/src/test/scala/org/apache/spark/util/collection/SorterSuite.scala
@@ -20,10 +20,10 @@ package org.apache.spark.util.collection
import java.lang.{Float => JFloat, Integer => JInteger}
import java.util.{Arrays, Comparator}
-import org.apache.spark.SparkFunSuite
+import org.apache.spark.{Logging, SparkFunSuite}
import org.apache.spark.util.random.XORShiftRandom
-class SorterSuite extends SparkFunSuite {
+class SorterSuite extends SparkFunSuite with Logging {
test("equivalent to Arrays.sort") {
val rand = new XORShiftRandom(123)
@@ -74,7 +74,7 @@ class SorterSuite extends SparkFunSuite {
/** Runs an experiment several times. */
def runExperiment(name: String, skip: Boolean = false)(f: => Unit, prepare: () => Unit): Unit = {
if (skip) {
- println(s"Skipped experiment $name.")
+ logInfo(s"Skipped experiment $name.")
return
}
@@ -86,11 +86,11 @@ class SorterSuite extends SparkFunSuite {
while (i < 10) {
val time = org.apache.spark.util.Utils.timeIt(1)(f, Some(prepare))
next10 += time
- println(s"$name: Took $time ms")
+ logInfo(s"$name: Took $time ms")
i += 1
}
- println(s"$name: ($firstTry ms first try, ${next10 / 10} ms average)")
+ logInfo(s"$name: ($firstTry ms first try, ${next10 / 10} ms average)")
}
/**