author     Reynold Xin <rxin@databricks.com>  2015-05-31 00:16:22 -0700
committer  Reynold Xin <rxin@databricks.com>  2015-05-31 00:16:22 -0700
commit     74fdc97c7206c6d715f128ef7c46055e0bb90760 (patch)
tree       5dc2093b56278e7870f7f249edd50e4268ec30cf
parent     7896e99b2a0a160bd0b6c5c11cf40b6cbf4a65cf (diff)
[SPARK-3850] Trim trailing spaces for core.
Author: Reynold Xin <rxin@databricks.com>

Closes #6533 from rxin/whitespace-2 and squashes the following commits:

038314c [Reynold Xin] [SPARK-3850] Trim trailing spaces for core.
-rw-r--r--  core/src/main/scala/org/apache/spark/Aggregator.scala | 4
-rw-r--r--  core/src/main/scala/org/apache/spark/FutureAction.scala | 2
-rw-r--r--  core/src/main/scala/org/apache/spark/HeartbeatReceiver.scala | 20
-rw-r--r--  core/src/main/scala/org/apache/spark/HttpFileServer.scala | 4
-rw-r--r--  core/src/main/scala/org/apache/spark/SparkConf.scala | 12
-rw-r--r--  core/src/main/scala/org/apache/spark/TestUtils.scala | 2
-rw-r--r--  core/src/main/scala/org/apache/spark/api/java/JavaDoubleRDD.scala | 2
-rw-r--r--  core/src/main/scala/org/apache/spark/api/java/JavaRDD.scala | 6
-rw-r--r--  core/src/main/scala/org/apache/spark/api/python/PythonRDD.scala | 4
-rw-r--r--  core/src/main/scala/org/apache/spark/api/r/RBackend.scala | 4
-rw-r--r--  core/src/main/scala/org/apache/spark/api/r/RBackendHandler.scala | 2
-rw-r--r--  core/src/main/scala/org/apache/spark/deploy/SparkSubmit.scala | 2
-rw-r--r--  core/src/main/scala/org/apache/spark/deploy/history/HistoryServerArguments.scala | 2
-rw-r--r--  core/src/main/scala/org/apache/spark/deploy/master/ZooKeeperPersistenceEngine.scala | 2
-rw-r--r--  core/src/main/scala/org/apache/spark/executor/TaskMetrics.scala | 16
-rw-r--r--  core/src/main/scala/org/apache/spark/metrics/sink/Slf4jSink.scala | 4
-rw-r--r--  core/src/main/scala/org/apache/spark/metrics/sink/package.scala | 2
-rw-r--r--  core/src/main/scala/org/apache/spark/rdd/AsyncRDDActions.scala | 4
-rw-r--r--  core/src/main/scala/org/apache/spark/rdd/NewHadoopRDD.scala | 2
-rw-r--r--  core/src/main/scala/org/apache/spark/rdd/PairRDDFunctions.scala | 2
-rw-r--r--  core/src/main/scala/org/apache/spark/scheduler/ReplayListenerBus.scala | 4
-rw-r--r--  core/src/main/scala/org/apache/spark/scheduler/Task.scala | 2
-rw-r--r--  core/src/main/scala/org/apache/spark/scheduler/TaskSetManager.scala | 4
-rw-r--r--  core/src/main/scala/org/apache/spark/scheduler/cluster/CoarseGrainedSchedulerBackend.scala | 2
-rw-r--r--  core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosSchedulerBackendUtil.scala | 4
-rw-r--r--  core/src/main/scala/org/apache/spark/serializer/KryoSerializer.scala | 2
-rw-r--r--  core/src/main/scala/org/apache/spark/shuffle/hash/BlockStoreShuffleFetcher.scala | 2
-rw-r--r--  core/src/main/scala/org/apache/spark/status/api/v1/OneStageResource.scala | 2
-rw-r--r--  core/src/main/scala/org/apache/spark/storage/BlockManagerMasterEndpoint.scala | 8
-rw-r--r--  core/src/main/scala/org/apache/spark/storage/DiskBlockManager.scala | 2
-rw-r--r--  core/src/main/scala/org/apache/spark/storage/TachyonBlockManager.scala | 4
-rw-r--r--  core/src/main/scala/org/apache/spark/ui/WebUI.scala | 4
-rw-r--r--  core/src/main/scala/org/apache/spark/ui/jobs/JobProgressListener.scala | 2
-rw-r--r--  core/src/main/scala/org/apache/spark/util/AsynchronousListenerBus.scala | 2
-rw-r--r--  core/src/main/scala/org/apache/spark/util/SizeEstimator.scala | 4
-rw-r--r--  core/src/main/scala/org/apache/spark/util/collection/ExternalAppendOnlyMap.scala | 4
-rw-r--r--  core/src/main/scala/org/apache/spark/util/collection/ExternalSorter.scala | 2
-rw-r--r--  core/src/test/scala/org/apache/spark/FailureSuite.scala | 6
-rw-r--r--  core/src/test/scala/org/apache/spark/ImplicitOrderingSuite.scala | 28
-rw-r--r--  core/src/test/scala/org/apache/spark/SparkContextSuite.scala | 10
-rw-r--r--  core/src/test/scala/org/apache/spark/rdd/JdbcRDDSuite.scala | 2
-rw-r--r--  core/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MemoryUtilsSuite.scala | 4
-rw-r--r--  core/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MesosSchedulerBackendSuite.scala | 4
-rw-r--r--  core/src/test/scala/org/apache/spark/serializer/KryoSerializerSuite.scala | 6
-rw-r--r--  core/src/test/scala/org/apache/spark/serializer/ProactiveClosureSerializationSuite.scala | 18
-rw-r--r--  core/src/test/scala/org/apache/spark/util/ClosureCleanerSuite.scala | 2
-rw-r--r--  core/src/test/scala/org/apache/spark/util/random/RandomSamplerSuite.scala | 2
47 files changed, 117 insertions, 117 deletions
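
The change itself is mechanical: each hunk below deletes a line that carried trailing spaces and re-adds it with the whitespace stripped. As a rough illustration (not part of this commit), the standalone Scala sketch below shows one way such lines can be flagged before committing; the object name, the default core/src path, and the regex are illustrative assumptions, and in practice an end-of-line-whitespace style-checker rule or git diff --check serves the same purpose.

import java.nio.file.{Files, Paths}
import scala.collection.JavaConverters._
import scala.io.Source

// Hypothetical helper (not part of this patch): walk a source tree and
// report every .scala line that ends in whitespace.
object TrailingWhitespaceCheck {
  def main(args: Array[String]): Unit = {
    // Directory to scan; "core/src" is only an illustrative default.
    val root = Paths.get(args.headOption.getOrElse("core/src"))
    val scalaFiles = Files.walk(root).iterator().asScala
      .filter(_.toString.endsWith(".scala"))
      .toList

    for (file <- scalaFiles) {
      val source = Source.fromFile(file.toFile, "UTF-8")
      try {
        source.getLines().zipWithIndex.foreach { case (line, idx) =>
          // Flag the line if stripping whitespace from its right end changes it.
          if (line != line.replaceAll("\\s+$", "")) {
            println(s"$file:${idx + 1}: trailing whitespace")
          }
        }
      } finally {
        source.close()
      }
    }
  }
}
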
diff --git a/core/src/main/scala/org/apache/spark/Aggregator.scala b/core/src/main/scala/org/apache/spark/Aggregator.scala
index b8a5f50168..ceeb58075d 100644
--- a/core/src/main/scala/org/apache/spark/Aggregator.scala
+++ b/core/src/main/scala/org/apache/spark/Aggregator.scala
@@ -34,8 +34,8 @@ case class Aggregator[K, V, C] (
mergeValue: (C, V) => C,
mergeCombiners: (C, C) => C) {
- // When spilling is enabled sorting will happen externally, but not necessarily with an
- // ExternalSorter.
+ // When spilling is enabled sorting will happen externally, but not necessarily with an
+ // ExternalSorter.
private val isSpillEnabled = SparkEnv.get.conf.getBoolean("spark.shuffle.spill", true)
@deprecated("use combineValuesByKey with TaskContext argument", "0.9.0")
diff --git a/core/src/main/scala/org/apache/spark/FutureAction.scala b/core/src/main/scala/org/apache/spark/FutureAction.scala
index 91f9ef8ce7..48792a9581 100644
--- a/core/src/main/scala/org/apache/spark/FutureAction.scala
+++ b/core/src/main/scala/org/apache/spark/FutureAction.scala
@@ -150,7 +150,7 @@ class SimpleFutureAction[T] private[spark](jobWaiter: JobWaiter[_], resultFunc:
}
override def isCompleted: Boolean = jobWaiter.jobFinished
-
+
override def isCancelled: Boolean = _cancelled
override def value: Option[Try[T]] = {
diff --git a/core/src/main/scala/org/apache/spark/HeartbeatReceiver.scala b/core/src/main/scala/org/apache/spark/HeartbeatReceiver.scala
index f2b024ff6c..6909015ff6 100644
--- a/core/src/main/scala/org/apache/spark/HeartbeatReceiver.scala
+++ b/core/src/main/scala/org/apache/spark/HeartbeatReceiver.scala
@@ -29,7 +29,7 @@ import org.apache.spark.util.{ThreadUtils, Utils}
/**
* A heartbeat from executors to the driver. This is a shared message used by several internal
- * components to convey liveness or execution information for in-progress tasks. It will also
+ * components to convey liveness or execution information for in-progress tasks. It will also
* expire the hosts that have not heartbeated for more than spark.network.timeout.
*/
private[spark] case class Heartbeat(
@@ -43,8 +43,8 @@ private[spark] case class Heartbeat(
*/
private[spark] case object TaskSchedulerIsSet
-private[spark] case object ExpireDeadHosts
-
+private[spark] case object ExpireDeadHosts
+
private[spark] case class HeartbeatResponse(reregisterBlockManager: Boolean)
/**
@@ -62,18 +62,18 @@ private[spark] class HeartbeatReceiver(sc: SparkContext)
// "spark.network.timeout" uses "seconds", while `spark.storage.blockManagerSlaveTimeoutMs` uses
// "milliseconds"
- private val slaveTimeoutMs =
+ private val slaveTimeoutMs =
sc.conf.getTimeAsMs("spark.storage.blockManagerSlaveTimeoutMs", "120s")
- private val executorTimeoutMs =
+ private val executorTimeoutMs =
sc.conf.getTimeAsSeconds("spark.network.timeout", s"${slaveTimeoutMs}ms") * 1000
-
+
// "spark.network.timeoutInterval" uses "seconds", while
// "spark.storage.blockManagerTimeoutIntervalMs" uses "milliseconds"
- private val timeoutIntervalMs =
+ private val timeoutIntervalMs =
sc.conf.getTimeAsMs("spark.storage.blockManagerTimeoutIntervalMs", "60s")
- private val checkTimeoutIntervalMs =
+ private val checkTimeoutIntervalMs =
sc.conf.getTimeAsSeconds("spark.network.timeoutInterval", s"${timeoutIntervalMs}ms") * 1000
-
+
private var timeoutCheckingTask: ScheduledFuture[_] = null
// "eventLoopThread" is used to run some pretty fast actions. The actions running in it should not
@@ -140,7 +140,7 @@ private[spark] class HeartbeatReceiver(sc: SparkContext)
}
}
}
-
+
override def onStop(): Unit = {
if (timeoutCheckingTask != null) {
timeoutCheckingTask.cancel(true)
diff --git a/core/src/main/scala/org/apache/spark/HttpFileServer.scala b/core/src/main/scala/org/apache/spark/HttpFileServer.scala
index 7e706bcc42..7cf7bc0dc6 100644
--- a/core/src/main/scala/org/apache/spark/HttpFileServer.scala
+++ b/core/src/main/scala/org/apache/spark/HttpFileServer.scala
@@ -50,8 +50,8 @@ private[spark] class HttpFileServer(
def stop() {
httpServer.stop()
-
- // If we only stop sc, but the driver process still run as a services then we need to delete
+
+ // If we only stop sc, but the driver process still run as a services then we need to delete
// the tmp dir, if not, it will create too many tmp dirs
try {
Utils.deleteRecursively(baseDir)
diff --git a/core/src/main/scala/org/apache/spark/SparkConf.scala b/core/src/main/scala/org/apache/spark/SparkConf.scala
index 4b5bcb54aa..46d72841dc 100644
--- a/core/src/main/scala/org/apache/spark/SparkConf.scala
+++ b/core/src/main/scala/org/apache/spark/SparkConf.scala
@@ -227,7 +227,7 @@ class SparkConf(loadDefaults: Boolean) extends Cloneable with Logging {
def getSizeAsBytes(key: String, defaultValue: String): Long = {
Utils.byteStringAsBytes(get(key, defaultValue))
}
-
+
/**
* Get a size parameter as Kibibytes; throws a NoSuchElementException if it's not set. If no
* suffix is provided then Kibibytes are assumed.
@@ -244,7 +244,7 @@ class SparkConf(loadDefaults: Boolean) extends Cloneable with Logging {
def getSizeAsKb(key: String, defaultValue: String): Long = {
Utils.byteStringAsKb(get(key, defaultValue))
}
-
+
/**
* Get a size parameter as Mebibytes; throws a NoSuchElementException if it's not set. If no
* suffix is provided then Mebibytes are assumed.
@@ -261,7 +261,7 @@ class SparkConf(loadDefaults: Boolean) extends Cloneable with Logging {
def getSizeAsMb(key: String, defaultValue: String): Long = {
Utils.byteStringAsMb(get(key, defaultValue))
}
-
+
/**
* Get a size parameter as Gibibytes; throws a NoSuchElementException if it's not set. If no
* suffix is provided then Gibibytes are assumed.
@@ -278,7 +278,7 @@ class SparkConf(loadDefaults: Boolean) extends Cloneable with Logging {
def getSizeAsGb(key: String, defaultValue: String): Long = {
Utils.byteStringAsGb(get(key, defaultValue))
}
-
+
/** Get a parameter as an Option */
def getOption(key: String): Option[String] = {
Option(settings.get(key)).orElse(getDeprecatedConfig(key, this))
@@ -480,7 +480,7 @@ private[spark] object SparkConf extends Logging {
"spark.kryoserializer.buffer.mb was previously specified as '0.064'. Fractional values " +
"are no longer accepted. To specify the equivalent now, one may use '64k'.")
)
-
+
Map(configs.map { cfg => (cfg.key -> cfg) } : _*)
}
@@ -508,7 +508,7 @@ private[spark] object SparkConf extends Logging {
"spark.reducer.maxSizeInFlight" -> Seq(
AlternateConfig("spark.reducer.maxMbInFlight", "1.4")),
"spark.kryoserializer.buffer" ->
- Seq(AlternateConfig("spark.kryoserializer.buffer.mb", "1.4",
+ Seq(AlternateConfig("spark.kryoserializer.buffer.mb", "1.4",
translation = s => s"${(s.toDouble * 1000).toInt}k")),
"spark.kryoserializer.buffer.max" -> Seq(
AlternateConfig("spark.kryoserializer.buffer.max.mb", "1.4")),
diff --git a/core/src/main/scala/org/apache/spark/TestUtils.scala b/core/src/main/scala/org/apache/spark/TestUtils.scala
index fe6320b504..a1ebbecf93 100644
--- a/core/src/main/scala/org/apache/spark/TestUtils.scala
+++ b/core/src/main/scala/org/apache/spark/TestUtils.scala
@@ -51,7 +51,7 @@ private[spark] object TestUtils {
classpathUrls: Seq[URL] = Seq()): URL = {
val tempDir = Utils.createTempDir()
val files1 = for (name <- classNames) yield {
- createCompiledClass(name, tempDir, toStringValue, classpathUrls = classpathUrls)
+ createCompiledClass(name, tempDir, toStringValue, classpathUrls = classpathUrls)
}
val files2 = for ((childName, baseName) <- classNamesWithBase) yield {
createCompiledClass(childName, tempDir, toStringValue, baseName, classpathUrls)
diff --git a/core/src/main/scala/org/apache/spark/api/java/JavaDoubleRDD.scala b/core/src/main/scala/org/apache/spark/api/java/JavaDoubleRDD.scala
index 61af867b11..a650df605b 100644
--- a/core/src/main/scala/org/apache/spark/api/java/JavaDoubleRDD.scala
+++ b/core/src/main/scala/org/apache/spark/api/java/JavaDoubleRDD.scala
@@ -137,7 +137,7 @@ class JavaDoubleRDD(val srdd: RDD[scala.Double])
*/
def sample(withReplacement: Boolean, fraction: JDouble): JavaDoubleRDD =
sample(withReplacement, fraction, Utils.random.nextLong)
-
+
/**
* Return a sampled subset of this RDD.
*/
diff --git a/core/src/main/scala/org/apache/spark/api/java/JavaRDD.scala b/core/src/main/scala/org/apache/spark/api/java/JavaRDD.scala
index db4e996feb..ed312770ee 100644
--- a/core/src/main/scala/org/apache/spark/api/java/JavaRDD.scala
+++ b/core/src/main/scala/org/apache/spark/api/java/JavaRDD.scala
@@ -101,7 +101,7 @@ class JavaRDD[T](val rdd: RDD[T])(implicit val classTag: ClassTag[T])
/**
* Return a sampled subset of this RDD.
- *
+ *
* @param withReplacement can elements be sampled multiple times (replaced when sampled out)
* @param fraction expected size of the sample as a fraction of this RDD's size
* without replacement: probability that each element is chosen; fraction must be [0, 1]
@@ -109,10 +109,10 @@ class JavaRDD[T](val rdd: RDD[T])(implicit val classTag: ClassTag[T])
*/
def sample(withReplacement: Boolean, fraction: Double): JavaRDD[T] =
sample(withReplacement, fraction, Utils.random.nextLong)
-
+
/**
* Return a sampled subset of this RDD.
- *
+ *
* @param withReplacement can elements be sampled multiple times (replaced when sampled out)
* @param fraction expected size of the sample as a fraction of this RDD's size
* without replacement: probability that each element is chosen; fraction must be [0, 1]
diff --git a/core/src/main/scala/org/apache/spark/api/python/PythonRDD.scala b/core/src/main/scala/org/apache/spark/api/python/PythonRDD.scala
index 51388f01a3..55a37f8c94 100644
--- a/core/src/main/scala/org/apache/spark/api/python/PythonRDD.scala
+++ b/core/src/main/scala/org/apache/spark/api/python/PythonRDD.scala
@@ -797,10 +797,10 @@ private class PythonAccumulatorParam(@transient serverHost: String, serverPort:
val bufferSize = SparkEnv.get.conf.getInt("spark.buffer.size", 65536)
- /**
+ /**
* We try to reuse a single Socket to transfer accumulator updates, as they are all added
* by the DAGScheduler's single-threaded actor anyway.
- */
+ */
@transient var socket: Socket = _
def openSocket(): Socket = synchronized {
diff --git a/core/src/main/scala/org/apache/spark/api/r/RBackend.scala b/core/src/main/scala/org/apache/spark/api/r/RBackend.scala
index 0a91977928..d24c650d37 100644
--- a/core/src/main/scala/org/apache/spark/api/r/RBackend.scala
+++ b/core/src/main/scala/org/apache/spark/api/r/RBackend.scala
@@ -44,11 +44,11 @@ private[spark] class RBackend {
bossGroup = new NioEventLoopGroup(2)
val workerGroup = bossGroup
val handler = new RBackendHandler(this)
-
+
bootstrap = new ServerBootstrap()
.group(bossGroup, workerGroup)
.channel(classOf[NioServerSocketChannel])
-
+
bootstrap.childHandler(new ChannelInitializer[SocketChannel]() {
def initChannel(ch: SocketChannel): Unit = {
ch.pipeline()
diff --git a/core/src/main/scala/org/apache/spark/api/r/RBackendHandler.scala b/core/src/main/scala/org/apache/spark/api/r/RBackendHandler.scala
index 026a1b9380..2e86984c66 100644
--- a/core/src/main/scala/org/apache/spark/api/r/RBackendHandler.scala
+++ b/core/src/main/scala/org/apache/spark/api/r/RBackendHandler.scala
@@ -77,7 +77,7 @@ private[r] class RBackendHandler(server: RBackend)
val reply = bos.toByteArray
ctx.write(reply)
}
-
+
override def channelReadComplete(ctx: ChannelHandlerContext): Unit = {
ctx.flush()
}
diff --git a/core/src/main/scala/org/apache/spark/deploy/SparkSubmit.scala b/core/src/main/scala/org/apache/spark/deploy/SparkSubmit.scala
index d1b32ea077..8cf4d58847 100644
--- a/core/src/main/scala/org/apache/spark/deploy/SparkSubmit.scala
+++ b/core/src/main/scala/org/apache/spark/deploy/SparkSubmit.scala
@@ -869,7 +869,7 @@ private[spark] object SparkSubmitUtils {
md.addDependency(dd)
}
}
-
+
/** Add exclusion rules for dependencies already included in the spark-assembly */
def addExclusionRules(
ivySettings: IvySettings,
diff --git a/core/src/main/scala/org/apache/spark/deploy/history/HistoryServerArguments.scala b/core/src/main/scala/org/apache/spark/deploy/history/HistoryServerArguments.scala
index a2a97a7877..4692d22651 100644
--- a/core/src/main/scala/org/apache/spark/deploy/history/HistoryServerArguments.scala
+++ b/core/src/main/scala/org/apache/spark/deploy/history/HistoryServerArguments.scala
@@ -23,7 +23,7 @@ import org.apache.spark.util.Utils
/**
* Command-line parser for the master.
*/
-private[history] class HistoryServerArguments(conf: SparkConf, args: Array[String])
+private[history] class HistoryServerArguments(conf: SparkConf, args: Array[String])
extends Logging {
private var propertiesFile: String = null
diff --git a/core/src/main/scala/org/apache/spark/deploy/master/ZooKeeperPersistenceEngine.scala b/core/src/main/scala/org/apache/spark/deploy/master/ZooKeeperPersistenceEngine.scala
index 80db6d474b..328d95a7a0 100644
--- a/core/src/main/scala/org/apache/spark/deploy/master/ZooKeeperPersistenceEngine.scala
+++ b/core/src/main/scala/org/apache/spark/deploy/master/ZooKeeperPersistenceEngine.scala
@@ -32,7 +32,7 @@ import org.apache.spark.deploy.SparkCuratorUtil
private[master] class ZooKeeperPersistenceEngine(conf: SparkConf, val serialization: Serialization)
extends PersistenceEngine
with Logging {
-
+
private val WORKING_DIR = conf.get("spark.deploy.zookeeper.dir", "/spark") + "/master_status"
private val zk: CuratorFramework = SparkCuratorUtil.newClient(conf)
diff --git a/core/src/main/scala/org/apache/spark/executor/TaskMetrics.scala b/core/src/main/scala/org/apache/spark/executor/TaskMetrics.scala
index d90ae405a0..38b61d7242 100644
--- a/core/src/main/scala/org/apache/spark/executor/TaskMetrics.scala
+++ b/core/src/main/scala/org/apache/spark/executor/TaskMetrics.scala
@@ -43,22 +43,22 @@ class TaskMetrics extends Serializable {
private var _hostname: String = _
def hostname: String = _hostname
private[spark] def setHostname(value: String) = _hostname = value
-
+
/**
* Time taken on the executor to deserialize this task
*/
private var _executorDeserializeTime: Long = _
def executorDeserializeTime: Long = _executorDeserializeTime
private[spark] def setExecutorDeserializeTime(value: Long) = _executorDeserializeTime = value
-
-
+
+
/**
* Time the executor spends actually running the task (including fetching shuffle data)
*/
private var _executorRunTime: Long = _
def executorRunTime: Long = _executorRunTime
private[spark] def setExecutorRunTime(value: Long) = _executorRunTime = value
-
+
/**
* The number of bytes this task transmitted back to the driver as the TaskResult
*/
@@ -315,7 +315,7 @@ class ShuffleReadMetrics extends Serializable {
def remoteBlocksFetched: Int = _remoteBlocksFetched
private[spark] def incRemoteBlocksFetched(value: Int) = _remoteBlocksFetched += value
private[spark] def decRemoteBlocksFetched(value: Int) = _remoteBlocksFetched -= value
-
+
/**
* Number of local blocks fetched in this shuffle by this task
*/
@@ -333,7 +333,7 @@ class ShuffleReadMetrics extends Serializable {
def fetchWaitTime: Long = _fetchWaitTime
private[spark] def incFetchWaitTime(value: Long) = _fetchWaitTime += value
private[spark] def decFetchWaitTime(value: Long) = _fetchWaitTime -= value
-
+
/**
* Total number of remote bytes read from the shuffle by this task
*/
@@ -381,7 +381,7 @@ class ShuffleWriteMetrics extends Serializable {
def shuffleBytesWritten: Long = _shuffleBytesWritten
private[spark] def incShuffleBytesWritten(value: Long) = _shuffleBytesWritten += value
private[spark] def decShuffleBytesWritten(value: Long) = _shuffleBytesWritten -= value
-
+
/**
* Time the task spent blocking on writes to disk or buffer cache, in nanoseconds
*/
@@ -389,7 +389,7 @@ class ShuffleWriteMetrics extends Serializable {
def shuffleWriteTime: Long = _shuffleWriteTime
private[spark] def incShuffleWriteTime(value: Long) = _shuffleWriteTime += value
private[spark] def decShuffleWriteTime(value: Long) = _shuffleWriteTime -= value
-
+
/**
* Total number of records written to the shuffle by this task
*/
diff --git a/core/src/main/scala/org/apache/spark/metrics/sink/Slf4jSink.scala b/core/src/main/scala/org/apache/spark/metrics/sink/Slf4jSink.scala
index e8b3074e8f..11dfcfe2f0 100644
--- a/core/src/main/scala/org/apache/spark/metrics/sink/Slf4jSink.scala
+++ b/core/src/main/scala/org/apache/spark/metrics/sink/Slf4jSink.scala
@@ -26,9 +26,9 @@ import org.apache.spark.SecurityManager
import org.apache.spark.metrics.MetricsSystem
private[spark] class Slf4jSink(
- val property: Properties,
+ val property: Properties,
val registry: MetricRegistry,
- securityMgr: SecurityManager)
+ securityMgr: SecurityManager)
extends Sink {
val SLF4J_DEFAULT_PERIOD = 10
val SLF4J_DEFAULT_UNIT = "SECONDS"
diff --git a/core/src/main/scala/org/apache/spark/metrics/sink/package.scala b/core/src/main/scala/org/apache/spark/metrics/sink/package.scala
index 90e3aa70b9..670e683663 100644
--- a/core/src/main/scala/org/apache/spark/metrics/sink/package.scala
+++ b/core/src/main/scala/org/apache/spark/metrics/sink/package.scala
@@ -20,4 +20,4 @@ package org.apache.spark.metrics
/**
* Sinks used in Spark's metrics system.
*/
-package object sink
+package object sink
diff --git a/core/src/main/scala/org/apache/spark/rdd/AsyncRDDActions.scala b/core/src/main/scala/org/apache/spark/rdd/AsyncRDDActions.scala
index bbf1b83af0..ca1eb1f4e4 100644
--- a/core/src/main/scala/org/apache/spark/rdd/AsyncRDDActions.scala
+++ b/core/src/main/scala/org/apache/spark/rdd/AsyncRDDActions.scala
@@ -85,9 +85,9 @@ class AsyncRDDActions[T: ClassTag](self: RDD[T]) extends Serializable with Loggi
numPartsToTry = partsScanned * 4
} else {
// the left side of max is >=1 whenever partsScanned >= 2
- numPartsToTry = Math.max(1,
+ numPartsToTry = Math.max(1,
(1.5 * num * partsScanned / results.size).toInt - partsScanned)
- numPartsToTry = Math.min(numPartsToTry, partsScanned * 4)
+ numPartsToTry = Math.min(numPartsToTry, partsScanned * 4)
}
}
diff --git a/core/src/main/scala/org/apache/spark/rdd/NewHadoopRDD.scala b/core/src/main/scala/org/apache/spark/rdd/NewHadoopRDD.scala
index 2ab967f4bb..84456d6d86 100644
--- a/core/src/main/scala/org/apache/spark/rdd/NewHadoopRDD.scala
+++ b/core/src/main/scala/org/apache/spark/rdd/NewHadoopRDD.scala
@@ -196,7 +196,7 @@ class NewHadoopRDD[K, V](
override def getPreferredLocations(hsplit: Partition): Seq[String] = {
val split = hsplit.asInstanceOf[NewHadoopPartition].serializableHadoopSplit.value
val locs = HadoopRDD.SPLIT_INFO_REFLECTIONS match {
- case Some(c) =>
+ case Some(c) =>
try {
val infos = c.newGetLocationInfo.invoke(split).asInstanceOf[Array[AnyRef]]
Some(HadoopRDD.convertSplitLocationInfo(infos))
diff --git a/core/src/main/scala/org/apache/spark/rdd/PairRDDFunctions.scala b/core/src/main/scala/org/apache/spark/rdd/PairRDDFunctions.scala
index 004899f27b..cfd3e26faf 100644
--- a/core/src/main/scala/org/apache/spark/rdd/PairRDDFunctions.scala
+++ b/core/src/main/scala/org/apache/spark/rdd/PairRDDFunctions.scala
@@ -328,7 +328,7 @@ class PairRDDFunctions[K, V](self: RDD[(K, V)])
reduceByKeyLocally(func)
}
- /**
+ /**
* Count the number of elements for each key, collecting the results to a local Map.
*
* Note that this method should only be used if the resulting map is expected to be small, as
diff --git a/core/src/main/scala/org/apache/spark/scheduler/ReplayListenerBus.scala b/core/src/main/scala/org/apache/spark/scheduler/ReplayListenerBus.scala
index 86f357abb8..c6d957b65f 100644
--- a/core/src/main/scala/org/apache/spark/scheduler/ReplayListenerBus.scala
+++ b/core/src/main/scala/org/apache/spark/scheduler/ReplayListenerBus.scala
@@ -41,7 +41,7 @@ private[spark] class ReplayListenerBus extends SparkListenerBus with Logging {
*
* @param logData Stream containing event log data.
* @param sourceName Filename (or other source identifier) from whence @logData is being read
- * @param maybeTruncated Indicate whether log file might be truncated (some abnormal situations
+ * @param maybeTruncated Indicate whether log file might be truncated (some abnormal situations
* encountered, log file might not finished writing) or not
*/
def replay(
@@ -62,7 +62,7 @@ private[spark] class ReplayListenerBus extends SparkListenerBus with Logging {
if (!maybeTruncated || lines.hasNext) {
throw jpe
} else {
- logWarning(s"Got JsonParseException from log file $sourceName" +
+ logWarning(s"Got JsonParseException from log file $sourceName" +
s" at line $lineNumber, the file might not have finished writing cleanly.")
}
}
diff --git a/core/src/main/scala/org/apache/spark/scheduler/Task.scala b/core/src/main/scala/org/apache/spark/scheduler/Task.scala
index 586d1e0620..15101c64f0 100644
--- a/core/src/main/scala/org/apache/spark/scheduler/Task.scala
+++ b/core/src/main/scala/org/apache/spark/scheduler/Task.scala
@@ -125,7 +125,7 @@ private[spark] abstract class Task[T](val stageId: Int, var partitionId: Int) ex
if (interruptThread && taskThread != null) {
taskThread.interrupt()
}
- }
+ }
}
/**
diff --git a/core/src/main/scala/org/apache/spark/scheduler/TaskSetManager.scala b/core/src/main/scala/org/apache/spark/scheduler/TaskSetManager.scala
index d473e51aba..673cd0e19e 100644
--- a/core/src/main/scala/org/apache/spark/scheduler/TaskSetManager.scala
+++ b/core/src/main/scala/org/apache/spark/scheduler/TaskSetManager.scala
@@ -861,9 +861,9 @@ private[spark] class TaskSetManager(
case TaskLocality.RACK_LOCAL => "spark.locality.wait.rack"
case _ => null
}
-
+
if (localityWaitKey != null) {
- conf.getTimeAsMs(localityWaitKey, defaultWait)
+ conf.getTimeAsMs(localityWaitKey, defaultWait)
} else {
0L
}
diff --git a/core/src/main/scala/org/apache/spark/scheduler/cluster/CoarseGrainedSchedulerBackend.scala b/core/src/main/scala/org/apache/spark/scheduler/cluster/CoarseGrainedSchedulerBackend.scala
index c5bc6294a5..fcad959540 100644
--- a/core/src/main/scala/org/apache/spark/scheduler/cluster/CoarseGrainedSchedulerBackend.scala
+++ b/core/src/main/scala/org/apache/spark/scheduler/cluster/CoarseGrainedSchedulerBackend.scala
@@ -84,7 +84,7 @@ class CoarseGrainedSchedulerBackend(scheduler: TaskSchedulerImpl, val rpcEnv: Rp
override def onStart() {
// Periodically revive offers to allow delay scheduling to work
val reviveIntervalMs = conf.getTimeAsMs("spark.scheduler.revive.interval", "1s")
-
+
reviveThread.scheduleAtFixedRate(new Runnable {
override def run(): Unit = Utils.tryLogNonFatalError {
Option(self).foreach(_.send(ReviveOffers))
diff --git a/core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosSchedulerBackendUtil.scala b/core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosSchedulerBackendUtil.scala
index 2f2934c249..e79c543a9d 100644
--- a/core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosSchedulerBackendUtil.scala
+++ b/core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosSchedulerBackendUtil.scala
@@ -37,14 +37,14 @@ private[mesos] object MesosSchedulerBackendUtil extends Logging {
.newBuilder()
.setMode(Volume.Mode.RW)
spec match {
- case Array(container_path) =>
+ case Array(container_path) =>
Some(vol.setContainerPath(container_path))
case Array(container_path, "rw") =>
Some(vol.setContainerPath(container_path))
case Array(container_path, "ro") =>
Some(vol.setContainerPath(container_path)
.setMode(Volume.Mode.RO))
- case Array(host_path, container_path) =>
+ case Array(host_path, container_path) =>
Some(vol.setContainerPath(container_path)
.setHostPath(host_path))
case Array(host_path, container_path, "rw") =>
diff --git a/core/src/main/scala/org/apache/spark/serializer/KryoSerializer.scala b/core/src/main/scala/org/apache/spark/serializer/KryoSerializer.scala
index 3f909885db..cd8a82347a 100644
--- a/core/src/main/scala/org/apache/spark/serializer/KryoSerializer.scala
+++ b/core/src/main/scala/org/apache/spark/serializer/KryoSerializer.scala
@@ -52,7 +52,7 @@ class KryoSerializer(conf: SparkConf)
with Serializable {
private val bufferSizeKb = conf.getSizeAsKb("spark.kryoserializer.buffer", "64k")
-
+
if (bufferSizeKb >= ByteUnit.GiB.toKiB(2)) {
throw new IllegalArgumentException("spark.kryoserializer.buffer must be less than " +
s"2048 mb, got: + ${ByteUnit.KiB.toMiB(bufferSizeKb)} mb.")
diff --git a/core/src/main/scala/org/apache/spark/shuffle/hash/BlockStoreShuffleFetcher.scala b/core/src/main/scala/org/apache/spark/shuffle/hash/BlockStoreShuffleFetcher.scala
index 80374adc44..597d46a3d2 100644
--- a/core/src/main/scala/org/apache/spark/shuffle/hash/BlockStoreShuffleFetcher.scala
+++ b/core/src/main/scala/org/apache/spark/shuffle/hash/BlockStoreShuffleFetcher.scala
@@ -80,7 +80,7 @@ private[hash] object BlockStoreShuffleFetcher extends Logging {
blocksByAddress,
serializer,
// Note: we use getSizeAsMb when no suffix is provided for backwards compatibility
- SparkEnv.get.conf.getSizeAsMb("spark.reducer.maxSizeInFlight", "48m") * 1024 * 1024)
+ SparkEnv.get.conf.getSizeAsMb("spark.reducer.maxSizeInFlight", "48m") * 1024 * 1024)
val itr = blockFetcherItr.flatMap(unpackBlock)
val completionIter = CompletionIterator[T, Iterator[T]](itr, {
diff --git a/core/src/main/scala/org/apache/spark/status/api/v1/OneStageResource.scala b/core/src/main/scala/org/apache/spark/status/api/v1/OneStageResource.scala
index fd24aea63a..f9812f06cf 100644
--- a/core/src/main/scala/org/apache/spark/status/api/v1/OneStageResource.scala
+++ b/core/src/main/scala/org/apache/spark/status/api/v1/OneStageResource.scala
@@ -83,7 +83,7 @@ private[v1] class OneStageResource(ui: SparkUI) {
withStageAttempt(stageId, stageAttemptId) { stage =>
val tasks = stage.ui.taskData.values.map{AllStagesResource.convertTaskData}.toIndexedSeq
.sorted(OneStageResource.ordering(sortBy))
- tasks.slice(offset, offset + length)
+ tasks.slice(offset, offset + length)
}
}
diff --git a/core/src/main/scala/org/apache/spark/storage/BlockManagerMasterEndpoint.scala b/core/src/main/scala/org/apache/spark/storage/BlockManagerMasterEndpoint.scala
index 3afb4c3c02..2cd8c5297b 100644
--- a/core/src/main/scala/org/apache/spark/storage/BlockManagerMasterEndpoint.scala
+++ b/core/src/main/scala/org/apache/spark/storage/BlockManagerMasterEndpoint.scala
@@ -292,16 +292,16 @@ class BlockManagerMasterEndpoint(
blockManagerIdByExecutor.get(id.executorId) match {
case Some(oldId) =>
// A block manager of the same executor already exists, so remove it (assumed dead)
- logError("Got two different block manager registrations on same executor - "
+ logError("Got two different block manager registrations on same executor - "
+ s" will replace old one $oldId with new one $id")
- removeExecutor(id.executorId)
+ removeExecutor(id.executorId)
case None =>
}
logInfo("Registering block manager %s with %s RAM, %s".format(
id.hostPort, Utils.bytesToString(maxMemSize), id))
-
+
blockManagerIdByExecutor(id.executorId) = id
-
+
blockManagerInfo(id) = new BlockManagerInfo(
id, System.currentTimeMillis(), maxMemSize, slaveEndpoint)
}
diff --git a/core/src/main/scala/org/apache/spark/storage/DiskBlockManager.scala b/core/src/main/scala/org/apache/spark/storage/DiskBlockManager.scala
index d441a4d31b..91ef86389a 100644
--- a/core/src/main/scala/org/apache/spark/storage/DiskBlockManager.scala
+++ b/core/src/main/scala/org/apache/spark/storage/DiskBlockManager.scala
@@ -151,7 +151,7 @@ private[spark] class DiskBlockManager(blockManager: BlockManager, conf: SparkCon
try {
Utils.removeShutdownHook(shutdownHook)
} catch {
- case e: Exception =>
+ case e: Exception =>
logError(s"Exception while removing shutdown hook.", e)
}
doStop()
diff --git a/core/src/main/scala/org/apache/spark/storage/TachyonBlockManager.scala b/core/src/main/scala/org/apache/spark/storage/TachyonBlockManager.scala
index fb4ba0eac9..b53c86e89a 100644
--- a/core/src/main/scala/org/apache/spark/storage/TachyonBlockManager.scala
+++ b/core/src/main/scala/org/apache/spark/storage/TachyonBlockManager.scala
@@ -100,7 +100,7 @@ private[spark] class TachyonBlockManager() extends ExternalBlockManager with Log
try {
os.write(bytes.array())
} catch {
- case NonFatal(e) =>
+ case NonFatal(e) =>
logWarning(s"Failed to put bytes of block $blockId into Tachyon", e)
os.cancel()
} finally {
@@ -114,7 +114,7 @@ private[spark] class TachyonBlockManager() extends ExternalBlockManager with Log
try {
blockManager.dataSerializeStream(blockId, os, values)
} catch {
- case NonFatal(e) =>
+ case NonFatal(e) =>
logWarning(s"Failed to put values of block $blockId into Tachyon", e)
os.cancel()
} finally {
diff --git a/core/src/main/scala/org/apache/spark/ui/WebUI.scala b/core/src/main/scala/org/apache/spark/ui/WebUI.scala
index 594df15e9c..2c84e44859 100644
--- a/core/src/main/scala/org/apache/spark/ui/WebUI.scala
+++ b/core/src/main/scala/org/apache/spark/ui/WebUI.scala
@@ -62,12 +62,12 @@ private[spark] abstract class WebUI(
tab.pages.foreach(attachPage)
tabs += tab
}
-
+
def detachTab(tab: WebUITab) {
tab.pages.foreach(detachPage)
tabs -= tab
}
-
+
def detachPage(page: WebUIPage) {
pageToHandlers.remove(page).foreach(_.foreach(detachHandler))
}
diff --git a/core/src/main/scala/org/apache/spark/ui/jobs/JobProgressListener.scala b/core/src/main/scala/org/apache/spark/ui/jobs/JobProgressListener.scala
index 246e191d64..f39e961772 100644
--- a/core/src/main/scala/org/apache/spark/ui/jobs/JobProgressListener.scala
+++ b/core/src/main/scala/org/apache/spark/ui/jobs/JobProgressListener.scala
@@ -119,7 +119,7 @@ class JobProgressListener(conf: SparkConf) extends SparkListener with Logging {
"failedStages" -> failedStages.size
)
}
-
+
// These collections may grow arbitrarily, but once Spark becomes idle they should shrink back to
// some bound based on the `spark.ui.retainedStages` and `spark.ui.retainedJobs` settings:
private[spark] def getSizesOfSoftSizeLimitedCollections: Map[String, Int] = {
diff --git a/core/src/main/scala/org/apache/spark/util/AsynchronousListenerBus.scala b/core/src/main/scala/org/apache/spark/util/AsynchronousListenerBus.scala
index ce7887b76f..1861d38640 100644
--- a/core/src/main/scala/org/apache/spark/util/AsynchronousListenerBus.scala
+++ b/core/src/main/scala/org/apache/spark/util/AsynchronousListenerBus.scala
@@ -40,7 +40,7 @@ private[spark] abstract class AsynchronousListenerBus[L <: AnyRef, E](name: Stri
self =>
private var sparkContext: SparkContext = null
-
+
/* Cap the capacity of the event queue so we get an explicit error (rather than
* an OOM exception) if it's perpetually being added to more quickly than it's being drained. */
private val EVENT_QUEUE_CAPACITY = 10000
diff --git a/core/src/main/scala/org/apache/spark/util/SizeEstimator.scala b/core/src/main/scala/org/apache/spark/util/SizeEstimator.scala
index f1f6b5e1f9..0180399c9d 100644
--- a/core/src/main/scala/org/apache/spark/util/SizeEstimator.scala
+++ b/core/src/main/scala/org/apache/spark/util/SizeEstimator.scala
@@ -236,7 +236,7 @@ object SizeEstimator extends Logging {
val s1 = sampleArray(array, state, rand, drawn, length)
val s2 = sampleArray(array, state, rand, drawn, length)
val size = math.min(s1, s2)
- state.size += math.max(s1, s2) +
+ state.size += math.max(s1, s2) +
(size * ((length - ARRAY_SAMPLE_SIZE) / (ARRAY_SAMPLE_SIZE))).toLong
}
}
@@ -244,7 +244,7 @@ object SizeEstimator extends Logging {
private def sampleArray(
array: AnyRef,
- state: SearchState,
+ state: SearchState,
rand: Random,
drawn: OpenHashSet[Int],
length: Int): Long = {
diff --git a/core/src/main/scala/org/apache/spark/util/collection/ExternalAppendOnlyMap.scala b/core/src/main/scala/org/apache/spark/util/collection/ExternalAppendOnlyMap.scala
index df2d6ad3b4..1e4531ef39 100644
--- a/core/src/main/scala/org/apache/spark/util/collection/ExternalAppendOnlyMap.scala
+++ b/core/src/main/scala/org/apache/spark/util/collection/ExternalAppendOnlyMap.scala
@@ -89,9 +89,9 @@ class ExternalAppendOnlyMap[K, V, C](
// Number of bytes spilled in total
private var _diskBytesSpilled = 0L
-
+
// Use getSizeAsKb (not bytes) to maintain backwards compatibility if no units are provided
- private val fileBufferSize =
+ private val fileBufferSize =
sparkConf.getSizeAsKb("spark.shuffle.file.buffer", "32k").toInt * 1024
// Write metrics for current spill
diff --git a/core/src/main/scala/org/apache/spark/util/collection/ExternalSorter.scala b/core/src/main/scala/org/apache/spark/util/collection/ExternalSorter.scala
index ef2dbb7ff0..757dec66c2 100644
--- a/core/src/main/scala/org/apache/spark/util/collection/ExternalSorter.scala
+++ b/core/src/main/scala/org/apache/spark/util/collection/ExternalSorter.scala
@@ -117,7 +117,7 @@ private[spark] class ExternalSorter[K, V, C](
private val serInstance = ser.newInstance()
private val spillingEnabled = conf.getBoolean("spark.shuffle.spill", true)
-
+
// Use getSizeAsKb (not bytes) to maintain backwards compatibility if no units are provided
private val fileBufferSize = conf.getSizeAsKb("spark.shuffle.file.buffer", "32k").toInt * 1024
diff --git a/core/src/test/scala/org/apache/spark/FailureSuite.scala b/core/src/test/scala/org/apache/spark/FailureSuite.scala
index b18067e68f..a8c8c6f73f 100644
--- a/core/src/test/scala/org/apache/spark/FailureSuite.scala
+++ b/core/src/test/scala/org/apache/spark/FailureSuite.scala
@@ -117,7 +117,7 @@ class FailureSuite extends SparkFunSuite with LocalSparkContext {
sc.parallelize(1 to 10, 2).map(x => a).count()
}
assert(thrown.getClass === classOf[SparkException])
- assert(thrown.getMessage.contains("NotSerializableException") ||
+ assert(thrown.getMessage.contains("NotSerializableException") ||
thrown.getCause.getClass === classOf[NotSerializableException])
// Non-serializable closure in an earlier stage
@@ -125,7 +125,7 @@ class FailureSuite extends SparkFunSuite with LocalSparkContext {
sc.parallelize(1 to 10, 2).map(x => (x, a)).partitionBy(new HashPartitioner(3)).count()
}
assert(thrown1.getClass === classOf[SparkException])
- assert(thrown1.getMessage.contains("NotSerializableException") ||
+ assert(thrown1.getMessage.contains("NotSerializableException") ||
thrown1.getCause.getClass === classOf[NotSerializableException])
// Non-serializable closure in foreach function
@@ -133,7 +133,7 @@ class FailureSuite extends SparkFunSuite with LocalSparkContext {
sc.parallelize(1 to 10, 2).foreach(x => println(a))
}
assert(thrown2.getClass === classOf[SparkException])
- assert(thrown2.getMessage.contains("NotSerializableException") ||
+ assert(thrown2.getMessage.contains("NotSerializableException") ||
thrown2.getCause.getClass === classOf[NotSerializableException])
FailureSuiteState.clear()
diff --git a/core/src/test/scala/org/apache/spark/ImplicitOrderingSuite.scala b/core/src/test/scala/org/apache/spark/ImplicitOrderingSuite.scala
index e47173f8a8..4399f25626 100644
--- a/core/src/test/scala/org/apache/spark/ImplicitOrderingSuite.scala
+++ b/core/src/test/scala/org/apache/spark/ImplicitOrderingSuite.scala
@@ -27,11 +27,11 @@ class ImplicitOrderingSuite extends SparkFunSuite with LocalSparkContext {
// These RDD methods are in the companion object so that the unserializable ScalaTest Engine
// won't be reachable from the closure object
-
+
// Infer orderings after basic maps to particular types
val basicMapExpectations = ImplicitOrderingSuite.basicMapExpectations(rdd)
basicMapExpectations.map({case (met, explain) => assert(met, explain)})
-
+
// Infer orderings for other RDD methods
val otherRDDMethodExpectations = ImplicitOrderingSuite.otherRDDMethodExpectations(rdd)
otherRDDMethodExpectations.map({case (met, explain) => assert(met, explain)})
@@ -48,30 +48,30 @@ private object ImplicitOrderingSuite {
class OrderedClass extends Ordered[OrderedClass] {
override def compare(o: OrderedClass): Int = throw new UnsupportedOperationException
}
-
+
def basicMapExpectations(rdd: RDD[Int]): List[(Boolean, String)] = {
- List((rdd.map(x => (x, x)).keyOrdering.isDefined,
+ List((rdd.map(x => (x, x)).keyOrdering.isDefined,
"rdd.map(x => (x, x)).keyOrdering.isDefined"),
- (rdd.map(x => (1, x)).keyOrdering.isDefined,
+ (rdd.map(x => (1, x)).keyOrdering.isDefined,
"rdd.map(x => (1, x)).keyOrdering.isDefined"),
- (rdd.map(x => (x.toString, x)).keyOrdering.isDefined,
+ (rdd.map(x => (x.toString, x)).keyOrdering.isDefined,
"rdd.map(x => (x.toString, x)).keyOrdering.isDefined"),
- (rdd.map(x => (null, x)).keyOrdering.isDefined,
+ (rdd.map(x => (null, x)).keyOrdering.isDefined,
"rdd.map(x => (null, x)).keyOrdering.isDefined"),
- (rdd.map(x => (new NonOrderedClass, x)).keyOrdering.isEmpty,
+ (rdd.map(x => (new NonOrderedClass, x)).keyOrdering.isEmpty,
"rdd.map(x => (new NonOrderedClass, x)).keyOrdering.isEmpty"),
- (rdd.map(x => (new ComparableClass, x)).keyOrdering.isDefined,
+ (rdd.map(x => (new ComparableClass, x)).keyOrdering.isDefined,
"rdd.map(x => (new ComparableClass, x)).keyOrdering.isDefined"),
- (rdd.map(x => (new OrderedClass, x)).keyOrdering.isDefined,
+ (rdd.map(x => (new OrderedClass, x)).keyOrdering.isDefined,
"rdd.map(x => (new OrderedClass, x)).keyOrdering.isDefined"))
}
-
+
def otherRDDMethodExpectations(rdd: RDD[Int]): List[(Boolean, String)] = {
- List((rdd.groupBy(x => x).keyOrdering.isDefined,
+ List((rdd.groupBy(x => x).keyOrdering.isDefined,
"rdd.groupBy(x => x).keyOrdering.isDefined"),
- (rdd.groupBy(x => new NonOrderedClass).keyOrdering.isEmpty,
+ (rdd.groupBy(x => new NonOrderedClass).keyOrdering.isEmpty,
"rdd.groupBy(x => new NonOrderedClass).keyOrdering.isEmpty"),
- (rdd.groupBy(x => new ComparableClass).keyOrdering.isDefined,
+ (rdd.groupBy(x => new ComparableClass).keyOrdering.isDefined,
"rdd.groupBy(x => new ComparableClass).keyOrdering.isDefined"),
(rdd.groupBy(x => new OrderedClass).keyOrdering.isDefined,
"rdd.groupBy(x => new OrderedClass).keyOrdering.isDefined"),
diff --git a/core/src/test/scala/org/apache/spark/SparkContextSuite.scala b/core/src/test/scala/org/apache/spark/SparkContextSuite.scala
index 93426822f7..6838b35ab4 100644
--- a/core/src/test/scala/org/apache/spark/SparkContextSuite.scala
+++ b/core/src/test/scala/org/apache/spark/SparkContextSuite.scala
@@ -71,22 +71,22 @@ class SparkContextSuite extends SparkFunSuite with LocalSparkContext {
var sc2: SparkContext = null
SparkContext.clearActiveContext()
val conf = new SparkConf().setAppName("test").setMaster("local")
-
+
sc = SparkContext.getOrCreate(conf)
-
+
assert(sc.getConf.get("spark.app.name").equals("test"))
sc2 = SparkContext.getOrCreate(new SparkConf().setAppName("test2").setMaster("local"))
assert(sc2.getConf.get("spark.app.name").equals("test"))
assert(sc === sc2)
assert(sc eq sc2)
-
+
// Try creating second context to confirm that it's still possible, if desired
sc2 = new SparkContext(new SparkConf().setAppName("test3").setMaster("local")
.set("spark.driver.allowMultipleContexts", "true"))
-
+
sc2.stop()
}
-
+
test("BytesWritable implicit conversion is correct") {
// Regression test for SPARK-3121
val bytesWritable = new BytesWritable()
diff --git a/core/src/test/scala/org/apache/spark/rdd/JdbcRDDSuite.scala b/core/src/test/scala/org/apache/spark/rdd/JdbcRDDSuite.scala
index a8466ed8c1..08215a2baf 100644
--- a/core/src/test/scala/org/apache/spark/rdd/JdbcRDDSuite.scala
+++ b/core/src/test/scala/org/apache/spark/rdd/JdbcRDDSuite.scala
@@ -82,7 +82,7 @@ class JdbcRDDSuite extends SparkFunSuite with BeforeAndAfter with LocalSparkCont
assert(rdd.count === 100)
assert(rdd.reduce(_ + _) === 10100)
}
-
+
test("large id overflow") {
sc = new SparkContext("local", "test")
val rdd = new JdbcRDD(
diff --git a/core/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MemoryUtilsSuite.scala b/core/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MemoryUtilsSuite.scala
index d565132a06..e72285d03d 100644
--- a/core/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MemoryUtilsSuite.scala
+++ b/core/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MemoryUtilsSuite.scala
@@ -28,11 +28,11 @@ class MemoryUtilsSuite extends SparkFunSuite with MockitoSugar {
val sc = mock[SparkContext]
when(sc.conf).thenReturn(sparkConf)
-
+
// 384 > sc.executorMemory * 0.1 => 512 + 384 = 896
when(sc.executorMemory).thenReturn(512)
assert(MemoryUtils.calculateTotalMemory(sc) === 896)
-
+
// 384 < sc.executorMemory * 0.1 => 4096 + (4096 * 0.1) = 4505.6
when(sc.executorMemory).thenReturn(4096)
assert(MemoryUtils.calculateTotalMemory(sc) === 4505)
diff --git a/core/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MesosSchedulerBackendSuite.scala b/core/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MesosSchedulerBackendSuite.scala
index 6f4ff0814b..68df46a41d 100644
--- a/core/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MesosSchedulerBackendSuite.scala
+++ b/core/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MesosSchedulerBackendSuite.scala
@@ -79,11 +79,11 @@ class MesosSchedulerBackendSuite extends SparkFunSuite with LocalSparkContext wi
.set("spark.mesos.executor.docker.image", "spark/mock")
.set("spark.mesos.executor.docker.volumes", "/a,/b:/b,/c:/c:rw,/d:ro,/e:/e:ro")
.set("spark.mesos.executor.docker.portmaps", "80:8080,53:53:tcp")
-
+
val listenerBus = mock[LiveListenerBus]
listenerBus.post(
SparkListenerExecutorAdded(anyLong, "s1", new ExecutorInfo("host1", 2, Map.empty)))
-
+
val sc = mock[SparkContext]
when(sc.executorMemory).thenReturn(100)
when(sc.getSparkHome()).thenReturn(Option("/spark-home"))
diff --git a/core/src/test/scala/org/apache/spark/serializer/KryoSerializerSuite.scala b/core/src/test/scala/org/apache/spark/serializer/KryoSerializerSuite.scala
index c32fe232cc..23a1fdb0f5 100644
--- a/core/src/test/scala/org/apache/spark/serializer/KryoSerializerSuite.scala
+++ b/core/src/test/scala/org/apache/spark/serializer/KryoSerializerSuite.scala
@@ -36,7 +36,7 @@ class KryoSerializerSuite extends SparkFunSuite with SharedSparkContext {
test("SPARK-7392 configuration limits") {
val kryoBufferProperty = "spark.kryoserializer.buffer"
val kryoBufferMaxProperty = "spark.kryoserializer.buffer.max"
-
+
def newKryoInstance(
conf: SparkConf,
bufferSize: String = "64k",
@@ -46,7 +46,7 @@ class KryoSerializerSuite extends SparkFunSuite with SharedSparkContext {
kryoConf.set(kryoBufferMaxProperty, maxBufferSize)
new KryoSerializer(kryoConf).newInstance()
}
-
+
// test default values
newKryoInstance(conf, "64k", "64m")
// 2048m = 2097152k
@@ -69,7 +69,7 @@ class KryoSerializerSuite extends SparkFunSuite with SharedSparkContext {
// test configuration with mb is supported properly
newKryoInstance(conf, "8m", "9m")
}
-
+
test("basic types") {
val ser = new KryoSerializer(conf).newInstance()
def check[T: ClassTag](t: T) {
diff --git a/core/src/test/scala/org/apache/spark/serializer/ProactiveClosureSerializationSuite.scala b/core/src/test/scala/org/apache/spark/serializer/ProactiveClosureSerializationSuite.scala
index 77d66864f7..c657414e9e 100644
--- a/core/src/test/scala/org/apache/spark/serializer/ProactiveClosureSerializationSuite.scala
+++ b/core/src/test/scala/org/apache/spark/serializer/ProactiveClosureSerializationSuite.scala
@@ -23,7 +23,7 @@ import org.apache.spark.rdd.RDD
/* A trivial (but unserializable) container for trivial functions */
class UnserializableClass {
def op[T](x: T): String = x.toString
-
+
def pred[T](x: T): Boolean = x.toString.length % 2 == 0
}
@@ -45,7 +45,7 @@ class ProactiveClosureSerializationSuite extends SparkFunSuite with SharedSparkC
// iterating over a map from transformation names to functions that perform that
// transformation on a given RDD, creating one test case for each
- for (transformation <-
+ for (transformation <-
Map("map" -> xmap _,
"flatMap" -> xflatMap _,
"filter" -> xfilter _,
@@ -58,24 +58,24 @@ class ProactiveClosureSerializationSuite extends SparkFunSuite with SharedSparkC
val ex = intercept[SparkException] {
xf(data, uc)
}
- assert(ex.getMessage.contains("Task not serializable"),
+ assert(ex.getMessage.contains("Task not serializable"),
s"RDD.$name doesn't proactively throw NotSerializableException")
}
}
- private def xmap(x: RDD[String], uc: UnserializableClass): RDD[String] =
+ private def xmap(x: RDD[String], uc: UnserializableClass): RDD[String] =
x.map(y => uc.op(y))
- private def xflatMap(x: RDD[String], uc: UnserializableClass): RDD[String] =
+ private def xflatMap(x: RDD[String], uc: UnserializableClass): RDD[String] =
x.flatMap(y => Seq(uc.op(y)))
- private def xfilter(x: RDD[String], uc: UnserializableClass): RDD[String] =
+ private def xfilter(x: RDD[String], uc: UnserializableClass): RDD[String] =
x.filter(y => uc.pred(y))
- private def xmapPartitions(x: RDD[String], uc: UnserializableClass): RDD[String] =
+ private def xmapPartitions(x: RDD[String], uc: UnserializableClass): RDD[String] =
x.mapPartitions(_.map(y => uc.op(y)))
- private def xmapPartitionsWithIndex(x: RDD[String], uc: UnserializableClass): RDD[String] =
+ private def xmapPartitionsWithIndex(x: RDD[String], uc: UnserializableClass): RDD[String] =
x.mapPartitionsWithIndex((_, it) => it.map(y => uc.op(y)))
-
+
}
diff --git a/core/src/test/scala/org/apache/spark/util/ClosureCleanerSuite.scala b/core/src/test/scala/org/apache/spark/util/ClosureCleanerSuite.scala
index a97a842f43..70cd27b043 100644
--- a/core/src/test/scala/org/apache/spark/util/ClosureCleanerSuite.scala
+++ b/core/src/test/scala/org/apache/spark/util/ClosureCleanerSuite.scala
@@ -201,7 +201,7 @@ object TestObjectWithNestedReturns {
def run(): Int = {
withSpark(new SparkContext("local", "test")) { sc =>
val nums = sc.parallelize(Array(1, 2, 3, 4))
- nums.map {x =>
+ nums.map {x =>
// this return is fine since it will not transfer control outside the closure
def foo(): Int = { return 5; 1 }
foo()
diff --git a/core/src/test/scala/org/apache/spark/util/random/RandomSamplerSuite.scala b/core/src/test/scala/org/apache/spark/util/random/RandomSamplerSuite.scala
index 2f1e6a39f4..d6af0aebde 100644
--- a/core/src/test/scala/org/apache/spark/util/random/RandomSamplerSuite.scala
+++ b/core/src/test/scala/org/apache/spark/util/random/RandomSamplerSuite.scala
@@ -78,7 +78,7 @@ class RandomSamplerSuite extends SparkFunSuite with Matchers {
}
// Returns iterator over gap lengths between samples.
- // This function assumes input data is integers sampled from the sequence of
+ // This function assumes input data is integers sampled from the sequence of
// increasing integers: {0, 1, 2, ...}. This works because that is how I generate them,
// and the samplers preserve their input order
def gaps(data: Iterator[Int]): Iterator[Int] = {