author    Jacek Laskowski <jacek@japila.pl>    2015-12-22 10:47:10 -0800
committer Reynold Xin <rxin@databricks.com>    2015-12-22 10:47:10 -0800
commit    7c970f9093bda0a789d7d6e43c72a6d317fc3723
tree      39de68489ae116cdb36535de33c759b7ceb3425e
parent    b5ce84a1bb8be26d67a2e44011a0c36375de399b
Minor corrections, i.e. typo fixes and follow-ups to deprecated APIs

Author: Jacek Laskowski <jacek@japila.pl>

Closes #10432 from jaceklaskowski/minor-corrections.
Diffstat (limited to 'core/src')
 core/src/main/scala/org/apache/spark/SparkContext.scala                                    | 2 +-
 core/src/main/scala/org/apache/spark/executor/CoarseGrainedExecutorBackend.scala           | 2 +-
 core/src/main/scala/org/apache/spark/rpc/netty/RpcEndpointAddress.scala                    | 2 +-
 core/src/main/scala/org/apache/spark/scheduler/TaskSetManager.scala                        | 4 ++--
 core/src/main/scala/org/apache/spark/scheduler/cluster/CoarseGrainedSchedulerBackend.scala | 2 +-
 5 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/core/src/main/scala/org/apache/spark/SparkContext.scala b/core/src/main/scala/org/apache/spark/SparkContext.scala
index c4541aa376..67230f4207 100644
--- a/core/src/main/scala/org/apache/spark/SparkContext.scala
+++ b/core/src/main/scala/org/apache/spark/SparkContext.scala
@@ -2095,7 +2095,7 @@ class SparkContext(config: SparkConf) extends Logging with ExecutorAllocationCli
/** Default min number of partitions for Hadoop RDDs when not given by user */
@deprecated("use defaultMinPartitions", "1.0.0")
- def defaultMinSplits: Int = math.min(defaultParallelism, 2)
+ def defaultMinSplits: Int = defaultMinPartitions
/**
* Default min number of partitions for Hadoop RDDs when not given by user
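The change above follows the method's own @deprecated advice: instead of duplicating the body of defaultMinPartitions, the deprecated defaultMinSplits now delegates to it, so the two can never drift apart. A minimal, self-contained sketch of this deprecation-by-delegation pattern (the stand-in parallelism value is illustrative, not Spark's):

object DelegationSketch {
  // Stand-in for the cluster-derived value; hypothetical constant.
  def defaultParallelism: Int = 8

  /** The current, preferred API. */
  def defaultMinPartitions: Int = math.min(defaultParallelism, 2)

  /** Deprecated alias: delegating keeps both names in agreement. */
  @deprecated("use defaultMinPartitions", "1.0.0")
  def defaultMinSplits: Int = defaultMinPartitions

  def main(args: Array[String]): Unit =
    println(defaultMinSplits == defaultMinPartitions) // true by construction
}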
diff --git a/core/src/main/scala/org/apache/spark/executor/CoarseGrainedExecutorBackend.scala b/core/src/main/scala/org/apache/spark/executor/CoarseGrainedExecutorBackend.scala
index c2ebf30596..77c88baa9b 100644
--- a/core/src/main/scala/org/apache/spark/executor/CoarseGrainedExecutorBackend.scala
+++ b/core/src/main/scala/org/apache/spark/executor/CoarseGrainedExecutorBackend.scala
@@ -257,7 +257,7 @@ private[spark] object CoarseGrainedExecutorBackend extends Logging {
// scalastyle:off println
System.err.println(
"""
- |"Usage: CoarseGrainedExecutorBackend [options]
+ |Usage: CoarseGrainedExecutorBackend [options]
|
| Options are:
| --driver-url <driverUrl>
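The stray quote removed above sat inside a triple-quoted string, where " needs no escaping and so would have been printed verbatim in the usage banner. A minimal sketch of the stripMargin idiom this code appears to use (option list trimmed; assumes the real string ends with .stripMargin):

object UsageSketch {
  def main(args: Array[String]): Unit = {
    // '|' marks the left margin; stripMargin strips everything before it.
    System.err.println(
      """
        |Usage: CoarseGrainedExecutorBackend [options]
        |
        | Options are:
        |   --driver-url <driverUrl>
        |""".stripMargin)
  }
}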
diff --git a/core/src/main/scala/org/apache/spark/rpc/netty/RpcEndpointAddress.scala b/core/src/main/scala/org/apache/spark/rpc/netty/RpcEndpointAddress.scala
index d2e94f943a..cd6f00cc08 100644
--- a/core/src/main/scala/org/apache/spark/rpc/netty/RpcEndpointAddress.scala
+++ b/core/src/main/scala/org/apache/spark/rpc/netty/RpcEndpointAddress.scala
@@ -26,7 +26,7 @@ import org.apache.spark.rpc.RpcAddress
* The `rpcAddress` may be null, in which case the endpoint is registered via a client-only
* connection and can only be reached via the client that sent the endpoint reference.
*
- * @param rpcAddress The socket address of the endpint.
+ * @param rpcAddress The socket address of the endpoint.
* @param name Name of the endpoint.
*/
private[netty] case class RpcEndpointAddress(val rpcAddress: RpcAddress, val name: String) {
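The surrounding scaladoc notes that rpcAddress may be null for endpoints registered over a client-only connection. A simplified standalone sketch of that convention (the types and the isClientOnly helper are illustrative, not Spark's API):

final case class RpcAddress(host: String, port: Int)

final case class RpcEndpointAddress(rpcAddress: RpcAddress, name: String) {
  /** A null address signals a client-only endpoint, reachable solely through
    * the connection of the client that sent the endpoint reference. */
  def isClientOnly: Boolean = rpcAddress == null
}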
diff --git a/core/src/main/scala/org/apache/spark/scheduler/TaskSetManager.scala b/core/src/main/scala/org/apache/spark/scheduler/TaskSetManager.scala
index a02f3017cb..380301f1c9 100644
--- a/core/src/main/scala/org/apache/spark/scheduler/TaskSetManager.scala
+++ b/core/src/main/scala/org/apache/spark/scheduler/TaskSetManager.scala
@@ -608,7 +608,7 @@ private[spark] class TaskSetManager(
}
/**
- * Marks the task as successful and notifies the DAGScheduler that a task has ended.
+ * Marks a task as successful and notifies the DAGScheduler that the task has ended.
*/
def handleSuccessfulTask(tid: Long, result: DirectTaskResult[_]): Unit = {
val info = taskInfos(tid)
@@ -705,7 +705,7 @@ private[spark] class TaskSetManager(
ef.exception
case e: ExecutorLostFailure if !e.exitCausedByApp =>
- logInfo(s"Task $tid failed because while it was being computed, its executor" +
+ logInfo(s"Task $tid failed because while it was being computed, its executor " +
"exited for a reason unrelated to the task. Not counting this failure towards the " +
"maximum number of failures for the task.")
None
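The second hunk above fixes a classic bug in multi-line string concatenation: without a space at the seam, the joined literals read "...its executorexited...". A standalone illustration (names and messages are hypothetical, not Spark's):

object ConcatSketch {
  def main(args: Array[String]): Unit = {
    val tid = 7L
    // Missing seam space: words run together in the log output.
    val bad  = s"Task $tid failed because its executor" + "exited unexpectedly."
    // A trailing space before the concatenation restores the sentence.
    val good = s"Task $tid failed because its executor " + "exited unexpectedly."
    println(bad)  // ...executorexited...
    println(good) // ...executor exited...
  }
}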
diff --git a/core/src/main/scala/org/apache/spark/scheduler/cluster/CoarseGrainedSchedulerBackend.scala b/core/src/main/scala/org/apache/spark/scheduler/cluster/CoarseGrainedSchedulerBackend.scala
index 2279e8cad7..f222007a38 100644
--- a/core/src/main/scala/org/apache/spark/scheduler/cluster/CoarseGrainedSchedulerBackend.scala
+++ b/core/src/main/scala/org/apache/spark/scheduler/cluster/CoarseGrainedSchedulerBackend.scala
@@ -30,7 +30,7 @@ import org.apache.spark.scheduler.cluster.CoarseGrainedSchedulerBackend.ENDPOINT
import org.apache.spark.util.{ThreadUtils, SerializableBuffer, AkkaUtils, Utils}
/**
- * A scheduler backend that waits for coarse grained executors to connect to it through Akka.
+ * A scheduler backend that waits for coarse-grained executors to connect.
* This backend holds onto each executor for the duration of the Spark job rather than relinquishing
* executors whenever a task is done and asking the scheduler to launch a new executor for
* each new task. Executors may be launched in a variety of ways, such as Mesos tasks for the