diff options
author | Matei Zaharia <matei@eecs.berkeley.edu> | 2013-09-08 18:27:50 -0700 |
---|---|---|
committer | Matei Zaharia <matei@eecs.berkeley.edu> | 2013-09-08 18:27:50 -0700 |
commit | f9b7f58de20fed0447f7d77499ec89ad6a188a2d (patch) | |
tree | b21866d3fad7d1c89b102f53e8ec6b98f9644f0d | |
parent | 0b957997ada10fcfa96e42780482d986bf7e4885 (diff) | |
download | spark-f9b7f58de20fed0447f7d77499ec89ad6a188a2d.tar.gz spark-f9b7f58de20fed0447f7d77499ec89ad6a188a2d.tar.bz2 spark-f9b7f58de20fed0447f7d77499ec89ad6a188a2d.zip |
Fix an instance where full standalone mode executor IDs were passed to
StandaloneSchedulerBackend instead of the smaller IDs used within Spark
(that lack the application name).
This was reported by ClearStory in
https://github.com/clearstorydata/spark/pull/9.
Also fixed some log messages that said "slave" instead of "executor".
-rw-r--r-- | core/src/main/scala/org/apache/spark/scheduler/cluster/ClusterTaskSetManager.scala | 4 | ||||
-rw-r--r-- | core/src/main/scala/org/apache/spark/scheduler/cluster/SparkDeploySchedulerBackend.scala | 10 |
2 files changed, 7 insertions, 7 deletions
diff --git a/core/src/main/scala/org/apache/spark/scheduler/cluster/ClusterTaskSetManager.scala b/core/src/main/scala/org/apache/spark/scheduler/cluster/ClusterTaskSetManager.scala index 1b31c8c57e..0ac3d7bcfd 100644 --- a/core/src/main/scala/org/apache/spark/scheduler/cluster/ClusterTaskSetManager.scala +++ b/core/src/main/scala/org/apache/spark/scheduler/cluster/ClusterTaskSetManager.scala @@ -335,7 +335,7 @@ private[spark] class ClusterTaskSetManager( } /** - * Respond to an offer of a single slave from the scheduler by finding a task + * Respond to an offer of a single executor from the scheduler by finding a task */ override def resourceOffer( execId: String, @@ -358,7 +358,7 @@ private[spark] class ClusterTaskSetManager( val task = tasks(index) val taskId = sched.newTaskId() // Figure out whether this should count as a preferred launch - logInfo("Starting task %s:%d as TID %s on slave %s: %s (%s)".format( + logInfo("Starting task %s:%d as TID %s on executor %s: %s (%s)".format( taskSet.id, index, taskId, execId, host, taskLocality)) // Do various bookkeeping copiesRunning(index) += 1 diff --git a/core/src/main/scala/org/apache/spark/scheduler/cluster/SparkDeploySchedulerBackend.scala b/core/src/main/scala/org/apache/spark/scheduler/cluster/SparkDeploySchedulerBackend.scala index 9a2cf20de7..9c49768c0c 100644 --- a/core/src/main/scala/org/apache/spark/scheduler/cluster/SparkDeploySchedulerBackend.scala +++ b/core/src/main/scala/org/apache/spark/scheduler/cluster/SparkDeploySchedulerBackend.scala @@ -76,17 +76,17 @@ private[spark] class SparkDeploySchedulerBackend( } } - override def executorAdded(executorId: String, workerId: String, hostPort: String, cores: Int, memory: Int) { + override def executorAdded(fullId: String, workerId: String, hostPort: String, cores: Int, memory: Int) { logInfo("Granted executor ID %s on hostPort %s with %d cores, %s RAM".format( - executorId, hostPort, cores, Utils.megabytesToString(memory))) + fullId, hostPort, cores, Utils.megabytesToString(memory))) } - override def executorRemoved(executorId: String, message: String, exitStatus: Option[Int]) { + override def executorRemoved(fullId: String, message: String, exitStatus: Option[Int]) { val reason: ExecutorLossReason = exitStatus match { case Some(code) => ExecutorExited(code) case None => SlaveLost(message) } - logInfo("Executor %s removed: %s".format(executorId, message)) - removeExecutor(executorId, reason.toString) + logInfo("Executor %s removed: %s".format(fullId, message)) + removeExecutor(fullId.split("/")(1), reason.toString) } } |