diff options
author | Reynold Xin <reynoldx@gmail.com> | 2013-09-08 23:36:48 -0700 |
---|---|---|
committer | Reynold Xin <reynoldx@gmail.com> | 2013-09-08 23:36:48 -0700 |
commit | e9d4f44a7adfaf55d0fd312b81350638310c341d (patch) | |
tree | bb63e61bf926f801cd4ee32ec92e76570bd3c9ef | |
parent | 2447b1c4e6118a1f11901b36e8f9459d30fdc5c7 (diff) | |
parent | f9b7f58de20fed0447f7d77499ec89ad6a188a2d (diff) | |
download | spark-e9d4f44a7adfaf55d0fd312b81350638310c341d.tar.gz spark-e9d4f44a7adfaf55d0fd312b81350638310c341d.tar.bz2 spark-e9d4f44a7adfaf55d0fd312b81350638310c341d.zip |
Merge pull request #909 from mateiz/exec-id-fix
Fix an instance where full standalone mode executor IDs were passed to
removeExecutor instead of the shorter executor IDs used within Spark
(the full ID includes the application ID prefix, e.g. "appId/execId").
-rw-r--r-- | core/src/main/scala/org/apache/spark/scheduler/cluster/ClusterTaskSetManager.scala | 4 | ||||
-rw-r--r-- | core/src/main/scala/org/apache/spark/scheduler/cluster/SparkDeploySchedulerBackend.scala | 10 |
2 files changed, 7 insertions, 7 deletions
diff --git a/core/src/main/scala/org/apache/spark/scheduler/cluster/ClusterTaskSetManager.scala b/core/src/main/scala/org/apache/spark/scheduler/cluster/ClusterTaskSetManager.scala
index 1b31c8c57e..0ac3d7bcfd 100644
--- a/core/src/main/scala/org/apache/spark/scheduler/cluster/ClusterTaskSetManager.scala
+++ b/core/src/main/scala/org/apache/spark/scheduler/cluster/ClusterTaskSetManager.scala
@@ -335,7 +335,7 @@ private[spark] class ClusterTaskSetManager(
   }
 
   /**
-   * Respond to an offer of a single slave from the scheduler by finding a task
+   * Respond to an offer of a single executor from the scheduler by finding a task
    */
   override def resourceOffer(
       execId: String,
@@ -358,7 +358,7 @@ private[spark] class ClusterTaskSetManager(
         val task = tasks(index)
         val taskId = sched.newTaskId()
         // Figure out whether this should count as a preferred launch
-        logInfo("Starting task %s:%d as TID %s on slave %s: %s (%s)".format(
+        logInfo("Starting task %s:%d as TID %s on executor %s: %s (%s)".format(
          taskSet.id, index, taskId, execId, host, taskLocality))
         // Do various bookkeeping
         copiesRunning(index) += 1
diff --git a/core/src/main/scala/org/apache/spark/scheduler/cluster/SparkDeploySchedulerBackend.scala b/core/src/main/scala/org/apache/spark/scheduler/cluster/SparkDeploySchedulerBackend.scala
index 9a2cf20de7..9c49768c0c 100644
--- a/core/src/main/scala/org/apache/spark/scheduler/cluster/SparkDeploySchedulerBackend.scala
+++ b/core/src/main/scala/org/apache/spark/scheduler/cluster/SparkDeploySchedulerBackend.scala
@@ -76,17 +76,17 @@ private[spark] class SparkDeploySchedulerBackend(
     }
   }
 
-  override def executorAdded(executorId: String, workerId: String, hostPort: String, cores: Int, memory: Int) {
+  override def executorAdded(fullId: String, workerId: String, hostPort: String, cores: Int, memory: Int) {
     logInfo("Granted executor ID %s on hostPort %s with %d cores, %s RAM".format(
-      executorId, hostPort, cores, Utils.megabytesToString(memory)))
+      fullId, hostPort, cores, Utils.megabytesToString(memory)))
   }
 
-  override def executorRemoved(executorId: String, message: String, exitStatus: Option[Int]) {
+  override def executorRemoved(fullId: String, message: String, exitStatus: Option[Int]) {
     val reason: ExecutorLossReason = exitStatus match {
       case Some(code) => ExecutorExited(code)
       case None => SlaveLost(message)
     }
-    logInfo("Executor %s removed: %s".format(executorId, message))
-    removeExecutor(executorId, reason.toString)
+    logInfo("Executor %s removed: %s".format(fullId, message))
+    removeExecutor(fullId.split("/")(1), reason.toString)
   }
 }