author    Josh Rosen <joshrosen@databricks.com>  2016-11-29 16:27:25 -0800
committer Josh Rosen <joshrosen@databricks.com>  2016-11-29 16:27:25 -0800
commit    9a02f6821265ff67ba3f7b095cd1afaebd25a898 (patch)
tree      f5e74a63361b88bd4f91844347978a4e1c24ca00 /core/src/main
parent    3600635215f25d695c9be5931b5185fec8a35527 (diff)
[SPARK-18553][CORE] Fix leak of TaskSetManager following executor loss
## What changes were proposed in this pull request?

_This is the master branch version of #15986; the original description follows:_

This patch fixes a critical resource leak in the TaskScheduler which could cause RDDs and ShuffleDependencies to be kept alive indefinitely if an executor with running tasks is permanently lost and the associated stage fails.

This problem was originally identified by analyzing the heap dump of a driver belonging to a cluster that had run out of shuffle space. This dump contained several `ShuffleDependency` instances that were retained by `TaskSetManager`s inside the scheduler but were not otherwise referenced. Each of these `TaskSetManager`s was considered a "zombie" but had no running tasks and therefore should have been cleaned up. However, these zombie task sets were still referenced by the `TaskSchedulerImpl.taskIdToTaskSetManager` map.

Entries are added to the `taskIdToTaskSetManager` map when tasks are launched and are removed inside of `TaskScheduler.statusUpdate()`, which is invoked by the scheduler backend while processing `StatusUpdate` messages from executors. The problem with this design is that a completely dead executor will never send a `StatusUpdate`. There is [some code](https://github.com/apache/spark/blob/072f4c518cdc57d705beec6bcc3113d9a6740819/core/src/main/scala/org/apache/spark/scheduler/TaskSchedulerImpl.scala#L338) in `statusUpdate` which handles tasks that exit with the `TaskState.LOST` state (which is supposed to correspond to a task failure triggered by total executor loss), but this state only seems to be used in Mesos fine-grained mode. There doesn't seem to be any code which performs per-task state cleanup for tasks that were running on an executor that completely disappears without sending any sort of final death message. The `executorLost` and [`removeExecutor`](https://github.com/apache/spark/blob/072f4c518cdc57d705beec6bcc3113d9a6740819/core/src/main/scala/org/apache/spark/scheduler/TaskSchedulerImpl.scala#L527) methods don't appear to perform any cleanup of the `taskId -> *` mappings, which causes the leaks observed here.

This patch's fix is to maintain an `executorId -> running task ids` mapping so that these `taskId -> *` maps can be properly cleaned up following an executor loss. There are some potential corner-case interactions that I'm concerned about here, especially some details in [the comment](https://github.com/apache/spark/blob/072f4c518cdc57d705beec6bcc3113d9a6740819/core/src/main/scala/org/apache/spark/scheduler/TaskSchedulerImpl.scala#L523) in `removeExecutor`, so I'd appreciate a very careful review of these changes.

## How was this patch tested?

I added a new unit test to `TaskSchedulerImplSuite`.

/cc kayousterhout and markhamstra, who reviewed #15986.

Author: Josh Rosen <joshrosen@databricks.com>

Closes #16045 from JoshRosen/fix-leak-following-total-executor-loss-master.
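To make the bookkeeping change easier to follow, here is a minimal, self-contained Scala sketch of the pattern the patch adopts: track which task IDs are running on each executor so that, when an executor disappears without ever sending a final status update, every `taskId -> *` entry it owned can still be dropped. The class and member names (`TaskBookkeeping`, `taskLaunched`, `taskIdToTaskSetName`, and so on) are illustrative stand-ins and not Spark's actual `TaskSchedulerImpl` API; see the diff below for the real change.

```scala
import scala.collection.mutable.{HashMap, HashSet}

// Hypothetical, stripped-down bookkeeping (NOT Spark's real TaskSchedulerImpl):
// it only demonstrates how an executorId -> running-task-ids map lets us drop
// every taskId -> * entry for an executor that dies without sending updates.
class TaskBookkeeping {
  private val taskIdToExecutorId = new HashMap[Long, String]
  // Stand-in for taskIdToTaskSetManager; a String is enough for the sketch.
  private val taskIdToTaskSetName = new HashMap[Long, String]
  private val executorIdToRunningTaskIds = new HashMap[String, HashSet[Long]]

  // Record a newly launched task under its executor.
  def taskLaunched(tid: Long, execId: String, taskSetName: String): Unit = {
    taskIdToExecutorId(tid) = execId
    taskIdToTaskSetName(tid) = taskSetName
    executorIdToRunningTaskIds.getOrElseUpdate(execId, new HashSet[Long]) += tid
  }

  // Normal path: a status update for a finished task cleans up that one task.
  def taskFinished(tid: Long): Unit = cleanupTaskState(tid)

  // Executor-loss path: no more status updates will arrive from this executor,
  // so clean up every task that was still running on it.
  def executorLost(execId: String): Unit = {
    executorIdToRunningTaskIds.remove(execId).foreach { taskIds =>
      taskIds.foreach(cleanupTaskState)
    }
  }

  // Drop all per-task state; mirrors the cleanupTaskState helper in the patch.
  private def cleanupTaskState(tid: Long): Unit = {
    taskIdToTaskSetName.remove(tid)
    taskIdToExecutorId.remove(tid).foreach { execId =>
      executorIdToRunningTaskIds.get(execId).foreach(_.remove(tid))
    }
  }

  def runningTasksByExecutors: Map[String, Int] =
    executorIdToRunningTaskIds.map { case (execId, tids) => execId -> tids.size }.toMap
}

object TaskBookkeepingDemo extends App {
  val b = new TaskBookkeeping
  b.taskLaunched(0L, "exec-1", "TaskSet 0.0")
  b.taskLaunched(1L, "exec-1", "TaskSet 0.0")
  b.executorLost("exec-1") // both taskId -> * entries are dropped here
  println(b.runningTasksByExecutors) // Map() -- nothing leaked
}
```

Run as a demo, this prints an empty map: after `executorLost`, no `taskId -> *` entries survive, which is exactly the leak the actual patch closes inside `removeExecutor`.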
Diffstat (limited to 'core/src/main')
-rw-r--r--  core/src/main/scala/org/apache/spark/scheduler/TaskSchedulerImpl.scala | 82
1 file changed, 49 insertions(+), 33 deletions(-)
diff --git a/core/src/main/scala/org/apache/spark/scheduler/TaskSchedulerImpl.scala b/core/src/main/scala/org/apache/spark/scheduler/TaskSchedulerImpl.scala
index 3e3f1ad031..67446da0a8 100644
--- a/core/src/main/scala/org/apache/spark/scheduler/TaskSchedulerImpl.scala
+++ b/core/src/main/scala/org/apache/spark/scheduler/TaskSchedulerImpl.scala
@@ -93,10 +93,12 @@ private[spark] class TaskSchedulerImpl(
// Incrementing task IDs
val nextTaskId = new AtomicLong(0)
- // Number of tasks running on each executor
- private val executorIdToTaskCount = new HashMap[String, Int]
+ // IDs of the tasks running on each executor
+ private val executorIdToRunningTaskIds = new HashMap[String, HashSet[Long]]
- def runningTasksByExecutors(): Map[String, Int] = executorIdToTaskCount.toMap
+ def runningTasksByExecutors(): Map[String, Int] = {
+ executorIdToRunningTaskIds.toMap.mapValues(_.size)
+ }
// The set of executors we have on each host; this is used to compute hostsAlive, which
// in turn is used to decide when we can attain data locality on a given host
@@ -264,7 +266,7 @@ private[spark] class TaskSchedulerImpl(
val tid = task.taskId
taskIdToTaskSetManager(tid) = taskSet
taskIdToExecutorId(tid) = execId
- executorIdToTaskCount(execId) += 1
+ executorIdToRunningTaskIds(execId).add(tid)
availableCpus(i) -= CPUS_PER_TASK
assert(availableCpus(i) >= 0)
launchedTask = true
@@ -294,11 +296,11 @@ private[spark] class TaskSchedulerImpl(
if (!hostToExecutors.contains(o.host)) {
hostToExecutors(o.host) = new HashSet[String]()
}
- if (!executorIdToTaskCount.contains(o.executorId)) {
+ if (!executorIdToRunningTaskIds.contains(o.executorId)) {
hostToExecutors(o.host) += o.executorId
executorAdded(o.executorId, o.host)
executorIdToHost(o.executorId) = o.host
- executorIdToTaskCount(o.executorId) = 0
+ executorIdToRunningTaskIds(o.executorId) = HashSet[Long]()
newExecAvail = true
}
for (rack <- getRackForHost(o.host)) {
@@ -349,38 +351,34 @@ private[spark] class TaskSchedulerImpl(
var reason: Option[ExecutorLossReason] = None
synchronized {
try {
- if (state == TaskState.LOST && taskIdToExecutorId.contains(tid)) {
- // We lost this entire executor, so remember that it's gone
- val execId = taskIdToExecutorId(tid)
-
- if (executorIdToTaskCount.contains(execId)) {
- reason = Some(
- SlaveLost(s"Task $tid was lost, so marking the executor as lost as well."))
- removeExecutor(execId, reason.get)
- failedExecutor = Some(execId)
- }
- }
taskIdToTaskSetManager.get(tid) match {
case Some(taskSet) =>
- if (TaskState.isFinished(state)) {
- taskIdToTaskSetManager.remove(tid)
- taskIdToExecutorId.remove(tid).foreach { execId =>
- if (executorIdToTaskCount.contains(execId)) {
- executorIdToTaskCount(execId) -= 1
- }
+ if (state == TaskState.LOST) {
+ // TaskState.LOST is only used by the deprecated Mesos fine-grained scheduling mode,
+ // where each executor corresponds to a single task, so mark the executor as failed.
+ val execId = taskIdToExecutorId.getOrElse(tid, throw new IllegalStateException(
+ "taskIdToTaskSetManager.contains(tid) <=> taskIdToExecutorId.contains(tid)"))
+ if (executorIdToRunningTaskIds.contains(execId)) {
+ reason = Some(
+ SlaveLost(s"Task $tid was lost, so marking the executor as lost as well."))
+ removeExecutor(execId, reason.get)
+ failedExecutor = Some(execId)
}
}
- if (state == TaskState.FINISHED) {
- taskSet.removeRunningTask(tid)
- taskResultGetter.enqueueSuccessfulTask(taskSet, tid, serializedData)
- } else if (Set(TaskState.FAILED, TaskState.KILLED, TaskState.LOST).contains(state)) {
+ if (TaskState.isFinished(state)) {
+ cleanupTaskState(tid)
taskSet.removeRunningTask(tid)
- taskResultGetter.enqueueFailedTask(taskSet, tid, state, serializedData)
+ if (state == TaskState.FINISHED) {
+ taskResultGetter.enqueueSuccessfulTask(taskSet, tid, serializedData)
+ } else if (Set(TaskState.FAILED, TaskState.KILLED, TaskState.LOST).contains(state)) {
+ taskResultGetter.enqueueFailedTask(taskSet, tid, state, serializedData)
+ }
}
case None =>
logError(
("Ignoring update with state %s for TID %s because its task set is gone (this is " +
- "likely the result of receiving duplicate task finished status updates)")
+ "likely the result of receiving duplicate task finished status updates) or its " +
+ "executor has been marked as failed.")
.format(state, tid))
}
} catch {
@@ -491,7 +489,7 @@ private[spark] class TaskSchedulerImpl(
var failedExecutor: Option[String] = None
synchronized {
- if (executorIdToTaskCount.contains(executorId)) {
+ if (executorIdToRunningTaskIds.contains(executorId)) {
val hostPort = executorIdToHost(executorId)
logExecutorLoss(executorId, hostPort, reason)
removeExecutor(executorId, reason)
@@ -534,12 +532,30 @@ private[spark] class TaskSchedulerImpl(
}
/**
+ * Cleans up the TaskScheduler's state for tracking the given task.
+ */
+ private def cleanupTaskState(tid: Long): Unit = {
+ taskIdToTaskSetManager.remove(tid)
+ taskIdToExecutorId.remove(tid).foreach { executorId =>
+ executorIdToRunningTaskIds.get(executorId).foreach { _.remove(tid) }
+ }
+ }
+
+ /**
* Remove an executor from all our data structures and mark it as lost. If the executor's loss
* reason is not yet known, do not yet remove its association with its host nor update the status
* of any running tasks, since the loss reason defines whether we'll fail those tasks.
*/
private def removeExecutor(executorId: String, reason: ExecutorLossReason) {
- executorIdToTaskCount -= executorId
+ // The tasks on the lost executor may not send any more status updates (because the executor
+ // has been lost), so they should be cleaned up here.
+ executorIdToRunningTaskIds.remove(executorId).foreach { taskIds =>
+ logDebug("Cleaning up TaskScheduler state for tasks " +
+ s"${taskIds.mkString("[", ",", "]")} on failed executor $executorId")
+ // We do not notify the TaskSetManager of the task failures because that will
+ // happen below in the rootPool.executorLost() call.
+ taskIds.foreach(cleanupTaskState)
+ }
val host = executorIdToHost(executorId)
val execs = hostToExecutors.getOrElse(host, new HashSet)
@@ -577,11 +593,11 @@ private[spark] class TaskSchedulerImpl(
}
def isExecutorAlive(execId: String): Boolean = synchronized {
- executorIdToTaskCount.contains(execId)
+ executorIdToRunningTaskIds.contains(execId)
}
def isExecutorBusy(execId: String): Boolean = synchronized {
- executorIdToTaskCount.getOrElse(execId, -1) > 0
+ executorIdToRunningTaskIds.get(execId).exists(_.nonEmpty)
}
// By default, rack is unknown