diff options
author | Matei Zaharia <matei@eecs.berkeley.edu> | 2013-02-26 12:11:03 -0800 |
---|---|---|
committer | Matei Zaharia <matei@eecs.berkeley.edu> | 2013-02-26 12:11:03 -0800 |
commit | ece3edfffa02f90a71569961b91bf44041f21afe (patch) | |
tree | 8a888c3fbe59a0156d393c3d9c60924e98742c5e | |
parent | 73697e289174583fe7b0e829eb8387d1a1aa8721 (diff) | |
download | spark-ece3edfffa02f90a71569961b91bf44041f21afe.tar.gz spark-ece3edfffa02f90a71569961b91bf44041f21afe.tar.bz2 spark-ece3edfffa02f90a71569961b91bf44041f21afe.zip |
Fix a problem with no hosts being counted as alive in the first job
-rw-r--r-- | core/src/main/scala/spark/scheduler/cluster/ClusterScheduler.scala | 6 |
1 file changed, 3 insertions, 3 deletions
diff --git a/core/src/main/scala/spark/scheduler/cluster/ClusterScheduler.scala b/core/src/main/scala/spark/scheduler/cluster/ClusterScheduler.scala index d9c2f9517b..26fdef101b 100644 --- a/core/src/main/scala/spark/scheduler/cluster/ClusterScheduler.scala +++ b/core/src/main/scala/spark/scheduler/cluster/ClusterScheduler.scala @@ -140,6 +140,9 @@ private[spark] class ClusterScheduler(val sc: SparkContext) // Mark each slave as alive and remember its hostname for (o <- offers) { executorIdToHost(o.executorId) = o.hostname + if (!executorsByHost.contains(o.hostname)) { + executorsByHost(o.hostname) = new HashSet() + } } // Build a list of tasks to assign to each slave val tasks = offers.map(o => new ArrayBuffer[TaskDescription](o.cores)) @@ -159,9 +162,6 @@ private[spark] class ClusterScheduler(val sc: SparkContext) taskSetTaskIds(manager.taskSet.id) += tid taskIdToExecutorId(tid) = execId activeExecutorIds += execId - if (!executorsByHost.contains(host)) { - executorsByHost(host) = new HashSet() - } executorsByHost(host) += execId availableCpus(i) -= 1 launchedTask = true |