about | summary | refs | log | tree | commit | diff
path: root/yarn/stable
diff options
context:
space:
mode:
author: Sandy Ryza <sandy@cloudera.com> 2014-07-21 13:15:46 -0500
committer: Thomas Graves <tgraves@apache.org> 2014-07-21 13:15:46 -0500
commit: f89cf65d7aced0bb387c05586f9f51cb29865022 (patch)
tree: abf25e4025ad61c86bb04814abadc7710b3b13eb /yarn/stable
parent: cd273a238144a9a436219cd01250369586f5638b (diff)
download: spark-f89cf65d7aced0bb387c05586f9f51cb29865022.tar.gz
spark-f89cf65d7aced0bb387c05586f9f51cb29865022.tar.bz2
spark-f89cf65d7aced0bb387c05586f9f51cb29865022.zip
SPARK-1707. Remove unnecessary 3 second sleep in YarnClusterScheduler
Author: Sandy Ryza <sandy@cloudera.com> Closes #634 from sryza/sandy-spark-1707 and squashes the following commits: 2f6e358 [Sandy Ryza] Default min registered executors ratio to .8 for YARN 354c630 [Sandy Ryza] Remove outdated comments c744ef3 [Sandy Ryza] Take out waitForInitialAllocations 2a4329b [Sandy Ryza] SPARK-1707. Remove unnecessary 3 second sleep in YarnClusterScheduler
Diffstat (limited to 'yarn/stable')
-rw-r--r-- yarn/stable/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMaster.scala | 43
1 file changed, 0 insertions(+), 43 deletions(-)
diff --git a/yarn/stable/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMaster.scala b/yarn/stable/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMaster.scala
index 1a24ec759b..eaf594c8b4 100644
--- a/yarn/stable/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMaster.scala
+++ b/yarn/stable/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMaster.scala
@@ -234,10 +234,6 @@ class ApplicationMaster(args: ApplicationMasterArguments, conf: Configuration,
sparkContext.getConf)
}
}
- } finally {
- // In case of exceptions, etc - ensure that the loop in
- // ApplicationMaster#sparkContextInitialized() breaks.
- ApplicationMaster.doneWithInitialAllocations()
}
}
@@ -254,16 +250,9 @@ class ApplicationMaster(args: ApplicationMasterArguments, conf: Configuration,
checkNumExecutorsFailed()
allocateMissingExecutor()
yarnAllocator.allocateResources()
- if (iters == ApplicationMaster.ALLOCATOR_LOOP_WAIT_COUNT) {
- ApplicationMaster.doneWithInitialAllocations()
- }
Thread.sleep(ApplicationMaster.ALLOCATE_HEARTBEAT_INTERVAL)
iters += 1
}
- } finally {
- // In case of exceptions, etc - ensure that the loop in
- // ApplicationMaster#sparkContextInitialized() breaks.
- ApplicationMaster.doneWithInitialAllocations()
}
logInfo("All executors have launched.")
}
@@ -365,12 +354,8 @@ class ApplicationMaster(args: ApplicationMasterArguments, conf: Configuration,
}
object ApplicationMaster extends Logging {
- // Number of times to wait for the allocator loop to complete.
- // Each loop iteration waits for 100ms, so maximum of 3 seconds.
- // This is to ensure that we have reasonable number of containers before we start
// TODO: Currently, task to container is computed once (TaskSetManager) - which need not be
// optimal as more containers are available. Might need to handle this better.
- private val ALLOCATOR_LOOP_WAIT_COUNT = 30
private val ALLOCATE_HEARTBEAT_INTERVAL = 100
private val applicationMasters = new CopyOnWriteArrayList[ApplicationMaster]()
@@ -378,20 +363,6 @@ object ApplicationMaster extends Logging {
val sparkContextRef: AtomicReference[SparkContext] =
new AtomicReference[SparkContext](null)
- // Variable used to notify the YarnClusterScheduler that it should stop waiting
- // for the initial set of executors to be started and get on with its business.
- val doneWithInitialAllocationsMonitor = new Object()
-
- @volatile var isDoneWithInitialAllocations = false
-
- def doneWithInitialAllocations() {
- isDoneWithInitialAllocations = true
- doneWithInitialAllocationsMonitor.synchronized {
- // to wake threads off wait ...
- doneWithInitialAllocationsMonitor.notifyAll()
- }
- }
-
def register(master: ApplicationMaster) {
applicationMasters.add(master)
}
@@ -434,20 +405,6 @@ object ApplicationMaster extends Logging {
modified
}
- /**
- * Returns when we've either
- * 1) received all the requested executors,
- * 2) waited ALLOCATOR_LOOP_WAIT_COUNT * ALLOCATE_HEARTBEAT_INTERVAL ms,
- * 3) hit an error that causes us to terminate trying to get containers.
- */
- def waitForInitialAllocations() {
- doneWithInitialAllocationsMonitor.synchronized {
- while (!isDoneWithInitialAllocations) {
- doneWithInitialAllocationsMonitor.wait(1000L)
- }
- }
- }
-
def getApplicationAttemptId(): ApplicationAttemptId = {
val containerIdString = System.getenv(ApplicationConstants.Environment.CONTAINER_ID.name())
val containerId = ConverterUtils.toContainerId(containerIdString)