| author | Thomas Graves <tgraves@apache.org> | 2014-02-05 23:37:07 -0800 |
|---|---|---|
| committer | Reynold Xin <rxin@apache.org> | 2014-02-05 23:37:07 -0800 |
| commit | 38020961d101e792393855fd00d8e42f40713754 (patch) | |
| tree | 00129e9b25f670be4060bf186f025e2d9ffd51c3 /yarn/stable | |
| parent | 18c4ee71e27189f5f3f4eb6bfc6ad8860aa254c6 (diff) | |
Merge pull request #526 from tgravescs/yarn_client_stop_am_fix. Closes #526.
spark on yarn - yarn-client mode doesn't always exit immediately
https://spark-project.atlassian.net/browse/SPARK-1049
If you run in yarn-client mode but don't get all of the workers you requested right away, and you then exit your application, the application master stays around until it has received the number of workers you initially requested. This wastes resources; the AM should exit immediately once the client goes away.
This fix simply checks whether the driver has closed while the AM is waiting for the initial number of workers.
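To make the behaviour concrete, here is a minimal, self-contained sketch of the guarded wait loop. Only the loop condition and the allocator method names mirror the diff below; the stub allocator, `waitForWorkers`, and the surrounding object are hypothetical scaffolding for illustration, not the actual WorkerLauncher code.

```scala
object WaitLoopSketch {
  // Hypothetical stand-in for the real YARN allocator; only the calls used
  // by the wait loop are modelled here.
  class StubAllocator {
    private var running = 0
    def addResourceRequests(n: Int): Unit = ()        // request containers (no-op in the sketch)
    def allocateResources(): Unit = { running += 1 }  // pretend one worker comes up per call
    def getNumWorkersRunning: Int = running
  }

  @volatile var driverClosed = false  // assumed to be set elsewhere when the driver disconnects

  def waitForWorkers(yarnAllocator: StubAllocator, numWorkers: Int): Unit = {
    yarnAllocator.addResourceRequests(numWorkers)
    // The fix: also bail out once the driver has gone away, instead of
    // blocking until every requested worker is running.
    while (yarnAllocator.getNumWorkersRunning < numWorkers && !driverClosed) {
      yarnAllocator.allocateResources()
      Thread.sleep(100)
    }
  }

  def main(args: Array[String]): Unit = {
    waitForWorkers(new StubAllocator, numWorkers = 3)
    println("AM wait loop finished")
  }
}
```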
Author: Thomas Graves <tgraves@apache.org>
== Merge branch commits ==
commit 03f40a62584b6bdd094ba91670cd4aa6afe7cd81
Author: Thomas Graves <tgraves@apache.org>
Date: Fri Jan 31 11:23:10 2014 -0600
spark on yarn - yarn-client mode doesn't always exit immediately
Diffstat (limited to 'yarn/stable')
-rw-r--r-- | yarn/stable/src/main/scala/org/apache/spark/deploy/yarn/WorkerLauncher.scala | 2 |
1 file changed, 1 insertion, 1 deletion
diff --git a/yarn/stable/src/main/scala/org/apache/spark/deploy/yarn/WorkerLauncher.scala b/yarn/stable/src/main/scala/org/apache/spark/deploy/yarn/WorkerLauncher.scala
index 78353224fa..40600f38e5 100644
--- a/yarn/stable/src/main/scala/org/apache/spark/deploy/yarn/WorkerLauncher.scala
+++ b/yarn/stable/src/main/scala/org/apache/spark/deploy/yarn/WorkerLauncher.scala
@@ -193,7 +193,7 @@ class WorkerLauncher(args: ApplicationMasterArguments, conf: Configuration, spar
     // TODO: Handle container failure
     yarnAllocator.addResourceRequests(args.numWorkers)
-    while (yarnAllocator.getNumWorkersRunning < args.numWorkers) {
+    while ((yarnAllocator.getNumWorkersRunning < args.numWorkers) && (!driverClosed)) {
       yarnAllocator.allocateResources()
       Thread.sleep(100)
     }