aboutsummaryrefslogtreecommitdiff
path: root/yarn
diff options
context:
space:
mode:
authorKousuke Saruta <sarutak@oss.nttdata.co.jp>2014-09-15 08:53:58 -0500
committerThomas Graves <tgraves@apache.org>2014-09-15 08:53:58 -0500
commitcc14644460872efb344e8d895859d70213a40840 (patch)
tree78746dd247e51ba53e14a36ab4b3293a81ce1ca6 /yarn
parentf493f7982b50e3c99e78b649e7c6c5b4313c5ffa (diff)
downloadspark-cc14644460872efb344e8d895859d70213a40840.tar.gz
spark-cc14644460872efb344e8d895859d70213a40840.tar.bz2
spark-cc14644460872efb344e8d895859d70213a40840.zip
[SPARK-3410] The priority of shutdownhook for ApplicationMaster should not be integer literal
I think we need to keep the priority of the shutdown hook for ApplicationMaster higher than the priority of the shutdown hook for o.a.h.FileSystem, to be robust against changes to the priority used by FileSystem. Author: Kousuke Saruta <sarutak@oss.nttdata.co.jp> Closes #2283 from sarutak/SPARK-3410 and squashes the following commits: 1d44fef [Kousuke Saruta] Merge branch 'master' of git://git.apache.org/spark into SPARK-3410 bd6cc53 [Kousuke Saruta] Modified style ee6f1aa [Kousuke Saruta] Added constant "SHUTDOWN_HOOK_PRIORITY" to ApplicationMaster 54eb68f [Kousuke Saruta] Changed Shutdown hook priority to 20 2f0aee3 [Kousuke Saruta] Merge branch 'master' of git://git.apache.org/spark into SPARK-3410 4c5cb93 [Kousuke Saruta] Modified the priority for AM's shutdown hook 217d1a4 [Kousuke Saruta] Removed unused import statements 717aba2 [Kousuke Saruta] Modified ApplicationMaster to make to keep the priority of shutdown hook for ApplicationMaster higher than the priority of shutdown hook for HDFS
Diffstat (limited to 'yarn')
-rw-r--r--yarn/common/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMaster.scala13
1 file changed, 7 insertions, 6 deletions
diff --git a/yarn/common/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMaster.scala b/yarn/common/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMaster.scala
index 735d7723b0..cde5fff637 100644
--- a/yarn/common/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMaster.scala
+++ b/yarn/common/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMaster.scala
@@ -21,12 +21,8 @@ import java.io.IOException
import java.net.Socket
import java.util.concurrent.atomic.AtomicReference
-import scala.collection.JavaConversions._
-import scala.util.Try
-
import akka.actor._
import akka.remote._
-import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.hadoop.util.ShutdownHookManager
import org.apache.hadoop.yarn.api._
@@ -107,8 +103,11 @@ private[spark] class ApplicationMaster(args: ApplicationMasterArguments,
}
}
}
- // Use priority 30 as it's higher than HDFS. It's the same priority MapReduce is using.
- ShutdownHookManager.get().addShutdownHook(cleanupHook, 30)
+
+ // Use higher priority than FileSystem.
+ assert(ApplicationMaster.SHUTDOWN_HOOK_PRIORITY > FileSystem.SHUTDOWN_HOOK_PRIORITY)
+ ShutdownHookManager
+ .get().addShutdownHook(cleanupHook, ApplicationMaster.SHUTDOWN_HOOK_PRIORITY)
// Call this to force generation of secret so it gets populated into the
// Hadoop UGI. This has to happen before the startUserClass which does a
@@ -407,6 +406,8 @@ private[spark] class ApplicationMaster(args: ApplicationMasterArguments,
object ApplicationMaster extends Logging {
+ val SHUTDOWN_HOOK_PRIORITY: Int = 30
+
private var master: ApplicationMaster = _
def main(args: Array[String]) = {