From 2a2c9645e4ea08cd1408151a33d2d52f6752404a Mon Sep 17 00:00:00 2001
From: Sandy Ryza
Date: Mon, 10 Mar 2014 17:42:33 -0700
Subject: SPARK-1211. In ApplicationMaster, set spark.master system property to
 "y...

...arn-cluster

Author: Sandy Ryza

Closes #118 from sryza/sandy-spark-1211 and squashes the following commits:

d4001c7 [Sandy Ryza] SPARK-1211. In ApplicationMaster, set spark.master system property to "yarn-cluster"
---
 .../main/scala/org/apache/spark/deploy/yarn/ApplicationMaster.scala | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/yarn/stable/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMaster.scala b/yarn/stable/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMaster.scala
index b48a2d50db..57d1577429 100644
--- a/yarn/stable/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMaster.scala
+++ b/yarn/stable/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMaster.scala
@@ -82,6 +82,9 @@ class ApplicationMaster(args: ApplicationMasterArguments, conf: Configuration,
     // other spark processes running on the same box
     System.setProperty("spark.ui.port", "0")

+    // when running the AM, the Spark master is always "yarn-cluster"
+    System.setProperty("spark.master", "yarn-cluster")
+
     // Use priority 30 as it's higher then HDFS. It's same priority as MapReduce is using.
     ShutdownHookManager.get().addShutdownHook(new AppMasterShutdownHook(this), 30)
--
cgit v1.2.3
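
Note on why setting the system property is enough: SparkConf's default constructor loads every JVM system property whose name starts with "spark.", so user code that the ApplicationMaster launches picks up the master URL without an explicit setMaster call. Below is a minimal sketch of that behavior; the property name and value match the patch, while the object name, app name, and println are illustrative only, not part of the change.

    import org.apache.spark.SparkConf

    object MasterPropertySketch {
      def main(args: Array[String]): Unit = {
        // The ApplicationMaster sets this before the user's main class runs.
        System.setProperty("spark.master", "yarn-cluster")

        // new SparkConf() loads defaults, copying all "spark.*" system
        // properties into the conf, so no explicit setMaster is needed.
        val conf = new SparkConf().setAppName("sketch")
        println(conf.get("spark.master"))  // prints "yarn-cluster"
      }
    }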