author     shekhar.bansal <shekhar.bansal@guavus.com>   2015-05-05 11:09:51 +0100
committer  Sean Owen <sowen@cloudera.com>               2015-05-05 11:09:51 +0100
commit     fc8feaa8e94e1e611d2abb1e5e38de512961502b (patch)
tree       1973d2e96fc36283f10cf6881ee32f5ee118322b
parent     4d29867ede9a87b160c3d715c1fb02067feef449 (diff)
[SPARK-6653] [YARN] New config to specify port for sparkYarnAM actor system
Author: shekhar.bansal <shekhar.bansal@guavus.com>

Closes #5719 from zuxqoj/master and squashes the following commits:

5574ff7 [shekhar.bansal] [SPARK-6653][yarn] New config to specify port for sparkYarnAM actor system
5117258 [shekhar.bansal] [SPARK-6653][yarn] New config to specify port for sparkYarnAM actor system
9de5330 [shekhar.bansal] [SPARK-6653][yarn] New config to specify port for sparkYarnAM actor system
456a592 [shekhar.bansal] [SPARK-6653][yarn] New configuration property to specify port for sparkYarnAM actor system
803e93e [shekhar.bansal] [SPARK-6653][yarn] New configuration property to specify port for sparkYarnAM actor system
-rw-r--r--  docs/running-on-yarn.md                                                    7
-rw-r--r--  yarn/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMaster.scala   3
2 files changed, 9 insertions(+), 1 deletion(-)
diff --git a/docs/running-on-yarn.md b/docs/running-on-yarn.md
index b6701b64c2..4fb4a90307 100644
--- a/docs/running-on-yarn.md
+++ b/docs/running-on-yarn.md
@@ -134,6 +134,13 @@ Most of the configs are the same for Spark on YARN as for other deployment modes
</td>
</tr>
<tr>
+ <td><code>spark.yarn.am.port</code></td>
+ <td>(random)</td>
+ <td>
+ Port for the YARN Application Master to listen on. In YARN client mode, this is used to communicate between the Spark driver running on a gateway and the Application Master running on YARN. In YARN cluster mode, this is used for the dynamic executor feature, where it handles the kill from the scheduler backend.
+ </td>
+</tr>
+<tr>
<td><code>spark.yarn.queue</code></td>
<td>default</td>
<td>
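
For context, here is a minimal usage sketch (not part of this patch) of how an application might pin the AM port through the new property when building its configuration. The application name and the port value 40100 are arbitrary illustrative choices.

    // Illustrative sketch only: setting the new spark.yarn.am.port property
    // programmatically for a yarn-client application.
    import org.apache.spark.{SparkConf, SparkContext}

    val conf = new SparkConf()
      .setMaster("yarn-client")
      .setAppName("am-port-example")                  // example name, not from the patch
      .set("spark.yarn.am.port", "40100")             // AM binds to 40100 instead of a random port

    val sc = new SparkContext(conf)

The same property can equally be supplied with --conf spark.yarn.am.port=40100 on spark-submit or in spark-defaults.conf, like any other Spark configuration key.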
diff --git a/yarn/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMaster.scala b/yarn/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMaster.scala
index e1694c1f64..29752969e6 100644
--- a/yarn/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMaster.scala
+++ b/yarn/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMaster.scala
@@ -285,7 +285,8 @@ private[spark] class ApplicationMaster(
}
private def runExecutorLauncher(securityMgr: SecurityManager): Unit = {
- rpcEnv = RpcEnv.create("sparkYarnAM", Utils.localHostName, 0, sparkConf, securityMgr)
+ val port = sparkConf.getInt("spark.yarn.am.port", 0)
+ rpcEnv = RpcEnv.create("sparkYarnAM", Utils.localHostName, port, sparkConf, securityMgr)
waitForSparkDriver()
addAmIpFilter()
registerAM(sparkConf.get("spark.driver.appUIAddress", ""), securityMgr)
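
The fallback semantics of the change above: when spark.yarn.am.port is unset, getInt returns the default 0, and a port of 0 tells the RPC layer to bind to an OS-assigned ephemeral port, matching the previous hard-coded behaviour. A standalone sketch of that lookup (not the ApplicationMaster code itself):

    // Standalone sketch of the config fallback, using an empty SparkConf so no
    // system properties leak in; the port value 40100 is an arbitrary example.
    import org.apache.spark.SparkConf

    val sparkConf = new SparkConf(loadDefaults = false)
    val port = sparkConf.getInt("spark.yarn.am.port", 0)
    assert(port == 0)                                        // unset => random port, as before

    sparkConf.set("spark.yarn.am.port", "40100")
    assert(sparkConf.getInt("spark.yarn.am.port", 0) == 40100) // set => AM uses the fixed port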