aboutsummaryrefslogtreecommitdiff
path: root/new-yarn
diff options
context:
space:
mode:
authorThomas Graves <tgraves@apache.org>2014-01-02 17:11:16 -0600
committerThomas Graves <tgraves@apache.org>2014-01-02 17:11:16 -0600
commitfced7885cb6cd09761578f960540d739bcbb465a (patch)
treef1c39d9a7f40cd0833d62ee56d787221dae73f2a /new-yarn
parentc6de982be69cd50e66375cfea3d6c3267a01583b (diff)
downloadspark-fced7885cb6cd09761578f960540d739bcbb465a.tar.gz
spark-fced7885cb6cd09761578f960540d739bcbb465a.tar.bz2
spark-fced7885cb6cd09761578f960540d739bcbb465a.zip
fix yarn-client
Diffstat (limited to 'new-yarn')
-rw-r--r--new-yarn/src/main/scala/org/apache/spark/deploy/yarn/WorkerLauncher.scala9
1 file changed, 5 insertions, 4 deletions
diff --git a/new-yarn/src/main/scala/org/apache/spark/deploy/yarn/WorkerLauncher.scala b/new-yarn/src/main/scala/org/apache/spark/deploy/yarn/WorkerLauncher.scala
index 32c774c90e..99b824e129 100644
--- a/new-yarn/src/main/scala/org/apache/spark/deploy/yarn/WorkerLauncher.scala
+++ b/new-yarn/src/main/scala/org/apache/spark/deploy/yarn/WorkerLauncher.scala
@@ -47,9 +47,10 @@ class WorkerLauncher(args: ApplicationMasterArguments, conf: Configuration) exte
private var driverClosed:Boolean = false
private var amClient: AMRMClient[ContainerRequest] = _
+ private val sparkConf = new SparkConf
val actorSystem : ActorSystem = AkkaUtils.createActorSystem("sparkYarnAM", Utils.localHostName, 0,
- conf = new SparkConf)._1
+ conf = sparkConf)._1
var actor: ActorRef = _
// This actor just working as a monitor to watch on Driver Actor.
@@ -137,8 +138,8 @@ class WorkerLauncher(args: ApplicationMasterArguments, conf: Configuration) exte
Thread.sleep(100)
}
}
- conf.set("spark.driver.host", driverHost)
- conf.set("spark.driver.port", driverPort.toString)
+ sparkConf.set("spark.driver.host", driverHost)
+ sparkConf.set("spark.driver.port", driverPort.toString)
val driverUrl = "akka.tcp://spark@%s:%s/user/%s".format(
driverHost, driverPort.toString, CoarseGrainedSchedulerBackend.ACTOR_NAME)
@@ -159,7 +160,7 @@ class WorkerLauncher(args: ApplicationMasterArguments, conf: Configuration) exte
appAttemptId,
args,
preferredNodeLocationData,
- new SparkConf)
+ sparkConf)
logInfo("Allocating " + args.numWorkers + " workers.")
// Wait until all containers have finished