diff options
author | Matei Zaharia <matei@eecs.berkeley.edu> | 2013-10-16 15:58:41 -0700 |
---|---|---|
committer | Matei Zaharia <matei@eecs.berkeley.edu> | 2013-10-16 15:58:41 -0700 |
commit | f9973cae3aff39c29a2cdad5b54b7674d1126132 (patch) | |
tree | 0d0f88c2170f289377ecf680f0d84e8b03e18c65 /yarn | |
parent | 28e9c2abc0884d096fc3be1e2d1f9ee18ffc3261 (diff) | |
parent | cc7df2b3ccdee602a6a90964628676e7dc4e0954 (diff) | |
download | spark-f9973cae3aff39c29a2cdad5b54b7674d1126132.tar.gz spark-f9973cae3aff39c29a2cdad5b54b7674d1126132.tar.bz2 spark-f9973cae3aff39c29a2cdad5b54b7674d1126132.zip |
Merge pull request #65 from tgravescs/fixYarn
Fix yarn build
Fix the yarn build after renaming StandAloneX to CoarseGrainedX from pull request 34.
Diffstat (limited to 'yarn')
-rw-r--r-- | yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocationHandler.scala | 4 |
1 file changed, 2 insertions, 2 deletions
diff --git a/yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocationHandler.scala b/yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocationHandler.scala index 6d6ef149cc..25da9aa917 100644 --- a/yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocationHandler.scala +++ b/yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocationHandler.scala @@ -22,7 +22,7 @@ import org.apache.spark.util.Utils import org.apache.spark.scheduler.SplitInfo import scala.collection import org.apache.hadoop.yarn.api.records.{AMResponse, ApplicationAttemptId, ContainerId, Priority, Resource, ResourceRequest, ContainerStatus, Container} -import org.apache.spark.scheduler.cluster.{ClusterScheduler, StandaloneSchedulerBackend} +import org.apache.spark.scheduler.cluster.{ClusterScheduler, CoarseGrainedSchedulerBackend} import org.apache.hadoop.yarn.api.protocolrecords.{AllocateRequest, AllocateResponse} import org.apache.hadoop.yarn.util.{RackResolver, Records} import java.util.concurrent.{CopyOnWriteArrayList, ConcurrentHashMap} @@ -211,7 +211,7 @@ private[yarn] class YarnAllocationHandler(val conf: Configuration, val resourceM val workerId = workerIdCounter.incrementAndGet().toString val driverUrl = "akka://spark@%s:%s/user/%s".format( System.getProperty("spark.driver.host"), System.getProperty("spark.driver.port"), - StandaloneSchedulerBackend.ACTOR_NAME) + CoarseGrainedSchedulerBackend.ACTOR_NAME) logInfo("launching container on " + containerId + " host " + workerHostname) // just to be safe, simply remove it from pendingReleaseContainers. Should not be there, but .. |