author     Sandy Ryza <sandy@cloudera.com>      2014-12-09 11:02:43 -0800
committer  Andrew Or <andrew@databricks.com>    2014-12-09 11:02:43 -0800
commit     912563aa3553afc0871d5b5858f533aa39cb99e5 (patch)
tree       092241ac4c78deef8053f5095bc9680b3c8532cd /yarn
parent     383c5555c9f26c080bc9e3a463aab21dd5b3797f (diff)
SPARK-4338. [YARN] Ditch yarn-alpha.
Sorry if this is a little premature with 1.2 still not out the door, but it will make other work like SPARK-4136 and SPARK-2089 a lot easier.

Author: Sandy Ryza <sandy@cloudera.com>

Closes #3215 from sryza/sandy-spark-4338 and squashes the following commits:

1c5ac08 [Sandy Ryza] Update building Spark docs and remove unnecessary newline
9c1421c [Sandy Ryza] SPARK-4338. Ditch yarn-alpha.
Diffstat (limited to 'yarn')
-rw-r--r--  yarn/README.md  |  12
-rw-r--r--  yarn/alpha/pom.xml  |  35
-rw-r--r--  yarn/alpha/src/main/scala/org/apache/spark/deploy/yarn/Client.scala  |  145
-rw-r--r--  yarn/alpha/src/main/scala/org/apache/spark/deploy/yarn/ExecutorRunnable.scala  |  139
-rw-r--r--  yarn/alpha/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocationHandler.scala  |  229
-rw-r--r--  yarn/alpha/src/main/scala/org/apache/spark/deploy/yarn/YarnRMClientImpl.scala  |  118
-rw-r--r--  yarn/pom.xml  |  129
-rw-r--r--  yarn/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMaster.scala (renamed from yarn/common/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMaster.scala)  |  0
-rw-r--r--  yarn/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMasterArguments.scala (renamed from yarn/common/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMasterArguments.scala)  |  0
-rw-r--r--  yarn/src/main/scala/org/apache/spark/deploy/yarn/Client.scala (renamed from yarn/stable/src/main/scala/org/apache/spark/deploy/yarn/Client.scala)  |  0
-rw-r--r--  yarn/src/main/scala/org/apache/spark/deploy/yarn/ClientArguments.scala (renamed from yarn/common/src/main/scala/org/apache/spark/deploy/yarn/ClientArguments.scala)  |  36
-rw-r--r--  yarn/src/main/scala/org/apache/spark/deploy/yarn/ClientBase.scala (renamed from yarn/common/src/main/scala/org/apache/spark/deploy/yarn/ClientBase.scala)  |  0
-rw-r--r--  yarn/src/main/scala/org/apache/spark/deploy/yarn/ClientDistributedCacheManager.scala (renamed from yarn/common/src/main/scala/org/apache/spark/deploy/yarn/ClientDistributedCacheManager.scala)  |  0
-rw-r--r--  yarn/src/main/scala/org/apache/spark/deploy/yarn/ExecutorRunnable.scala (renamed from yarn/stable/src/main/scala/org/apache/spark/deploy/yarn/ExecutorRunnable.scala)  |  0
-rw-r--r--  yarn/src/main/scala/org/apache/spark/deploy/yarn/ExecutorRunnableUtil.scala (renamed from yarn/common/src/main/scala/org/apache/spark/deploy/yarn/ExecutorRunnableUtil.scala)  |  7
-rw-r--r--  yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocationHandler.scala (renamed from yarn/stable/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocationHandler.scala)  |  0
-rw-r--r--  yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocator.scala (renamed from yarn/common/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocator.scala)  |  0
-rw-r--r--  yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnRMClient.scala (renamed from yarn/common/src/main/scala/org/apache/spark/deploy/yarn/YarnRMClient.scala)  |  0
-rw-r--r--  yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnRMClientImpl.scala (renamed from yarn/stable/src/main/scala/org/apache/spark/deploy/yarn/YarnRMClientImpl.scala)  |  0
-rw-r--r--  yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnSparkHadoopUtil.scala (renamed from yarn/common/src/main/scala/org/apache/spark/deploy/yarn/YarnSparkHadoopUtil.scala)  |  10
-rw-r--r--  yarn/src/main/scala/org/apache/spark/scheduler/cluster/YarnClientClusterScheduler.scala (renamed from yarn/common/src/main/scala/org/apache/spark/scheduler/cluster/YarnClientClusterScheduler.scala)  |  0
-rw-r--r--  yarn/src/main/scala/org/apache/spark/scheduler/cluster/YarnClientSchedulerBackend.scala (renamed from yarn/common/src/main/scala/org/apache/spark/scheduler/cluster/YarnClientSchedulerBackend.scala)  |  0
-rw-r--r--  yarn/src/main/scala/org/apache/spark/scheduler/cluster/YarnClusterScheduler.scala (renamed from yarn/common/src/main/scala/org/apache/spark/scheduler/cluster/YarnClusterScheduler.scala)  |  0
-rw-r--r--  yarn/src/main/scala/org/apache/spark/scheduler/cluster/YarnClusterSchedulerBackend.scala (renamed from yarn/common/src/main/scala/org/apache/spark/scheduler/cluster/YarnClusterSchedulerBackend.scala)  |  0
-rw-r--r--  yarn/src/test/resources/log4j.properties (renamed from yarn/stable/src/test/resources/log4j.properties)  |  0
-rw-r--r--  yarn/src/test/scala/org/apache/spark/deploy/yarn/ClientBaseSuite.scala (renamed from yarn/common/src/test/scala/org/apache/spark/deploy/yarn/ClientBaseSuite.scala)  |  0
-rw-r--r--  yarn/src/test/scala/org/apache/spark/deploy/yarn/ClientDistributedCacheManagerSuite.scala (renamed from yarn/common/src/test/scala/org/apache/spark/deploy/yarn/ClientDistributedCacheManagerSuite.scala)  |  0
-rw-r--r--  yarn/src/test/scala/org/apache/spark/deploy/yarn/YarnAllocatorSuite.scala (renamed from yarn/common/src/test/scala/org/apache/spark/deploy/yarn/YarnAllocatorSuite.scala)  |  0
-rw-r--r--  yarn/src/test/scala/org/apache/spark/deploy/yarn/YarnClusterSuite.scala (renamed from yarn/stable/src/test/scala/org/apache/spark/deploy/yarn/YarnClusterSuite.scala)  |  0
-rw-r--r--  yarn/src/test/scala/org/apache/spark/deploy/yarn/YarnSparkHadoopUtilSuite.scala (renamed from yarn/common/src/test/scala/org/apache/spark/deploy/yarn/YarnSparkHadoopUtilSuite.scala)  |  0
-rw-r--r--  yarn/stable/pom.xml  |  95
31 files changed, 85 insertions, 870 deletions
diff --git a/yarn/README.md b/yarn/README.md
deleted file mode 100644
index 65ee85447e..0000000000
--- a/yarn/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
-# YARN DIRECTORY LAYOUT
-
-Hadoop Yarn related codes are organized in separate directories to minimize duplicated code.
-
- * common : Common codes that do not depending on specific version of Hadoop.
-
- * alpha / stable : Codes that involve specific version of Hadoop YARN API.
-
- alpha represents 0.23 and 2.0.x
- stable represents 2.2 and later, until the API changes again.
-
-alpha / stable will build together with common dir into a single jar
diff --git a/yarn/alpha/pom.xml b/yarn/alpha/pom.xml
deleted file mode 100644
index 40e9e99c6f..0000000000
--- a/yarn/alpha/pom.xml
+++ /dev/null
@@ -1,35 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- ~ Licensed to the Apache Software Foundation (ASF) under one or more
- ~ contributor license agreements. See the NOTICE file distributed with
- ~ this work for additional information regarding copyright ownership.
- ~ The ASF licenses this file to You under the Apache License, Version 2.0
- ~ (the "License"); you may not use this file except in compliance with
- ~ the License. You may obtain a copy of the License at
- ~
- ~ http://www.apache.org/licenses/LICENSE-2.0
- ~
- ~ Unless required by applicable law or agreed to in writing, software
- ~ distributed under the License is distributed on an "AS IS" BASIS,
- ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- ~ See the License for the specific language governing permissions and
- ~ limitations under the License.
- -->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
- <modelVersion>4.0.0</modelVersion>
- <parent>
- <groupId>org.apache.spark</groupId>
- <artifactId>yarn-parent_2.10</artifactId>
- <version>1.3.0-SNAPSHOT</version>
- <relativePath>../pom.xml</relativePath>
- </parent>
- <properties>
- <sbt.project.name>yarn-alpha</sbt.project.name>
- </properties>
-
- <groupId>org.apache.spark</groupId>
- <artifactId>spark-yarn-alpha_2.10</artifactId>
- <packaging>jar</packaging>
- <name>Spark Project YARN Alpha API</name>
-
-</project>
diff --git a/yarn/alpha/src/main/scala/org/apache/spark/deploy/yarn/Client.scala b/yarn/alpha/src/main/scala/org/apache/spark/deploy/yarn/Client.scala
deleted file mode 100644
index 73b705ba50..0000000000
--- a/yarn/alpha/src/main/scala/org/apache/spark/deploy/yarn/Client.scala
+++ /dev/null
@@ -1,145 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.spark.deploy.yarn
-
-import java.nio.ByteBuffer
-
-import org.apache.hadoop.conf.Configuration
-import org.apache.hadoop.io.DataOutputBuffer
-import org.apache.hadoop.security.UserGroupInformation
-import org.apache.hadoop.yarn.api._
-import org.apache.hadoop.yarn.api.protocolrecords._
-import org.apache.hadoop.yarn.api.records._
-import org.apache.hadoop.yarn.client.YarnClientImpl
-import org.apache.hadoop.yarn.conf.YarnConfiguration
-import org.apache.hadoop.yarn.util.Records
-
-import org.apache.spark.{Logging, SparkConf}
-import org.apache.spark.deploy.SparkHadoopUtil
-
-/**
- * Version of [[org.apache.spark.deploy.yarn.ClientBase]] tailored to YARN's alpha API.
- */
-@deprecated("use yarn/stable", "1.2.0")
-private[spark] class Client(
- val args: ClientArguments,
- val hadoopConf: Configuration,
- val sparkConf: SparkConf)
- extends YarnClientImpl with ClientBase with Logging {
-
- def this(clientArgs: ClientArguments, spConf: SparkConf) =
- this(clientArgs, SparkHadoopUtil.get.newConfiguration(spConf), spConf)
-
- def this(clientArgs: ClientArguments) = this(clientArgs, new SparkConf())
-
- val yarnConf: YarnConfiguration = new YarnConfiguration(hadoopConf)
-
- /* ------------------------------------------------------------------------------------- *
- | The following methods have much in common in the stable and alpha versions of Client, |
- | but cannot be implemented in the parent trait due to subtle API differences across |
- | hadoop versions. |
- * ------------------------------------------------------------------------------------- */
-
- /** Submit an application running our ApplicationMaster to the ResourceManager. */
- override def submitApplication(): ApplicationId = {
- init(yarnConf)
- start()
-
- logInfo("Requesting a new application from cluster with %d NodeManagers"
- .format(getYarnClusterMetrics.getNumNodeManagers))
-
- // Get a new application from our RM
- val newAppResponse = getNewApplication()
- val appId = newAppResponse.getApplicationId()
-
- // Verify whether the cluster has enough resources for our AM
- verifyClusterResources(newAppResponse)
-
- // Set up the appropriate contexts to launch our AM
- val containerContext = createContainerLaunchContext(newAppResponse)
- val appContext = createApplicationSubmissionContext(appId, containerContext)
-
- // Finally, submit and monitor the application
- logInfo(s"Submitting application ${appId.getId} to ResourceManager")
- submitApplication(appContext)
- appId
- }
-
- /**
- * Set up a context for launching our ApplicationMaster container.
- * In the Yarn alpha API, the memory requirements of this container must be set in
- * the ContainerLaunchContext instead of the ApplicationSubmissionContext.
- */
- override def createContainerLaunchContext(newAppResponse: GetNewApplicationResponse)
- : ContainerLaunchContext = {
- val containerContext = super.createContainerLaunchContext(newAppResponse)
- val capability = Records.newRecord(classOf[Resource])
- capability.setMemory(args.amMemory + amMemoryOverhead)
- containerContext.setResource(capability)
- containerContext
- }
-
- /** Set up the context for submitting our ApplicationMaster. */
- def createApplicationSubmissionContext(
- appId: ApplicationId,
- containerContext: ContainerLaunchContext): ApplicationSubmissionContext = {
- val appContext = Records.newRecord(classOf[ApplicationSubmissionContext])
- appContext.setApplicationId(appId)
- appContext.setApplicationName(args.appName)
- appContext.setQueue(args.amQueue)
- appContext.setAMContainerSpec(containerContext)
- appContext.setUser(UserGroupInformation.getCurrentUser.getShortUserName)
- appContext
- }
-
- /**
- * Set up security tokens for launching our ApplicationMaster container.
- * ContainerLaunchContext#setContainerTokens is renamed `setTokens` in the stable API.
- */
- override def setupSecurityToken(amContainer: ContainerLaunchContext): Unit = {
- val dob = new DataOutputBuffer()
- credentials.writeTokenStorageToStream(dob)
- amContainer.setContainerTokens(ByteBuffer.wrap(dob.getData()))
- }
-
- /**
- * Return the security token used by this client to communicate with the ApplicationMaster.
- * If no security is enabled, the token returned by the report is null.
- * ApplicationReport#getClientToken is renamed `getClientToAMToken` in the stable API.
- */
- override def getClientToken(report: ApplicationReport): String =
- Option(report.getClientToken).map(_.toString).getOrElse("")
-}
-
-object Client {
- def main(argStrings: Array[String]) {
- if (!sys.props.contains("SPARK_SUBMIT")) {
- println("WARNING: This client is deprecated and will be removed in a " +
- "future version of Spark. Use ./bin/spark-submit with \"--master yarn\"")
- }
- println("WARNING: Support for YARN-alpha API's will be removed in Spark 1.3 (see SPARK-3445)")
-
- // Set an env variable indicating we are running in YARN mode.
- // Note that any env variable with the SPARK_ prefix gets propagated to all (remote) processes
- System.setProperty("SPARK_YARN_MODE", "true")
- val sparkConf = new SparkConf
-
- val args = new ClientArguments(argStrings, sparkConf)
- new Client(args, sparkConf).run()
- }
-}
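
The deleted alpha client documents the two renames that separate it from the stable client (ContainerLaunchContext#setContainerTokens became setTokens, ApplicationReport#getClientToken became getClientToAMToken) and notes that the AM memory request moves from the ContainerLaunchContext to the ApplicationSubmissionContext. A minimal sketch of the stable-API (Hadoop 2.2+) counterparts, using the same helpers as the alpha code:

    import java.nio.ByteBuffer

    import org.apache.hadoop.io.DataOutputBuffer
    import org.apache.hadoop.security.Credentials
    import org.apache.hadoop.yarn.api.records._
    import org.apache.hadoop.yarn.util.Records

    // Stable API: AM memory is set on the ApplicationSubmissionContext,
    // not on the ContainerLaunchContext as in the alpha client above.
    def setAMCapability(
        appContext: ApplicationSubmissionContext,
        amMemory: Int,
        amMemoryOverhead: Int): Unit = {
      val capability = Records.newRecord(classOf[Resource])
      capability.setMemory(amMemory + amMemoryOverhead)
      appContext.setResource(capability)
    }

    // setContainerTokens was renamed setTokens in the stable API.
    def setupSecurityToken(amContainer: ContainerLaunchContext, credentials: Credentials): Unit = {
      val dob = new DataOutputBuffer()
      credentials.writeTokenStorageToStream(dob)
      amContainer.setTokens(ByteBuffer.wrap(dob.getData()))
    }

    // getClientToken was renamed getClientToAMToken; the token in the
    // report is null when security is disabled.
    def getClientToken(report: ApplicationReport): String =
      Option(report.getClientToAMToken).map(_.toString).getOrElse("")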
diff --git a/yarn/alpha/src/main/scala/org/apache/spark/deploy/yarn/ExecutorRunnable.scala b/yarn/alpha/src/main/scala/org/apache/spark/deploy/yarn/ExecutorRunnable.scala
deleted file mode 100644
index 7023a11706..0000000000
--- a/yarn/alpha/src/main/scala/org/apache/spark/deploy/yarn/ExecutorRunnable.scala
+++ /dev/null
@@ -1,139 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.spark.deploy.yarn
-
-import java.net.URI
-import java.nio.ByteBuffer
-import java.security.PrivilegedExceptionAction
-
-import scala.collection.JavaConversions._
-import scala.collection.mutable.HashMap
-
-import org.apache.hadoop.conf.Configuration
-import org.apache.hadoop.io.DataOutputBuffer
-import org.apache.hadoop.net.NetUtils
-import org.apache.hadoop.security.UserGroupInformation
-import org.apache.hadoop.yarn.api._
-import org.apache.hadoop.yarn.api.records._
-import org.apache.hadoop.yarn.api.protocolrecords._
-import org.apache.hadoop.yarn.conf.YarnConfiguration
-import org.apache.hadoop.yarn.ipc.YarnRPC
-import org.apache.hadoop.yarn.util.{Apps, ConverterUtils, Records, ProtoUtils}
-
-import org.apache.spark.{SecurityManager, SparkConf, Logging}
-import org.apache.spark.network.util.JavaUtils
-
-@deprecated("use yarn/stable", "1.2.0")
-class ExecutorRunnable(
- container: Container,
- conf: Configuration,
- spConf: SparkConf,
- masterAddress: String,
- slaveId: String,
- hostname: String,
- executorMemory: Int,
- executorCores: Int,
- appAttemptId: String,
- securityMgr: SecurityManager)
- extends Runnable with ExecutorRunnableUtil with Logging {
-
- var rpc: YarnRPC = YarnRPC.create(conf)
- var cm: ContainerManager = _
- val sparkConf = spConf
- val yarnConf: YarnConfiguration = new YarnConfiguration(conf)
-
- def run = {
- logInfo("Starting Executor Container")
- cm = connectToCM
- startContainer
- }
-
- def startContainer = {
- logInfo("Setting up ContainerLaunchContext")
-
- val ctx = Records.newRecord(classOf[ContainerLaunchContext])
- .asInstanceOf[ContainerLaunchContext]
-
- ctx.setContainerId(container.getId())
- ctx.setResource(container.getResource())
- val localResources = prepareLocalResources
- ctx.setLocalResources(localResources)
-
- val env = prepareEnvironment
- ctx.setEnvironment(env)
-
- ctx.setUser(UserGroupInformation.getCurrentUser().getShortUserName())
-
- val credentials = UserGroupInformation.getCurrentUser().getCredentials()
- val dob = new DataOutputBuffer()
- credentials.writeTokenStorageToStream(dob)
- ctx.setContainerTokens(ByteBuffer.wrap(dob.getData()))
-
- val commands = prepareCommand(masterAddress, slaveId, hostname, executorMemory, executorCores,
- appAttemptId, localResources)
- logInfo("Setting up executor with commands: " + commands)
- ctx.setCommands(commands)
-
- ctx.setApplicationACLs(YarnSparkHadoopUtil.getApplicationAclsForYarn(securityMgr))
-
- // If external shuffle service is enabled, register with the Yarn shuffle service already
- // started on the NodeManager and, if authentication is enabled, provide it with our secret
- // key for fetching shuffle files later
- if (sparkConf.getBoolean("spark.shuffle.service.enabled", false)) {
- val secretString = securityMgr.getSecretKey()
- val secretBytes =
- if (secretString != null) {
- // This conversion must match how the YarnShuffleService decodes our secret
- JavaUtils.stringToBytes(secretString)
- } else {
- // Authentication is not enabled, so just provide dummy metadata
- ByteBuffer.allocate(0)
- }
- ctx.setServiceData(Map[String, ByteBuffer]("spark_shuffle" -> secretBytes))
- }
-
- // Send the start request to the ContainerManager
- val startReq = Records.newRecord(classOf[StartContainerRequest])
- .asInstanceOf[StartContainerRequest]
- startReq.setContainerLaunchContext(ctx)
- cm.startContainer(startReq)
- }
-
- def connectToCM: ContainerManager = {
- val cmHostPortStr = container.getNodeId().getHost() + ":" + container.getNodeId().getPort()
- val cmAddress = NetUtils.createSocketAddr(cmHostPortStr)
- logInfo("Connecting to ContainerManager at " + cmHostPortStr)
-
- // Use doAs and remoteUser here so we can add the container token and not pollute the current
- // users credentials with all of the individual container tokens
- val user = UserGroupInformation.createRemoteUser(container.getId().toString())
- val containerToken = container.getContainerToken()
- if (containerToken != null) {
- user.addToken(ProtoUtils.convertFromProtoFormat(containerToken, cmAddress))
- }
-
- val proxy = user
- .doAs(new PrivilegedExceptionAction[ContainerManager] {
- def run: ContainerManager = {
- rpc.getProxy(classOf[ContainerManager], cmAddress, conf).asInstanceOf[ContainerManager]
- }
- })
- proxy
- }
-
-}
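
The connectToCM plumbing deleted above (a hand-built RPC proxy, doAs with a remote user, and manual container-token handling) is exactly what the stable API folds into NMClient. A minimal sketch, assuming a Container and ContainerLaunchContext prepared as in the code above:

    import org.apache.hadoop.yarn.api.records.{Container, ContainerLaunchContext}
    import org.apache.hadoop.yarn.client.api.NMClient
    import org.apache.hadoop.yarn.conf.YarnConfiguration

    // Stable API: NMClient owns the proxy setup, token handling, and
    // StartContainerRequest plumbing done by hand in the alpha code above.
    def startContainer(
        conf: YarnConfiguration,
        container: Container,
        ctx: ContainerLaunchContext): Unit = {
      val nmClient = NMClient.createNMClient()
      nmClient.init(conf)
      nmClient.start()
      nmClient.startContainer(container, ctx)
    }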
diff --git a/yarn/alpha/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocationHandler.scala b/yarn/alpha/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocationHandler.scala
deleted file mode 100644
index abd37834ed..0000000000
--- a/yarn/alpha/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocationHandler.scala
+++ /dev/null
@@ -1,229 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.spark.deploy.yarn
-
-import java.util.concurrent.CopyOnWriteArrayList
-import java.util.concurrent.atomic.AtomicInteger
-
-import scala.collection.JavaConversions._
-import scala.collection.mutable.{ArrayBuffer, HashMap}
-
-import org.apache.spark.{SecurityManager, SparkConf}
-import org.apache.spark.scheduler.SplitInfo
-
-import org.apache.hadoop.conf.Configuration
-import org.apache.hadoop.yarn.api.AMRMProtocol
-import org.apache.hadoop.yarn.api.records._
-import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest
-import org.apache.hadoop.yarn.util.Records
-
-/**
- * Acquires resources for executors from a ResourceManager and launches executors in new containers.
- */
-private[yarn] class YarnAllocationHandler(
- conf: Configuration,
- sparkConf: SparkConf,
- resourceManager: AMRMProtocol,
- appAttemptId: ApplicationAttemptId,
- args: ApplicationMasterArguments,
- preferredNodes: collection.Map[String, collection.Set[SplitInfo]],
- securityMgr: SecurityManager)
- extends YarnAllocator(conf, sparkConf, appAttemptId, args, preferredNodes, securityMgr) {
-
- private val lastResponseId = new AtomicInteger()
- private val releaseList: CopyOnWriteArrayList[ContainerId] = new CopyOnWriteArrayList()
-
- override protected def allocateContainers(count: Int, pending: Int): YarnAllocateResponse = {
- var resourceRequests: List[ResourceRequest] = null
-
- logDebug("asking for additional executors: " + count + " with already pending: " + pending)
- val totalNumAsk = count + pending
- if (count <= 0) {
- resourceRequests = List()
- } else if (preferredHostToCount.isEmpty) {
- logDebug("host preferences is empty")
- resourceRequests = List(createResourceRequest(
- AllocationType.ANY, null, totalNumAsk, YarnSparkHadoopUtil.RM_REQUEST_PRIORITY))
- } else {
- // request for all hosts in preferred nodes and for numExecutors -
- // candidates.size, request by default allocation policy.
- val hostContainerRequests: ArrayBuffer[ResourceRequest] =
- new ArrayBuffer[ResourceRequest](preferredHostToCount.size)
- for ((candidateHost, candidateCount) <- preferredHostToCount) {
- val requiredCount = candidateCount - allocatedContainersOnHost(candidateHost)
-
- if (requiredCount > 0) {
- hostContainerRequests += createResourceRequest(
- AllocationType.HOST,
- candidateHost,
- requiredCount,
- YarnSparkHadoopUtil.RM_REQUEST_PRIORITY)
- }
- }
- val rackContainerRequests: List[ResourceRequest] = createRackResourceRequests(
- hostContainerRequests.toList)
-
- val anyContainerRequests: ResourceRequest = createResourceRequest(
- AllocationType.ANY,
- resource = null,
- totalNumAsk,
- YarnSparkHadoopUtil.RM_REQUEST_PRIORITY)
-
- val containerRequests: ArrayBuffer[ResourceRequest] = new ArrayBuffer[ResourceRequest](
- hostContainerRequests.size + rackContainerRequests.size + 1)
-
- containerRequests ++= hostContainerRequests
- containerRequests ++= rackContainerRequests
- containerRequests += anyContainerRequests
-
- resourceRequests = containerRequests.toList
- }
-
- val req = Records.newRecord(classOf[AllocateRequest])
- req.setResponseId(lastResponseId.incrementAndGet)
- req.setApplicationAttemptId(appAttemptId)
-
- req.addAllAsks(resourceRequests)
-
- val releasedContainerList = createReleasedContainerList()
- req.addAllReleases(releasedContainerList)
-
- if (count > 0) {
- logInfo("Allocating %d executor containers with %d of memory each.".format(totalNumAsk,
- executorMemory + memoryOverhead))
- } else {
- logDebug("Empty allocation req .. release : " + releasedContainerList)
- }
-
- for (request <- resourceRequests) {
- logInfo("ResourceRequest (host : %s, num containers: %d, priority = %s , capability : %s)".
- format(
- request.getHostName,
- request.getNumContainers,
- request.getPriority,
- request.getCapability))
- }
- new AlphaAllocateResponse(resourceManager.allocate(req).getAMResponse())
- }
-
- override protected def releaseContainer(container: Container) = {
- releaseList.add(container.getId())
- }
-
- private def createRackResourceRequests(hostContainers: List[ResourceRequest]):
- List[ResourceRequest] = {
- // First generate modified racks and new set of hosts under it : then issue requests
- val rackToCounts = new HashMap[String, Int]()
-
- // Within this lock - used to read/write to the rack related maps too.
- for (container <- hostContainers) {
- val candidateHost = container.getHostName
- val candidateNumContainers = container.getNumContainers
- assert(YarnSparkHadoopUtil.ANY_HOST != candidateHost)
-
- val rack = YarnSparkHadoopUtil.lookupRack(conf, candidateHost)
- if (rack != null) {
- var count = rackToCounts.getOrElse(rack, 0)
- count += candidateNumContainers
- rackToCounts.put(rack, count)
- }
- }
-
- val requestedContainers: ArrayBuffer[ResourceRequest] =
- new ArrayBuffer[ResourceRequest](rackToCounts.size)
- for ((rack, count) <- rackToCounts){
- requestedContainers +=
- createResourceRequest(AllocationType.RACK, rack, count,
- YarnSparkHadoopUtil.RM_REQUEST_PRIORITY)
- }
-
- requestedContainers.toList
- }
-
- private def createResourceRequest(
- requestType: AllocationType.AllocationType,
- resource:String,
- numExecutors: Int,
- priority: Int): ResourceRequest = {
-
- // If hostname specified, we need atleast two requests - node local and rack local.
- // There must be a third request - which is ANY : that will be specially handled.
- requestType match {
- case AllocationType.HOST => {
- assert(YarnSparkHadoopUtil.ANY_HOST != resource)
- val hostname = resource
- val nodeLocal = createResourceRequestImpl(hostname, numExecutors, priority)
-
- // Add to host->rack mapping
- YarnSparkHadoopUtil.populateRackInfo(conf, hostname)
-
- nodeLocal
- }
- case AllocationType.RACK => {
- val rack = resource
- createResourceRequestImpl(rack, numExecutors, priority)
- }
- case AllocationType.ANY => createResourceRequestImpl(
- YarnSparkHadoopUtil.ANY_HOST, numExecutors, priority)
- case _ => throw new IllegalArgumentException(
- "Unexpected/unsupported request type: " + requestType)
- }
- }
-
- private def createResourceRequestImpl(
- hostname:String,
- numExecutors: Int,
- priority: Int): ResourceRequest = {
-
- val rsrcRequest = Records.newRecord(classOf[ResourceRequest])
- val memCapability = Records.newRecord(classOf[Resource])
- // There probably is some overhead here, let's reserve a bit more memory.
- memCapability.setMemory(executorMemory + memoryOverhead)
- rsrcRequest.setCapability(memCapability)
-
- val pri = Records.newRecord(classOf[Priority])
- pri.setPriority(priority)
- rsrcRequest.setPriority(pri)
-
- rsrcRequest.setHostName(hostname)
-
- rsrcRequest.setNumContainers(java.lang.Math.max(numExecutors, 0))
- rsrcRequest
- }
-
- private def createReleasedContainerList(): ArrayBuffer[ContainerId] = {
- val retval = new ArrayBuffer[ContainerId](1)
- // Iterator on COW list ...
- for (container <- releaseList.iterator()){
- retval += container
- }
- // Remove from the original list.
- if (!retval.isEmpty) {
- releaseList.removeAll(retval)
- logInfo("Releasing " + retval.size + " containers.")
- }
- retval
- }
-
- private class AlphaAllocateResponse(response: AMResponse) extends YarnAllocateResponse {
- override def getAllocatedContainers() = response.getAllocatedContainers()
- override def getAvailableResources() = response.getAvailableResources()
- override def getCompletedContainersStatuses() = response.getCompletedContainersStatuses()
- }
-
-}
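
Everything this deleted handler assembles by hand — per-host, per-rack, and ANY ResourceRequests, response ids, and the release list — is tracked by AMRMClient in the stable API. A minimal sketch, assuming an already-registered AMRMClient:

    import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse
    import org.apache.hadoop.yarn.api.records.{Priority, Resource}
    import org.apache.hadoop.yarn.client.api.AMRMClient
    import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest
    import org.apache.hadoop.yarn.util.Records

    // Stable API: a single ContainerRequest carries the host/rack/ANY
    // preferences that the alpha code expresses as three ResourceRequest lists.
    def requestExecutor(
        amClient: AMRMClient[ContainerRequest],
        memory: Int,
        hosts: Array[String],
        racks: Array[String]): Unit = {
      val capability = Records.newRecord(classOf[Resource])
      capability.setMemory(memory)
      val priority = Records.newRecord(classOf[Priority])
      priority.setPriority(1)
      amClient.addContainerRequest(new ContainerRequest(capability, hosts, racks, priority))
    }

    // allocate() doubles as the AM heartbeat; the response carries the newly
    // allocated and completed containers that AlphaAllocateResponse wraps above.
    def heartbeat(amClient: AMRMClient[ContainerRequest]): AllocateResponse =
      amClient.allocate(0.1f)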
diff --git a/yarn/alpha/src/main/scala/org/apache/spark/deploy/yarn/YarnRMClientImpl.scala b/yarn/alpha/src/main/scala/org/apache/spark/deploy/yarn/YarnRMClientImpl.scala
deleted file mode 100644
index e342cc82f4..0000000000
--- a/yarn/alpha/src/main/scala/org/apache/spark/deploy/yarn/YarnRMClientImpl.scala
+++ /dev/null
@@ -1,118 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.spark.deploy.yarn
-
-import scala.collection.{Map, Set}
-import java.net.URI
-
-import org.apache.hadoop.net.NetUtils
-import org.apache.hadoop.yarn.api._
-import org.apache.hadoop.yarn.api.records._
-import org.apache.hadoop.yarn.api.protocolrecords._
-import org.apache.hadoop.yarn.conf.YarnConfiguration
-import org.apache.hadoop.yarn.ipc.YarnRPC
-import org.apache.hadoop.yarn.util.{ConverterUtils, Records}
-
-import org.apache.spark.{Logging, SecurityManager, SparkConf}
-import org.apache.spark.scheduler.SplitInfo
-import org.apache.spark.util.Utils
-
-/**
- * YarnRMClient implementation for the Yarn alpha API.
- */
-private class YarnRMClientImpl(args: ApplicationMasterArguments) extends YarnRMClient with Logging {
-
- private var rpc: YarnRPC = null
- private var resourceManager: AMRMProtocol = _
- private var uiHistoryAddress: String = _
- private var registered: Boolean = false
-
- override def register(
- conf: YarnConfiguration,
- sparkConf: SparkConf,
- preferredNodeLocations: Map[String, Set[SplitInfo]],
- uiAddress: String,
- uiHistoryAddress: String,
- securityMgr: SecurityManager) = {
- this.rpc = YarnRPC.create(conf)
- this.uiHistoryAddress = uiHistoryAddress
-
- synchronized {
- resourceManager = registerWithResourceManager(conf)
- registerApplicationMaster(uiAddress)
- registered = true
- }
-
- new YarnAllocationHandler(conf, sparkConf, resourceManager, getAttemptId(), args,
- preferredNodeLocations, securityMgr)
- }
-
- override def getAttemptId() = {
- val envs = System.getenv()
- val containerIdString = envs.get(ApplicationConstants.AM_CONTAINER_ID_ENV)
- val containerId = ConverterUtils.toContainerId(containerIdString)
- val appAttemptId = containerId.getApplicationAttemptId()
- appAttemptId
- }
-
- override def unregister(status: FinalApplicationStatus, diagnostics: String = "") = synchronized {
- if (registered) {
- val finishReq = Records.newRecord(classOf[FinishApplicationMasterRequest])
- .asInstanceOf[FinishApplicationMasterRequest]
- finishReq.setAppAttemptId(getAttemptId())
- finishReq.setFinishApplicationStatus(status)
- finishReq.setDiagnostics(diagnostics)
- finishReq.setTrackingUrl(uiHistoryAddress)
- resourceManager.finishApplicationMaster(finishReq)
- }
- }
-
- override def getAmIpFilterParams(conf: YarnConfiguration, proxyBase: String) = {
- val proxy = YarnConfiguration.getProxyHostAndPort(conf)
- val parts = proxy.split(":")
- val uriBase = "http://" + proxy + proxyBase
- Map("PROXY_HOST" -> parts(0), "PROXY_URI_BASE" -> uriBase)
- }
-
- override def getMaxRegAttempts(conf: YarnConfiguration) =
- conf.getInt(YarnConfiguration.RM_AM_MAX_RETRIES, YarnConfiguration.DEFAULT_RM_AM_MAX_RETRIES)
-
- private def registerWithResourceManager(conf: YarnConfiguration): AMRMProtocol = {
- val rmAddress = NetUtils.createSocketAddr(conf.get(YarnConfiguration.RM_SCHEDULER_ADDRESS,
- YarnConfiguration.DEFAULT_RM_SCHEDULER_ADDRESS))
- logInfo("Connecting to ResourceManager at " + rmAddress)
- rpc.getProxy(classOf[AMRMProtocol], rmAddress, conf).asInstanceOf[AMRMProtocol]
- }
-
- private def registerApplicationMaster(uiAddress: String): RegisterApplicationMasterResponse = {
- val appMasterRequest = Records.newRecord(classOf[RegisterApplicationMasterRequest])
- .asInstanceOf[RegisterApplicationMasterRequest]
- appMasterRequest.setApplicationAttemptId(getAttemptId())
- // Setting this to master host,port - so that the ApplicationReport at client has some
- // sensible info.
- // Users can then monitor stderr/stdout on that node if required.
- appMasterRequest.setHost(Utils.localHostName())
- appMasterRequest.setRpcPort(0)
- // remove the scheme from the url if it exists since Hadoop does not expect scheme
- val uri = new URI(uiAddress)
- val authority = if (uri.getScheme == null) uiAddress else uri.getAuthority
- appMasterRequest.setTrackingUrl(authority)
- resourceManager.registerApplicationMaster(appMasterRequest)
- }
-
-}
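
The register/unregister pair deleted above, built from raw protocol records against AMRMProtocol, collapses to two calls in the stable API. A minimal sketch, assuming the tracking URL has already had its scheme stripped as in the code above:

    import org.apache.hadoop.yarn.api.records.FinalApplicationStatus
    import org.apache.hadoop.yarn.client.api.AMRMClient
    import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest
    import org.apache.hadoop.yarn.conf.YarnConfiguration

    // Stable API: registration is one call on AMRMClient instead of the
    // hand-built RegisterApplicationMasterRequest above.
    def register(
        conf: YarnConfiguration,
        host: String,
        trackingUrl: String): AMRMClient[ContainerRequest] = {
      val amClient: AMRMClient[ContainerRequest] = AMRMClient.createAMRMClient()
      amClient.init(conf)
      amClient.start()
      amClient.registerApplicationMaster(host, 0, trackingUrl)
      amClient
    }

    // Likewise, unregistration replaces the FinishApplicationMasterRequest record.
    def unregister(
        amClient: AMRMClient[ContainerRequest],
        status: FinalApplicationStatus,
        diagnostics: String,
        uiHistoryAddress: String): Unit =
      amClient.unregisterApplicationMaster(status, diagnostics, uiHistoryAddress)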
diff --git a/yarn/pom.xml b/yarn/pom.xml
index bba73648c7..d7579bf962 100644
--- a/yarn/pom.xml
+++ b/yarn/pom.xml
@@ -25,9 +25,9 @@
</parent>
<groupId>org.apache.spark</groupId>
- <artifactId>yarn-parent_2.10</artifactId>
- <packaging>pom</packaging>
- <name>Spark Project YARN Parent POM</name>
+ <artifactId>spark-yarn_2.10</artifactId>
+ <packaging>jar</packaging>
+ <name>Spark Project YARN</name>
<properties>
<sbt.project.name>yarn</sbt.project.name>
</properties>
@@ -59,6 +59,12 @@
<artifactId>hadoop-client</artifactId>
</dependency>
<dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-yarn-server-tests</artifactId>
+ <classifier>tests</classifier>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
<groupId>org.scalatest</groupId>
<artifactId>scalatest_${scala.binary.version}</artifactId>
<scope>test</scope>
@@ -70,41 +76,54 @@
</dependency>
</dependencies>
+ <!--
+ See SPARK-3710. hadoop-yarn-server-tests in Hadoop 2.2 fails to pull some needed
+ dependencies, so they need to be added manually for the tests to work.
+ -->
<profiles>
<profile>
- <id>yarn-alpha</id>
- <build>
- <plugins>
- <plugin>
- <artifactId>maven-antrun-plugin</artifactId>
- <executions>
- <execution>
- <phase>validate</phase>
- <goals>
- <goal>run</goal>
- </goals>
- <configuration>
- <tasks>
- <echo>*******************************************************************************************</echo>
- <echo>***WARNING***: Support for YARN-alpha API's will be removed in Spark 1.3 (see SPARK-3445).*</echo>
- <echo>*******************************************************************************************</echo>
- </tasks>
- </configuration>
- </execution>
- </executions>
- </plugin>
- </plugins>
- </build>
- <modules>
- <module>alpha</module>
- </modules>
- </profile>
-
- <profile>
- <id>yarn</id>
- <modules>
- <module>stable</module>
- </modules>
+ <id>hadoop-2.2</id>
+ <properties>
+ <jersey.version>1.9</jersey.version>
+ </properties>
+ <dependencies>
+ <dependency>
+ <groupId>org.mortbay.jetty</groupId>
+ <artifactId>jetty</artifactId>
+ <version>6.1.26</version>
+ <exclusions>
+ <exclusion>
+ <groupId>org.mortbay.jetty</groupId>
+ <artifactId>servlet-api</artifactId>
+ </exclusion>
+ </exclusions>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>com.sun.jersey</groupId>
+ <artifactId>jersey-core</artifactId>
+ <version>${jersey.version}</version>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>com.sun.jersey</groupId>
+ <artifactId>jersey-json</artifactId>
+ <version>${jersey.version}</version>
+ <scope>test</scope>
+ <exclusions>
+ <exclusion>
+ <groupId>stax</groupId>
+ <artifactId>stax-api</artifactId>
+ </exclusion>
+ </exclusions>
+ </dependency>
+ <dependency>
+ <groupId>com.sun.jersey</groupId>
+ <artifactId>jersey-server</artifactId>
+ <version>${jersey.version}</version>
+ <scope>test</scope>
+ </dependency>
+ </dependencies>
</profile>
</profiles>
@@ -125,38 +144,6 @@
</configuration>
</plugin>
<plugin>
- <groupId>org.codehaus.mojo</groupId>
- <artifactId>build-helper-maven-plugin</artifactId>
- <executions>
- <execution>
- <id>add-scala-sources</id>
- <phase>generate-sources</phase>
- <goals>
- <goal>add-source</goal>
- </goals>
- <configuration>
- <sources>
- <source>src/main/scala</source>
- <source>../common/src/main/scala</source>
- </sources>
- </configuration>
- </execution>
- <execution>
- <id>add-scala-test-sources</id>
- <phase>generate-test-sources</phase>
- <goals>
- <goal>add-test-source</goal>
- </goals>
- <configuration>
- <sources>
- <source>src/test/scala</source>
- <source>../common/src/test/scala</source>
- </sources>
- </configuration>
- </execution>
- </executions>
- </plugin>
- <plugin>
<groupId>org.scalatest</groupId>
<artifactId>scalatest-maven-plugin</artifactId>
<configuration>
@@ -169,12 +156,6 @@
<outputDirectory>target/scala-${scala.binary.version}/classes</outputDirectory>
<testOutputDirectory>target/scala-${scala.binary.version}/test-classes</testOutputDirectory>
-
- <resources>
- <resource>
- <directory>../common/src/main/resources</directory>
- </resource>
- </resources>
</build>
</project>
diff --git a/yarn/common/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMaster.scala b/yarn/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMaster.scala
index 987b3373fb..987b3373fb 100644
--- a/yarn/common/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMaster.scala
+++ b/yarn/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMaster.scala
diff --git a/yarn/common/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMasterArguments.scala b/yarn/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMasterArguments.scala
index d76a63276d..d76a63276d 100644
--- a/yarn/common/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMasterArguments.scala
+++ b/yarn/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMasterArguments.scala
diff --git a/yarn/stable/src/main/scala/org/apache/spark/deploy/yarn/Client.scala b/yarn/src/main/scala/org/apache/spark/deploy/yarn/Client.scala
index addaddb711..addaddb711 100644
--- a/yarn/stable/src/main/scala/org/apache/spark/deploy/yarn/Client.scala
+++ b/yarn/src/main/scala/org/apache/spark/deploy/yarn/Client.scala
diff --git a/yarn/common/src/main/scala/org/apache/spark/deploy/yarn/ClientArguments.scala b/yarn/src/main/scala/org/apache/spark/deploy/yarn/ClientArguments.scala
index 4d859450ef..c439969510 100644
--- a/yarn/common/src/main/scala/org/apache/spark/deploy/yarn/ClientArguments.scala
+++ b/yarn/src/main/scala/org/apache/spark/deploy/yarn/ClientArguments.scala
@@ -178,21 +178,25 @@ private[spark] class ClientArguments(args: Array[String], sparkConf: SparkConf)
private def getUsageMessage(unknownParam: List[String] = null): String = {
val message = if (unknownParam != null) s"Unknown/unsupported param $unknownParam\n" else ""
- message +
- "Usage: org.apache.spark.deploy.yarn.Client [options] \n" +
- "Options:\n" +
- " --jar JAR_PATH Path to your application's JAR file (required in yarn-cluster mode)\n" +
- " --class CLASS_NAME Name of your application's main class (required)\n" +
- " --arg ARG Argument to be passed to your application's main class.\n" +
- " Multiple invocations are possible, each will be passed in order.\n" +
- " --num-executors NUM Number of executors to start (Default: 2)\n" +
- " --executor-cores NUM Number of cores for the executors (Default: 1).\n" +
- " --driver-memory MEM Memory for driver (e.g. 1000M, 2G) (Default: 512 Mb)\n" +
- " --executor-memory MEM Memory per executor (e.g. 1000M, 2G) (Default: 1G)\n" +
- " --name NAME The name of your application (Default: Spark)\n" +
- " --queue QUEUE The hadoop queue to use for allocation requests (Default: 'default')\n" +
- " --addJars jars Comma separated list of local jars that want SparkContext.addJar to work with.\n" +
- " --files files Comma separated list of files to be distributed with the job.\n" +
- " --archives archives Comma separated list of archives to be distributed with the job."
+ message + """
+ |Usage: org.apache.spark.deploy.yarn.Client [options]
+ |Options:
+ | --jar JAR_PATH Path to your application's JAR file (required in yarn-cluster
+ | mode)
+ | --class CLASS_NAME Name of your application's main class (required)
+ | --arg ARG Argument to be passed to your application's main class.
+ | Multiple invocations are possible, each will be passed in order.
+ | --num-executors NUM Number of executors to start (Default: 2)
+ | --executor-cores NUM Number of cores for the executors (Default: 1).
+ | --driver-memory MEM Memory for driver (e.g. 1000M, 2G) (Default: 512 Mb)
+ | --executor-memory MEM Memory per executor (e.g. 1000M, 2G) (Default: 1G)
+ | --name NAME The name of your application (Default: Spark)
+ | --queue QUEUE The hadoop queue to use for allocation requests (Default:
+ | 'default')
+ | --addJars jars Comma separated list of local jars that want SparkContext.addJar
+ | to work with.
+ | --files files Comma separated list of files to be distributed with the job.
+ | --archives archives Comma separated list of archives to be distributed with the job.
+ """
}
}
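
The rewritten usage text uses Scala margin pipes, which only disappear at runtime if stripMargin is applied to the literal. A minimal illustration of how such a block renders (hypothetical snippet, not taken from this patch):

    // stripMargin removes everything up to and including each leading '|',
    // so the printed usage text is flush left.
    val usage =
      """
        |Usage: org.apache.spark.deploy.yarn.Client [options]
        |Options:
        |  --jar JAR_PATH    Path to your application's JAR file
      """.stripMargin
    println(usage)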
diff --git a/yarn/common/src/main/scala/org/apache/spark/deploy/yarn/ClientBase.scala b/yarn/src/main/scala/org/apache/spark/deploy/yarn/ClientBase.scala
index f95d723791..f95d723791 100644
--- a/yarn/common/src/main/scala/org/apache/spark/deploy/yarn/ClientBase.scala
+++ b/yarn/src/main/scala/org/apache/spark/deploy/yarn/ClientBase.scala
diff --git a/yarn/common/src/main/scala/org/apache/spark/deploy/yarn/ClientDistributedCacheManager.scala b/yarn/src/main/scala/org/apache/spark/deploy/yarn/ClientDistributedCacheManager.scala
index c592ecfdfc..c592ecfdfc 100644
--- a/yarn/common/src/main/scala/org/apache/spark/deploy/yarn/ClientDistributedCacheManager.scala
+++ b/yarn/src/main/scala/org/apache/spark/deploy/yarn/ClientDistributedCacheManager.scala
diff --git a/yarn/stable/src/main/scala/org/apache/spark/deploy/yarn/ExecutorRunnable.scala b/yarn/src/main/scala/org/apache/spark/deploy/yarn/ExecutorRunnable.scala
index fdd3c2300f..fdd3c2300f 100644
--- a/yarn/stable/src/main/scala/org/apache/spark/deploy/yarn/ExecutorRunnable.scala
+++ b/yarn/src/main/scala/org/apache/spark/deploy/yarn/ExecutorRunnable.scala
diff --git a/yarn/common/src/main/scala/org/apache/spark/deploy/yarn/ExecutorRunnableUtil.scala b/yarn/src/main/scala/org/apache/spark/deploy/yarn/ExecutorRunnableUtil.scala
index 88dad0febd..22d73ecf6d 100644
--- a/yarn/common/src/main/scala/org/apache/spark/deploy/yarn/ExecutorRunnableUtil.scala
+++ b/yarn/src/main/scala/org/apache/spark/deploy/yarn/ExecutorRunnableUtil.scala
@@ -93,12 +93,13 @@ trait ExecutorRunnableUtil extends Logging {
/*
else {
// If no java_opts specified, default to using -XX:+CMSIncrementalMode
- // It might be possible that other modes/config is being done in spark.executor.extraJavaOptions,
- // so we dont want to mess with it.
+ // It might be possible that other modes/config is being done in
+ // spark.executor.extraJavaOptions, so we dont want to mess with it.
// In our expts, using (default) throughput collector has severe perf ramnifications in
// multi-tennent machines
// The options are based on
- // http://www.oracle.com/technetwork/java/gc-tuning-5-138395.html#0.0.0.%20When%20to%20Use%20the%20Concurrent%20Low%20Pause%20Collector|outline
+ // http://www.oracle.com/technetwork/java/gc-tuning-5-138395.html#0.0.0.%20When%20to%20Use
+ // %20the%20Concurrent%20Low%20Pause%20Collector|outline
javaOpts += " -XX:+UseConcMarkSweepGC "
javaOpts += " -XX:+CMSIncrementalMode "
javaOpts += " -XX:+CMSIncrementalPacing "
diff --git a/yarn/stable/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocationHandler.scala b/yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocationHandler.scala
index 2bbf5d7db8..2bbf5d7db8 100644
--- a/yarn/stable/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocationHandler.scala
+++ b/yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocationHandler.scala
diff --git a/yarn/common/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocator.scala b/yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocator.scala
index b32e15738f..b32e15738f 100644
--- a/yarn/common/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocator.scala
+++ b/yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocator.scala
diff --git a/yarn/common/src/main/scala/org/apache/spark/deploy/yarn/YarnRMClient.scala b/yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnRMClient.scala
index 2510b9c9ce..2510b9c9ce 100644
--- a/yarn/common/src/main/scala/org/apache/spark/deploy/yarn/YarnRMClient.scala
+++ b/yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnRMClient.scala
diff --git a/yarn/stable/src/main/scala/org/apache/spark/deploy/yarn/YarnRMClientImpl.scala b/yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnRMClientImpl.scala
index 8d4b96ed79..8d4b96ed79 100644
--- a/yarn/stable/src/main/scala/org/apache/spark/deploy/yarn/YarnRMClientImpl.scala
+++ b/yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnRMClientImpl.scala
diff --git a/yarn/common/src/main/scala/org/apache/spark/deploy/yarn/YarnSparkHadoopUtil.scala b/yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnSparkHadoopUtil.scala
index 7d453ecb79..d7cf904db1 100644
--- a/yarn/common/src/main/scala/org/apache/spark/deploy/yarn/YarnSparkHadoopUtil.scala
+++ b/yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnSparkHadoopUtil.scala
@@ -48,15 +48,17 @@ class YarnSparkHadoopUtil extends SparkHadoopUtil {
dest.addCredentials(source.getCredentials())
}
- // Note that all params which start with SPARK are propagated all the way through, so if in yarn mode, this MUST be set to true.
+ // Note that all params which start with SPARK are propagated all the way through, so if in yarn
+ // mode, this MUST be set to true.
override def isYarnMode(): Boolean = { true }
- // Return an appropriate (subclass) of Configuration. Creating config can initializes some hadoop subsystems
- // Always create a new config, dont reuse yarnConf.
+ // Return an appropriate (subclass) of Configuration. Creating a config initializes some Hadoop
+ // subsystems. Always create a new config, dont reuse yarnConf.
override def newConfiguration(conf: SparkConf): Configuration =
new YarnConfiguration(super.newConfiguration(conf))
- // add any user credentials to the job conf which are necessary for running on a secure Hadoop cluster
+ // Add any user credentials to the job conf which are necessary for running on a secure Hadoop
+ // cluster
override def addCredentials(conf: JobConf) {
val jobCreds = conf.getCredentials()
jobCreds.mergeAll(UserGroupInformation.getCurrentUser().getCredentials())
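
As the rewrapped comments note, newConfiguration must hand back a YarnConfiguration (so YARN-specific defaults get loaded) and must build a fresh config each time rather than reusing yarnConf. A minimal check of that contract, assuming YarnSparkHadoopUtil is instantiable as declared above:

    import org.apache.hadoop.yarn.conf.YarnConfiguration
    import org.apache.spark.SparkConf
    import org.apache.spark.deploy.yarn.YarnSparkHadoopUtil

    val util = new YarnSparkHadoopUtil()
    val conf1 = util.newConfiguration(new SparkConf())
    val conf2 = util.newConfiguration(new SparkConf())
    assert(conf1.isInstanceOf[YarnConfiguration]) // YARN subsystems initialized
    assert(conf1 ne conf2)                        // a new config every call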
diff --git a/yarn/common/src/main/scala/org/apache/spark/scheduler/cluster/YarnClientClusterScheduler.scala b/yarn/src/main/scala/org/apache/spark/scheduler/cluster/YarnClientClusterScheduler.scala
index 254774a6b8..254774a6b8 100644
--- a/yarn/common/src/main/scala/org/apache/spark/scheduler/cluster/YarnClientClusterScheduler.scala
+++ b/yarn/src/main/scala/org/apache/spark/scheduler/cluster/YarnClientClusterScheduler.scala
diff --git a/yarn/common/src/main/scala/org/apache/spark/scheduler/cluster/YarnClientSchedulerBackend.scala b/yarn/src/main/scala/org/apache/spark/scheduler/cluster/YarnClientSchedulerBackend.scala
index 2923e6729c..2923e6729c 100644
--- a/yarn/common/src/main/scala/org/apache/spark/scheduler/cluster/YarnClientSchedulerBackend.scala
+++ b/yarn/src/main/scala/org/apache/spark/scheduler/cluster/YarnClientSchedulerBackend.scala
diff --git a/yarn/common/src/main/scala/org/apache/spark/scheduler/cluster/YarnClusterScheduler.scala b/yarn/src/main/scala/org/apache/spark/scheduler/cluster/YarnClusterScheduler.scala
index 4157ff95c2..4157ff95c2 100644
--- a/yarn/common/src/main/scala/org/apache/spark/scheduler/cluster/YarnClusterScheduler.scala
+++ b/yarn/src/main/scala/org/apache/spark/scheduler/cluster/YarnClusterScheduler.scala
diff --git a/yarn/common/src/main/scala/org/apache/spark/scheduler/cluster/YarnClusterSchedulerBackend.scala b/yarn/src/main/scala/org/apache/spark/scheduler/cluster/YarnClusterSchedulerBackend.scala
index b1de81e6a8..b1de81e6a8 100644
--- a/yarn/common/src/main/scala/org/apache/spark/scheduler/cluster/YarnClusterSchedulerBackend.scala
+++ b/yarn/src/main/scala/org/apache/spark/scheduler/cluster/YarnClusterSchedulerBackend.scala
diff --git a/yarn/stable/src/test/resources/log4j.properties b/yarn/src/test/resources/log4j.properties
index 9dd05f17f0..9dd05f17f0 100644
--- a/yarn/stable/src/test/resources/log4j.properties
+++ b/yarn/src/test/resources/log4j.properties
diff --git a/yarn/common/src/test/scala/org/apache/spark/deploy/yarn/ClientBaseSuite.scala b/yarn/src/test/scala/org/apache/spark/deploy/yarn/ClientBaseSuite.scala
index 17b79ae1d8..17b79ae1d8 100644
--- a/yarn/common/src/test/scala/org/apache/spark/deploy/yarn/ClientBaseSuite.scala
+++ b/yarn/src/test/scala/org/apache/spark/deploy/yarn/ClientBaseSuite.scala
diff --git a/yarn/common/src/test/scala/org/apache/spark/deploy/yarn/ClientDistributedCacheManagerSuite.scala b/yarn/src/test/scala/org/apache/spark/deploy/yarn/ClientDistributedCacheManagerSuite.scala
index 80b57d1355..80b57d1355 100644
--- a/yarn/common/src/test/scala/org/apache/spark/deploy/yarn/ClientDistributedCacheManagerSuite.scala
+++ b/yarn/src/test/scala/org/apache/spark/deploy/yarn/ClientDistributedCacheManagerSuite.scala
diff --git a/yarn/common/src/test/scala/org/apache/spark/deploy/yarn/YarnAllocatorSuite.scala b/yarn/src/test/scala/org/apache/spark/deploy/yarn/YarnAllocatorSuite.scala
index 8d184a09d6..8d184a09d6 100644
--- a/yarn/common/src/test/scala/org/apache/spark/deploy/yarn/YarnAllocatorSuite.scala
+++ b/yarn/src/test/scala/org/apache/spark/deploy/yarn/YarnAllocatorSuite.scala
diff --git a/yarn/stable/src/test/scala/org/apache/spark/deploy/yarn/YarnClusterSuite.scala b/yarn/src/test/scala/org/apache/spark/deploy/yarn/YarnClusterSuite.scala
index d79b85e867..d79b85e867 100644
--- a/yarn/stable/src/test/scala/org/apache/spark/deploy/yarn/YarnClusterSuite.scala
+++ b/yarn/src/test/scala/org/apache/spark/deploy/yarn/YarnClusterSuite.scala
diff --git a/yarn/common/src/test/scala/org/apache/spark/deploy/yarn/YarnSparkHadoopUtilSuite.scala b/yarn/src/test/scala/org/apache/spark/deploy/yarn/YarnSparkHadoopUtilSuite.scala
index 2cc5abb3a8..2cc5abb3a8 100644
--- a/yarn/common/src/test/scala/org/apache/spark/deploy/yarn/YarnSparkHadoopUtilSuite.scala
+++ b/yarn/src/test/scala/org/apache/spark/deploy/yarn/YarnSparkHadoopUtilSuite.scala
diff --git a/yarn/stable/pom.xml b/yarn/stable/pom.xml
deleted file mode 100644
index 8b6521ad7f..0000000000
--- a/yarn/stable/pom.xml
+++ /dev/null
@@ -1,95 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- ~ Licensed to the Apache Software Foundation (ASF) under one or more
- ~ contributor license agreements. See the NOTICE file distributed with
- ~ this work for additional information regarding copyright ownership.
- ~ The ASF licenses this file to You under the Apache License, Version 2.0
- ~ (the "License"); you may not use this file except in compliance with
- ~ the License. You may obtain a copy of the License at
- ~
- ~ http://www.apache.org/licenses/LICENSE-2.0
- ~
- ~ Unless required by applicable law or agreed to in writing, software
- ~ distributed under the License is distributed on an "AS IS" BASIS,
- ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- ~ See the License for the specific language governing permissions and
- ~ limitations under the License.
- -->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
- <modelVersion>4.0.0</modelVersion>
- <parent>
- <groupId>org.apache.spark</groupId>
- <artifactId>yarn-parent_2.10</artifactId>
- <version>1.3.0-SNAPSHOT</version>
- <relativePath>../pom.xml</relativePath>
- </parent>
- <properties>
- <sbt.project.name>yarn-stable</sbt.project.name>
- </properties>
-
- <groupId>org.apache.spark</groupId>
- <artifactId>spark-yarn_2.10</artifactId>
- <packaging>jar</packaging>
- <name>Spark Project YARN Stable API</name>
-
- <dependencies>
- <dependency>
- <groupId>org.apache.hadoop</groupId>
- <artifactId>hadoop-yarn-server-tests</artifactId>
- <classifier>tests</classifier>
- <scope>test</scope>
- </dependency>
- </dependencies>
-
- <!--
- See SPARK-3710. hadoop-yarn-server-tests in Hadoop 2.2 fails to pull some needed
- dependencies, so they need to be added manually for the tests to work.
- -->
- <profiles>
- <profile>
- <id>hadoop-2.2</id>
- <properties>
- <jersey.version>1.9</jersey.version>
- </properties>
- <dependencies>
- <dependency>
- <groupId>org.mortbay.jetty</groupId>
- <artifactId>jetty</artifactId>
- <version>6.1.26</version>
- <exclusions>
- <exclusion>
- <groupId>org.mortbay.jetty</groupId>
- <artifactId>servlet-api</artifactId>
- </exclusion>
- </exclusions>
- <scope>test</scope>
- </dependency>
- <dependency>
- <groupId>com.sun.jersey</groupId>
- <artifactId>jersey-core</artifactId>
- <version>${jersey.version}</version>
- <scope>test</scope>
- </dependency>
- <dependency>
- <groupId>com.sun.jersey</groupId>
- <artifactId>jersey-json</artifactId>
- <version>${jersey.version}</version>
- <scope>test</scope>
- <exclusions>
- <exclusion>
- <groupId>stax</groupId>
- <artifactId>stax-api</artifactId>
- </exclusion>
- </exclusions>
- </dependency>
- <dependency>
- <groupId>com.sun.jersey</groupId>
- <artifactId>jersey-server</artifactId>
- <version>${jersey.version}</version>
- <scope>test</scope>
- </dependency>
- </dependencies>
- </profile>
- </profiles>
-
-</project>