about summary refs log tree commit diff
path: root/yarn
diff options
context:
space:
mode:
authorThomas Graves <tgraves@apache.org>2014-09-05 09:56:22 -0500
committerThomas Graves <tgraves@apache.org>2014-09-05 09:56:22 -0500
commit62c557609929982eeec170fe12f810bedfcf97f2 (patch)
tree91f235155df9bb216aaed78fb73fbca15383726b /yarn
parent51b53a758c85f2e20ad9bd73ed815fcfa9c7180b (diff)
downloadspark-62c557609929982eeec170fe12f810bedfcf97f2.tar.gz
spark-62c557609929982eeec170fe12f810bedfcf97f2.tar.bz2
spark-62c557609929982eeec170fe12f810bedfcf97f2.zip
[SPARK-3375] spark on yarn container allocation issues
If YARN doesn't grant the containers immediately, it stops asking for them and the YARN application hangs, never getting any executors. The issue here is that after we send the original request for X containers, we send a follow-up request for 0 containers; on the YARN side this clears out the original request. For a ping we should just send empty asks. Author: Thomas Graves <tgraves@apache.org> Closes #2275 from tgravescs/SPARK-3375 and squashes the following commits: 74b6820 [Thomas Graves] send empty resource requests when we aren't asking for containers
Diffstat (limited to 'yarn')
-rw-r--r--yarn/alpha/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocationHandler.scala13
-rw-r--r--yarn/stable/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocationHandler.scala8
2 files changed, 12 insertions, 9 deletions
diff --git a/yarn/alpha/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocationHandler.scala b/yarn/alpha/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocationHandler.scala
index 85d6274df2..5a1b42c1e1 100644
--- a/yarn/alpha/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocationHandler.scala
+++ b/yarn/alpha/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocationHandler.scala
@@ -51,12 +51,13 @@ private[yarn] class YarnAllocationHandler(
override protected def allocateContainers(count: Int): YarnAllocateResponse = {
var resourceRequests: List[ResourceRequest] = null
- // default.
- if (count <= 0 || preferredHostToCount.isEmpty) {
- logDebug("numExecutors: " + count + ", host preferences: " +
- preferredHostToCount.isEmpty)
- resourceRequests = List(createResourceRequest(
- AllocationType.ANY, null, count, YarnSparkHadoopUtil.RM_REQUEST_PRIORITY))
+ logDebug("numExecutors: " + count)
+ if (count <= 0) {
+ resourceRequests = List()
+ } else if (preferredHostToCount.isEmpty) {
+ logDebug("host preferences is empty")
+ resourceRequests = List(createResourceRequest(
+ AllocationType.ANY, null, count, YarnSparkHadoopUtil.RM_REQUEST_PRIORITY))
} else {
// request for all hosts in preferred nodes and for numExecutors -
// candidates.size, request by default allocation policy.
diff --git a/yarn/stable/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocationHandler.scala b/yarn/stable/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocationHandler.scala
index c887cb52dd..5438f151ac 100644
--- a/yarn/stable/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocationHandler.scala
+++ b/yarn/stable/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocationHandler.scala
@@ -88,9 +88,11 @@ private[yarn] class YarnAllocationHandler(
private def addResourceRequests(numExecutors: Int) {
val containerRequests: List[ContainerRequest] =
- if (numExecutors <= 0 || preferredHostToCount.isEmpty) {
- logDebug("numExecutors: " + numExecutors + ", host preferences: " +
- preferredHostToCount.isEmpty)
+ if (numExecutors <= 0) {
+ logDebug("numExecutors: " + numExecutors)
+ List()
+ } else if (preferredHostToCount.isEmpty) {
+ logDebug("host preferences is empty")
createResourceRequests(
AllocationType.ANY,
resource = null,