aboutsummaryrefslogtreecommitdiff
path: root/yarn/src
diff options
context:
space:
mode:
Diffstat (limited to 'yarn/src')
-rw-r--r--yarn/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMaster.scala7
-rw-r--r--yarn/src/main/scala/org/apache/spark/scheduler/cluster/YarnClusterSchedulerBackend.scala12
2 files changed, 15 insertions, 4 deletions
diff --git a/yarn/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMaster.scala b/yarn/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMaster.scala
index 70cb57ffd8..27f804782f 100644
--- a/yarn/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMaster.scala
+++ b/yarn/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMaster.scala
@@ -89,6 +89,10 @@ private[spark] class ApplicationMaster(
// Propagate the application ID so that YarnClusterSchedulerBackend can pick it up.
System.setProperty("spark.yarn.app.id", appAttemptId.getApplicationId().toString())
+
+ // Propagate the attempt ID so that, in case of event logging,
+ // different attempts' logs get created in different directories
+ System.setProperty("spark.yarn.app.attemptId", appAttemptId.getAttemptId().toString())
}
logInfo("ApplicationAttemptId: " + appAttemptId)
@@ -208,10 +212,11 @@ private[spark] class ApplicationMaster(
val sc = sparkContextRef.get()
val appId = client.getAttemptId().getApplicationId().toString()
+ val attemptId = client.getAttemptId().getAttemptId().toString()
val historyAddress =
sparkConf.getOption("spark.yarn.historyServer.address")
.map { text => SparkHadoopUtil.get.substituteHadoopVariables(text, yarnConf) }
- .map { address => s"${address}${HistoryServer.UI_PATH_PREFIX}/${appId}" }
+ .map { address => s"${address}${HistoryServer.UI_PATH_PREFIX}/${appId}/${attemptId}" }
.getOrElse("")
allocator = client.register(yarnConf,
diff --git a/yarn/src/main/scala/org/apache/spark/scheduler/cluster/YarnClusterSchedulerBackend.scala b/yarn/src/main/scala/org/apache/spark/scheduler/cluster/YarnClusterSchedulerBackend.scala
index b1de81e6a8..aeb218a575 100644
--- a/yarn/src/main/scala/org/apache/spark/scheduler/cluster/YarnClusterSchedulerBackend.scala
+++ b/yarn/src/main/scala/org/apache/spark/scheduler/cluster/YarnClusterSchedulerBackend.scala
@@ -39,12 +39,18 @@ private[spark] class YarnClusterSchedulerBackend(
}
override def applicationId(): String =
- // In YARN Cluster mode, spark.yarn.app.id is expect to be set
- // before user application is launched.
- // So, if spark.yarn.app.id is not set, it is something wrong.
+ // In YARN Cluster mode, the application ID is expected to be set, so log an error if it's
+ // not found.
sc.getConf.getOption("spark.yarn.app.id").getOrElse {
logError("Application ID is not set.")
super.applicationId
}
+ override def applicationAttemptId(): Option[String] =
+ // In YARN Cluster mode, the attempt ID is expected to be set, so log an error if it's
+ // not found.
+ sc.getConf.getOption("spark.yarn.app.attemptId").orElse {
+ logError("Application attempt ID is not set.")
+ super.applicationAttemptId
+ }
}