about summary refs log tree commit diff
path: root/core/src
diff options
context:
space:
mode:
authorSteve Loughran <stevel@hortonworks.com>2016-05-26 13:55:22 -0500
committerTom Graves <tgraves@yahoo-inc.com>2016-05-26 13:55:22 -0500
commit01b350a4f7c17d6516b27b6cd27ba8390834d40c (patch)
tree166d6b9a2c7bfb8cc7dbf180c29393287cf7371a /core/src
parentc76457c8e422ce6fbf72a8fe5db94565783b12d0 (diff)
downloadspark-01b350a4f7c17d6516b27b6cd27ba8390834d40c.tar.gz
spark-01b350a4f7c17d6516b27b6cd27ba8390834d40c.tar.bz2
spark-01b350a4f7c17d6516b27b6cd27ba8390834d40c.zip
[SPARK-13148][YARN] document zero-keytab Oozie application launch; add diagnostics
This patch provides detail on what to do for keytabless Oozie launches of spark apps, and adds some debug-level diagnostics of what credentials have been submitted. Author: Steve Loughran <stevel@hortonworks.com> Author: Steve Loughran <stevel@apache.org>. Closes #11033 from steveloughran/stevel/feature/SPARK-13148-oozie.
Diffstat (limited to 'core/src')
-rw-r--r--core/src/main/scala/org/apache/spark/deploy/SparkHadoopUtil.scala51
1 file changed, 49 insertions, 2 deletions
diff --git a/core/src/main/scala/org/apache/spark/deploy/SparkHadoopUtil.scala b/core/src/main/scala/org/apache/spark/deploy/SparkHadoopUtil.scala
index 2e9e45a155..7a5fc866bb 100644
--- a/core/src/main/scala/org/apache/spark/deploy/SparkHadoopUtil.scala
+++ b/core/src/main/scala/org/apache/spark/deploy/SparkHadoopUtil.scala
@@ -17,10 +17,11 @@
package org.apache.spark.deploy
-import java.io.{ByteArrayInputStream, DataInputStream}
+import java.io.{ByteArrayInputStream, DataInputStream, IOException}
import java.lang.reflect.Method
import java.security.PrivilegedExceptionAction
-import java.util.{Arrays, Comparator}
+import java.text.DateFormat
+import java.util.{Arrays, Comparator, Date}
import scala.collection.JavaConverters._
import scala.concurrent.duration._
@@ -34,6 +35,8 @@ import org.apache.hadoop.fs.FileSystem.Statistics
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier
import org.apache.hadoop.mapred.JobConf
import org.apache.hadoop.security.{Credentials, UserGroupInformation}
+import org.apache.hadoop.security.token.{Token, TokenIdentifier}
+import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier
import org.apache.spark.{SparkConf, SparkException}
import org.apache.spark.annotation.DeveloperApi
@@ -357,6 +360,50 @@ class SparkHadoopUtil extends Logging {
newConf.setBoolean(confKey, true)
newConf
}
+
+ /**
+ * Dump the credentials' tokens to string values.
+ *
+ * @param credentials credentials
+ * @return an iterator over the string values. If no credentials are passed in: an empty list
+ */
+ private[spark] def dumpTokens(credentials: Credentials): Iterable[String] = {
+ if (credentials != null) {
+ credentials.getAllTokens.asScala.map(tokenToString)
+ } else {
+ Seq()
+ }
+ }
+
+ /**
+ * Convert a token to a string for logging.
+ * If its an abstract delegation token, attempt to unmarshall it and then
+ * print more details, including timestamps in human-readable form.
+ *
+ * @param token token to convert to a string
+ * @return a printable string value.
+ */
+ private[spark] def tokenToString(token: Token[_ <: TokenIdentifier]): String = {
+ val df = DateFormat.getDateTimeInstance(DateFormat.SHORT, DateFormat.SHORT)
+ val buffer = new StringBuilder(128)
+ buffer.append(token.toString)
+ try {
+ val ti = token.decodeIdentifier
+ buffer.append("; ").append(ti)
+ ti match {
+ case dt: AbstractDelegationTokenIdentifier =>
+ // include human times and the renewer, which the HDFS tokens toString omits
+ buffer.append("; Renewer: ").append(dt.getRenewer)
+ buffer.append("; Issued: ").append(df.format(new Date(dt.getIssueDate)))
+ buffer.append("; Max Date: ").append(df.format(new Date(dt.getMaxDate)))
+ case _ =>
+ }
+ } catch {
+ case e: IOException =>
+ logDebug("Failed to decode $token: $e", e)
+ }
+ buffer.toString
+ }
}
object SparkHadoopUtil {