-rw-r--r--  core/src/main/scala/org/apache/spark/deploy/SparkHadoopUtil.scala       |  4
-rw-r--r--  core/src/main/scala/org/apache/spark/deploy/history/HistoryServer.scala | 16
-rw-r--r--  docs/monitoring.md                                                       | 24
3 files changed, 44 insertions(+), 0 deletions(-)
diff --git a/core/src/main/scala/org/apache/spark/deploy/SparkHadoopUtil.scala b/core/src/main/scala/org/apache/spark/deploy/SparkHadoopUtil.scala
index 9bdbfb33bf..498fcc520a 100644
--- a/core/src/main/scala/org/apache/spark/deploy/SparkHadoopUtil.scala
+++ b/core/src/main/scala/org/apache/spark/deploy/SparkHadoopUtil.scala
@@ -75,6 +75,10 @@ class SparkHadoopUtil {
def getSecretKeyFromUserCredentials(key: String): Array[Byte] = { null }
+ def loginUserFromKeytab(principalName: String, keytabFilename: String) {
+ UserGroupInformation.loginUserFromKeytab(principalName, keytabFilename)
+ }
+
}
object SparkHadoopUtil {
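
The new helper simply delegates to Hadoop's UserGroupInformation.loginUserFromKeytab. A minimal sketch of how a caller might use it; the principal and keytab path below are placeholders, not values from this patch:

    import org.apache.spark.deploy.SparkHadoopUtil

    // Log the current process in from a keytab so a long-running service keeps valid
    // Kerberos credentials; both arguments here are illustrative only.
    SparkHadoopUtil.get.loginUserFromKeytab(
      "spark/history-server@EXAMPLE.COM",     // hypothetical principal
      "/etc/security/keytabs/spark.keytab")   // hypothetical keytab path
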
diff --git a/core/src/main/scala/org/apache/spark/deploy/history/HistoryServer.scala b/core/src/main/scala/org/apache/spark/deploy/history/HistoryServer.scala
index b8f56234d3..d7a3246bcf 100644
--- a/core/src/main/scala/org/apache/spark/deploy/history/HistoryServer.scala
+++ b/core/src/main/scala/org/apache/spark/deploy/history/HistoryServer.scala
@@ -22,6 +22,7 @@ import scala.collection.mutable
import org.apache.hadoop.fs.{FileStatus, Path}
import org.apache.spark.{Logging, SecurityManager, SparkConf}
+import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.scheduler._
import org.apache.spark.ui.{WebUI, SparkUI}
import org.apache.spark.ui.JettyUtils._
@@ -257,6 +258,7 @@ object HistoryServer {
val STATIC_RESOURCE_DIR = SparkUI.STATIC_RESOURCE_DIR
def main(argStrings: Array[String]) {
+ initSecurity()
val args = new HistoryServerArguments(argStrings)
val securityManager = new SecurityManager(conf)
val server = new HistoryServer(args.logDir, securityManager, conf)
@@ -266,6 +268,20 @@ object HistoryServer {
while(true) { Thread.sleep(Int.MaxValue) }
server.stop()
}
+
+ def initSecurity() {
+ // If we are accessing HDFS and it has security enabled (Kerberos), we have to log in
+ // from a keytab file so that we can access HDFS beyond the Kerberos ticket expiration.
+ // As long as access goes through Hadoop RPC (hdfs://), a relogin from the keytab will
+ // occur automatically.
+ if (conf.getBoolean("spark.history.kerberos.enabled", false)) {
+ // If Kerberos is enabled, the following two parameters must be set.
+ val principalName = conf.get("spark.history.kerberos.principal")
+ val keytabFilename = conf.get("spark.history.kerberos.keytab")
+ SparkHadoopUtil.get.loginUserFromKeytab(principalName, keytabFilename)
+ }
+ }
+
}
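
initSecurity() only attempts the login when spark.history.kerberos.enabled is true, and then requires both the principal and the keytab to be present in the server's SparkConf. A hedged sketch of the configuration that would drive this code path; the values are placeholders:

    import org.apache.spark.SparkConf

    // Illustrative only: these are the keys read by initSecurity(); in a real deployment
    // they would reach the history server process e.g. as spark.* system properties.
    val conf = new SparkConf()
      .set("spark.history.kerberos.enabled", "true")
      .set("spark.history.kerberos.principal", "spark/history-server@EXAMPLE.COM")
      .set("spark.history.kerberos.keytab", "/etc/security/keytabs/spark.keytab")
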
diff --git a/docs/monitoring.md b/docs/monitoring.md
index 144be3daf1..347a9b1f1a 100644
--- a/docs/monitoring.md
+++ b/docs/monitoring.md
@@ -91,6 +91,30 @@ represents an application's event logs. This creates a web interface at
The port to which the web interface of the history server binds.
</td>
</tr>
+ <tr>
+ <td>spark.history.kerberos.enabled</td>
+ <td>false</td>
+ <td>
+ Indicates whether the history server should use Kerberos to log in. This is useful
+ if the history server is accessing HDFS files on a secure Hadoop cluster. If this is
+ true, it uses the configs <code>spark.history.kerberos.principal</code> and
+ <code>spark.history.kerberos.keytab</code>.
+ </td>
+ </tr>
+ <tr>
+ <td>spark.history.kerberos.principal</td>
+ <td>(none)</td>
+ <td>
+ Kerberos principal name for the History Server.
+ </td>
+ </tr>
+ <tr>
+ <td>spark.history.kerberos.keytab</td>
+ <td>(none)</td>
+ <td>
+ Location of the Kerberos keytab file for the History Server.
+ </td>
+ </tr>
</table>
Note that in all of these UIs, the tables are sortable by clicking their headers,
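
Because <code>spark.history.kerberos.principal</code> and <code>spark.history.kerberos.keytab</code> both default to (none), enabling the feature without setting them will make the server fail at startup. A minimal sketch of that failure mode, assuming standard SparkConf behaviour; the code is illustrative, not part of this patch:

    import org.apache.spark.SparkConf

    val conf = new SparkConf()
      .set("spark.history.kerberos.enabled", "true")
    if (conf.getBoolean("spark.history.kerberos.enabled", false)) {
      // Throws java.util.NoSuchElementException because the key was never set.
      val principal = conf.get("spark.history.kerberos.principal")
    }
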