path: root/core/src/hadoop2-yarn/scala/spark/deploy/SparkHadoopUtil.scala
package spark.deploy

import scala.collection.mutable.HashMap
import org.apache.hadoop.mapred.JobConf
import org.apache.hadoop.security.UserGroupInformation
import org.apache.hadoop.yarn.conf.YarnConfiguration
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.yarn.api.ApplicationConstants.Environment
import java.security.PrivilegedExceptionAction

/**
 * Contains utility methods for interacting with Hadoop from Spark.
 */
object SparkHadoopUtil {

  val yarnConf = newConfiguration()

  def getUserNameFromEnvironment(): String = {
    // Default to the environment variable if the -D system property is not set.
    val retval = System.getProperty(Environment.USER.name, System.getenv(Environment.USER.name))

    // If nothing is found, default to the user we are running as.
    if (retval == null) System.getProperty("user.name") else retval
  }
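
  // Illustrative resolution order (example values are assumptions, not from this
  // codebase): with no -DUSER=... flag on the JVM command line,
  //   System.getProperty("USER")          -> null
  //   System.getenv("USER")               -> e.g. "yarn" on a typical NodeManager
  //   System.getProperty("user.name")     -> the JVM's own user, used as a last resort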

  def runAsUser(func: (Product) => Unit, args: Product) {
    runAsUser(func, args, getUserNameFromEnvironment())
  }

  // In this build the requested user is not acted upon: the function is simply
  // invoked as the current user.
  def runAsUser(func: (Product) => Unit, args: Product, user: String) {
    func(args)
  }
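
  // For comparison, on a secure cluster one would typically wrap the call in a
  // UserGroupInformation.doAs block. A minimal sketch (hypothetical method name,
  // not part of this file's API):
  //
  //   def runAsUserSecure(func: (Product) => Unit, args: Product, user: String) {
  //     val ugi = UserGroupInformation.createRemoteUser(user)
  //     ugi.doAs(new PrivilegedExceptionAction[Unit] {
  //       def run() { func(args) }
  //     })
  //   }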

  // Note: all params whose names start with SPARK are propagated all the way through
  // to remote processes, so when running in YARN mode this MUST be set to true.
  def isYarnMode(): Boolean = {
    val yarnMode = System.getProperty("SPARK_YARN_MODE", System.getenv("SPARK_YARN_MODE"))
    java.lang.Boolean.valueOf(yarnMode)
  }

  // Set an env variable indicating we are running in YARN mode.
  // Note that anything with the SPARK prefix gets propagated to all (remote) processes.
  def setYarnMode() {
    System.setProperty("SPARK_YARN_MODE", "true")
  }

  def setYarnMode(env: HashMap[String, String]) {
    env("SPARK_YARN_MODE") = "true"
  }
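
  // Illustrative call site (hypothetical, for exposition): a YARN client flips the
  // flag before launching, and later code branches on it.
  //
  //   SparkHadoopUtil.setYarnMode()
  //   if (SparkHadoopUtil.isYarnMode()) {
  //     // YARN-specific setup; SPARK_YARN_MODE also reaches remote processes
  //     // because SPARK-prefixed properties are propagated (see note above).
  //   }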

  // Return an appropriate (subclass of) Configuration. Creating a config can initialize
  // some Hadoop subsystems. Always create a new config; don't reuse yarnConf.
  def newConfiguration(): Configuration = new YarnConfiguration(new Configuration())

  // Add any user credentials to the job conf that are necessary for running on a secure Hadoop cluster.
  def addCredentials(conf: JobConf) {
    val jobCreds = conf.getCredentials()
    jobCreds.mergeAll(UserGroupInformation.getCurrentUser().getCredentials())
  }
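
  // Illustrative call site (hypothetical): merge the current user's tokens into a
  // freshly built JobConf before handing it to Hadoop input/output formats.
  //
  //   val jobConf = new JobConf(SparkHadoopUtil.newConfiguration())
  //   SparkHadoopUtil.addCredentials(jobConf)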
}