author    Jey Kottalam <jey@cs.berkeley.edu>  2013-07-24 13:07:27 -0700
committer Jey Kottalam <jey@cs.berkeley.edu>  2013-08-15 16:50:37 -0700
commit    bd0bab47c9602462628b1d3c90d5eb5d889f4596 (patch)
tree      f6b77c9b351cc46e7a061ae46cb4ee8b9e7f3c22
parent    4f43fd791ab0e84693e2337358c6b880a1593e54 (diff)
SparkEnv isn't available this early, and not needed anyway
 core/src/main/scala/spark/deploy/SparkHadoopUtil.scala             | 11 -
 core/src/main/scala/spark/executor/StandaloneExecutorBackend.scala | 14 -
 yarn/src/main/scala/spark/deploy/SparkHadoopUtil.scala             | 16 -
 3 files changed, 0 insertions(+), 41 deletions(-)
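
Two things go away in this commit: run() no longer calls SparkEnv.get (which, as the commit message says, isn't available that early in executor startup), and the arguments no longer round-trip through an untyped Product. A minimal sketch of the two dispatch shapes, with hypothetical names, to show what the casts cost:

    // Removed shape: arguments packed into Tuple4[Any, Any, Any, Any],
    // then recovered with runtime casts on the far side.
    def dispatchUntyped(args: Product) {
      assert(args.productArity == 4)
      val driverUrl = args.productElement(0).asInstanceOf[String] // runtime cast
      val cores = args.productElement(3).asInstanceOf[Int]        // runtime cast
      // ...
    }

    // Remaining shape: ordinary typed parameters, checked at compile time.
    def dispatchTyped(driverUrl: String, executorId: String, hostname: String, cores: Int) {
      // ...
    }
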
diff --git a/core/src/main/scala/spark/deploy/SparkHadoopUtil.scala b/core/src/main/scala/spark/deploy/SparkHadoopUtil.scala
index c4ed0bb17e..882161e669 100644
--- a/core/src/main/scala/spark/deploy/SparkHadoopUtil.scala
+++ b/core/src/main/scala/spark/deploy/SparkHadoopUtil.scala
@@ -25,17 +25,6 @@ import org.apache.hadoop.mapred.JobConf
*/
class SparkHadoopUtil {
- def getUserNameFromEnvironment(): String = {
- // defaulting to -D ...
- System.getProperty("user.name")
- }
-
- def runAsUser(func: (Product) => Unit, args: Product) {
-
- // Add support, if exists - for now, simply run func !
- func(args)
- }
-
// Return an appropriate (subclass of) Configuration. Creating a config can initialize some Hadoop subsystems.
def newConfiguration(): Configuration = new Configuration()
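
After this hunk the core (non-YARN) SparkHadoopUtil keeps only its Configuration factory. Reconstructed from the surviving context lines, the class now amounts to:

    import org.apache.hadoop.conf.Configuration

    class SparkHadoopUtil {
      // Return an appropriate (subclass of) Configuration; creating a
      // config can initialize some Hadoop subsystems.
      def newConfiguration(): Configuration = new Configuration()
    }
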
diff --git a/core/src/main/scala/spark/executor/StandaloneExecutorBackend.scala b/core/src/main/scala/spark/executor/StandaloneExecutorBackend.scala
index a9e06f8d54..b5fb6dbe29 100644
--- a/core/src/main/scala/spark/executor/StandaloneExecutorBackend.scala
+++ b/core/src/main/scala/spark/executor/StandaloneExecutorBackend.scala
@@ -81,20 +81,6 @@ private[spark] class StandaloneExecutorBackend(
private[spark] object StandaloneExecutorBackend {
def run(driverUrl: String, executorId: String, hostname: String, cores: Int) {
- val env = SparkEnv.get
- env.hadoop.runAsUser(run0, Tuple4[Any, Any, Any, Any] (driverUrl, executorId, hostname, cores))
- }
-
- // This will be run 'as' the user
- def run0(args: Product) {
- assert(4 == args.productArity)
- runImpl(args.productElement(0).asInstanceOf[String],
- args.productElement(1).asInstanceOf[String],
- args.productElement(2).asInstanceOf[String],
- args.productElement(3).asInstanceOf[Int])
- }
-
- private def runImpl(driverUrl: String, executorId: String, hostname: String, cores: Int) {
// Debug code
Utils.checkHost(hostname)
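
In the executor backend, the run -> runAsUser -> run0 -> runImpl trampoline collapses into run() itself. Sketched from the context lines above, with the unchanged remainder of the method elided:

    private[spark] object StandaloneExecutorBackend {
      def run(driverUrl: String, executorId: String, hostname: String, cores: Int) {
        // Debug code
        Utils.checkHost(hostname)
        // ... former runImpl body continues here unchanged ...
      }
    }
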
diff --git a/yarn/src/main/scala/spark/deploy/SparkHadoopUtil.scala b/yarn/src/main/scala/spark/deploy/SparkHadoopUtil.scala
index 6122fdced0..a812bcf867 100644
--- a/yarn/src/main/scala/spark/deploy/SparkHadoopUtil.scala
+++ b/yarn/src/main/scala/spark/deploy/SparkHadoopUtil.scala
@@ -32,22 +32,6 @@ object SparkHadoopUtil {
val yarnConf = newConfiguration()
- def getUserNameFromEnvironment(): String = {
- // defaulting to env if -D is not present ...
- val retval = System.getProperty(Environment.USER.name, System.getenv(Environment.USER.name))
-
- // If nothing found, default to user we are running as
- if (retval == null) System.getProperty("user.name") else retval
- }
-
- def runAsUser(func: (Product) => Unit, args: Product) {
- runAsUser(func, args, getUserNameFromEnvironment())
- }
-
- def runAsUser(func: (Product) => Unit, args: Product, user: String) {
- func(args)
- }
-
// Note that all params which start with SPARK are propagated all the way through, so if in yarn mode, this MUST be set to true.
def isYarnMode(): Boolean = {
val yarnMode = System.getProperty("SPARK_YARN_MODE", System.getenv("SPARK_YARN_MODE"))
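
The surviving isYarnMode() reads SPARK_YARN_MODE from the system properties first and falls back to the environment variable. The hunk cuts off before the conversion to Boolean, so the final line of this self-contained sketch is an assumption:

    object YarnModeCheck {
      def isYarnMode(): Boolean = {
        // -DSPARK_YARN_MODE takes precedence over the environment variable.
        val yarnMode = System.getProperty("SPARK_YARN_MODE", System.getenv("SPARK_YARN_MODE"))
        // Boolean.valueOf(null) is false, so an unset flag means "not YARN".
        java.lang.Boolean.valueOf(yarnMode)
      }
    }
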