author    Sean Owen <sowen@cloudera.com>    2014-11-09 22:11:20 -0800
committer Patrick Wendell <pwendell@gmail.com>    2014-11-09 22:11:20 -0800
commit    f8e5732307dcb1482d9bcf1162a1090ef9a7b913 (patch)
tree      ff4bdd9ef7f96d458b4aafff8c6a7f18309611b3 /core/src/main/scala/org/apache/hadoop/mapreduce/SparkHadoopMapReduceUtil.scala
parent    f73b56f5e5d94f83d980475d3f39548986a92dd6 (diff)
SPARK-1209 [CORE] (Take 2) SparkHadoop{MapRed,MapReduce}Util should not use package org.apache.hadoop
andrewor14 Another try at SPARK-1209, to address https://github.com/apache/spark/pull/2814#issuecomment-61197619

I successfully tested with `mvn -Dhadoop.version=1.0.4 -DskipTests clean package; mvn -Dhadoop.version=1.0.4 test`. I assume that is what failed Jenkins last time. I also tried `-Dhadoop.version=1.2.1` and `-Phadoop-2.4 -Pyarn -Phive` for more coverage.

So this is why the class was put in `org.apache.hadoop` to begin with, I assume. One option is to leave this as-is for now and move it only when Hadoop 1.0.x support goes away. This is the other option, which adds a call to force the constructor to be public at run-time. It's probably less surprising than putting Spark code in `org.apache.hadoop`, but it does involve reflection. A `SecurityManager` might forbid this, but it would forbid a lot of stuff Spark does. This would also only affect Hadoop 1.0.x, it seems.

Author: Sean Owen <sowen@cloudera.com>

Closes #3048 from srowen/SPARK-1209 and squashes the following commits:

0d48f4b [Sean Owen] For Hadoop 1.0.x, make certain constructors public, which were public in later versions
466e179 [Sean Owen] Disable MIMA warnings resulting from moving the class -- this was also part of the PairRDDFunctions type hierarchy, though?
eb61820 [Sean Owen] Move SparkHadoopMapRedUtil / SparkHadoopMapReduceUtil from org.apache.hadoop to org.apache.spark
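For context, the run-time workaround the message describes looks roughly like the sketch below. This is a hedged illustration, not the committed code: it assumes a Hadoop 1.0.x classpath where the relevant `JobContext` constructor is not public, and the function name is invented. The `setAccessible(true)` call is the "call to force the constructor to be public at run-time" referred to above, and the step a restrictive `SecurityManager` could veto.

```scala
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.mapreduce.{JobContext, JobID}

// Hypothetical sketch of the Hadoop 1.0.x workaround; not the committed code.
def newJobContextForHadoop1(conf: Configuration, jobId: JobID): JobContext = {
  val klass = Class.forName("org.apache.hadoop.mapreduce.JobContext") // hadoop1
  val ctor = klass.getDeclaredConstructor(classOf[Configuration], classOf[JobID])
  // Force the non-public constructor open; a restrictive SecurityManager
  // could throw a SecurityException here, as the commit message notes.
  ctor.setAccessible(true)
  ctor.newInstance(conf, jobId).asInstanceOf[JobContext]
}
```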
Diffstat (limited to 'core/src/main/scala/org/apache/hadoop/mapreduce/SparkHadoopMapReduceUtil.scala')
-rw-r--r--  core/src/main/scala/org/apache/hadoop/mapreduce/SparkHadoopMapReduceUtil.scala | 79
1 file changed, 0 insertions(+), 79 deletions(-)
diff --git a/core/src/main/scala/org/apache/hadoop/mapreduce/SparkHadoopMapReduceUtil.scala b/core/src/main/scala/org/apache/hadoop/mapreduce/SparkHadoopMapReduceUtil.scala
deleted file mode 100644
index 1fca5729c6..0000000000
--- a/core/src/main/scala/org/apache/hadoop/mapreduce/SparkHadoopMapReduceUtil.scala
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.mapreduce
-
-import java.lang.{Boolean => JBoolean, Integer => JInteger}
-
-import org.apache.hadoop.conf.Configuration
-
-private[apache]
-trait SparkHadoopMapReduceUtil {
- def newJobContext(conf: Configuration, jobId: JobID): JobContext = {
- val klass = firstAvailableClass(
- "org.apache.hadoop.mapreduce.task.JobContextImpl", // hadoop2, hadoop2-yarn
- "org.apache.hadoop.mapreduce.JobContext") // hadoop1
- val ctor = klass.getDeclaredConstructor(classOf[Configuration], classOf[JobID])
- ctor.newInstance(conf, jobId).asInstanceOf[JobContext]
- }
-
- def newTaskAttemptContext(conf: Configuration, attemptId: TaskAttemptID): TaskAttemptContext = {
- val klass = firstAvailableClass(
- "org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl", // hadoop2, hadoop2-yarn
- "org.apache.hadoop.mapreduce.TaskAttemptContext") // hadoop1
- val ctor = klass.getDeclaredConstructor(classOf[Configuration], classOf[TaskAttemptID])
- ctor.newInstance(conf, attemptId).asInstanceOf[TaskAttemptContext]
- }
-
- def newTaskAttemptID(
- jtIdentifier: String,
- jobId: Int,
- isMap: Boolean,
- taskId: Int,
- attemptId: Int) = {
- val klass = Class.forName("org.apache.hadoop.mapreduce.TaskAttemptID")
- try {
- // First, attempt to use the old-style constructor that takes a boolean isMap
- // (not available in YARN)
- val ctor = klass.getDeclaredConstructor(classOf[String], classOf[Int], classOf[Boolean],
- classOf[Int], classOf[Int])
- ctor.newInstance(jtIdentifier, new JInteger(jobId), new JBoolean(isMap), new JInteger(taskId),
- new JInteger(attemptId)).asInstanceOf[TaskAttemptID]
- } catch {
- case exc: NoSuchMethodException => {
- // If that failed, look for the new constructor that takes a TaskType (not available in 1.x)
- val taskTypeClass = Class.forName("org.apache.hadoop.mapreduce.TaskType")
- .asInstanceOf[Class[Enum[_]]]
- val taskType = taskTypeClass.getMethod("valueOf", classOf[String]).invoke(
- taskTypeClass, if(isMap) "MAP" else "REDUCE")
- val ctor = klass.getDeclaredConstructor(classOf[String], classOf[Int], taskTypeClass,
- classOf[Int], classOf[Int])
- ctor.newInstance(jtIdentifier, new JInteger(jobId), taskType, new JInteger(taskId),
- new JInteger(attemptId)).asInstanceOf[TaskAttemptID]
- }
- }
- }
-
- private def firstAvailableClass(first: String, second: String): Class[_] = {
- try {
- Class.forName(first)
- } catch {
- case e: ClassNotFoundException =>
- Class.forName(second)
- }
- }
-}
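For illustration, here is a hedged usage sketch of the deleted trait above: a caller mixes it in and builds a `TaskAttemptContext` the same way on both Hadoop 1 (boolean `isMap` constructor) and Hadoop 2 (`TaskType` constructor), with the version difference resolved by the trait's reflection. The object name, package, and ID values are invented; because the trait is `private[apache]`, the caller must be declared somewhere under the `org.apache` package.

```scala
package org.apache.spark.example

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.mapreduce.SparkHadoopMapReduceUtil

// Illustrative only: names and ID values here are invented for the example.
object TaskContextDemo extends SparkHadoopMapReduceUtil {
  def main(args: Array[String]): Unit = {
    val conf = new Configuration()
    // jtIdentifier "local", job 0, map task 3, first attempt. The trait picks
    // the right TaskAttemptID constructor for the Hadoop version at run-time.
    val attemptId = newTaskAttemptID("local", jobId = 0, isMap = true, taskId = 3, attemptId = 0)
    val context = newTaskAttemptContext(conf, attemptId)
    println(context.getTaskAttemptID)
  }
}
```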