aboutsummaryrefslogtreecommitdiff
path: root/core
diff options
context:
space:
mode:
Diffstat (limited to 'core')
-rw-r--r-- core/src/main/scala/org/apache/spark/rdd/HadoopRDD.scala | 6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/core/src/main/scala/org/apache/spark/rdd/HadoopRDD.scala b/core/src/main/scala/org/apache/spark/rdd/HadoopRDD.scala
index 728f3d1aed..51e5bb88d2 100644
--- a/core/src/main/scala/org/apache/spark/rdd/HadoopRDD.scala
+++ b/core/src/main/scala/org/apache/spark/rdd/HadoopRDD.scala
@@ -106,7 +106,7 @@ class HadoopRDD[K, V](
protected val jobConfCacheKey = "rdd_%d_job_conf".format(id)
- private val inputFormatCacheKey = "rdd_%d_input_format".format(id)
+ protected val inputFormatCacheKey = "rdd_%d_input_format".format(id)
// Returns a JobConf that will be used on slaves to obtain input splits for Hadoop reads.
protected def getJobConf(): JobConf = {
@@ -122,7 +122,7 @@ class HadoopRDD[K, V](
}
}
- def getInputFormat(conf: JobConf): InputFormat[K, V] = {
+ protected def getInputFormat(conf: JobConf): InputFormat[K, V] = {
if (HadoopRDD.containsCachedMetadata(inputFormatCacheKey)) {
return HadoopRDD.getCachedMetadata(inputFormatCacheKey).asInstanceOf[InputFormat[K, V]]
}
@@ -196,7 +196,7 @@ class HadoopRDD[K, V](
def getConf: Configuration = getJobConf()
}
-object HadoopRDD {
+private[spark] object HadoopRDD {
def getCachedMetadata(key: String) = SparkEnv.get.hadoop.hadoopJobMetadata.get(key)
def containsCachedMetadata(key: String) = SparkEnv.get.hadoop.hadoopJobMetadata.containsKey(key)