about summary refs log tree commit diff
path: root/core
diff options
context:
space:
mode:
authorHarvey Feng <harvey@databricks.com>2013-10-05 17:14:19 -0700
committerHarvey Feng <harvey@databricks.com>2013-10-05 17:14:19 -0700
commit96929f28bb9c929ca3309dbe99910097f5eb3c8c (patch)
tree173e467607c22373cc059ca50aa6a3436fdfa7c9 /core
parentb5e93c1227f0af965f15e9455e5f4bd72680ebde (diff)
downloadspark-96929f28bb9c929ca3309dbe99910097f5eb3c8c.tar.gz
spark-96929f28bb9c929ca3309dbe99910097f5eb3c8c.tar.bz2
spark-96929f28bb9c929ca3309dbe99910097f5eb3c8c.zip
Make HadoopRDD object Spark private.
Diffstat (limited to 'core')
-rw-r--r--core/src/main/scala/org/apache/spark/rdd/HadoopRDD.scala | 6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/core/src/main/scala/org/apache/spark/rdd/HadoopRDD.scala b/core/src/main/scala/org/apache/spark/rdd/HadoopRDD.scala
index 728f3d1aed..51e5bb88d2 100644
--- a/core/src/main/scala/org/apache/spark/rdd/HadoopRDD.scala
+++ b/core/src/main/scala/org/apache/spark/rdd/HadoopRDD.scala
@@ -106,7 +106,7 @@ class HadoopRDD[K, V](
protected val jobConfCacheKey = "rdd_%d_job_conf".format(id)
- private val inputFormatCacheKey = "rdd_%d_input_format".format(id)
+ protected val inputFormatCacheKey = "rdd_%d_input_format".format(id)
// Returns a JobConf that will be used on slaves to obtain input splits for Hadoop reads.
protected def getJobConf(): JobConf = {
@@ -122,7 +122,7 @@ class HadoopRDD[K, V](
}
}
- def getInputFormat(conf: JobConf): InputFormat[K, V] = {
+ protected def getInputFormat(conf: JobConf): InputFormat[K, V] = {
if (HadoopRDD.containsCachedMetadata(inputFormatCacheKey)) {
return HadoopRDD.getCachedMetadata(inputFormatCacheKey).asInstanceOf[InputFormat[K, V]]
}
@@ -196,7 +196,7 @@ class HadoopRDD[K, V](
def getConf: Configuration = getJobConf()
}
-object HadoopRDD {
+private[spark] object HadoopRDD {
def getCachedMetadata(key: String) = SparkEnv.get.hadoop.hadoopJobMetadata.get(key)
def containsCachedMetadata(key: String) = SparkEnv.get.hadoop.hadoopJobMetadata.containsKey(key)