about summary refs log tree commit diff
diff options
context:
space:
mode:
authorCliff Engle <cliffengle@gmail.com>2012-02-29 16:33:23 -0800
committerCliff Engle <cliffengle@gmail.com>2012-02-29 16:33:23 -0800
commitdd68cb60991f430e2c6d73eb8286fd038741154d (patch)
tree31d6604520f2f4c790c22d004cc845b180a53edf
parent1e10df0a465d58ef8cf83e4fb1db539bdd1070b2 (diff)
downloadspark-dd68cb60991f430e2c6d73eb8286fd038741154d.tar.gz
spark-dd68cb60991f430e2c6d73eb8286fd038741154d.tar.bz2
spark-dd68cb60991f430e2c6d73eb8286fd038741154d.zip
Get key and value container from RecordReader
-rw-r--r--  core/src/main/scala/spark/HadoopRDD.scala | 16
1 file changed, 2 insertions, 14 deletions
diff --git a/core/src/main/scala/spark/HadoopRDD.scala b/core/src/main/scala/spark/HadoopRDD.scala
index cb7923317a..598a18fe72 100644
--- a/core/src/main/scala/spark/HadoopRDD.scala
+++ b/core/src/main/scala/spark/HadoopRDD.scala
@@ -60,18 +60,6 @@ class HadoopRDD[K, V](
.asInstanceOf[InputFormat[K, V]]
}
- /**
- * Helper method for creating a Hadoop Writable, because the commonly used NullWritable class has
- * no constructor.
- */
- def createWritable[T](clazz: Class[T]): T = {
- if (clazz == classOf[NullWritable]) {
- NullWritable.get().asInstanceOf[T]
- } else {
- clazz.newInstance()
- }
- }
-
override def splits = splits_
override def compute(theSplit: Split) = new Iterator[(K, V)] {
@@ -82,8 +70,8 @@ class HadoopRDD[K, V](
val fmt = createInputFormat(conf)
reader = fmt.getRecordReader(split.inputSplit.value, conf, Reporter.NULL)
- val key: K = createWritable(keyClass)
- val value: V = createWritable(valueClass)
+ val key: K = reader.createKey()
+ val value: V = reader.createValue()
var gotNext = false
var finished = false