about summary refs log tree commit diff
diff options
context:
space:
mode:
authorMatei Zaharia <matei@eecs.berkeley.edu>2012-03-06 13:38:32 -0800
committerMatei Zaharia <matei@eecs.berkeley.edu>2012-03-06 13:38:32 -0800
commita5e2b6a6bd682eac2859b9105feabaf95ec1d059 (patch)
tree5eea4dc7bd17a52ebf53ff55aab94f1fb954e5ef
parent97eee50825e91258b44710c4af45be662935aa9d (diff)
parentdd68cb60991f430e2c6d73eb8286fd038741154d (diff)
downloadspark-a5e2b6a6bd682eac2859b9105feabaf95ec1d059.tar.gz
spark-a5e2b6a6bd682eac2859b9105feabaf95ec1d059.tar.bz2
spark-a5e2b6a6bd682eac2859b9105feabaf95ec1d059.zip
Merge pull request #112 from cengle/master
Changed HadoopRDD to get key and value containers from the RecordReader instead of through reflection
-rw-r--r--  core/src/main/scala/spark/HadoopRDD.scala | 16
1 file changed, 2 insertions(+), 14 deletions(-)
diff --git a/core/src/main/scala/spark/HadoopRDD.scala b/core/src/main/scala/spark/HadoopRDD.scala
index cb7923317a..598a18fe72 100644
--- a/core/src/main/scala/spark/HadoopRDD.scala
+++ b/core/src/main/scala/spark/HadoopRDD.scala
@@ -60,18 +60,6 @@ class HadoopRDD[K, V](
.asInstanceOf[InputFormat[K, V]]
}
- /**
- * Helper method for creating a Hadoop Writable, because the commonly used NullWritable class has
- * no constructor.
- */
- def createWritable[T](clazz: Class[T]): T = {
- if (clazz == classOf[NullWritable]) {
- NullWritable.get().asInstanceOf[T]
- } else {
- clazz.newInstance()
- }
- }
-
override def splits = splits_
override def compute(theSplit: Split) = new Iterator[(K, V)] {
@@ -82,8 +70,8 @@ class HadoopRDD[K, V](
val fmt = createInputFormat(conf)
reader = fmt.getRecordReader(split.inputSplit.value, conf, Reporter.NULL)
- val key: K = createWritable(keyClass)
- val value: V = createWritable(valueClass)
+ val key: K = reader.createKey()
+ val value: V = reader.createValue()
var gotNext = false
var finished = false