author    Dmitriy Lyubimov <dlyubimov@apache.org>    2013-07-30 17:51:44 -0700
committer Dmitriy Lyubimov <dlyubimov@apache.org>    2013-07-30 17:51:44 -0700
commit    ca33b12e98ea887484ee30084efb194b16d571cc (patch)
tree      84cbe9fd1bc814b77f5eea27b0bb43bb7bb854ca
parent    483d8dd694c6b752927be494d8e878e9eee895a2 (diff)
resetting wrap and continuation indent = 4
-rw-r--r--  core/src/main/scala/spark/rdd/ParallelCollectionRDD.scala  22
1 file changed, 11 insertions, 11 deletions
diff --git a/core/src/main/scala/spark/rdd/ParallelCollectionRDD.scala b/core/src/main/scala/spark/rdd/ParallelCollectionRDD.scala
index f0b3bbf374..c3cf2adaac 100644
--- a/core/src/main/scala/spark/rdd/ParallelCollectionRDD.scala
+++ b/core/src/main/scala/spark/rdd/ParallelCollectionRDD.scala
@@ -27,10 +27,10 @@ import scala.Serializable
import akka.serialization.JavaSerializer
private[spark] class ParallelCollectionPartition[T: ClassManifest](
- var rddId: Long,
- var slice: Int,
- var values: Seq[T])
- extends Partition with Serializable {
+ var rddId: Long,
+ var slice: Int,
+ var values: Seq[T])
+ extends Partition with Serializable {
// for externalization
def this() = this(0, 0, null)
@@ -54,7 +54,7 @@ private[spark] class ParallelCollectionPartition[T: ClassManifest](
// than going thru serialization,
// to avoid a separate serialization header.
sfactory match {
- case js:JavaSerializer => out.defaultWriteObject()
+ case js: JavaSerializer => out.defaultWriteObject()
case _ => {
// for every other serializer, we
// assume that it would support Seq[T] and
@@ -74,7 +74,7 @@ private[spark] class ParallelCollectionPartition[T: ClassManifest](
val sfactory = SparkEnv.get.serializer
sfactory match {
- case js:JavaSerializer => in.defaultReadObject()
+ case js: JavaSerializer => in.defaultReadObject()
case _ =>
val ser = sfactory.newInstance()
rddId = in.readLong()
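
For context, the two hunks above touch a custom serialization hook that dispatches on the configured serializer: the plain JavaSerializer falls back to the default object-stream behaviour, while any other serializer writes the fields explicitly so the values Seq goes through that serializer instead, avoiding a separate serialization header. Below is a minimal, self-contained Scala sketch of that shape; the Env object, the *Like traits, the ExamplePartition name, and all method bodies are illustrative stand-ins for SparkEnv.get.serializer and the spark.serializer classes, not the exact code in this commit.

import java.io.{IOException, ObjectInputStream, ObjectOutputStream}

// Stand-ins for Spark's serializer abstractions (hypothetical, for illustration only).
trait SerializerInstanceLike {
  def writeValue(out: ObjectOutputStream, value: AnyRef): Unit
  def readValue[T](in: ObjectInputStream): T
}

trait SerializerLike {
  def newInstance(): SerializerInstanceLike
}

class JavaSerializerLike extends SerializerLike {
  def newInstance(): SerializerInstanceLike = new SerializerInstanceLike {
    def writeValue(out: ObjectOutputStream, value: AnyRef): Unit = out.writeObject(value)
    def readValue[T](in: ObjectInputStream): T = in.readObject().asInstanceOf[T]
  }
}

// Stand-in for SparkEnv.get.serializer.
object Env {
  var serializer: SerializerLike = new JavaSerializerLike
}

class ExamplePartition[T](var rddId: Long, var slice: Int, var values: Seq[T])
  extends Serializable {

  // No-arg constructor so a deserializer can instantiate the partition first
  // and then populate its fields (mirrors the "for externalization" note above).
  def this() = this(0, 0, null)

  @throws(classOf[IOException])
  private def writeObject(out: ObjectOutputStream): Unit = {
    Env.serializer match {
      // Java serialization: use the default field-by-field write rather than
      // nesting a second serialization stream (and header) inside this one.
      case _: JavaSerializerLike => out.defaultWriteObject()
      case other =>
        // Any other serializer is assumed to handle Seq[T]; write the simple
        // fields directly and hand the values off to that serializer.
        out.writeLong(rddId)
        out.writeInt(slice)
        other.newInstance().writeValue(out, values)
    }
  }

  @throws(classOf[IOException])
  private def readObject(in: ObjectInputStream): Unit = {
    Env.serializer match {
      case _: JavaSerializerLike => in.defaultReadObject()
      case other =>
        rddId = in.readLong()
        slice = in.readInt()
        values = other.newInstance().readValue[Seq[T]](in)
    }
  }
}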
@@ -90,11 +90,11 @@ private[spark] class ParallelCollectionPartition[T: ClassManifest](
}
private[spark] class ParallelCollectionRDD[T: ClassManifest](
- @transient sc: SparkContext,
- @transient data: Seq[T],
- numSlices: Int,
- locationPrefs: Map[Int, Seq[String]])
- extends RDD[T](sc, Nil) {
+ @transient sc: SparkContext,
+ @transient data: Seq[T],
+ numSlices: Int,
+ locationPrefs: Map[Int, Seq[String]])
+ extends RDD[T](sc, Nil) {
// TODO: Right now, each split sends along its full data, even if later down the RDD chain it gets
// cached. It might be worthwhile to write the data to a file in the DFS and read it in the split
// instead.
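
ParallelCollectionRDD is the RDD that SparkContext.parallelize constructs in this era of the codebase (the spark package paths above), so the partitions being reformatted here are the ones created whenever a local Seq is turned into an RDD. A hedged usage sketch follows; the object name, master string, app name, and slice count are made up for the example.

import spark.SparkContext

object ParallelizeExample {
  def main(args: Array[String]): Unit = {
    // Local two-thread master; master string and app name are illustrative.
    val sc = new SparkContext("local[2]", "parallelize-example")

    // parallelize slices the local collection into the requested number of
    // partitions, constructing a ParallelCollectionRDD like the one edited above.
    val rdd = sc.parallelize(1 to 1000, 4)

    // Each ParallelCollectionPartition carries its slice of the data to the task.
    println(rdd.map(_ * 2).reduce(_ + _))

    sc.stop()
  }
}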