author     Matei Zaharia <matei@eecs.berkeley.edu>  2010-10-16 21:21:16 -0700
committer  Matei Zaharia <matei@eecs.berkeley.edu>  2010-10-16 21:21:16 -0700
commit     023ed194b465b1ada80be1ef0deea0da99ce109e (patch)
tree       bc878a92cece4e7f6a9e7f2b6f669c9055e2104b /src
parent     74bbfa91c252150811ce9617424cdeea3711bdf9 (diff)
Fixed some whitespace
Diffstat (limited to 'src')
-rw-r--r--  src/scala/spark/HadoopFile.scala |  4
-rw-r--r--  src/scala/spark/HttpServer.scala |  2
-rw-r--r--  src/scala/spark/RDD.scala        | 22
3 files changed, 14 insertions(+), 14 deletions(-)
diff --git a/src/scala/spark/HadoopFile.scala b/src/scala/spark/HadoopFile.scala
index aa28569eda..5746c433ee 100644
--- a/src/scala/spark/HadoopFile.scala
+++ b/src/scala/spark/HadoopFile.scala
@@ -15,7 +15,7 @@ import org.apache.hadoop.util.ReflectionUtils
/** A Spark split class that wraps around a Hadoop InputSplit */
@serializable class HadoopSplit(@transient s: InputSplit)
-extends Split {
+extends Split {
val inputSplit = new SerializableWritable[InputSplit](s)
// Hadoop gives each split a unique toString value, so use this as our ID
@@ -48,7 +48,7 @@ extends RDD[(K, V)](sc) {
}
override def splits = splits_
-
+
override def iterator(theSplit: Split) = new Iterator[(K, V)] {
val split = theSplit.asInstanceOf[HadoopSplit]
var reader: RecordReader[K, V] = null
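
For context, the HadoopSplit touched in the first hunk wraps Hadoop's InputSplit in a SerializableWritable, because InputSplit is a Hadoop Writable but not java.io.Serializable, yet the split still has to be shipped to the slave that runs the task. The following is only a rough sketch of what such a wrapper can look like, built on Hadoop's ObjectWritable helper; it is an illustration, not necessarily the SerializableWritable class in this tree.

import java.io.{ObjectInputStream, ObjectOutputStream}

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.io.{ObjectWritable, Writable}

// Sketch: delegate Java serialization to the wrapped Writable's own
// write()/readFields() methods via Hadoop's ObjectWritable.
class SerializableWritableSketch[T <: Writable](@transient private var t: T)
  extends Serializable {

  def value: T = t

  private def writeObject(out: ObjectOutputStream): Unit = {
    out.defaultWriteObject()
    new ObjectWritable(t).write(out)   // ObjectOutputStream is a DataOutput
  }

  private def readObject(in: ObjectInputStream): Unit = {
    in.defaultReadObject()
    val ow = new ObjectWritable()
    ow.setConf(new Configuration())    // lets ObjectWritable re-create the wrapped class
    ow.readFields(in)                  // ObjectInputStream is a DataInput
    t = ow.get().asInstanceOf[T]
  }
}

With a wrapper of this shape, HadoopSplit can hold the wrapped InputSplit as an ordinary serializable field even though the constructor argument s is marked @transient.
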
diff --git a/src/scala/spark/HttpServer.scala b/src/scala/spark/HttpServer.scala
index 08eb08bbe3..d5bdd245bb 100644
--- a/src/scala/spark/HttpServer.scala
+++ b/src/scala/spark/HttpServer.scala
@@ -11,7 +11,7 @@ import org.eclipse.jetty.util.thread.QueuedThreadPool
/**
- * Exception type thrown by HttpServer when it is in the wrong state
+ * Exception type thrown by HttpServer when it is in the wrong state
* for an operation.
*/
class ServerStateException(message: String) extends Exception(message)
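
ServerStateException, per the comment fixed above, signals that HttpServer was asked to do something it cannot do in its current state (for example, report its URI before being started, or be started twice). The sketch below illustrates only that guard pattern with hypothetical names; the actual HttpServer in this tree is Jetty-based, as the imports in the hunk suggest.

class ServerStateExceptionSketch(message: String) extends Exception(message)

// Illustration only: a single started flag guards every state-dependent operation.
class StateGuardedServer {
  private var started = false

  def start(): Unit = {
    if (started) throw new ServerStateExceptionSketch("Server is already started")
    // ... start the underlying HTTP server here ...
    started = true
  }

  def stop(): Unit = {
    if (!started) throw new ServerStateExceptionSketch("Server is not started")
    // ... stop the underlying HTTP server here ...
    started = false
  }

  def uri: String = {
    if (!started) throw new ServerStateExceptionSketch("Server is not started")
    "http://host:port"   // placeholder; a real server reports its bound address
  }
}
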
diff --git a/src/scala/spark/RDD.scala b/src/scala/spark/RDD.scala
index 20865d7d28..9dd8bc9dce 100644
--- a/src/scala/spark/RDD.scala
+++ b/src/scala/spark/RDD.scala
@@ -78,7 +78,7 @@ abstract class RDD[T: ClassManifest](
case _ => throw new UnsupportedOperationException("empty collection")
}
- def count(): Long =
+ def count(): Long =
try { map(x => 1L).reduce(_+_) }
catch { case e: UnsupportedOperationException => 0L }
@@ -128,7 +128,7 @@ extends RDDTask[Option[T], T](rdd, split) with Logging {
}
class MappedRDD[U: ClassManifest, T: ClassManifest](
- prev: RDD[T], f: T => U)
+ prev: RDD[T], f: T => U)
extends RDD[U](prev.sparkContext) {
override def splits = prev.splits
override def preferredLocations(split: Split) = prev.preferredLocations(split)
@@ -137,7 +137,7 @@ extends RDD[U](prev.sparkContext) {
}
class FilteredRDD[T: ClassManifest](
- prev: RDD[T], f: T => Boolean)
+ prev: RDD[T], f: T => Boolean)
extends RDD[T](prev.sparkContext) {
override def splits = prev.splits
override def preferredLocations(split: Split) = prev.preferredLocations(split)
@@ -146,7 +146,7 @@ extends RDD[T](prev.sparkContext) {
}
class FlatMappedRDD[U: ClassManifest, T: ClassManifest](
- prev: RDD[T], f: T => Traversable[U])
+ prev: RDD[T], f: T => Traversable[U])
extends RDD[U](prev.sparkContext) {
override def splits = prev.splits
override def preferredLocations(split: Split) = prev.preferredLocations(split)
@@ -155,7 +155,7 @@ extends RDD[U](prev.sparkContext) {
override def taskStarted(split: Split, slot: SlaveOffer) = prev.taskStarted(split, slot)
}
-class SplitRDD[T: ClassManifest](prev: RDD[T])
+class SplitRDD[T: ClassManifest](prev: RDD[T])
extends RDD[Array[T]](prev.sparkContext) {
override def splits = prev.splits
override def preferredLocations(split: Split) = prev.preferredLocations(split)
@@ -170,16 +170,16 @@ extends RDD[Array[T]](prev.sparkContext) {
}
class SampledRDD[T: ClassManifest](
- prev: RDD[T], withReplacement: Boolean, frac: Double, seed: Int)
+ prev: RDD[T], withReplacement: Boolean, frac: Double, seed: Int)
extends RDD[T](prev.sparkContext) {
-
+
@transient val splits_ = { val rg = new Random(seed); prev.splits.map(x => new SeededSplit(x, rg.nextInt)) }
override def splits = splits_.asInstanceOf[Array[Split]]
override def preferredLocations(split: Split) = prev.preferredLocations(split.asInstanceOf[SeededSplit].prev)
- override def iterator(splitIn: Split) = {
+ override def iterator(splitIn: Split) = {
val split = splitIn.asInstanceOf[SeededSplit]
val rg = new Random(split.seed);
// Sampling with replacement (TODO: use reservoir sampling to make this more efficient?)
@@ -213,7 +213,7 @@ extends RDD[T](prev.sparkContext) with Logging {
else
prev.preferredLocations(split)
}
-
+
override def iterator(split: Split): Iterator[T] = {
val key = id + "::" + split.getId()
logInfo("CachedRDD split key is " + key)
@@ -278,7 +278,7 @@ extends Split {
class UnionRDD[T: ClassManifest](sc: SparkContext, rdds: Seq[RDD[T]])
extends RDD[T](sc) {
@transient val splits_ : Array[Split] = {
- val splits: Seq[Split] =
+ val splits: Seq[Split] =
for (rdd <- rdds; split <- rdd.splits)
yield new UnionSplit(rdd, split)
splits.toArray
@@ -289,7 +289,7 @@ extends RDD[T](sc) {
override def iterator(s: Split): Iterator[T] =
s.asInstanceOf[UnionSplit[T]].iterator()
- override def preferredLocations(s: Split): Seq[String] =
+ override def preferredLocations(s: Split): Seq[String] =
s.asInstanceOf[UnionSplit[T]].preferredLocations()
}
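
Taken together, the RDD.scala hunks show the pattern the early transformation RDDs follow: each derived RDD (MappedRDD, FilteredRDD, FlatMappedRDD, SampledRDD, UnionRDD) keeps a reference to its parent, reuses the parent's splits and preferred locations, and only changes how iterator() produces elements; count() is likewise built from map and reduce. Below is a self-contained, single-machine sketch of that pattern with simplified stand-in names (no ClassManifest, SparkContext, or distributed reduce), intended purely as an illustration of the structure visible in the diff.

// Minimal stand-ins for the real Split/RDD types; illustration only.
trait MiniSplit

abstract class MiniRDD[T] {
  def splits: Array[MiniSplit]
  def preferredLocations(split: MiniSplit): Seq[String]
  def iterator(split: MiniSplit): Iterator[T]

  def map[U](f: T => U): MiniRDD[U] = new MappedMiniRDD(this, f)
  def filter(f: T => Boolean): MiniRDD[T] = new FilteredMiniRDD(this, f)

  // Same shape as the diff: reduce over all elements, with count() built on top.
  def reduce(f: (T, T) => T): T = {
    val it = splits.iterator.flatMap(s => iterator(s))
    if (!it.hasNext) throw new UnsupportedOperationException("empty collection")
    it.reduceLeft(f)
  }

  def count(): Long =
    try { map(_ => 1L).reduce(_ + _) }
    catch { case _: UnsupportedOperationException => 0L }
}

// Derived RDDs delegate splits/preferredLocations to the parent and wrap its iterator.
class MappedMiniRDD[U, T](prev: MiniRDD[T], f: T => U) extends MiniRDD[U] {
  def splits = prev.splits
  def preferredLocations(split: MiniSplit) = prev.preferredLocations(split)
  def iterator(split: MiniSplit) = prev.iterator(split).map(f)
}

class FilteredMiniRDD[T](prev: MiniRDD[T], f: T => Boolean) extends MiniRDD[T] {
  def splits = prev.splits
  def preferredLocations(split: MiniSplit) = prev.preferredLocations(split)
  def iterator(split: MiniSplit) = prev.iterator(split).filter(f)
}

// Tiny usage example over one in-memory split.
object MiniRDDDemo {
  def main(args: Array[String]): Unit = {
    val data = Array(1, 2, 3, 4, 5)
    val base = new MiniRDD[Int] {
      private val only = new MiniSplit {}
      def splits = Array(only)
      def preferredLocations(split: MiniSplit) = Nil
      def iterator(split: MiniSplit) = data.iterator
    }
    println(base.map(_ * 2).filter(_ > 4).count())   // prints 3
  }
}
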