author    Imran Rashid <imran@quantifind.com>      2012-07-16 18:25:15 -0700
committer Matei Zaharia <matei@eecs.berkeley.edu>  2012-07-28 20:16:01 -0700
commit    3b392c67dbeb7b2267015ffbeb2aac70dfc01870 (patch)
tree      0b80632074f8b26b1e369de70391cd7186f462a7
parent    f1face1ea9d256f76911edff12334b6cde43d214 (diff)
fix up scaladoc, naming of type parameters
-rw-r--r--  core/src/main/scala/spark/Accumulators.scala  24
-rw-r--r--  core/src/main/scala/spark/SparkContext.scala    3
2 files changed, 12 insertions, 15 deletions
diff --git a/core/src/main/scala/spark/Accumulators.scala b/core/src/main/scala/spark/Accumulators.scala
index 5a1ca49626..16e3657898 100644
--- a/core/src/main/scala/spark/Accumulators.scala
+++ b/core/src/main/scala/spark/Accumulators.scala
@@ -19,7 +19,7 @@ class Accumulable[T,R] (
/**
* add more data to this accumulator / accumulable
- * @param term
+ * @param term the data to add
*/
def += (term: R) { value_ = param.addAccumulator(value_, term) }
@@ -27,7 +27,7 @@ class Accumulable[T,R] (
* merge two accumulable objects together
* <p>
* Normally, a user will not want to use this version, but will instead call `+=`.
- * @param term
+ * @param term the other Accumulable that will get merged with this
*/
def ++= (term: T) { value_ = param.addInPlace(value_, term)}
def value = this.value_
@@ -64,33 +64,33 @@ trait AccumulatorParam[T] extends AccumulableParam[T,T] {
/**
* A datatype that can be accumulated, ie. has a commutative & associative +.
- * <p>
+ *
* You must define how to add data, and how to merge two of these together. For some datatypes, these might be
* the same operation (eg., a counter). In that case, you might want to use [[spark.AccumulatorParam]]. They won't
* always be the same, though -- eg., imagine you are accumulating a set. You will add items to the set, and you
* will union two sets together.
*
- * @tparam T the full accumulated data
- * @tparam R partial data that can be added in
+ * @tparam R the full accumulated data
+ * @tparam T partial data that can be added in
*/
-trait AccumulableParam[T,R] extends Serializable {
+trait AccumulableParam[R,T] extends Serializable {
/**
* Add additional data to the accumulator value.
* @param t1 the current value of the accumulator
* @param t2 the data to be added to the accumulator
* @return the new value of the accumulator
*/
- def addAccumulator(t1: T, t2: R) : T
+ def addAccumulator(t1: R, t2: T) : R
/**
* merge two accumulated values together
- * @param t1
- * @param t2
- * @return
+ * @param t1 one set of accumulated data
+ * @param t2 another set of accumulated data
+ * @return both data sets merged together
*/
- def addInPlace(t1: T, t2: T): T
+ def addInPlace(t1: R, t2: R): R
- def zero(initialValue: T): T
+ def zero(initialValue: R): R
}
// TODO: The multi-thread support in accumulators is kind of lame; check
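
With the renaming, AccumulableParam[R,T] now reads naturally: R is the full accumulated value and T is the partial data added in. As a concrete illustration of the set example mentioned in the scaladoc above, here is a minimal sketch of an implementation (not part of this commit; the object name SetParam and the choice of mutable.Set[Int] are assumptions for illustration):

import scala.collection.mutable
import spark.AccumulableParam

// Sketch: accumulate Ints into a Set[Int], per the scaladoc's set example.
// Here R = mutable.Set[Int] (the full accumulated data), T = Int (partial data).
object SetParam extends AccumulableParam[mutable.Set[Int], Int] {
  // adding one item: insert it into the accumulated set
  def addAccumulator(t1: mutable.Set[Int], t2: Int): mutable.Set[Int] =
    t1 += t2
  // merging two accumulated values: union the two sets
  def addInPlace(t1: mutable.Set[Int], t2: mutable.Set[Int]): mutable.Set[Int] =
    t1 ++= t2
  // the zero value: an empty set
  def zero(initialValue: mutable.Set[Int]): mutable.Set[Int] =
    mutable.Set.empty[Int]
}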
diff --git a/core/src/main/scala/spark/SparkContext.scala b/core/src/main/scala/spark/SparkContext.scala
index ea85324c35..32f37822a5 100644
--- a/core/src/main/scala/spark/SparkContext.scala
+++ b/core/src/main/scala/spark/SparkContext.scala
@@ -287,11 +287,8 @@ class SparkContext(
/**
* create an accumulatable shared variable, with a `+=` method
- * @param initialValue
- * @param param
* @tparam T accumulator type
* @tparam R type that can be added to the accumulator
- * @return
*/
def accumulable[T,R](initialValue: T)(implicit param: AccumulableParam[T,R]) =
new Accumulable(initialValue, param)
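
For context, a minimal usage sketch of this method, assuming a SparkContext named sc and the SetParam object sketched under the Accumulators.scala diff above (both assumptions, not part of this commit):

// driver side: here T = mutable.Set[Int], R = Int
val seen = sc.accumulable(scala.collection.mutable.Set.empty[Int])(SetParam)

// tasks add partial data with `+=`; Spark merges per-task results back
// into the driver-side value via addInPlace
sc.parallelize(1 to 100).foreach(i => seen += i)

// the merged result is read back on the driver
println(seen.value.size)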