aboutsummaryrefslogtreecommitdiff
path: root/core/src
diff options
context:
space:
mode:
authorMatei Zaharia <matei@eecs.berkeley.edu>2013-06-19 15:21:03 -0700
committerMatei Zaharia <matei@eecs.berkeley.edu>2013-06-19 15:21:03 -0700
commit71030ba3ebb0a2bc371f51383aaf11e6c2dcfc05 (patch)
tree0996e316187d8580f978be51e42cd6a17237d0b2 /core/src
parent7902baddc797f86f5bdbcc966f5cd60545638bf7 (diff)
parent0a2a9bce1e83e891334985c29176c6426b8b1751 (diff)
downloadspark-71030ba3ebb0a2bc371f51383aaf11e6c2dcfc05.tar.gz
spark-71030ba3ebb0a2bc371f51383aaf11e6c2dcfc05.tar.bz2
spark-71030ba3ebb0a2bc371f51383aaf11e6c2dcfc05.zip
Merge pull request #654 from lyogavin/enhance_pipe
fix typo and coding style in #638
Diffstat (limited to 'core/src')
-rw-r--r--core/src/main/scala/spark/RDD.scala14
-rw-r--r--core/src/main/scala/spark/rdd/PipedRDD.scala2
2 files changed, 8 insertions, 8 deletions
diff --git a/core/src/main/scala/spark/RDD.scala b/core/src/main/scala/spark/RDD.scala
index 223dcdc19d..709271d4eb 100644
--- a/core/src/main/scala/spark/RDD.scala
+++ b/core/src/main/scala/spark/RDD.scala
@@ -368,13 +368,13 @@ abstract class RDD[T: ClassManifest](
 * @param printPipeContext Before piping elements, this function is called as an opportunity
* to pipe context data. Print line function (like out.println) will be
* passed as printPipeContext's parameter.
- * @param printPipeContext Use this function to customize how to pipe elements. This function
- * will be called with each RDD element as the 1st parameter, and the
- * print line function (like out.println()) as the 2nd parameter.
- * An example of pipe the RDD data of groupBy() in a streaming way,
- * instead of constructing a huge String to concat all the elements:
- * def printRDDElement(record:(String, Seq[String]), f:String=>Unit) =
- * for (e <- record._2){f(e)}
+ * @param printRDDElement Use this function to customize how to pipe elements. This function
+ * will be called with each RDD element as the 1st parameter, and the
+ * print line function (like out.println()) as the 2nd parameter.
+ * An example of pipe the RDD data of groupBy() in a streaming way,
+ * instead of constructing a huge String to concat all the elements:
+ * def printRDDElement(record:(String, Seq[String]), f:String=>Unit) =
+ * for (e <- record._2){f(e)}
* @return the result RDD
*/
def pipe(
diff --git a/core/src/main/scala/spark/rdd/PipedRDD.scala b/core/src/main/scala/spark/rdd/PipedRDD.scala
index b2c07891ab..c0baf43d43 100644
--- a/core/src/main/scala/spark/rdd/PipedRDD.scala
+++ b/core/src/main/scala/spark/rdd/PipedRDD.scala
@@ -62,7 +62,7 @@ class PipedRDD[T: ClassManifest](
val out = new PrintWriter(proc.getOutputStream)
// input the pipe context firstly
- if ( printPipeContext != null) {
+ if (printPipeContext != null) {
printPipeContext(out.println(_))
}
for (elem <- firstParent[T].iterator(split, context)) {