 docs/programming-guide.md | 2 +-
 python/pyspark/rdd.py     | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/docs/programming-guide.md b/docs/programming-guide.md
index 2443fc29b4..6486614e71 100644
--- a/docs/programming-guide.md
+++ b/docs/programming-guide.md
@@ -886,7 +886,7 @@ for details.
<td> <b>groupByKey</b>([<i>numTasks</i>]) </td>
<td> When called on a dataset of (K, V) pairs, returns a dataset of (K, Iterable&lt;V&gt;) pairs. <br />
<b>Note:</b> If you are grouping in order to perform an aggregation (such as a sum or
- average) over each key, using <code>reduceByKey</code> or <code>combineByKey</code> will yield much better
+ average) over each key, using <code>reduceByKey</code> or <code>aggregateByKey</code> will yield much better
performance.
<br />
<b>Note:</b> By default, the level of parallelism in the output depends on the number of partitions of the parent RDD.
diff --git a/python/pyspark/rdd.py b/python/pyspark/rdd.py
index f4cfe4845d..efd2f35912 100644
--- a/python/pyspark/rdd.py
+++ b/python/pyspark/rdd.py
@@ -1634,8 +1634,8 @@ class RDD(object):
Hash-partitions the resulting RDD with numPartitions partitions.
Note: If you are grouping in order to perform an aggregation (such as a
- sum or average) over each key, using reduceByKey will provide much
- better performance.
+ sum or average) over each key, using reduceByKey or aggregateByKey will
+ provide much better performance.
>>> x = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> map((lambda (x,y): (x, list(y))), sorted(x.groupByKey().collect()))
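As a worked illustration of the recommendation this patch adds, the sketch below computes a per-key average two ways. It is not part of the patch; it assumes a live SparkContext named sc, as in the doctests above, and uses only RDD methods available at this point (groupByKey, aggregateByKey, mapValues).

    x = sc.parallelize([("a", 1), ("b", 1), ("a", 3)])

    # groupByKey materializes every value for a key before aggregating,
    # so all values are shuffled across the network:
    avg_grouped = x.groupByKey().mapValues(lambda vs: sum(vs) / float(len(vs)))

    # aggregateByKey combines values map-side into small (sum, count)
    # accumulators, so only the accumulators are shuffled:
    sum_count = x.aggregateByKey(
        (0, 0),                                   # zero value: (sum, count)
        lambda acc, v: (acc[0] + v, acc[1] + 1),  # fold one value into an accumulator
        lambda a, b: (a[0] + b[0], a[1] + b[1]))  # merge two accumulators
    avg_agg = sum_count.mapValues(lambda p: p[0] / float(p[1]))

    print(sorted(avg_agg.collect()))  # [('a', 2.0), ('b', 1.0)]

The map-side combining is what the note means by "much better performance": for a skewed key with millions of values, groupByKey ships every value to one reducer, while aggregateByKey ships one small pair per partition.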