author     Aaron Davidson <aaron@databricks.com>   2014-07-14 23:38:12 -0700
committer  Patrick Wendell <pwendell@gmail.com>    2014-07-14 23:38:12 -0700
commit     a2aa7bebae31e1e7ec23d31aaa436283743b283b (patch)
tree       7528f40751d1372a61ce9a59a3e4d8d252edb69a /core/src/main/scala
parent     1f99fea53b5ff994dd4a12b44625d35186e269ff (diff)
Add/increase severity of warning in documentation of groupBy()
groupBy()/groupByKey() is notorious for being a very convenient API that can
lead to poor performance when used incorrectly. This PR just makes it clear
that users should be cautious not to rely on this API when they really want a
different (more performant) one, such as reduceByKey().

(Note that one source of confusion is the name; this groupBy() is not the
same as a SQL GROUP-BY, which is used for aggregation and is more similar in
nature to Spark's reduceByKey().)

Author: Aaron Davidson <aaron@databricks.com>

Closes #1380 from aarondav/warning and squashes the following commits:

f60da39 [Aaron Davidson] Give better advice
d0afb68 [Aaron Davidson] Add/increase severity of warning in documentation of groupBy()
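A minimal sketch of the performance difference the commit message describes, run against a local SparkContext; the RDD contents and names here are illustrative, not part of this patch. reduceByKey combines values within each partition before the shuffle, while groupByKey ships every raw value across the network before any aggregation happens.

    import org.apache.spark.{SparkConf, SparkContext}
    import org.apache.spark.SparkContext._ // pair-RDD implicits (needed on Spark 1.x)

    object GroupByVsReduceBy {
      def main(args: Array[String]): Unit = {
        val sc = new SparkContext(
          new SparkConf().setAppName("groupBy-warning-demo").setMaster("local[*]"))
        val pairs = sc.parallelize(Seq(("a", 1), ("b", 2), ("a", 3), ("b", 4)))

        // Expensive: shuffles every value, then materializes a full Iterable per key.
        val sumsViaGroup = pairs.groupByKey().mapValues(_.sum)

        // Preferred: partial sums are computed map-side, so far less data is shuffled.
        val sumsViaReduce = pairs.reduceByKey(_ + _)

        println(sumsViaGroup.collect().mkString(", "))  // e.g. (a,4), (b,6) (order not guaranteed)
        println(sumsViaReduce.collect().mkString(", ")) // same result, much cheaper shuffle
        sc.stop()
      }
    }

Both variants produce the same (key, sum) pairs; the difference is only in how much data crosses the network, which is exactly why the new doc note calls this operation "very expensive".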
Diffstat (limited to 'core/src/main/scala')
-rw-r--r--  core/src/main/scala/org/apache/spark/rdd/PairRDDFunctions.scala | 18
-rw-r--r--  core/src/main/scala/org/apache/spark/rdd/RDD.scala              | 12
2 files changed, 21 insertions, 9 deletions
diff --git a/core/src/main/scala/org/apache/spark/rdd/PairRDDFunctions.scala b/core/src/main/scala/org/apache/spark/rdd/PairRDDFunctions.scala
index fc9beb166b..9d62d53fcb 100644
--- a/core/src/main/scala/org/apache/spark/rdd/PairRDDFunctions.scala
+++ b/core/src/main/scala/org/apache/spark/rdd/PairRDDFunctions.scala
@@ -353,9 +353,9 @@ class PairRDDFunctions[K, V](self: RDD[(K, V)])
* Group the values for each key in the RDD into a single sequence. Allows controlling the
* partitioning of the resulting key-value pair RDD by passing a Partitioner.
*
- * Note: If you are grouping in order to perform an aggregation (such as a sum or average) over
- * each key, using [[PairRDDFunctions.reduceByKey]] or [[PairRDDFunctions.combineByKey]]
- * will provide much better performance.
+ * Note: This operation may be very expensive. If you are grouping in order to perform an
+ * aggregation (such as a sum or average) over each key, using [[PairRDDFunctions.aggregateByKey]]
+ * or [[PairRDDFunctions.reduceByKey]] will provide much better performance.
*/
def groupByKey(partitioner: Partitioner): RDD[(K, Iterable[V])] = {
// groupByKey shouldn't use map side combine because map side combine does not
@@ -373,9 +373,9 @@ class PairRDDFunctions[K, V](self: RDD[(K, V)])
* Group the values for each key in the RDD into a single sequence. Hash-partitions the
 * resulting RDD into `numPartitions` partitions.
*
- * Note: If you are grouping in order to perform an aggregation (such as a sum or average) over
- * each key, using [[PairRDDFunctions.reduceByKey]] or [[PairRDDFunctions.combineByKey]]
- * will provide much better performance.
+ * Note: This operation may be very expensive. If you are grouping in order to perform an
+ * aggregation (such as a sum or average) over each key, using [[PairRDDFunctions.aggregateByKey]]
+ * or [[PairRDDFunctions.reduceByKey]] will provide much better performance.
*/
def groupByKey(numPartitions: Int): RDD[(K, Iterable[V])] = {
groupByKey(new HashPartitioner(numPartitions))
@@ -462,9 +462,9 @@ class PairRDDFunctions[K, V](self: RDD[(K, V)])
* Group the values for each key in the RDD into a single sequence. Hash-partitions the
* resulting RDD with the existing partitioner/parallelism level.
*
- * Note: If you are grouping in order to perform an aggregation (such as a sum or average) over
- * each key, using [[PairRDDFunctions.reduceByKey]] or [[PairRDDFunctions.combineByKey]]
- * will provide much better performance,
+ * Note: This operation may be very expensive. If you are grouping in order to perform an
+ * aggregation (such as a sum or average) over each key, using [[PairRDDFunctions.aggregateByKey]]
+ * or [[PairRDDFunctions.reduceByKey]] will provide much better performance.
*/
def groupByKey(): RDD[(K, Iterable[V])] = {
groupByKey(defaultPartitioner(self))
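The hunks above now point users at aggregateByKey as well. A hedged sketch of the "aggregation (such as a sum or average) over each key" case, assuming an active SparkContext `sc` (e.g. the spark-shell, where these imports are already in scope); `ratings` is a made-up example RDD, not from the patch:

    import org.apache.spark.rdd.RDD
    import org.apache.spark.SparkContext._ // pair-RDD implicits (needed on Spark 1.x)

    val ratings: RDD[(String, Double)] =
      sc.parallelize(Seq(("x", 1.0), ("x", 3.0), ("y", 4.0)))

    // Accumulate (sum, count) per key; the seqOp runs map-side, so no per-key
    // Iterable of raw values is ever built (unlike groupByKey().mapValues(...)).
    val sumCounts = ratings.aggregateByKey((0.0, 0L))(
      (acc, v) => (acc._1 + v, acc._2 + 1L), // fold one value into a partition-local accumulator
      (a, b) => (a._1 + b._1, a._2 + b._2))  // merge accumulators across partitions

    val averages = sumCounts.mapValues { case (sum, n) => sum / n } // (x,2.0), (y,4.0)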
diff --git a/core/src/main/scala/org/apache/spark/rdd/RDD.scala b/core/src/main/scala/org/apache/spark/rdd/RDD.scala
index 4e841bc992..a25f263bea 100644
--- a/core/src/main/scala/org/apache/spark/rdd/RDD.scala
+++ b/core/src/main/scala/org/apache/spark/rdd/RDD.scala
@@ -509,6 +509,10 @@ abstract class RDD[T: ClassTag](
/**
* Return an RDD of grouped items. Each group consists of a key and a sequence of elements
* mapping to that key.
+ *
+ * Note: This operation may be very expensive. If you are grouping in order to perform an
+ * aggregation (such as a sum or average) over each key, using [[PairRDDFunctions.aggregateByKey]]
+ * or [[PairRDDFunctions.reduceByKey]] will provide much better performance.
*/
def groupBy[K](f: T => K)(implicit kt: ClassTag[K]): RDD[(K, Iterable[T])] =
groupBy[K](f, defaultPartitioner(this))
@@ -516,6 +520,10 @@ abstract class RDD[T: ClassTag](
/**
* Return an RDD of grouped elements. Each group consists of a key and a sequence of elements
* mapping to that key.
+ *
+ * Note: This operation may be very expensive. If you are grouping in order to perform an
+ * aggregation (such as a sum or average) over each key, using [[PairRDDFunctions.aggregateByKey]]
+ * or [[PairRDDFunctions.reduceByKey]] will provide much better performance.
*/
def groupBy[K](f: T => K, numPartitions: Int)(implicit kt: ClassTag[K]): RDD[(K, Iterable[T])] =
groupBy(f, new HashPartitioner(numPartitions))
@@ -523,6 +531,10 @@ abstract class RDD[T: ClassTag](
/**
* Return an RDD of grouped items. Each group consists of a key and a sequence of elements
* mapping to that key.
+ *
+ * Note: This operation may be very expensive. If you are grouping in order to perform an
+ * aggregation (such as a sum or average) over each key, using [[PairRDDFunctions.aggregateByKey]]
+ * or [[PairRDDFunctions.reduceByKey]] will provide much better performance.
*/
def groupBy[K](f: T => K, p: Partitioner)(implicit kt: ClassTag[K], ord: Ordering[K] = null)
: RDD[(K, Iterable[T])] = {
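The same advice applies to the un-keyed RDD.groupBy documented above: when the goal is an aggregate per key, key the elements explicitly and reduce instead. A small sketch under the same assumptions as before (`sc` active; `words` is a hypothetical RDD):

    import org.apache.spark.rdd.RDD
    import org.apache.spark.SparkContext._ // pair-RDD implicits (needed on Spark 1.x)

    val words: RDD[String] = sc.parallelize(Seq("spark", "scala", "shuffle"))

    // Expensive: builds a full Iterable[String] per key just to count its size.
    val countsViaGroupBy = words.groupBy(_.head).mapValues(_.size)

    // Cheaper: emit (key, 1) pairs and let reduceByKey combine counts map-side.
    val countsViaReduce = words.map(w => (w.head, 1)).reduceByKey(_ + _)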