about summary refs log tree commit diff
path: root/sql
diff options
context:
space:
mode:
author raela <raela@databricks.com> 2016-02-10 17:00:54 -0800
committer Reynold Xin <rxin@databricks.com> 2016-02-10 17:00:54 -0800
commit 719973b05ef6d6b9fbb83d76aebac6454ae84fad (patch)
tree 0eaa9f4f9c44fe6dbf0e37f559a0df3de7686788 /sql
parent 0902e20288366db6270f3a444e66114b1b63a3e2 (diff)
download spark-719973b05ef6d6b9fbb83d76aebac6454ae84fad.tar.gz
spark-719973b05ef6d6b9fbb83d76aebac6454ae84fad.tar.bz2
spark-719973b05ef6d6b9fbb83d76aebac6454ae84fad.zip
[SPARK-13274] Fix Aggregator Links on GroupedDataset Scala API
Update Aggregator links to point to #org.apache.spark.sql.expressions.Aggregator Author: raela <raela@databricks.com> Closes #11158 from raelawang/master.
Diffstat (limited to 'sql')
-rw-r--r-- sql/core/src/main/scala/org/apache/spark/sql/GroupedDataset.scala | 12
1 file changed, 8 insertions(+), 4 deletions(-)
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/GroupedDataset.scala b/sql/core/src/main/scala/org/apache/spark/sql/GroupedDataset.scala
index c0e28f2dc5..53cb8eb524 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/GroupedDataset.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/GroupedDataset.scala
@@ -101,7 +101,8 @@ class GroupedDataset[K, V] private[sql](
*
* This function does not support partial aggregation, and as a result requires shuffling all
* the data in the [[Dataset]]. If an application intends to perform an aggregation over each
- * key, it is best to use the reduce function or an [[Aggregator]].
+ * key, it is best to use the reduce function or an
+ * [[org.apache.spark.sql.expressions#Aggregator Aggregator]].
*
* Internally, the implementation will spill to disk if any given group is too large to fit into
* memory. However, users must take care to avoid materializing the whole iterator for a group
@@ -128,7 +129,8 @@ class GroupedDataset[K, V] private[sql](
*
* This function does not support partial aggregation, and as a result requires shuffling all
* the data in the [[Dataset]]. If an application intends to perform an aggregation over each
- * key, it is best to use the reduce function or an [[Aggregator]].
+ * key, it is best to use the reduce function or an
+ * [[org.apache.spark.sql.expressions#Aggregator Aggregator]].
*
* Internally, the implementation will spill to disk if any given group is too large to fit into
* memory. However, users must take care to avoid materializing the whole iterator for a group
@@ -148,7 +150,8 @@ class GroupedDataset[K, V] private[sql](
*
* This function does not support partial aggregation, and as a result requires shuffling all
* the data in the [[Dataset]]. If an application intends to perform an aggregation over each
- * key, it is best to use the reduce function or an [[Aggregator]].
+ * key, it is best to use the reduce function or an
+ * [[org.apache.spark.sql.expressions#Aggregator Aggregator]].
*
* Internally, the implementation will spill to disk if any given group is too large to fit into
* memory. However, users must take care to avoid materializing the whole iterator for a group
@@ -169,7 +172,8 @@ class GroupedDataset[K, V] private[sql](
*
* This function does not support partial aggregation, and as a result requires shuffling all
* the data in the [[Dataset]]. If an application intends to perform an aggregation over each
- * key, it is best to use the reduce function or an [[Aggregator]].
+ * key, it is best to use the reduce function or an
+ * [[org.apache.spark.sql.expressions#Aggregator Aggregator]].
*
* Internally, the implementation will spill to disk if any given group is too large to fit into
* memory. However, users must take care to avoid materializing the whole iterator for a group