From 0a4844f90a712e796c9404b422cea76d21a5d2e3 Mon Sep 17 00:00:00 2001
From: Reynold Xin
Date: Mon, 11 May 2015 11:35:16 -0700
Subject: [SPARK-7462] By default retain group by columns in aggregate

Updated Java, Scala, Python, and R.

Author: Reynold Xin
Author: Shivaram Venkataraman

Closes #5996 from rxin/groupby-retain and squashes the following commits:

aac7119 [Reynold Xin] Merge branch 'groupby-retain' of github.com:rxin/spark into groupby-retain
f6858f6 [Reynold Xin] Merge branch 'master' into groupby-retain
5f923c0 [Reynold Xin] Merge pull request #15 from shivaram/sparkr-groupby-retrain
c1de670 [Shivaram Venkataraman] Revert workaround in SparkR to retain grouped cols Based on reverting code added in commit https://github.com/amplab-extras/spark/commit/9a6be746efc9fafad88122fa2267862ef87aa0e1
b8b87e1 [Reynold Xin] Fixed DataFrameJoinSuite.
d910141 [Reynold Xin] Updated rest of the files
1e6e666 [Reynold Xin] [SPARK-7462] By default retain group by columns in aggregate
---
 R/pkg/R/group.R | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

(limited to 'R')

diff --git a/R/pkg/R/group.R b/R/pkg/R/group.R
index 5a7a8a2cab..b758481997 100644
--- a/R/pkg/R/group.R
+++ b/R/pkg/R/group.R
@@ -102,9 +102,7 @@ setMethod("agg",
       }
     }
     jcols <- lapply(cols, function(c) { c@jc })
-    # the GroupedData.agg(col, cols*) API does not contain grouping Column
-    sdf <- callJStatic("org.apache.spark.sql.api.r.SQLUtils", "aggWithGrouping",
-                       x@sgd, listToSeq(jcols))
+    sdf <- callJMethod(x@sgd, "agg", jcols[[1]], listToSeq(jcols[-1]))
   } else {
     stop("agg can only support Column or character")
   }
--
cgit v1.2.3
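
For reference, a minimal SparkR sketch of the behavior this change enables, assuming a Spark 1.4-era SparkR session; the names sqlContext, df, and gd are illustrative, not part of the patch:

    # Build a DataFrame from a local R data set (faithful ships with base R).
    df <- createDataFrame(sqlContext, faithful)
    gd <- groupBy(df, df$waiting)
    # With this change, the aggregated result retains the grouping column
    # `waiting` alongside the aggregate, instead of returning the aggregate alone.
    head(agg(gd, avg_eruptions = avg(df$eruptions)))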