author     Takeshi YAMAMURO <linguin.m.s@gmail.com>    2016-12-22 01:53:33 +0100
committer  Herman van Hovell <hvanhovell@databricks.com>    2016-12-22 01:53:33 +0100
commit     b41ec997786e2be42a8a2a182212a610d08b221b (patch)
tree       7f60e50dedd0407d6a4a4316241228e064602723
parent     83a6ace0d1be44f70e768348ae6688798c84343e (diff)
[SPARK-18528][SQL] Fix a bug in initialising the aggregation buffer iterator
## What changes were proposed in this pull request?

This PR fixes a `NullPointerException` thrown by the following `limit + aggregate` query:

```
scala> val df = Seq(("a", 1), ("b", 2), ("c", 1), ("d", 5)).toDF("id", "value")
scala> df.limit(2).groupBy("id").count().show
WARN TaskSetManager: Lost task 0.0 in stage 9.0 (TID 8204, lvsp20hdn012.stubprod.com): java.lang.NullPointerException
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIterator.agg_doAggregateWithKeys$(Unknown Source)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIterator.processNext(Unknown Source)
```

The root cause is that [`$doAgg()`](https://github.com/apache/spark/blob/master/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/HashAggregateExec.scala#L596) skips the initialization of [the buffer iterator](https://github.com/apache/spark/blob/master/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/HashAggregateExec.scala#L603): `BaseLimitExec` sets `stopEarly=true`, so `$doAgg()` returns from the middle of the loop without ever initialising that iterator.

## How was this patch tested?

Added a test to `DataFrameAggregateSuite.scala` checking that `limit + aggregate` queries throw no exception.

Author: Takeshi YAMAMURO <linguin.m.s@gmail.com>

Closes #15980 from maropu/SPARK-18528.
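To make the failure mode concrete, here is a minimal, self-contained Scala sketch of the control flow the generated iterator follows. It is illustrative only: `AggLikeIterator`, `bufferIter`, and the flag names are invented for this example and do not appear in Spark's generated code.

```scala
import scala.collection.mutable

// Illustrative model of the generated control flow; not Spark source.
class AggLikeIterator(input: Iterator[String], limit: Int) {
  private var count = 0
  private var stopEarlyFlag = false
  // The "aggregation buffer" iterator: null until doAggregate() runs to completion.
  private var bufferIter: Iterator[(String, Long)] = _

  // The hook this patch adds: it gates fetching input, nothing else.
  private def stopEarly(): Boolean = stopEarlyFlag

  private def doAggregate(): Unit = {
    val buffer = mutable.Map.empty[String, Long].withDefaultValue(0L)
    while (input.hasNext && !stopEarly()) {  // the loop InputAdapter generates
      val id = input.next()
      if (count < limit) { count += 1; buffer(id) += 1 }
      else stopEarlyFlag = true
      // Pre-fix, the limit logic lived in shouldStop() and a generated
      // `if (shouldStop()) return;` here bailed out before bufferIter was set.
    }
    bufferIter = buffer.iterator  // with the fix, this line always runs
  }

  def processNext(): Iterator[(String, Long)] = { doAggregate(); bufferIter }
}
```

Running `new AggLikeIterator(Iterator("a", "b", "c", "d"), 2).processNext()` yields counts for the two rows the limit kept, where the pre-fix control flow would have left `bufferIter` null.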
-rw-r--r--  sql/core/src/main/java/org/apache/spark/sql/execution/BufferedRowIterator.java     | 10 ++++++++++
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/execution/WholeStageCodegenExec.scala |  2 +-
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/execution/limit.scala                 |  6 +++---
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/DataFrameAggregateSuite.scala         |  8 ++++++++
4 files changed, 22 insertions(+), 4 deletions(-)
diff --git a/sql/core/src/main/java/org/apache/spark/sql/execution/BufferedRowIterator.java b/sql/core/src/main/java/org/apache/spark/sql/execution/BufferedRowIterator.java
index 086547c793..730a4ae8d5 100644
--- a/sql/core/src/main/java/org/apache/spark/sql/execution/BufferedRowIterator.java
+++ b/sql/core/src/main/java/org/apache/spark/sql/execution/BufferedRowIterator.java
@@ -70,6 +70,16 @@ public abstract class BufferedRowIterator {
   }
 
   /**
+   * Returns whether this iterator should stop fetching next row from [[CodegenSupport#inputRDDs]].
+   *
+   * If it returns true, the caller should exit the loop that [[InputAdapter]] generates.
+   * This interface is mainly used to limit the number of input rows.
+   */
+  protected boolean stopEarly() {
+    return false;
+  }
+
+  /**
    * Returns whether `processNext()` should stop processing next row from `input` or not.
    *
    * If it returns true, the caller should exit the loop (return from processNext()).
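The new hook is easiest to read side by side with the existing `shouldStop()`: the two gate different loops. A hedged Scala sketch of the distinction (the names mirror the Java above, but this is not generated code):

```scala
import scala.collection.mutable

// Sketch: stopEarly() ends the *input* loop for good; shouldStop() merely
// pauses the produce loop so buffered rows can be handed to the caller.
class MiniBufferedIterator {
  protected val currentRows = mutable.Queue.empty[String]

  protected def stopEarly(): Boolean = false                  // new hook: never, by default
  protected def shouldStop(): Boolean = currentRows.nonEmpty  // existing hook

  def processNext(input: Iterator[String]): Unit =
    while (input.hasNext && !stopEarly()) {
      currentRows.enqueue(input.next())
      if (shouldStop()) return  // yield; the caller drains currentRows and re-enters
    }
}
```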
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/WholeStageCodegenExec.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/WholeStageCodegenExec.scala
index 516b9d5444..2ead8f6baa 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/WholeStageCodegenExec.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/WholeStageCodegenExec.scala
@@ -241,7 +241,7 @@ case class InputAdapter(child: SparkPlan) extends UnaryExecNode with CodegenSupport {
     ctx.addMutableState("scala.collection.Iterator", input, s"$input = inputs[0];")
     val row = ctx.freshName("row")
     s"""
-       | while ($input.hasNext()) {
+       | while ($input.hasNext() && !stopEarly()) {
        |   InternalRow $row = (InternalRow) $input.next();
        |   ${consume(ctx, null, row).trim}
        |   if (shouldStop()) return;
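After this one-line change, the loop the adapter emits for a limit-under-aggregate plan has roughly the shape below. This is a hand-written approximation in the file's own string-template style; `inputadapter_input` and `inputadapter_row` stand in for codegen's fresh names and are not taken from this patch:

```scala
// Approximate shape of the emitted Java (illustrative, not actual codegen output):
val emitted =
  """
    | while (inputadapter_input.hasNext() && !stopEarly()) {
    |   InternalRow inputadapter_row = (InternalRow) inputadapter_input.next();
    |   // consume: limit counting, then the hash-aggregate update
    |   if (shouldStop()) return;
    | }
  """.stripMargin
```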
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/limit.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/limit.scala
index 9918ac327f..757fe2185d 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/limit.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/limit.scala
@@ -70,10 +70,10 @@ trait BaseLimitExec extends UnaryExecNode with CodegenSupport {
     val stopEarly = ctx.freshName("stopEarly")
     ctx.addMutableState("boolean", stopEarly, s"$stopEarly = false;")
 
-    ctx.addNewFunction("shouldStop", s"""
+    ctx.addNewFunction("stopEarly", s"""
       @Override
-      protected boolean shouldStop() {
-        return !currentRows.isEmpty() || $stopEarly;
+      protected boolean stopEarly() {
+        return $stopEarly;
       }
     """)
     val countTerm = ctx.freshName("count")
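The hunk only renames the override; for context, the `$stopEarly` flag flips once the row count reaches the limit. Paraphrasing the consume side of `BaseLimitExec` in the same template style (a sketch, not the file's exact text):

```scala
// Sketch of the generated consume logic that pairs with the stopEarly() above:
def limitConsumeSketch(countTerm: String, stopEarly: String, limit: Int): String =
  s"""
     | if ($countTerm < $limit) {
     |   $countTerm += 1;
     |   // ... forward the row to the parent operator ...
     | } else {
     |   $stopEarly = true;
     | }
   """.stripMargin
```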
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameAggregateSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameAggregateSuite.scala
index 7aa4f0026f..645175900f 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameAggregateSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameAggregateSuite.scala
@@ -513,4 +513,12 @@ class DataFrameAggregateSuite extends QueryTest with SharedSQLContext {
       df.groupBy($"x").agg(countDistinct($"y"), sort_array(collect_list($"z"))),
       Seq(Row(1, 2, Seq("a", "b")), Row(3, 2, Seq("c", "c", "d"))))
   }
+
+  test("SPARK-18528 limit + aggregates") {
+    val df = Seq(("a", 1), ("b", 2), ("c", 1), ("d", 5)).toDF("id", "value")
+    val limit2Df = df.limit(2)
+    checkAnswer(
+      limit2Df.groupBy("id").count().select($"id"),
+      limit2Df.select($"id"))
+  }
 }
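With the fix applied, the repro from the description completes. A quick hedged sanity check (it assumes a local `SparkSession` in scope as `spark`; which two rows `limit(2)` keeps is not guaranteed, so it only asserts that exactly two input rows were aggregated):

```scala
import spark.implicits._

val df = Seq(("a", 1), ("b", 2), ("c", 1), ("d", 5)).toDF("id", "value")
val counted = df.limit(2).groupBy("id").count().collect()
// The counts must sum to 2: exactly the rows the limit admitted, no NPE.
assert(counted.map(_.getLong(1)).sum == 2L)
```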