aboutsummaryrefslogtreecommitdiff
path: root/sql/catalyst/src/test
diff options
context:
space:
mode:
authorWenchen Fan <wenchen@databricks.com>2017-01-12 20:21:04 +0800
committerWenchen Fan <wenchen@databricks.com>2017-01-12 20:21:04 +0800
commit871d266649ddfed38c64dfda7158d8bb58d4b979 (patch)
treeaec91eff39e31040e8a380430e20b4d31fdbc436 /sql/catalyst/src/test
parentc71b25481aa5f7bc27d5c979e66bed54cd46b97e (diff)
downloadspark-871d266649ddfed38c64dfda7158d8bb58d4b979.tar.gz
spark-871d266649ddfed38c64dfda7158d8bb58d4b979.tar.bz2
spark-871d266649ddfed38c64dfda7158d8bb58d4b979.zip
[SPARK-18969][SQL] Support grouping by nondeterministic expressions
## What changes were proposed in this pull request? Currently nondeterministic expressions are allowed in `Aggregate` (see the [comment](https://github.com/apache/spark/blob/v2.0.2/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/CheckAnalysis.scala#L249-L251)), but the `PullOutNondeterministic` analyzer rule fails to handle `Aggregate`; this PR fixes it. Closes https://github.com/apache/spark/pull/16379. There is still one remaining issue: `SELECT a + rand() FROM t GROUP BY a + rand()` is not allowed, because the two `rand()` expressions are different (we generate a random seed as the default seed for `rand()`). https://issues.apache.org/jira/browse/SPARK-19035 is tracking this issue. ## How was this patch tested? A new test suite. Author: Wenchen Fan <wenchen@databricks.com> Closes #16404 from cloud-fan/groupby.
Diffstat (limited to 'sql/catalyst/src/test')
-rw-r--r--sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/PullOutNondeterministicSuite.scala56
1 file changed, 56 insertions, 0 deletions
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/PullOutNondeterministicSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/PullOutNondeterministicSuite.scala
new file mode 100644
index 0000000000..72e10eadf7
--- /dev/null
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/PullOutNondeterministicSuite.scala
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark.sql.catalyst.analysis
+
+import org.apache.spark.sql.catalyst.dsl.expressions._
+import org.apache.spark.sql.catalyst.dsl.plans._
+import org.apache.spark.sql.catalyst.expressions._
+import org.apache.spark.sql.catalyst.plans.logical.LocalRelation
+
+/**
+ * Test suite for moving non-deterministic expressions into Project.
+ */
+class PullOutNondeterministicSuite extends AnalysisTest {
+
+  // Shared fixtures: two integer attributes and a relation over them.
+  private lazy val a = 'a.int
+  private lazy val b = 'b.int
+  private lazy val r = LocalRelation(a, b)
+  // Rand(10) aliased as `_nondeterministic` — the alias name the
+  // PullOutNondeterministic rule gives to expressions it pulls into a Project.
+  private lazy val rnd = Rand(10).as('_nondeterministic)
+  // Attribute reference to the pulled-out alias, used when building expected plans.
+  private lazy val rndref = rnd.toAttribute
+
+  test("no-op on filter") {
+    // A nondeterministic expression in a Filter condition is expected to stay
+    // in place: input and expected plans are identical.
+    checkAnalysis(
+      r.where(Rand(10) > Literal(1.0)),
+      r.where(Rand(10) > Literal(1.0))
+    )
+  }
+
+  test("sort") {
+    // The nondeterministic sort key is pulled into a Project below the Sort,
+    // the Sort then orders by the pulled-out attribute, and a trailing Project
+    // restores the relation's original output (a, b).
+    checkAnalysis(
+      r.sortBy(SortOrder(Rand(10), Ascending)),
+      r.select(a, b, rnd).sortBy(SortOrder(rndref, Ascending)).select(a, b)
+    )
+  }
+
+  test("aggregate") {
+    // SPARK-18969: Rand(10) appearing in both the grouping and aggregate
+    // expressions is pulled into a single Project; the Aggregate then groups
+    // by and projects the pulled-out attribute instead.
+    checkAnalysis(
+      r.groupBy(Rand(10))(Rand(10).as("rnd")),
+      r.select(a, b, rnd).groupBy(rndref)(rndref.as("rnd"))
+    )
+  }
+}