aboutsummaryrefslogtreecommitdiff
path: root/sql
diff options
context:
space:
mode:
authorYin Huai <yhuai@databricks.com>2015-01-29 15:49:34 -0800
committerMichael Armbrust <michael@databricks.com>2015-01-29 15:49:34 -0800
commitc00d517d660ddc3c7b4302651e5567534a819905 (patch)
tree1778128186e1412f3ba76a190e8a1fe4973d7ffc /sql
parentc1b3eebf97b986439f71afd3c4eccf47b90da2cd (diff)
downloadspark-c00d517d660ddc3c7b4302651e5567534a819905.tar.gz
spark-c00d517d660ddc3c7b4302651e5567534a819905.tar.bz2
spark-c00d517d660ddc3c7b4302651e5567534a819905.zip
[SPARK-4296][SQL] Trims aliases when resolving and checking aggregate expressions
I believe that SPARK-4296 has been fixed by 3684fd21e1ffdc0adaad8ff6b31394b637e866ce. I am adding tests based on #3910 (change the udf to HiveUDF instead). Author: Yin Huai <yhuai@databricks.com> Author: Cheng Lian <lian@databricks.com> Closes #4010 from yhuai/SPARK-4296-yin and squashes the following commits: 6343800 [Yin Huai] Merge remote-tracking branch 'upstream/master' into SPARK-4296-yin 6cfadd2 [Yin Huai] Actually, this issue has been fixed by 3684fd21e1ffdc0adaad8ff6b31394b637e866ce. d42b707 [Yin Huai] Update comment. 8b3a274 [Yin Huai] Since expressions in grouping expressions can have aliases, which can be used by the outer query block, revert this change. 443538d [Cheng Lian] Trims aliases when resolving and checking aggregate expressions
Diffstat (limited to 'sql')
-rw-r--r--sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala15
1 file changed, 15 insertions, 0 deletions
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala
index faa7357b90..eb7a7750af 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala
@@ -267,4 +267,19 @@ class SQLQuerySuite extends QueryTest {
sql("DROP TABLE nullValuesInInnerComplexTypes")
dropTempTable("testTable")
}
+
+ test("SPARK-4296 Grouping field with Hive UDF as sub expression") {
+ val rdd = sparkContext.makeRDD( """{"a": "str", "b":"1", "c":"1970-01-01 00:00:00"}""" :: Nil)
+ jsonRDD(rdd).registerTempTable("data")
+ checkAnswer(
+ sql("SELECT concat(a, '-', b), year(c) FROM data GROUP BY concat(a, '-', b), year(c)"),
+ Row("str-1", 1970))
+
+ dropTempTable("data")
+
+ jsonRDD(rdd).registerTempTable("data")
+ checkAnswer(sql("SELECT year(c) + 1 FROM data GROUP BY year(c) + 1"), Row(1971))
+
+ dropTempTable("data")
+ }
}