author     egraldlo <egraldlo@gmail.com>                2014-06-10 14:07:55 -0700
committer  Michael Armbrust <michael@databricks.com>    2014-06-10 14:07:55 -0700
commit     1abbde0e89131ad95e793ac1834c392db46b448e (patch)
tree       178261b822f4c1636151c7eef2eb4c36b3fb8ac8 /sql
parent     55a0e87ee4655106d5e0ed799b11e77f68a17dbb (diff)
[SQL] Add average overflow test case from #978
By @egraldlo.

Author: egraldlo <egraldlo@gmail.com>
Author: Michael Armbrust <michael@databricks.com>

Closes #1033 from marmbrus/pr/978 and squashes the following commits:

e228c5e [Michael Armbrust] Remove "test".
762aeaf [Michael Armbrust] Remove unneeded rule. More descriptive name for test table.
d414cd7 [egraldlo] fommatting issues
1153f75 [egraldlo] do best to avoid overflowing in function avg().
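For context, the overflow this patch guards against comes from accumulating a sum of Ints near Int.MaxValue before dividing. Below is a minimal standalone sketch of the failure mode and the widened-accumulator fix (plain Scala; the object name AvgOverflowDemo is ours and not part of this patch, and this is not Spark's actual Average implementation):

// Illustrative only: why averaging Ints near Int.MaxValue needs a widened accumulator.
object AvgOverflowDemo extends App {
  val bigInts = Seq(2147483644, 2147483645, 2147483646)

  // An Int accumulator wraps modulo 2^32: the true sum 6442450935 becomes 2147483639.
  val intSum: Int = bigInts.sum
  // Widening each value to Long before summing keeps the exact total.
  val longSum: Long = bigInts.map(_.toLong).sum

  println(intSum / bigInts.size)   // 715827879 -- a wrong average
  println(longSum / bigInts.size)  // 2147483645 -- the expected average
}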
Diffstat (limited to 'sql')
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala   6
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/TestData.scala       11
2 files changed, 17 insertions, 0 deletions
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
index f2d850ad6a..de02bbc7e4 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
@@ -136,6 +136,12 @@ class SQLQuerySuite extends QueryTest {
       2.0)
   }
 
+  test("average overflow") {
+    checkAnswer(
+      sql("SELECT AVG(a),b FROM largeAndSmallInts group by b"),
+      Seq((2147483645.0,1),(2.0,2)))
+  }
+
   test("count") {
     checkAnswer(
       sql("SELECT COUNT(*) FROM testData2"),
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/TestData.scala b/sql/core/src/test/scala/org/apache/spark/sql/TestData.scala
index 05de736bbc..330b20b315 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/TestData.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/TestData.scala
@@ -30,6 +30,17 @@ object TestData {
     (1 to 100).map(i => TestData(i, i.toString)))
   testData.registerAsTable("testData")
 
+  case class LargeAndSmallInts(a: Int, b: Int)
+  val largeAndSmallInts: SchemaRDD =
+    TestSQLContext.sparkContext.parallelize(
+      LargeAndSmallInts(2147483644, 1) ::
+      LargeAndSmallInts(1, 2) ::
+      LargeAndSmallInts(2147483645, 1) ::
+      LargeAndSmallInts(2, 2) ::
+      LargeAndSmallInts(2147483646, 1) ::
+      LargeAndSmallInts(3, 2) :: Nil)
+  largeAndSmallInts.registerAsTable("largeAndSmallInts")
+
   case class TestData2(a: Int, b: Int)
   val testData2: SchemaRDD =
     TestSQLContext.sparkContext.parallelize(
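To see what the query in the new test computes over this fixture, here is a minimal grouped-average sketch in plain Scala (no Spark required; the names GroupedAvgSketch, Row, and averages are ours for illustration, and this is not Spark's Average expression). It accumulates in Double so the b = 1 group's sum cannot overflow Int:

object GroupedAvgSketch extends App {
  case class Row(a: Int, b: Int)

  val rows = Seq(
    Row(2147483644, 1), Row(1, 2),
    Row(2147483645, 1), Row(2, 2),
    Row(2147483646, 1), Row(3, 2))

  // Group by b, widening each a to Double before summing to avoid Int wrap-around.
  val averages: Map[Int, Double] =
    rows.groupBy(_.b).map { case (b, group) =>
      b -> group.map(_.a.toDouble).sum / group.size
    }

  println(averages)  // e.g. Map(1 -> 2.147483645E9, 2 -> 2.0)
}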