From 1abbde0e89131ad95e793ac1834c392db46b448e Mon Sep 17 00:00:00 2001
From: egraldlo
Date: Tue, 10 Jun 2014 14:07:55 -0700
Subject: [SQL] Add average overflow test case from #978

By @egraldlo.

Author: egraldlo
Author: Michael Armbrust

Closes #1033 from marmbrus/pr/978 and squashes the following commits:

e228c5e [Michael Armbrust] Remove "test".
762aeaf [Michael Armbrust] Remove unneeded rule. More descriptive name for test table.
d414cd7 [egraldlo] fommatting issues
1153f75 [egraldlo] do best to avoid overflowing in function avg().
---
 .../src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala | 6 ++++++
 sql/core/src/test/scala/org/apache/spark/sql/TestData.scala | 11 +++++++++++
 2 files changed, 17 insertions(+)

(limited to 'sql/core')

diff --git a/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
index f2d850ad6a..de02bbc7e4 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
@@ -136,6 +136,12 @@ class SQLQuerySuite extends QueryTest {
       2.0)
   }
 
+  test("average overflow") {
+    checkAnswer(
+      sql("SELECT AVG(a),b FROM largeAndSmallInts group by b"),
+      Seq((2147483645.0,1),(2.0,2)))
+  }
+
   test("count") {
     checkAnswer(
       sql("SELECT COUNT(*) FROM testData2"),
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/TestData.scala b/sql/core/src/test/scala/org/apache/spark/sql/TestData.scala
index 05de736bbc..330b20b315 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/TestData.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/TestData.scala
@@ -30,6 +30,17 @@ object TestData {
     (1 to 100).map(i => TestData(i, i.toString)))
   testData.registerAsTable("testData")
 
+  case class LargeAndSmallInts(a: Int, b: Int)
+  val largeAndSmallInts: SchemaRDD =
+    TestSQLContext.sparkContext.parallelize(
+      LargeAndSmallInts(2147483644, 1) ::
+      LargeAndSmallInts(1, 2) ::
+      LargeAndSmallInts(2147483645, 1) ::
+      LargeAndSmallInts(2, 2) ::
+      LargeAndSmallInts(2147483646, 1) ::
+      LargeAndSmallInts(3, 2) :: Nil)
+  largeAndSmallInts.registerAsTable("largeAndSmallInts")
+
   case class TestData2(a: Int, b: Int)
   val testData2: SchemaRDD =
     TestSQLContext.sparkContext.parallelize(
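
For context (not part of the patch): a minimal standalone Scala sketch of the overflow the new "average overflow" test guards against. The three large values from the largeAndSmallInts table (2147483644, 2147483645, 2147483646) overflow a 32-bit Int accumulator when summed naively, while widening to Long (or Double) before summing yields the expected group average of 2147483645.0 checked by the test. The object name AvgOverflowSketch is illustrative only and does not appear in the patch.

object AvgOverflowSketch {
  def main(args: Array[String]): Unit = {
    // Large values from the largeAndSmallInts test data (group b = 1).
    val large = Seq(2147483644, 2147483645, 2147483646)

    // A naive Int accumulator wraps around and produces a wrong sum.
    val intSum = large.foldLeft(0)(_ + _)
    println(s"Int sum (wrapped, incorrect): $intSum")

    // Widening each value to Long before summing avoids the overflow.
    val longSum = large.map(_.toLong).sum
    println(s"Long sum: $longSum")                          // 6442450935
    println(s"Average: ${longSum.toDouble / large.size}")   // 2.147483645E9
  }
}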