Diffstat (limited to 'sql/core/src/test')
-rw-r--r--  sql/core/src/test/resources/sql-tests/inputs/having.sql                            |  3
-rw-r--r--  sql/core/src/test/resources/sql-tests/results/having.sql.out                       | 11
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/sources/BucketedReadSuite.scala       |  3
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/sources/DataSourceAnalysisSuite.scala | 16
4 files changed, 25 insertions(+), 8 deletions(-)
diff --git a/sql/core/src/test/resources/sql-tests/inputs/having.sql b/sql/core/src/test/resources/sql-tests/inputs/having.sql
index 364c022d95..868a911e78 100644
--- a/sql/core/src/test/resources/sql-tests/inputs/having.sql
+++ b/sql/core/src/test/resources/sql-tests/inputs/having.sql
@@ -13,3 +13,6 @@ SELECT count(k) FROM hav GROUP BY v + 1 HAVING v + 1 = 2;
-- SPARK-11032: resolve having correctly
SELECT MIN(t.v) FROM (SELECT * FROM hav WHERE v > 0) t HAVING(COUNT(1) > 0);
+
+-- SPARK-20329: make sure we handle timezones correctly
+SELECT a + b FROM VALUES (1L, 2), (3L, 4) AS T(a, b) GROUP BY a + b HAVING a + b > 1;
diff --git a/sql/core/src/test/resources/sql-tests/results/having.sql.out b/sql/core/src/test/resources/sql-tests/results/having.sql.out
index e092383267..d87ee52216 100644
--- a/sql/core/src/test/resources/sql-tests/results/having.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/having.sql.out
@@ -1,5 +1,5 @@
-- Automatically generated by SQLQueryTestSuite
--- Number of queries: 4
+-- Number of queries: 5
-- !query 0
@@ -38,3 +38,12 @@ SELECT MIN(t.v) FROM (SELECT * FROM hav WHERE v > 0) t HAVING(COUNT(1) > 0)
struct<min(v):int>
-- !query 3 output
1
+
+
+-- !query 4
+SELECT a + b FROM VALUES (1L, 2), (3L, 4) AS T(a, b) GROUP BY a + b HAVING a + b > 1
+-- !query 4 schema
+struct<(a + CAST(b AS BIGINT)):bigint>
+-- !query 4 output
+3
+7
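
For reference, the new query mixes a BIGINT column (a) with an INT column (b), so the analyzer inserts an implicit cast, which is why the expected schema is struct<(a + CAST(b AS BIGINT)):bigint>. A minimal Scala sketch of the same check, assuming a local SparkSession named spark (an assumption for illustration, not part of the patch):

    // Hypothetical reproduction of the new having.sql case; spark is assumed
    // to be an existing SparkSession (e.g. from spark-shell).
    val df = spark.sql(
      "SELECT a + b FROM VALUES (1L, 2), (3L, 4) AS T(a, b) " +
      "GROUP BY a + b HAVING a + b > 1")
    df.printSchema()  // the single output column resolves to (a + CAST(b AS BIGINT)): long
    df.show()         // expected rows: 3 and 7
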
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/sources/BucketedReadSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/sources/BucketedReadSuite.scala
index 9b65419dba..ba0ca666b5 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/sources/BucketedReadSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/sources/BucketedReadSuite.scala
@@ -90,6 +90,7 @@ abstract class BucketedReadSuite extends QueryTest with SQLTestUtils {
originalDataFrame: DataFrame): Unit = {
// This test verifies parts of the plan. Disable whole stage codegen.
withSQLConf(SQLConf.WHOLESTAGE_CODEGEN_ENABLED.key -> "false") {
+ val strategy = DataSourceStrategy(spark.sessionState.conf)
val bucketedDataFrame = spark.table("bucketed_table").select("i", "j", "k")
val BucketSpec(numBuckets, bucketColumnNames, _) = bucketSpec
// Limit: bucket pruning only works when the bucket column has one and only one column
@@ -98,7 +99,7 @@ abstract class BucketedReadSuite extends QueryTest with SQLTestUtils {
val bucketColumn = bucketedDataFrame.schema.toAttributes(bucketColumnIndex)
val matchedBuckets = new BitSet(numBuckets)
bucketValues.foreach { value =>
- matchedBuckets.set(DataSourceStrategy.getBucketId(bucketColumn, numBuckets, value))
+ matchedBuckets.set(strategy.getBucketId(bucketColumn, numBuckets, value))
}
// Filter could hide the bug in bucket pruning. Thus, skipping all the filters
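
The BucketedReadSuite change follows DataSourceStrategy taking an SQLConf: getBucketId is now called on an instance built from the session's configuration rather than on the companion object. A minimal sketch of that call pattern, assuming a SparkSession named spark, a bucketed table named "bucketed_table", and a bucket count of 8 (all assumptions for illustration):

    import org.apache.spark.sql.execution.datasources.DataSourceStrategy
    import org.apache.spark.util.collection.BitSet

    // Build the strategy from the active session's conf, as the updated test does.
    val strategy = DataSourceStrategy(spark.sessionState.conf)
    val bucketedDataFrame = spark.table("bucketed_table").select("i", "j", "k")
    val bucketColumn = bucketedDataFrame.schema.toAttributes.head
    val numBuckets = 8
    val matchedBuckets = new BitSet(numBuckets)
    // getBucketId is an instance method here, so it can consult the strategy's conf.
    Seq(1, 2, 3).foreach { value =>
      matchedBuckets.set(strategy.getBucketId(bucketColumn, numBuckets, value))
    }
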
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/sources/DataSourceAnalysisSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/sources/DataSourceAnalysisSuite.scala
index b16c9f8fc9..735e07c213 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/sources/DataSourceAnalysisSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/sources/DataSourceAnalysisSuite.scala
@@ -25,7 +25,7 @@ import org.apache.spark.sql.catalyst.dsl.expressions._
import org.apache.spark.sql.catalyst.expressions.{Alias, Attribute, Cast, Expression, Literal}
import org.apache.spark.sql.execution.datasources.DataSourceAnalysis
import org.apache.spark.sql.internal.SQLConf
-import org.apache.spark.sql.types.{IntegerType, StructType}
+import org.apache.spark.sql.types.{DataType, IntegerType, StructType}
class DataSourceAnalysisSuite extends SparkFunSuite with BeforeAndAfterAll {
@@ -49,7 +49,11 @@ class DataSourceAnalysisSuite extends SparkFunSuite with BeforeAndAfterAll {
}
Seq(true, false).foreach { caseSensitive =>
- val rule = DataSourceAnalysis(new SQLConf().copy(SQLConf.CASE_SENSITIVE -> caseSensitive))
+ val conf = new SQLConf().copy(SQLConf.CASE_SENSITIVE -> caseSensitive)
+ def cast(e: Expression, dt: DataType): Expression = {
+ Cast(e, dt, Option(conf.sessionLocalTimeZone))
+ }
+ val rule = DataSourceAnalysis(conf)
test(
s"convertStaticPartitions only handle INSERT having at least static partitions " +
s"(caseSensitive: $caseSensitive)") {
@@ -150,7 +154,7 @@ class DataSourceAnalysisSuite extends SparkFunSuite with BeforeAndAfterAll {
if (!caseSensitive) {
val nonPartitionedAttributes = Seq('e.int, 'f.int)
val expected = nonPartitionedAttributes ++
- Seq(Cast(Literal("1"), IntegerType), Cast(Literal("3"), IntegerType))
+ Seq(cast(Literal("1"), IntegerType), cast(Literal("3"), IntegerType))
val actual = rule.convertStaticPartitions(
sourceAttributes = nonPartitionedAttributes,
providedPartitions = Map("b" -> Some("1"), "C" -> Some("3")),
@@ -162,7 +166,7 @@ class DataSourceAnalysisSuite extends SparkFunSuite with BeforeAndAfterAll {
{
val nonPartitionedAttributes = Seq('e.int, 'f.int)
val expected = nonPartitionedAttributes ++
- Seq(Cast(Literal("1"), IntegerType), Cast(Literal("3"), IntegerType))
+ Seq(cast(Literal("1"), IntegerType), cast(Literal("3"), IntegerType))
val actual = rule.convertStaticPartitions(
sourceAttributes = nonPartitionedAttributes,
providedPartitions = Map("b" -> Some("1"), "c" -> Some("3")),
@@ -174,7 +178,7 @@ class DataSourceAnalysisSuite extends SparkFunSuite with BeforeAndAfterAll {
// Test the case having a single static partition column.
{
val nonPartitionedAttributes = Seq('e.int, 'f.int)
- val expected = nonPartitionedAttributes ++ Seq(Cast(Literal("1"), IntegerType))
+ val expected = nonPartitionedAttributes ++ Seq(cast(Literal("1"), IntegerType))
val actual = rule.convertStaticPartitions(
sourceAttributes = nonPartitionedAttributes,
providedPartitions = Map("b" -> Some("1")),
@@ -189,7 +193,7 @@ class DataSourceAnalysisSuite extends SparkFunSuite with BeforeAndAfterAll {
val dynamicPartitionAttributes = Seq('g.int)
val expected =
nonPartitionedAttributes ++
- Seq(Cast(Literal("1"), IntegerType)) ++
+ Seq(cast(Literal("1"), IntegerType)) ++
dynamicPartitionAttributes
val actual = rule.convertStaticPartitions(
sourceAttributes = nonPartitionedAttributes ++ dynamicPartitionAttributes,
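
The DataSourceAnalysisSuite change swaps bare Cast(child, dataType) constructions for a helper that also supplies the session-local timezone, so the expected partition expressions stay resolved once Cast is timezone-aware. A minimal standalone sketch of that helper, built only from constructs visible in the diff (the caseSensitive value is fixed to true for illustration):

    import org.apache.spark.sql.catalyst.expressions.{Cast, Expression, Literal}
    import org.apache.spark.sql.internal.SQLConf
    import org.apache.spark.sql.types.{DataType, IntegerType}

    val conf = new SQLConf().copy(SQLConf.CASE_SENSITIVE -> true)
    // Thread the configured session timezone into every Cast used by the expectations.
    def cast(e: Expression, dt: DataType): Expression =
      Cast(e, dt, Option(conf.sessionLocalTimeZone))

    // Example: the static partition value "1" cast to the partition column's IntegerType.
    val expectedPartitionValue = cast(Literal("1"), IntegerType)
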