author    Michael Armbrust <michael@databricks.com>    2015-06-19 16:54:51 -0700
committer Yin Huai <yhuai@databricks.com>              2015-06-19 16:54:51 -0700
commit    a333a72e029d2546a66b36d6b3458e965430c530
tree      417bd90555d96869f11357af7c373be50e206b53
parent    9814b971f07dff8a99f1b8ad2adf70614f1c690b
[SPARK-8420] [SQL] Fix comparison of timestamps/dates with strings
In earlier versions of Spark SQL we cast `TimestampType` and `DateType` to `StringType` when either was involved in a binary comparison with a `StringType`. This allowed comparing a timestamp with a partial date, as a user would expect:
- `time > "2014-06-10"`
- `time > "2014"`

In 1.4.0 we instead cast the string to a timestamp. However, since a partial date is not a valid complete timestamp, the cast produces `null`, and the tuple is filtered out. This PR restores the earlier behavior. Note that we still special-case equality so that those comparisons are not affected by the omission of trailing zeros in subsecond precision.

Author: Michael Armbrust <michael@databricks.com>

Closes #6888 from marmbrus/timeCompareString and squashes the following commits:

bdef29c [Michael Armbrust] test partial date
1f09adf [Michael Armbrust] special handling of equality
1172c60 [Michael Armbrust] more test fixing
4dfc412 [Michael Armbrust] fix tests
aaa9508 [Michael Armbrust] newline
04d908f [Michael Armbrust] [SPARK-8420][SQL] Fix comparison of timestamps/dates with strings
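To make the restored semantics concrete, here is a minimal sketch (hypothetical data; assumes a test SQLContext with its implicits imported, as in the suites below):

import java.sql.Timestamp

val df = Seq(Tuple1(Timestamp.valueOf("2014-06-15 00:00:00"))).toDF("time")

// Restored (pre-1.4) behavior: the timestamp column is cast to a string,
// so the partial date compares lexicographically and the row is kept.
df.filter($"time" > "2014-06-10").count()  // expected: 1

// 1.4.0 behavior being fixed: casting "2014-06-10" to a timestamp yielded
// null, so the predicate evaluated to null and the row was filtered out.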
Diffstat (limited to 'sql')
-rw-r--r--  sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/HiveTypeCoercion.scala       | 17
-rw-r--r--  sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/predicates.scala          |  9
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/DataFrameDateSuite.scala                           | 56
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala                                |  4
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/TestData.scala                                     |  6
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/columnar/InMemoryColumnarQuerySuite.scala          |  7
6 files changed, 88 insertions(+), 11 deletions(-)
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/HiveTypeCoercion.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/HiveTypeCoercion.scala
index 8012b224eb..d4ab1fc643 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/HiveTypeCoercion.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/HiveTypeCoercion.scala
@@ -277,15 +277,26 @@ trait HiveTypeCoercion {
case a @ BinaryArithmetic(left, right @ StringType()) =>
a.makeCopy(Array(left, Cast(right, DoubleType)))
- // we should cast all timestamp/date/string compare into string compare
+ // For equality between a string and a timestamp we cast the string to a timestamp
+ // so that things like rounding of subsecond precision do not affect the comparison.
+ case p @ Equality(left @ StringType(), right @ TimestampType()) =>
+ p.makeCopy(Array(Cast(left, TimestampType), right))
+ case p @ Equality(left @ TimestampType(), right @ StringType()) =>
+ p.makeCopy(Array(left, Cast(right, TimestampType)))
+
+ // We should cast all relative timestamp/date/string comparisons into string comparisons.
+ // This behaves as a user would expect because timestamp strings sort lexicographically.
+ // i.e. TimeStamp(2013-01-01 00:00 ...) < "2014" = true
case p @ BinaryComparison(left @ StringType(), right @ DateType()) =>
p.makeCopy(Array(left, Cast(right, StringType)))
case p @ BinaryComparison(left @ DateType(), right @ StringType()) =>
p.makeCopy(Array(Cast(left, StringType), right))
case p @ BinaryComparison(left @ StringType(), right @ TimestampType()) =>
- p.makeCopy(Array(Cast(left, TimestampType), right))
+ p.makeCopy(Array(left, Cast(right, StringType)))
case p @ BinaryComparison(left @ TimestampType(), right @ StringType()) =>
- p.makeCopy(Array(left, Cast(right, TimestampType)))
+ p.makeCopy(Array(Cast(left, StringType), right))
+
+ // Comparisons between dates and timestamps.
case p @ BinaryComparison(left @ TimestampType(), right @ DateType()) =>
p.makeCopy(Array(Cast(left, StringType), Cast(right, StringType)))
case p @ BinaryComparison(left @ DateType(), right @ TimestampType()) =>
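The rewrite above leans on the fact that zero-padded timestamp strings sort lexicographically in the same order as they do chronologically, even against partial-date prefixes. A quick plain-Scala illustration (no Spark required):

// Lexicographic ordering of zero-padded timestamp strings matches
// chronological ordering, including comparisons against partial dates.
assert("2013-01-01 00:00:00" < "2014")        // differs at index 3: '3' < '4'
assert("2014-06-01" < "2014-06-10 12:00:00")  // differs at index 8: '0' < '1'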
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/predicates.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/predicates.scala
index 082d72eb43..3a12d03ba6 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/predicates.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/predicates.scala
@@ -266,6 +266,15 @@ private[sql] object BinaryComparison {
def unapply(e: BinaryComparison): Option[(Expression, Expression)] = Some((e.left, e.right))
}
+/** An extractor that matches both standard 3VL equality and null-safe equality. */
+private[sql] object Equality {
+ def unapply(e: BinaryComparison): Option[(Expression, Expression)] = e match {
+ case EqualTo(l, r) => Some((l, r))
+ case EqualNullSafe(l, r) => Some((l, r))
+ case _ => None
+ }
+}
+
case class EqualTo(left: Expression, right: Expression) extends BinaryComparison {
override def symbol: String = "="
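For readers unfamiliar with custom extractors, here is a self-contained sketch of the same pattern (simplified hypothetical Expr types for illustration; the real classes live in catalyst):

sealed trait Expr
case class EqualTo(left: Expr, right: Expr) extends Expr
case class EqualNullSafe(left: Expr, right: Expr) extends Expr
case class Lit(value: Any) extends Expr

// Mirrors the Equality object above: one pattern matches both `=` and `<=>`,
// so a coercion rule can handle standard and null-safe equality uniformly.
object Equality {
  def unapply(e: Expr): Option[(Expr, Expr)] = e match {
    case EqualTo(l, r)       => Some((l, r))
    case EqualNullSafe(l, r) => Some((l, r))
    case _                   => None
  }
}

val description = EqualNullSafe(Lit("2014"), Lit(1)) match {
  case Equality(l, r) => s"equality of $l and $r"
  case _              => "not an equality"
}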
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameDateSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameDateSuite.scala
new file mode 100644
index 0000000000..a4719a38de
--- /dev/null
+++ b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameDateSuite.scala
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark.sql
+
+import java.sql.{Date, Timestamp}
+
+class DataFrameDateTimeSuite extends QueryTest {
+
+ private lazy val ctx = org.apache.spark.sql.test.TestSQLContext
+ import ctx.implicits._
+
+ test("timestamp comparison with date strings") {
+ val df = Seq(
+ (1, Timestamp.valueOf("2015-01-01 00:00:00")),
+ (2, Timestamp.valueOf("2014-01-01 00:00:00"))).toDF("i", "t")
+
+ checkAnswer(
+ df.select("t").filter($"t" <= "2014-06-01"),
+ Row(Timestamp.valueOf("2014-01-01 00:00:00")) :: Nil)
+
+
+ checkAnswer(
+ df.select("t").filter($"t" >= "2014-06-01"),
+ Row(Timestamp.valueOf("2015-01-01 00:00:00")) :: Nil)
+ }
+
+ test("date comparison with date strings") {
+ val df = Seq(
+ (1, Date.valueOf("2015-01-01")),
+ (2, Date.valueOf("2014-01-01"))).toDF("i", "t")
+
+ checkAnswer(
+ df.select("t").filter($"t" <= "2014-06-01"),
+ Row(Date.valueOf("2014-01-01")) :: Nil)
+
+
+ checkAnswer(
+ df.select("t").filter($"t" >= "2015"),
+ Row(Date.valueOf("2015-01-01")) :: Nil)
+ }
+}
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
index 82f3fdb48b..4441afd6bd 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
@@ -19,6 +19,8 @@ package org.apache.spark.sql
import org.scalatest.BeforeAndAfterAll
+import java.sql.Timestamp
+
import org.apache.spark.sql.catalyst.DefaultParserDialect
import org.apache.spark.sql.catalyst.errors.DialectException
import org.apache.spark.sql.execution.GeneratedAggregate
@@ -345,6 +347,8 @@ class SQLQuerySuite extends QueryTest with BeforeAndAfterAll with SQLTestUtils {
}
test("SPARK-3173 Timestamp support in the parser") {
+ (0 to 3).map(i => Tuple1(new Timestamp(i))).toDF("time").registerTempTable("timestamps")
+
checkAnswer(sql(
"SELECT time FROM timestamps WHERE time='1969-12-31 16:00:00.0'"),
Row(java.sql.Timestamp.valueOf("1969-12-31 16:00:00")))
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/TestData.scala b/sql/core/src/test/scala/org/apache/spark/sql/TestData.scala
index 725a18bfae..520a862ea0 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/TestData.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/TestData.scala
@@ -174,12 +174,6 @@ object TestData {
"3, C3, true, null" ::
"4, D4, true, 2147483644" :: Nil)
- case class TimestampField(time: Timestamp)
- val timestamps = TestSQLContext.sparkContext.parallelize((0 to 3).map { i =>
- TimestampField(new Timestamp(i))
- })
- timestamps.toDF().registerTempTable("timestamps")
-
case class IntField(i: Int)
// An RDD with 4 elements and 8 partitions
val withEmptyParts = TestSQLContext.sparkContext.parallelize((1 to 4).map(IntField), 8)
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/columnar/InMemoryColumnarQuerySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/columnar/InMemoryColumnarQuerySuite.scala
index 12f95eb557..01bc23277f 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/columnar/InMemoryColumnarQuerySuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/columnar/InMemoryColumnarQuerySuite.scala
@@ -91,15 +91,18 @@ class InMemoryColumnarQuerySuite extends QueryTest {
}
test("SPARK-2729 regression: timestamp data type") {
+ val timestamps = (0 to 3).map(i => Tuple1(new Timestamp(i))).toDF("time")
+ timestamps.registerTempTable("timestamps")
+
checkAnswer(
sql("SELECT time FROM timestamps"),
- timestamps.collect().toSeq.map(Row.fromTuple))
+ timestamps.collect().toSeq)
ctx.cacheTable("timestamps")
checkAnswer(
sql("SELECT time FROM timestamps"),
- timestamps.collect().toSeq.map(Row.fromTuple))
+ timestamps.collect().toSeq)
}
test("SPARK-3320 regression: batched column buffer building should work with empty partitions") {