aboutsummaryrefslogtreecommitdiff
path: root/sql/catalyst
diff options
context:
space:
mode:
authorYin Huai <yhuai@databricks.com>2015-09-17 11:14:52 -0700
committerMichael Armbrust <michael@databricks.com>2015-09-17 11:14:52 -0700
commitaad644fbe29151aec9004817d42e4928bdb326f3 (patch)
tree77bfa902698f82e6e2547e9bc70dfec46bd0970f /sql/catalyst
parente0dc2bc232206d2f4da4278502c1f88babc8b55a (diff)
downloadspark-aad644fbe29151aec9004817d42e4928bdb326f3.tar.gz
spark-aad644fbe29151aec9004817d42e4928bdb326f3.tar.bz2
spark-aad644fbe29151aec9004817d42e4928bdb326f3.zip
[SPARK-10639] [SQL] Need to convert UDAF's result from scala to sql type
https://issues.apache.org/jira/browse/SPARK-10639 Author: Yin Huai <yhuai@databricks.com> Closes #8788 from yhuai/udafConversion.
Diffstat (limited to 'sql/catalyst')
-rw-r--r--sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/CatalystTypeConverters.scala7
-rw-r--r--sql/catalyst/src/test/scala/org/apache/spark/sql/RandomDataGenerator.scala16
2 files changed, 21 insertions, 2 deletions
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/CatalystTypeConverters.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/CatalystTypeConverters.scala
index 966623ed01..f25591794a 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/CatalystTypeConverters.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/CatalystTypeConverters.scala
@@ -138,8 +138,13 @@ object CatalystTypeConverters {
private case class UDTConverter(
udt: UserDefinedType[_]) extends CatalystTypeConverter[Any, Any, Any] {
+ // toCatalyst (it calls toCatalystImpl) will do null check.
override def toCatalystImpl(scalaValue: Any): Any = udt.serialize(scalaValue)
- override def toScala(catalystValue: Any): Any = udt.deserialize(catalystValue)
+
+ override def toScala(catalystValue: Any): Any = {
+ if (catalystValue == null) null else udt.deserialize(catalystValue)
+ }
+
override def toScalaImpl(row: InternalRow, column: Int): Any =
toScala(row.get(column, udt.sqlType))
}
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/RandomDataGenerator.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/RandomDataGenerator.scala
index 4025cbcec1..e48395028e 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/RandomDataGenerator.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/RandomDataGenerator.scala
@@ -108,7 +108,21 @@ object RandomDataGenerator {
arr
})
case BooleanType => Some(() => rand.nextBoolean())
- case DateType => Some(() => new java.sql.Date(rand.nextInt()))
+ case DateType =>
+ val generator =
+ () => {
+ var milliseconds = rand.nextLong() % 253402329599999L
// -62135740800000L is the number of milliseconds before January 1, 1970, 00:00:00 GMT
// for "0001-01-01 00:00:00.000000". We need to find a
// number that is greater than or equal to this number as a valid timestamp value.
+ while (milliseconds < -62135740800000L) {
// 253402329599999L is the number of milliseconds since
// January 1, 1970, 00:00:00 GMT for "9999-12-31 23:59:59.999999".
+ milliseconds = rand.nextLong() % 253402329599999L
+ }
+ DateTimeUtils.toJavaDate((milliseconds / DateTimeUtils.MILLIS_PER_DAY).toInt)
+ }
+ Some(generator)
case TimestampType =>
val generator =
() => {