| field | value | date |
|---|---|---|
| author | Weiqing Yang <yangweiqing001@gmail.com> | 2016-12-14 09:48:38 +0800 |
| committer | Wenchen Fan <wenchen@databricks.com> | 2016-12-14 09:48:38 +0800 |
| commit | ae5b2d3e46cc4c460f539c4db1688309d1cdc66a (patch) | |
| tree | ede19fbb4d34cf4941461d1b9169df13c0c6ba61 /sql/core/src/test | |
| parent | 594b14f1ebd0b3db9f630e504be92228f11b4d9f (diff) | |
| download | spark-ae5b2d3e46cc4c460f539c4db1688309d1cdc66a.tar.gz, spark-ae5b2d3e46cc4c460f539c4db1688309d1cdc66a.tar.bz2, spark-ae5b2d3e46cc4c460f539c4db1688309d1cdc66a.zip | |
[SPARK-18746][SQL] Add implicit encoder for BigDecimal, timestamp and date
## What changes were proposed in this pull request?
Add implicit encoders for BigDecimal, timestamp and date.
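For context, a sketch of the kind of definitions this change adds (the implicits themselves live outside this test-only diffstat). `Encoders.DECIMAL`, `Encoders.DATE`, and `Encoders.TIMESTAMP` are the existing public factories on `org.apache.spark.sql.Encoders`; the object and `def` names below are illustrative assumptions, not the patch itself:
```
import org.apache.spark.sql.{Encoder, Encoders}

// Hedged sketch: implicit encoders delegating to the public Encoders
// factories. Names here are illustrative, not the actual patch.
object ExtraImplicits {
  implicit def newJavaDecimalEncoder: Encoder[java.math.BigDecimal] = Encoders.DECIMAL
  implicit def newDateEncoder: Encoder[java.sql.Date] = Encoders.DATE
  implicit def newTimestampEncoder: Encoder[java.sql.Timestamp] = Encoders.TIMESTAMP
}
```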
## How was this patch tested?
Added a unit test. The build, existing unit tests, and the manual checks below all pass.
Before:
```
scala> spark.createDataset(Seq(new java.math.BigDecimal(10)))
<console>:24: error: Unable to find encoder for type stored in a Dataset. Primitive types (Int, String, etc) and Product types (case classes) are supported by importing spark.implicits._ Support for serializing other types will be added in future releases.
spark.createDataset(Seq(new java.math.BigDecimal(10)))
^
scala>
```
After:
```
scala> spark.createDataset(Seq(new java.math.BigDecimal(10)))
res0: org.apache.spark.sql.Dataset[java.math.BigDecimal] = [value: decimal(38,18)]
```
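The printed schema reflects Spark's default decimal type for encoders, `decimal(38,18)`, so a value round-tripped through the Dataset comes back with scale 18, which is exactly what the new test asserts. A minimal check, assuming a live `spark` session:
```
// Round-trip a BigDecimal through the new implicit encoder; the result
// carries the default scale 18 from the decimal(38,18) schema above.
import spark.implicits._

val d = spark.createDataset(Seq(new java.math.BigDecimal(10))).head()
assert(d == new java.math.BigDecimal(10).setScale(18))
```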
Author: Weiqing Yang <yangweiqing001@gmail.com>
Closes #16176 from weiqingy/SPARK-18746.
Diffstat (limited to 'sql/core/src/test')
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/DatasetSuite.scala | 16
1 file changed, 15 insertions(+), 1 deletion(-)
```
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DatasetSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/DatasetSuite.scala
index 3742115134..c27b815dfa 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/DatasetSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/DatasetSuite.scala
@@ -26,7 +26,6 @@ import org.apache.spark.sql.execution.{LogicalRDD, RDDScanExec, SortExec}
 import org.apache.spark.sql.execution.exchange.{BroadcastExchangeExec, ShuffleExchange}
 import org.apache.spark.sql.execution.streaming.MemoryStream
 import org.apache.spark.sql.functions._
-import org.apache.spark.sql.internal.SQLConf
 import org.apache.spark.sql.test.SharedSQLContext
 import org.apache.spark.sql.types._
 
@@ -1129,6 +1128,21 @@ class DatasetSuite extends QueryTest with SharedSQLContext {
     val ds2 = Seq(WithMap("hi", Map(42L -> "foo"))).toDS
     checkDataset(ds2.map(t => t), WithMap("hi", Map(42L -> "foo")))
   }
+
+  test("SPARK-18746: add implicit encoder for BigDecimal, date, timestamp") {
+    // For this implicit encoder, 18 is the default scale
+    assert(spark.range(1).map { x => new java.math.BigDecimal(1) }.head ==
+      new java.math.BigDecimal(1).setScale(18))
+
+    assert(spark.range(1).map { x => scala.math.BigDecimal(1, 18) }.head ==
+      scala.math.BigDecimal(1, 18))
+
+    assert(spark.range(1).map { x => new java.sql.Date(2016, 12, 12) }.head ==
+      new java.sql.Date(2016, 12, 12))
+
+    assert(spark.range(1).map { x => new java.sql.Timestamp(100000) }.head ==
+      new java.sql.Timestamp(100000))
+  }
 }
 
 case class WithImmutableMap(id: String, map_test: scala.collection.immutable.Map[Long, String])
```
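For completeness, a short usage sketch of what the new implicits enable; this is hypothetical driver code, not part of the patch, and assumes a running `SparkSession` named `spark`:
```
import spark.implicits._

// Each toDS() call now resolves an implicit Encoder with no extra setup.
val decimals   = Seq(new java.math.BigDecimal(10)).toDS()         // decimal(38,18)
val dates      = Seq(java.sql.Date.valueOf("2016-12-12")).toDS()  // date
val timestamps = Seq(new java.sql.Timestamp(100000L)).toDS()      // timestamp
```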