author    Brandon Bradley <bradleytastic@gmail.com>  2016-01-28 15:25:57 -0800
committer Reynold Xin <rxin@databricks.com>          2016-01-28 15:25:57 -0800
commit    3a40c0e575fd4215302ea60c9821d31a5a138b8a (patch)
tree      fc77ca55ccb10ed9a251ce386be7304608c1f81d /sql
parent    abae889f08eb412cb897e4e63614ec2c93885ffd (diff)
[SPARK-12749][SQL] add json option to parse floating-point types as DecimalType
I tried to add this via the `USE_BIG_DECIMAL_FOR_FLOATS` option from Jackson, with no success.

Added a test for non-complex types. Should I add a test for complex types?

Author: Brandon Bradley <bradleytastic@gmail.com>

Closes #10936 from blbradley/spark-12749.
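For context on why this option matters: plain Scala, with no Spark involved, is enough to show the binary rounding that `DoubleType` is subject to and that a decimal type avoids. A minimal sketch:

```scala
// Doubles are binary floating point, so many decimal literals cannot be
// represented exactly; BigDecimal preserves the decimal digits as written.
val viaDouble  = 0.1 + 0.2                               // 0.30000000000000004
val viaDecimal = BigDecimal("0.1") + BigDecimal("0.2")   // exactly 0.3

println(viaDouble)   // 0.30000000000000004
println(viaDecimal)  // 0.3
```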
Diffstat (limited to 'sql')
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala                         |  2
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/json/InferSchema.scala  |  8
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/json/JSONOptions.scala  |  2
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/JsonSuite.scala    | 28
4 files changed, 38 insertions(+), 2 deletions(-)
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala
index 634c1bd473..2e0c6c7df9 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala
@@ -252,6 +252,8 @@ class DataFrameReader private[sql](sqlContext: SQLContext) extends Logging {
    *
    * You can set the following JSON-specific options to deal with non-standard JSON files:
    * <li>`primitivesAsString` (default `false`): infers all primitive values as a string type</li>
+   * <li>`floatAsBigDecimal` (default `false`): infers all floating-point values as a decimal
+   * type</li>
    * <li>`allowComments` (default `false`): ignores Java/C++ style comment in JSON records</li>
    * <li>`allowUnquotedFieldNames` (default `false`): allows unquoted JSON field names</li>
    * <li>`allowSingleQuotes` (default `true`): allows single quotes in addition to double quotes
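A usage sketch of the option documented above, against the 1.6-era `SQLContext` reader API used elsewhere in this patch (the input path is purely illustrative):

```scala
// With floatAsBigDecimal=true, schema inference maps JSON floating-point
// values to DecimalType(precision, scale) instead of DoubleType.
val df = sqlContext.read
  .option("floatAsBigDecimal", "true")
  .json("path/to/data.json")  // placeholder path

df.printSchema()  // floating-point fields now show as decimal(p,s)
```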
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/json/InferSchema.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/json/InferSchema.scala
index 44d5e4ff7e..8b773ddfcb 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/json/InferSchema.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/json/InferSchema.scala
@@ -134,8 +134,12 @@ private[json] object InferSchema {
             val v = parser.getDecimalValue
             DecimalType(v.precision(), v.scale())
           case FLOAT | DOUBLE =>
-            // TODO(davies): Should we use decimal if possible?
-            DoubleType
+            if (configOptions.floatAsBigDecimal) {
+              val v = parser.getDecimalValue
+              DecimalType(v.precision(), v.scale())
+            } else {
+              DoubleType
+            }
         }
 
       case VALUE_TRUE | VALUE_FALSE => BooleanType
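The new branch relies on Jackson's `getDecimalValue`, which returns a `java.math.BigDecimal` whose `precision()`/`scale()` pair fully determines the inferred `DecimalType`. A standalone sketch of that mapping, using the same literal the new test asserts on:

```scala
import java.math.BigDecimal

// 17 significant digits; the scale is negative because the exponent pushes
// the decimal point 292 places past the last stored digit.
val v = new BigDecimal("1.7976931348623157E308")

println(s"precision=${v.precision()}, scale=${v.scale()}")
// precision=17, scale=-292  =>  DecimalType(17,-292), as asserted in JsonSuite
```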
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/json/JSONOptions.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/json/JSONOptions.scala
index fe5b20697e..31a95ed461 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/json/JSONOptions.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/json/JSONOptions.scala
@@ -34,6 +34,8 @@ private[sql] class JSONOptions(
     parameters.get("samplingRatio").map(_.toDouble).getOrElse(1.0)
   val primitivesAsString =
     parameters.get("primitivesAsString").map(_.toBoolean).getOrElse(false)
+  val floatAsBigDecimal =
+    parameters.get("floatAsBigDecimal").map(_.toBoolean).getOrElse(false)
   val allowComments =
     parameters.get("allowComments").map(_.toBoolean).getOrElse(false)
   val allowUnquotedFieldNames =
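The option plumbing is a plain lookup in the reader's string-valued options map, falling back to a default when the key is absent. A minimal standalone sketch of the same pattern (the `Map` literal stands in for the user-supplied options):

```scala
// Present keys are parsed with _.toBoolean; absent keys take the default.
val parameters = Map("floatAsBigDecimal" -> "true")

val floatAsBigDecimal =
  parameters.get("floatAsBigDecimal").map(_.toBoolean).getOrElse(false)
val allowComments =
  parameters.get("allowComments").map(_.toBoolean).getOrElse(false)

assert(floatAsBigDecimal)  // key present => true
assert(!allowComments)     // key absent  => default false
```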
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/JsonSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/JsonSuite.scala
index 00eaeb0d34..dd83a0e36f 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/JsonSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/JsonSuite.scala
@@ -771,6 +771,34 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData {
     )
   }
 
+  test("Loading a JSON dataset floatAsBigDecimal returns schema with float types as BigDecimal") {
+    val jsonDF = sqlContext.read.option("floatAsBigDecimal", "true").json(primitiveFieldAndType)
+
+    val expectedSchema = StructType(
+      StructField("bigInteger", DecimalType(20, 0), true) ::
+      StructField("boolean", BooleanType, true) ::
+      StructField("double", DecimalType(17, -292), true) ::
+      StructField("integer", LongType, true) ::
+      StructField("long", LongType, true) ::
+      StructField("null", StringType, true) ::
+      StructField("string", StringType, true) :: Nil)
+
+    assert(expectedSchema === jsonDF.schema)
+
+    jsonDF.registerTempTable("jsonTable")
+
+    checkAnswer(
+      sql("select * from jsonTable"),
+      Row(BigDecimal("92233720368547758070"),
+        true,
+        BigDecimal("1.7976931348623157E308"),
+        10,
+        21474836470L,
+        null,
+        "this is a simple string.")
+    )
+  }
+
   test("Loading a JSON dataset from a text file with SQL") {
     val dir = Utils.createTempDir()
     dir.delete()
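For contrast with the test above, a hedged sketch of the default path that the `else` branch in `InferSchema` preserves (assuming the same `sqlContext` and `primitiveFieldAndType` fixtures): without the option, the same field infers as `DoubleType`.

```scala
// Default inference (option unset): FLOAT/DOUBLE tokens map to DoubleType,
// so the value is a binary double rather than an exact decimal.
val defaultDF = sqlContext.read.json(primitiveFieldAndType)
// Expected: defaultDF.schema("double").dataType == DoubleType
```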