aboutsummaryrefslogtreecommitdiff
path: root/sql/hive/compatibility/src
diff options
context:
space:
mode:
authorHerman van Hovell <hvanhovell@questtec.nl>2016-01-20 15:13:01 -0800
committerReynold Xin <rxin@databricks.com>2016-01-20 15:13:01 -0800
commit10173279305a0e8a62bfbfe7a9d5d1fd558dd8e1 (patch)
tree94fce3f9499b51ef69ad6bf49a132f54cb8afe6b /sql/hive/compatibility/src
parentf3934a8d656f1668bec065751b2a11411229b6f5 (diff)
downloadspark-10173279305a0e8a62bfbfe7a9d5d1fd558dd8e1.tar.gz
spark-10173279305a0e8a62bfbfe7a9d5d1fd558dd8e1.tar.bz2
spark-10173279305a0e8a62bfbfe7a9d5d1fd558dd8e1.zip
[SPARK-12848][SQL] Change parsed decimal literal datatype from Double to Decimal
The current parser turns a decimal literal, for example ```12.1```, into a Double. The problem with this approach is that we convert an exact literal into a non-exact ```Double```. The PR changes this behavior, a Decimal literal is now converted into an exact ```BigDecimal```. The behavior for scientific decimals, for example ```12.1e01```, is unchanged. This will be converted into a Double. This PR replaces the ```BigDecimal``` literal by a ```Double``` literal, because the ```BigDecimal``` is the default now. You can use the double literal by appending a 'D' to the value, for instance: ```3.141527D``` cc davies rxin Author: Herman van Hovell <hvanhovell@questtec.nl> Closes #10796 from hvanhovell/SPARK-12848.
Diffstat (limited to 'sql/hive/compatibility/src')
-rw-r--r--sql/hive/compatibility/src/test/scala/org/apache/spark/sql/hive/execution/HiveCompatibilitySuite.scala13
-rw-r--r--sql/hive/compatibility/src/test/scala/org/apache/spark/sql/hive/execution/HiveWindowFunctionQuerySuite.scala2
2 files changed, 9 insertions, 6 deletions
diff --git a/sql/hive/compatibility/src/test/scala/org/apache/spark/sql/hive/execution/HiveCompatibilitySuite.scala b/sql/hive/compatibility/src/test/scala/org/apache/spark/sql/hive/execution/HiveCompatibilitySuite.scala
index 828ec97105..554d47d651 100644
--- a/sql/hive/compatibility/src/test/scala/org/apache/spark/sql/hive/execution/HiveCompatibilitySuite.scala
+++ b/sql/hive/compatibility/src/test/scala/org/apache/spark/sql/hive/execution/HiveCompatibilitySuite.scala
@@ -323,7 +323,14 @@ class HiveCompatibilitySuite extends HiveQueryFileTest with BeforeAndAfter {
// Feature removed in HIVE-11145
"alter_partition_protect_mode",
"drop_partitions_ignore_protection",
- "protectmode"
+ "protectmode",
+
+ // Spark parser treats numerical literals differently: it creates decimals instead of doubles.
+ "udf_abs",
+ "udf_format_number",
+ "udf_round",
+ "udf_round_3",
+ "view_cast"
)
/**
@@ -884,7 +891,6 @@ class HiveCompatibilitySuite extends HiveQueryFileTest with BeforeAndAfter {
"udf_10_trims",
"udf_E",
"udf_PI",
- "udf_abs",
"udf_acos",
"udf_add",
"udf_array",
@@ -928,7 +934,6 @@ class HiveCompatibilitySuite extends HiveQueryFileTest with BeforeAndAfter {
"udf_find_in_set",
"udf_float",
"udf_floor",
- "udf_format_number",
"udf_from_unixtime",
"udf_greaterthan",
"udf_greaterthanorequal",
@@ -976,8 +981,6 @@ class HiveCompatibilitySuite extends HiveQueryFileTest with BeforeAndAfter {
"udf_regexp_replace",
"udf_repeat",
"udf_rlike",
- "udf_round",
- "udf_round_3",
"udf_rpad",
"udf_rtrim",
"udf_sign",
diff --git a/sql/hive/compatibility/src/test/scala/org/apache/spark/sql/hive/execution/HiveWindowFunctionQuerySuite.scala b/sql/hive/compatibility/src/test/scala/org/apache/spark/sql/hive/execution/HiveWindowFunctionQuerySuite.scala
index bad3ca6da2..d0b4cbe401 100644
--- a/sql/hive/compatibility/src/test/scala/org/apache/spark/sql/hive/execution/HiveWindowFunctionQuerySuite.scala
+++ b/sql/hive/compatibility/src/test/scala/org/apache/spark/sql/hive/execution/HiveWindowFunctionQuerySuite.scala
@@ -559,7 +559,7 @@ class HiveWindowFunctionQuerySuite extends HiveComparisonTest with BeforeAndAfte
"""
|select p_mfgr,p_name, p_size,
|histogram_numeric(p_retailprice, 5) over w1 as hist,
- |percentile(p_partkey, 0.5) over w1 as per,
+ |percentile(p_partkey, cast(0.5 as double)) over w1 as per,
|row_number() over(distribute by p_mfgr sort by p_name) as rn
|from part
|window w1 as (distribute by p_mfgr sort by p_mfgr, p_name