aboutsummaryrefslogtreecommitdiff
path: root/sql
diff options
context:
space:
mode:
authorpoolis <gmichalopoulos@gmail.com>2016-05-02 16:15:07 -0700
committerReynold Xin <rxin@databricks.com>2016-05-02 16:15:07 -0700
commit917d05f43bddc1728735979fe7e62fe631b35e6f (patch)
tree019a21e0f55cc6d558d484de206f540eea46408e /sql
parentca1b2198581b8de1651a88fc97540570a2347dc9 (diff)
downloadspark-917d05f43bddc1728735979fe7e62fe631b35e6f.tar.gz
spark-917d05f43bddc1728735979fe7e62fe631b35e6f.tar.bz2
spark-917d05f43bddc1728735979fe7e62fe631b35e6f.zip
[SPARK-12928][SQL] Oracle FLOAT datatype is not properly handled when reading via JDBC
The contribution is my original work and I license the work to the project under the project's open source license. Author: poolis <gmichalopoulos@gmail.com> Author: Greg Michalopoulos <gmichalopoulos@gmail.com> Closes #10899 from poolis/spark-12928.
Diffstat (limited to 'sql')
-rw-r--r--sql/core/src/main/scala/org/apache/spark/sql/jdbc/OracleDialect.scala6
-rw-r--r--sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala9
2 files changed, 15 insertions, 0 deletions
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/OracleDialect.scala b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/OracleDialect.scala
index 46b3877a7c..b795e8b42d 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/OracleDialect.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/OracleDialect.scala
@@ -38,6 +38,12 @@ private case object OracleDialect extends JdbcDialect {
// This is sub-optimal as we have to pick a precision/scale in advance whereas the data
// in Oracle is allowed to have different precision/scale for each value.
Option(DecimalType(DecimalType.MAX_PRECISION, 10))
+ } else if (sqlType == Types.NUMERIC && md.build().getLong("scale") == -127) {
+ // Handle FLOAT fields in a special way because JDBC ResultSetMetaData converts
+ // this to NUMERIC with -127 scale
+ // Not sure if there is a more robust way to identify the field as a float (or other
+ // numeric types that do not specify a scale).
+ Option(DecimalType(DecimalType.MAX_PRECISION, 10))
} else {
None
}
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala
index 783511b781..47a1017caa 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala
@@ -600,6 +600,15 @@ class JDBCSuite extends SparkFunSuite
assert(derbyDialect.getJDBCType(BooleanType).map(_.databaseTypeDefinition).get == "BOOLEAN")
}
+ test("OracleDialect jdbc type mapping") {
+ val oracleDialect = JdbcDialects.get("jdbc:oracle")
+ val metadata = new MetadataBuilder().putString("name", "test_column").putLong("scale", -127)
+ assert(oracleDialect.getCatalystType(java.sql.Types.NUMERIC, "float", 1, metadata) ==
+ Some(DecimalType(DecimalType.MAX_PRECISION, 10)))
+ assert(oracleDialect.getCatalystType(java.sql.Types.NUMERIC, "numeric", 0, null) ==
+ Some(DecimalType(DecimalType.MAX_PRECISION, 10)))
+ }
+
test("table exists query by jdbc dialect") {
val MySQL = JdbcDialects.get("jdbc:mysql://127.0.0.1/db")
val Postgres = JdbcDialects.get("jdbc:postgresql://127.0.0.1/db")