diff options
author | Travis Hegner <thegner@trilliumit.com> | 2015-11-05 12:35:23 -0800 |
---|---|---|
committer | Yin Huai <yhuai@databricks.com> | 2015-11-05 12:36:57 -0800 |
commit | 14ee0f5726f96e2c4c28ac328d43fd85a0630b48 (patch) | |
tree | 28925a51fd5d499c395427ed647f6e11837a051b /sql | |
parent | f80f7b69a3f81d0ea879a31c769d17ffbbac74aa (diff) | |
download | spark-14ee0f5726f96e2c4c28ac328d43fd85a0630b48.tar.gz spark-14ee0f5726f96e2c4c28ac328d43fd85a0630b48.tar.bz2 spark-14ee0f5726f96e2c4c28ac328d43fd85a0630b48.zip |
[SPARK-10648] Oracle dialect to handle nonspecific numeric types
This is the alternative/agreed upon solution to PR #8780.
Creating an OracleDialect to handle the nonspecific numeric types that can be defined in oracle.
Author: Travis Hegner <thegner@trilliumit.com>
Closes #9495 from travishegner/OracleDialect.
Diffstat (limited to 'sql')
-rw-r--r-- | sql/core/src/main/scala/org/apache/spark/sql/jdbc/JdbcDialects.scala | 25 |
1 files changed, 25 insertions, 0 deletions
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/JdbcDialects.scala b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/JdbcDialects.scala index 88ae83957a..f9a6a09b62 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/JdbcDialects.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/JdbcDialects.scala @@ -139,6 +139,7 @@ object JdbcDialects { registerDialect(DB2Dialect) registerDialect(MsSqlServerDialect) registerDialect(DerbyDialect) + registerDialect(OracleDialect) /** @@ -315,3 +316,27 @@ case object DerbyDialect extends JdbcDialect { } +/** + * :: DeveloperApi :: + * Default Oracle dialect, mapping a nonspecific numeric type to a general decimal type. + */ +@DeveloperApi +case object OracleDialect extends JdbcDialect { + override def canHandle(url: String): Boolean = url.startsWith("jdbc:oracle") + override def getCatalystType( + sqlType: Int, typeName: String, size: Int, md: MetadataBuilder): Option[DataType] = { + // Handle NUMBER fields that have no precision/scale in a special way + // because JDBC ResultSetMetaData converts this to 0 precision and -127 scale + // For more details, please see + // https://github.com/apache/spark/pull/8780#issuecomment-145598968 + // and + // https://github.com/apache/spark/pull/8780#issuecomment-144541760 + if (sqlType == Types.NUMERIC && size == 0) { + // This is sub-optimal as we have to pick a precision/scale in advance whereas the data + // in Oracle is allowed to have different precision/scale for each value. + Some(DecimalType(DecimalType.MAX_PRECISION, 10)) + } else { + None + } + } +} |