about summary refs log tree commit diff
path: root/examples/src
diff options
context:
space:
mode:
author: sureshthalamati <suresh.thalamati@gmail.com> 2017-03-23 17:39:33 -0700
committer: Xiao Li <gatorsmile@gmail.com> 2017-03-23 17:39:33 -0700
commit: c7911807050227fcd13161ce090330d9d8daa533 (patch)
tree: d22689bed1b891c4e988f5334a47b92c06e4fe15 /examples/src
parent: b7be05a203b3e2a307147ea0c6cb0dec03da82a2 (diff)
download: spark-c7911807050227fcd13161ce090330d9d8daa533.tar.gz
spark-c7911807050227fcd13161ce090330d9d8daa533.tar.bz2
spark-c7911807050227fcd13161ce090330d9d8daa533.zip
[SPARK-10849][SQL] Adds option to the JDBC data source write for user to specify database column type for the create table
## What changes were proposed in this pull request? Currently the JDBC data source creates tables in the target database using the default type mapping and the JDBC dialect mechanism. If users want to specify a different database data type for only some of the columns, there is no option available. In scenarios where the default mapping does not work, users are forced to create tables on the target database before writing. This workaround is probably not acceptable from a usability point of view. This PR provides a user-defined type mapping for specific columns. The solution is to allow users to specify the database column data type for the create table as a JDBC data source option (createTableColumnTypes) on write. Data type information can be specified in the same format as the table schema DDL format (e.g: `name CHAR(64), comments VARCHAR(1024)`). Not all supported target database types can be specified; the data types also have to be valid Spark SQL data types. For example, a user cannot specify the target database CLOB data type. This will be supported in a follow-up PR. Example: ```Scala df.write .option("createTableColumnTypes", "name CHAR(64), comments VARCHAR(1024)") .jdbc(url, "TEST.DBCOLTYPETEST", properties) ``` ## How was this patch tested? Added new test cases to the JDBCWriteSuite. Author: sureshthalamati <suresh.thalamati@gmail.com> Closes #16209 from sureshthalamati/jdbc_custom_dbtype_option_json-spark-10849.
Diffstat (limited to 'examples/src')
-rw-r--r--examples/src/main/java/org/apache/spark/examples/sql/JavaSQLDataSourceExample.java5
-rw-r--r--examples/src/main/python/sql/datasource.py6
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/sql/SQLDataSourceExample.scala5
3 files changed, 16 insertions, 0 deletions
diff --git a/examples/src/main/java/org/apache/spark/examples/sql/JavaSQLDataSourceExample.java b/examples/src/main/java/org/apache/spark/examples/sql/JavaSQLDataSourceExample.java
index 82bb284ea3..1a7054614b 100644
--- a/examples/src/main/java/org/apache/spark/examples/sql/JavaSQLDataSourceExample.java
+++ b/examples/src/main/java/org/apache/spark/examples/sql/JavaSQLDataSourceExample.java
@@ -258,6 +258,11 @@ public class JavaSQLDataSourceExample {
jdbcDF2.write()
.jdbc("jdbc:postgresql:dbserver", "schema.tablename", connectionProperties);
+
+ // Specifying create table column data types on write
+ jdbcDF.write()
+ .option("createTableColumnTypes", "name CHAR(64), comments VARCHAR(1024)")
+ .jdbc("jdbc:postgresql:dbserver", "schema.tablename", connectionProperties);
// $example off:jdbc_dataset$
}
}
diff --git a/examples/src/main/python/sql/datasource.py b/examples/src/main/python/sql/datasource.py
index e9aa9d9ac2..e4abb09333 100644
--- a/examples/src/main/python/sql/datasource.py
+++ b/examples/src/main/python/sql/datasource.py
@@ -169,6 +169,12 @@ def jdbc_dataset_example(spark):
jdbcDF2.write \
.jdbc("jdbc:postgresql:dbserver", "schema.tablename",
properties={"user": "username", "password": "password"})
+
+ # Specifying create table column data types on write
+ jdbcDF.write \
+ .option("createTableColumnTypes", "name CHAR(64), comments VARCHAR(1024)") \
+ .jdbc("jdbc:postgresql:dbserver", "schema.tablename",
+ properties={"user": "username", "password": "password"})
# $example off:jdbc_dataset$
diff --git a/examples/src/main/scala/org/apache/spark/examples/sql/SQLDataSourceExample.scala b/examples/src/main/scala/org/apache/spark/examples/sql/SQLDataSourceExample.scala
index 381e69cda8..82fd56de39 100644
--- a/examples/src/main/scala/org/apache/spark/examples/sql/SQLDataSourceExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/sql/SQLDataSourceExample.scala
@@ -181,6 +181,11 @@ object SQLDataSourceExample {
jdbcDF2.write
.jdbc("jdbc:postgresql:dbserver", "schema.tablename", connectionProperties)
+
+ // Specifying create table column data types on write
+ jdbcDF.write
+ .option("createTableColumnTypes", "name CHAR(64), comments VARCHAR(1024)")
+ .jdbc("jdbc:postgresql:dbserver", "schema.tablename", connectionProperties)
// $example off:jdbc_dataset$
}
}