about summary refs log tree commit diff
path: root/sql
diff options
context:
space:
mode:
authorLiang-Chi Hsieh <viirya@gmail.com>2015-07-06 17:16:44 -0700
committerReynold Xin <rxin@databricks.com>2015-07-06 17:16:44 -0700
commitd4d6d31db5cc5c69ac369f754b7489f444c9ba2f (patch)
tree2b455308b35b3b3149fde84a4ad4f4a94099d31c /sql
parent09a06418debc25da0191d98798f7c5016d39be91 (diff)
downloadspark-d4d6d31db5cc5c69ac369f754b7489f444c9ba2f.tar.gz
spark-d4d6d31db5cc5c69ac369f754b7489f444c9ba2f.tar.bz2
spark-d4d6d31db5cc5c69ac369f754b7489f444c9ba2f.zip
[SPARK-8463][SQL] Use DriverRegistry to load jdbc driver at writing path
JIRA: https://issues.apache.org/jira/browse/SPARK-8463 Currently, at the reading path, `DriverRegistry` is used to load needed jdbc driver at executors. However, at the writing path, we also need `DriverRegistry` to load jdbc driver. Author: Liang-Chi Hsieh <viirya@gmail.com> Closes #6900 from viirya/jdbc_write_driver and squashes the following commits: 16cd04b [Liang-Chi Hsieh] Use DriverRegistry to load jdbc driver at writing path.
Diffstat (limited to 'sql')
-rw-r--r-- sql/core/src/main/scala/org/apache/spark/sql/jdbc/jdbc.scala 11
1 file changed, 6 insertions, 5 deletions
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/jdbc.scala b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/jdbc.scala
index dd8aaf6474..f7ea852fe7 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/jdbc.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/jdbc.scala
@@ -58,13 +58,12 @@ package object jdbc {
* are used.
*/
def savePartition(
- url: String,
+ getConnection: () => Connection,
table: String,
iterator: Iterator[Row],
rddSchema: StructType,
- nullTypes: Array[Int],
- properties: Properties): Iterator[Byte] = {
- val conn = DriverManager.getConnection(url, properties)
+ nullTypes: Array[Int]): Iterator[Byte] = {
+ val conn = getConnection()
var committed = false
try {
conn.setAutoCommit(false) // Everything in the same db transaction.
@@ -185,8 +184,10 @@ package object jdbc {
}
val rddSchema = df.schema
+ val driver: String = DriverRegistry.getDriverClassName(url)
+ val getConnection: () => Connection = JDBCRDD.getConnector(driver, url, properties)
df.foreachPartition { iterator =>
- JDBCWriteDetails.savePartition(url, table, iterator, rddSchema, nullTypes, properties)
+ JDBCWriteDetails.savePartition(getConnection, table, iterator, rddSchema, nullTypes)
}
}