diff options
author | Reynold Xin <rxin@databricks.com> | 2015-05-16 22:01:53 -0700 |
---|---|---|
committer | Reynold Xin <rxin@databricks.com> | 2015-05-16 22:02:00 -0700 |
commit | 17e078671ef3214ac2415b4602c62f979713a9fe (patch) | |
tree | 304d2746adbdd4fe09831c93f26815229c1d5f13 /examples/src/main/java | |
parent | 84949104c95d39b56a6fe122180e0d9790560c25 (diff) | |
download | spark-17e078671ef3214ac2415b4602c62f979713a9fe.tar.gz spark-17e078671ef3214ac2415b4602c62f979713a9fe.tar.bz2 spark-17e078671ef3214ac2415b4602c62f979713a9fe.zip |
[SPARK-7654][SQL] Move JDBC into DataFrame's reader/writer interface.
Also moved all the deprecated functions into one place for SQLContext and DataFrame, and updated tests to use the new API.
Author: Reynold Xin <rxin@databricks.com>
Closes #6210 from rxin/df-writer-reader-jdbc and squashes the following commits:
7465c2c [Reynold Xin] Fixed unit test.
118e609 [Reynold Xin] Updated tests.
3441b57 [Reynold Xin] Updated javadoc.
13cdd1c [Reynold Xin] [SPARK-7654][SQL] Move JDBC into DataFrame's reader/writer interface.
(cherry picked from commit 517eb37a85e0a28820bcfd5d98c50d02df6521c6)
Signed-off-by: Reynold Xin <rxin@databricks.com>
Diffstat (limited to 'examples/src/main/java')
-rw-r--r-- | examples/src/main/java/org/apache/spark/examples/sql/JavaSparkSQL.java | 4 |
1 file changed, 2 insertions, 2 deletions
diff --git a/examples/src/main/java/org/apache/spark/examples/sql/JavaSparkSQL.java b/examples/src/main/java/org/apache/spark/examples/sql/JavaSparkSQL.java index 173633ce05..afee279ec3 100644 --- a/examples/src/main/java/org/apache/spark/examples/sql/JavaSparkSQL.java +++ b/examples/src/main/java/org/apache/spark/examples/sql/JavaSparkSQL.java @@ -94,7 +94,7 @@ public class JavaSparkSQL { System.out.println("=== Data source: Parquet File ==="); // DataFrames can be saved as parquet files, maintaining the schema information. - schemaPeople.saveAsParquetFile("people.parquet"); + schemaPeople.write().parquet("people.parquet"); // Read in the parquet file created above. // Parquet files are self-describing so the schema is preserved. @@ -151,7 +151,7 @@ public class JavaSparkSQL { List<String> jsonData = Arrays.asList( "{\"name\":\"Yin\",\"address\":{\"city\":\"Columbus\",\"state\":\"Ohio\"}}"); JavaRDD<String> anotherPeopleRDD = ctx.parallelize(jsonData); - DataFrame peopleFromJsonRDD = sqlContext.jsonRDD(anotherPeopleRDD.rdd()); + DataFrame peopleFromJsonRDD = sqlContext.read().json(anotherPeopleRDD.rdd()); // Take a look at the schema of this new DataFrame. peopleFromJsonRDD.printSchema(); |