author    Reynold Xin <rxin@databricks.com>  2015-05-15 22:00:31 -0700
committer Reynold Xin <rxin@databricks.com>  2015-05-15 22:00:31 -0700
commit    578bfeeff514228f6fd4b07a536815fbb3510f7e (patch)
tree      97964df2b0b7ada4f019f2cd9617ba6af1d59f52 /sql/core/src/test/java
parent    deb411335a09b91eb1f75421d77e1c3686719621 (diff)
download  spark-578bfeeff514228f6fd4b07a536815fbb3510f7e.tar.gz
          spark-578bfeeff514228f6fd4b07a536815fbb3510f7e.tar.bz2
          spark-578bfeeff514228f6fd4b07a536815fbb3510f7e.zip
[SPARK-7654][SQL] DataFrameReader and DataFrameWriter for input/output API
This patch introduces DataFrameWriter and DataFrameReader.

The DataFrameReader interface, accessible through SQLContext.read, contains methods that create DataFrames. These methods used to reside in SQLContext. Example usage:
```scala
sqlContext.read.json("...")
sqlContext.read.parquet("...")
```

The DataFrameWriter interface, accessible through DataFrame.write, implements a builder pattern to avoid the proliferation of options when writing a DataFrame out. It currently implements:
- mode
- format (e.g. "parquet", "json")
- options (generic options passed down into data sources)
- partitionBy (partitioning columns)

Example usage:
```scala
df.write.mode("append").format("json").partitionBy("date").saveAsTable("myJsonTable")
```

TODO:
- [ ] Documentation update
- [ ] Move JDBC into reader / writer?
- [ ] Deprecate the old interfaces
- [ ] Move the generic load interface into reader.
- [ ] Update example code and documentation

Author: Reynold Xin <rxin@databricks.com>

Closes #6175 from rxin/reader-writer and squashes the following commits:

b146c95 [Reynold Xin] Deprecation of old APIs.
bd8abdf [Reynold Xin] Fixed merge conflict.
26abea2 [Reynold Xin] Added general load methods.
244fbec [Reynold Xin] Added equivalent to example.
4f15d92 [Reynold Xin] Added documentation for partitionBy.
7e91611 [Reynold Xin] [SPARK-7654][SQL] DataFrameReader and DataFrameWriter for input/output API.
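A minimal sketch of how the two builders compose, based on the examples above and the test change below; the paths and table name are illustrative, not from this patch:
```scala
// Reading: SQLContext.read returns a DataFrameReader; nothing is read
// until load() (or a shortcut such as json()/parquet()) is called.
val df = sqlContext.read
  .format("json")
  .options(Map("path" -> "/tmp/people.json"))  // illustrative path
  .load()

// Writing: DataFrame.write returns a DataFrameWriter; mode, format, and
// partitionBy accumulate until a terminal call such as saveAsTable().
df.write
  .mode("append")
  .format("json")
  .partitionBy("date")             // assumes df has a "date" column
  .saveAsTable("myJsonTable")      // illustrative table name
```
The builder defers execution to the terminal call, which is what lets new options be added later without multiplying load/save overloads.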
Diffstat (limited to 'sql/core/src/test/java')
-rw-r--r--  sql/core/src/test/java/test/org/apache/spark/sql/sources/JavaSaveLoadSuite.java | 8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/sql/core/src/test/java/test/org/apache/spark/sql/sources/JavaSaveLoadSuite.java b/sql/core/src/test/java/test/org/apache/spark/sql/sources/JavaSaveLoadSuite.java
index b76f7d421f..6a0bcefe7a 100644
--- a/sql/core/src/test/java/test/org/apache/spark/sql/sources/JavaSaveLoadSuite.java
+++ b/sql/core/src/test/java/test/org/apache/spark/sql/sources/JavaSaveLoadSuite.java
@@ -75,9 +75,9 @@ public class JavaSaveLoadSuite {
   public void saveAndLoad() {
     Map<String, String> options = new HashMap<String, String>();
     options.put("path", path.toString());
-    df.save("org.apache.spark.sql.json", SaveMode.ErrorIfExists, options);
+    df.save("json", SaveMode.ErrorIfExists, options);
 
-    DataFrame loadedDF = sqlContext.load("org.apache.spark.sql.json", options);
+    DataFrame loadedDF = sqlContext.read().format("json").options(options).load();
 
     checkAnswer(loadedDF, df.collectAsList());
   }
@@ -86,12 +86,12 @@ public class JavaSaveLoadSuite {
   public void saveAndLoadWithSchema() {
     Map<String, String> options = new HashMap<String, String>();
     options.put("path", path.toString());
-    df.save("org.apache.spark.sql.json", SaveMode.ErrorIfExists, options);
+    df.save("json", SaveMode.ErrorIfExists, options);
 
     List<StructField> fields = new ArrayList<StructField>();
     fields.add(DataTypes.createStructField("b", DataTypes.StringType, true));
     StructType schema = DataTypes.createStructType(fields);
-    DataFrame loadedDF = sqlContext.load("org.apache.spark.sql.json", schema, options);
+    DataFrame loadedDF = sqlContext.load("json", schema, options);
 
     checkAnswer(loadedDF, sqlContext.sql("SELECT b FROM jsonTable").collectAsList());
   }