author	Felix Cheung <felixcheung_m@hotmail.com>	2017-03-02 01:02:38 -0800
committer	Felix Cheung <felixcheung@apache.org>	2017-03-02 01:02:38 -0800
commit	8d6ef895ee492b8febbaac7ab2ef2c907b48fa4a (patch)
tree	497d97fb9ca0c033baccc47eb5fddb4196fbf36a /sql/core/src
parent	d2a879762a2b4f3c4d703cc183275af12b3c7de1 (diff)
[SPARK-18352][DOCS] wholeFile JSON update doc and programming guide
## What changes were proposed in this pull request?

Update doc for R, programming guide. Clarify default behavior for all languages.

## How was this patch tested?

manually

Author: Felix Cheung <felixcheung_m@hotmail.com>

Closes #17128 from felixcheung/jsonwholefiledoc.
Diffstat (limited to 'sql/core/src')
-rw-r--r--	sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala	4
-rw-r--r--	sql/core/src/main/scala/org/apache/spark/sql/streaming/DataStreamReader.scala	4
2 files changed, 4 insertions, 4 deletions
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala
index 63be1e5302..41470ae6aa 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala
@@ -263,8 +263,8 @@ class DataFrameReader private[sql](sparkSession: SparkSession) extends Logging {
/**
* Loads a JSON file and returns the results as a `DataFrame`.
*
- * Both JSON (one record per file) and <a href="http://jsonlines.org/">JSON Lines</a>
- * (newline-delimited JSON) are supported and can be selected with the `wholeFile` option.
+ * <a href="http://jsonlines.org/">JSON Lines</a> (newline-delimited JSON) is supported by
+ * default. For JSON (one record per file), set the `wholeFile` option to true.
*
* This function goes through the input once to determine the input schema. If you know the
* schema in advance, use the version that specifies the schema to avoid the extra scan.
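The revised javadoc above describes the two read modes of the batch reader. As a minimal illustrative sketch (not part of this patch), assuming a SparkSession named `spark` and hypothetical input paths:

```scala
import org.apache.spark.sql.SparkSession

val spark = SparkSession.builder().appName("json-read-modes").getOrCreate()

// Default: JSON Lines input, one JSON record per line.
val jsonLinesDF = spark.read.json("path/to/json-lines")

// Whole-file mode: each input file holds a single (possibly multi-line) JSON record.
val wholeFileDF = spark.read
  .option("wholeFile", true)
  .json("path/to/whole-file-json")
```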
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/streaming/DataStreamReader.scala b/sql/core/src/main/scala/org/apache/spark/sql/streaming/DataStreamReader.scala
index 6a275281d8..aed8074a64 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/streaming/DataStreamReader.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/streaming/DataStreamReader.scala
@@ -143,8 +143,8 @@ final class DataStreamReader private[sql](sparkSession: SparkSession) extends Lo
/**
* Loads a JSON file stream and returns the results as a `DataFrame`.
*
- * Both JSON (one record per file) and <a href="http://jsonlines.org/">JSON Lines</a>
- * (newline-delimited JSON) are supported and can be selected with the `wholeFile` option.
+ * <a href="http://jsonlines.org/">JSON Lines</a> (newline-delimited JSON) is supported by
+ * default. For JSON (one record per file), set the `wholeFile` option to true.
*
* This function goes through the input once to determine the input schema. If you know the
* schema in advance, use the version that specifies the schema to avoid the extra scan.
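The streaming reader takes the same option. A minimal sketch, assuming a SparkSession named `spark`, a hypothetical input directory, and a user-supplied schema (file-based streaming sources generally require one up front):

```scala
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.types.{StringType, StructField, StructType}

val spark = SparkSession.builder().appName("json-stream").getOrCreate()

// Illustrative schema; the streaming source does not scan the input to infer one by default.
val schema = StructType(Seq(
  StructField("name", StringType),
  StructField("city", StringType)))

val streamDF = spark.readStream
  .schema(schema)
  .option("wholeFile", true)
  .json("path/to/incoming-json")
```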