about summary refs log tree commit diff
path: root/R/pkg
diff options
context:
space:
mode:
authorFelix Cheung <felixcheung_m@hotmail.com>2016-10-26 23:06:11 -0700
committerFelix Cheung <felixcheung@apache.org>2016-10-26 23:06:11 -0700
commit44c8bfda793b7655e2bd1da5e9915a09ed9d42ce (patch)
tree4f42de29a093fe5f845307dca278a80d51bae96c /R/pkg
parent1dbe9896b7f30538a5fad2f5d718d035c7906936 (diff)
downloadspark-44c8bfda793b7655e2bd1da5e9915a09ed9d42ce.tar.gz
spark-44c8bfda793b7655e2bd1da5e9915a09ed9d42ce.tar.bz2
spark-44c8bfda793b7655e2bd1da5e9915a09ed9d42ce.zip
[SQL][DOC] updating doc for JSON source to link to jsonlines.org
## What changes were proposed in this pull request?

API and programming guide doc changes for Scala, Python and R.

## How was this patch tested?

manual test

Author: Felix Cheung <felixcheung_m@hotmail.com>

Closes #15629 from felixcheung/jsondoc.
Diffstat (limited to 'R/pkg')
-rw-r--r--R/pkg/R/DataFrame.R3
-rw-r--r--R/pkg/R/SQLContext.R3
2 files changed, 4 insertions, 2 deletions
diff --git a/R/pkg/R/DataFrame.R b/R/pkg/R/DataFrame.R
index be34e4b32f..1df8bbf9fe 100644
--- a/R/pkg/R/DataFrame.R
+++ b/R/pkg/R/DataFrame.R
@@ -761,7 +761,8 @@ setMethod("toJSON",
#' Save the contents of SparkDataFrame as a JSON file
#'
-#' Save the contents of a SparkDataFrame as a JSON file (one object per line). Files written out
+#' Save the contents of a SparkDataFrame as a JSON file (\href{http://jsonlines.org/}{
+#' JSON Lines text format or newline-delimited JSON}). Files written out
#' with this method can be read back in as a SparkDataFrame using read.json().
#'
#' @param x A SparkDataFrame
diff --git a/R/pkg/R/SQLContext.R b/R/pkg/R/SQLContext.R
index 0d6a229e63..216ca51666 100644
--- a/R/pkg/R/SQLContext.R
+++ b/R/pkg/R/SQLContext.R
@@ -324,7 +324,8 @@ setMethod("toDF", signature(x = "RDD"),
#' Create a SparkDataFrame from a JSON file.
#'
-#' Loads a JSON file (one object per line), returning the result as a SparkDataFrame
+#' Loads a JSON file (\href{http://jsonlines.org/}{JSON Lines text format or newline-delimited JSON}
+#' ), returning the result as a SparkDataFrame
#' It goes through the entire dataset once to determine the schema.
#'
#' @param path Path of file to read. A vector of multiple paths is allowed.