From e2ab79d5ea00af45c083cc9a6607d2f0905f9908 Mon Sep 17 00:00:00 2001
From: Wenchen Fan
Date: Sun, 12 Jun 2016 21:36:41 -0700
Subject: [SPARK-15898][SQL] DataFrameReader.text should return DataFrame

## What changes were proposed in this pull request?

We want to maintain API compatibility for DataFrameReader.text, and will introduce a new API called DataFrameReader.textFile which returns Dataset[String].

affected PRs:
https://github.com/apache/spark/pull/11731
https://github.com/apache/spark/pull/13104
https://github.com/apache/spark/pull/13184

## How was this patch tested?

N/A

Author: Wenchen Fan

Closes #13604 from cloud-fan/revert.
---
 R/pkg/R/SQLContext.R | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

(limited to 'R')

diff --git a/R/pkg/R/SQLContext.R b/R/pkg/R/SQLContext.R
index 584bbbf0e4..e7e9e353f9 100644
--- a/R/pkg/R/SQLContext.R
+++ b/R/pkg/R/SQLContext.R
@@ -364,9 +364,10 @@ parquetFile <- function(x, ...) {
 
 #' Create a SparkDataFrame from a text file.
 #'
-#' Loads a text file and returns a SparkDataFrame with a single string column named "value".
-#' If the directory structure of the text files contains partitioning information, those are
-#' ignored in the resulting DataFrame.
+#' Loads text files and returns a SparkDataFrame whose schema starts with
+#' a string column named "value", and followed by partitioned columns if
+#' there are any.
+#'
 #' Each line in the text file is a new row in the resulting SparkDataFrame.
 #'
 #' @param path Path of file to read. A vector of multiple paths is allowed.
-- 
cgit v1.2.3
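
For reference, a minimal SparkR sketch of the documented read.text() behavior (not part of the patch; the file path is just a placeholder, and a Spark 2.x SparkR session is assumed):

```r
library(SparkR)
sparkR.session()

# read.text() loads one or more text files into a SparkDataFrame whose schema
# starts with a string column named "value"; each line of input becomes one row.
# "examples/src/main/resources/people.txt" is a placeholder path.
df <- read.text("examples/src/main/resources/people.txt")

printSchema(df)  # expected: root |-- value: string (nullable = true)
head(df)         # first few lines of the file, one per row
```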