aboutsummaryrefslogtreecommitdiff
path: root/sql/core/src/main/scala/org
diff options
context:
space:
mode:
authorReynold Xin <rxin@databricks.com>2016-03-15 14:57:54 -0700
committerYin Huai <yhuai@databricks.com>2016-03-15 14:57:54 -0700
commit643649dcbfabc5d6952c2ecfb98286324c887665 (patch)
tree571751a4a87ba11fbd0e5a5c582987a2ca53b887 /sql/core/src/main/scala/org
parent41eaabf5935052fc69f1657368fa17529d18f84b (diff)
downloadspark-643649dcbfabc5d6952c2ecfb98286324c887665.tar.gz
spark-643649dcbfabc5d6952c2ecfb98286324c887665.tar.bz2
spark-643649dcbfabc5d6952c2ecfb98286324c887665.zip
[SPARK-13895][SQL] DataFrameReader.text should return Dataset[String]
## What changes were proposed in this pull request?

This patch changes DataFrameReader.text()'s return type from DataFrame to Dataset[String]. Closes #11731.

## How was this patch tested?

Updated existing integration tests to reflect the change.

Author: Reynold Xin <rxin@databricks.com>

Closes #11739 from rxin/SPARK-13895.
Diffstat (limited to 'sql/core/src/main/scala/org')
-rw-r--r--sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala12
1 file changed, 8 insertions, 4 deletions
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala
index 57c978bec8..ef85f1db89 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala
@@ -399,8 +399,10 @@ class DataFrameReader private[sql](sqlContext: SQLContext) extends Logging {
}
/**
- * Loads a text file and returns a [[DataFrame]] with a single string column named "value".
- * Each line in the text file is a new row in the resulting DataFrame. For example:
+ * Loads a text file and returns a [[Dataset]] of String. The underlying schema of the Dataset
+ * contains a single string column named "value".
+ *
+ * Each line in the text file is a new row in the resulting Dataset. For example:
* {{{
* // Scala:
* sqlContext.read.text("/path/to/spark/README.md")
@@ -410,10 +412,12 @@ class DataFrameReader private[sql](sqlContext: SQLContext) extends Logging {
* }}}
*
* @param paths input path
- * @since 1.6.0
+ * @since 2.0.0
*/
@scala.annotation.varargs
- def text(paths: String*): DataFrame = format("text").load(paths : _*)
+ def text(paths: String*): Dataset[String] = {
+ format("text").load(paths : _*).as[String](sqlContext.implicits.newStringEncoder)
+ }
///////////////////////////////////////////////////////////////////////////////////////
// Builder pattern config options