author    Wenchen Fan <wenchen@databricks.com>    2016-06-12 21:36:41 -0700
committer Reynold Xin <rxin@databricks.com>       2016-06-12 21:36:41 -0700
commit    e2ab79d5ea00af45c083cc9a6607d2f0905f9908 (patch)
tree      750a843ae6ddba4abc3cf592a26960fb6de19189 /examples/src/main/java/org/apache
parent    1f8f2b5c2a33e63367ea4881b5918f6bc0a6f52f (diff)
[SPARK-15898][SQL] DataFrameReader.text should return DataFrame
## What changes were proposed in this pull request?

We want to maintain API compatibility for DataFrameReader.text, and will introduce a new API called DataFrameReader.textFile, which returns Dataset[String].

Affected PRs:
https://github.com/apache/spark/pull/11731
https://github.com/apache/spark/pull/13104
https://github.com/apache/spark/pull/13184

## How was this patch tested?

N/A

Author: Wenchen Fan <wenchen@databricks.com>

Closes #13604 from cloud-fan/revert.
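The distinction the commit message describes can be illustrated with a minimal sketch (not part of this patch; the class name is hypothetical and the file path is borrowed from the JavaSparkSQL example below): text() keeps returning an untyped DataFrame (Dataset<Row>) with a single "value" column, while the new textFile() returns a typed Dataset<String>, which the examples then convert to a JavaRDD<String>.

```java
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;

public final class TextVsTextFileSketch {
  public static void main(String[] args) {
    SparkSession spark = SparkSession
      .builder()
      .appName("TextVsTextFileSketch")
      .getOrCreate();

    String file = "examples/src/main/resources/people.txt";

    // DataFrameReader.text keeps its pre-existing signature and returns a
    // DataFrame (Dataset<Row>) with a single string column named "value".
    Dataset<Row> df = spark.read().text(file);

    // The new DataFrameReader.textFile returns Dataset<String>, so each
    // element is the line itself rather than a Row wrapping it.
    Dataset<String> ds = spark.read().textFile(file);

    // The examples touched by this patch convert the typed Dataset to a
    // JavaRDD<String> before applying RDD-style transformations.
    JavaRDD<String> lines = ds.javaRDD();

    System.out.println("rows = " + df.count() + ", lines = " + lines.count());
    spark.stop();
  }
}
```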
Diffstat (limited to 'examples/src/main/java/org/apache')
-rw-r--r--  examples/src/main/java/org/apache/spark/examples/JavaHdfsLR.java         | 2 +-
-rw-r--r--  examples/src/main/java/org/apache/spark/examples/JavaPageRank.java       | 2 +-
-rw-r--r--  examples/src/main/java/org/apache/spark/examples/JavaWordCount.java      | 2 +-
-rw-r--r--  examples/src/main/java/org/apache/spark/examples/ml/JavaALSExample.java  | 2 +-
-rw-r--r--  examples/src/main/java/org/apache/spark/examples/sql/JavaSparkSQL.java   | 2 +-
5 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/examples/src/main/java/org/apache/spark/examples/JavaHdfsLR.java b/examples/src/main/java/org/apache/spark/examples/JavaHdfsLR.java
index ded442096c..362bd4435e 100644
--- a/examples/src/main/java/org/apache/spark/examples/JavaHdfsLR.java
+++ b/examples/src/main/java/org/apache/spark/examples/JavaHdfsLR.java
@@ -126,7 +126,7 @@ public final class JavaHdfsLR {
.appName("JavaHdfsLR")
.getOrCreate();
- JavaRDD<String> lines = spark.read().text(args[0]).javaRDD();
+ JavaRDD<String> lines = spark.read().textFile(args[0]).javaRDD();
JavaRDD<DataPoint> points = lines.map(new ParsePoint()).cache();
int ITERATIONS = Integer.parseInt(args[1]);
diff --git a/examples/src/main/java/org/apache/spark/examples/JavaPageRank.java b/examples/src/main/java/org/apache/spark/examples/JavaPageRank.java
index 128b5ab17c..ed0bb87657 100644
--- a/examples/src/main/java/org/apache/spark/examples/JavaPageRank.java
+++ b/examples/src/main/java/org/apache/spark/examples/JavaPageRank.java
@@ -82,7 +82,7 @@ public final class JavaPageRank {
// URL neighbor URL
// URL neighbor URL
// ...
- JavaRDD<String> lines = spark.read().text(args[0]).javaRDD();
+ JavaRDD<String> lines = spark.read().textFile(args[0]).javaRDD();
// Loads all URLs from input file and initialize their neighbors.
JavaPairRDD<String, Iterable<String>> links = lines.mapToPair(
diff --git a/examples/src/main/java/org/apache/spark/examples/JavaWordCount.java b/examples/src/main/java/org/apache/spark/examples/JavaWordCount.java
index 1caee60e34..8f18604c07 100644
--- a/examples/src/main/java/org/apache/spark/examples/JavaWordCount.java
+++ b/examples/src/main/java/org/apache/spark/examples/JavaWordCount.java
@@ -46,7 +46,7 @@ public final class JavaWordCount {
.appName("JavaWordCount")
.getOrCreate();
- JavaRDD<String> lines = spark.read().text(args[0]).javaRDD();
+ JavaRDD<String> lines = spark.read().textFile(args[0]).javaRDD();
JavaRDD<String> words = lines.flatMap(new FlatMapFunction<String, String>() {
@Override
diff --git a/examples/src/main/java/org/apache/spark/examples/ml/JavaALSExample.java b/examples/src/main/java/org/apache/spark/examples/ml/JavaALSExample.java
index 7f568f4e0d..739558e81f 100644
--- a/examples/src/main/java/org/apache/spark/examples/ml/JavaALSExample.java
+++ b/examples/src/main/java/org/apache/spark/examples/ml/JavaALSExample.java
@@ -87,7 +87,7 @@ public class JavaALSExample {
// $example on$
JavaRDD<Rating> ratingsRDD = spark
- .read().text("data/mllib/als/sample_movielens_ratings.txt").javaRDD()
+ .read().textFile("data/mllib/als/sample_movielens_ratings.txt").javaRDD()
.map(new Function<String, Rating>() {
public Rating call(String str) {
return Rating.parseRating(str);
diff --git a/examples/src/main/java/org/apache/spark/examples/sql/JavaSparkSQL.java b/examples/src/main/java/org/apache/spark/examples/sql/JavaSparkSQL.java
index 55e591d0ce..e512979ac7 100644
--- a/examples/src/main/java/org/apache/spark/examples/sql/JavaSparkSQL.java
+++ b/examples/src/main/java/org/apache/spark/examples/sql/JavaSparkSQL.java
@@ -59,7 +59,7 @@ public class JavaSparkSQL {
System.out.println("=== Data source: RDD ===");
// Load a text file and convert each line to a Java Bean.
String file = "examples/src/main/resources/people.txt";
- JavaRDD<Person> people = spark.read().text(file).javaRDD().map(
+ JavaRDD<Person> people = spark.read().textFile(file).javaRDD().map(
new Function<String, Person>() {
@Override
public Person call(String line) {