about · summary · refs · log · tree · commit · diff
path: root/examples
diff options
context:
space:
mode:
author: Reynold Xin <rxin@databricks.com>  2015-05-16 15:03:57 -0700
committer: Reynold Xin <rxin@databricks.com>  2015-05-16 15:03:57 -0700
commit: 161d0b4a41f453b21adde46a86e16c2743752799 (patch)
tree: 43519153e3a1cd636b2f8d8963e81ea62be5af15 /examples
parent: 1b4e710e5cdb00febb4c5920d81e77c2e3966a8b (diff)
download: spark-161d0b4a41f453b21adde46a86e16c2743752799.tar.gz
spark-161d0b4a41f453b21adde46a86e16c2743752799.tar.bz2
spark-161d0b4a41f453b21adde46a86e16c2743752799.zip
[SPARK-7654][MLlib] Migrate MLlib to the DataFrame reader/writer API.
Author: Reynold Xin <rxin@databricks.com>

Closes #6211 from rxin/mllib-reader and squashes the following commits:

79a2cb9 [Reynold Xin] [SPARK-7654][MLlib] Migrate MLlib to the DataFrame reader/writer API.
Diffstat (limited to 'examples')
-rw-r--r--  examples/src/main/scala/org/apache/spark/examples/mllib/DatasetExample.scala | 2 +-
-rw-r--r--  examples/src/main/scala/org/apache/spark/examples/sql/RDDRelation.scala      | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/examples/src/main/scala/org/apache/spark/examples/mllib/DatasetExample.scala b/examples/src/main/scala/org/apache/spark/examples/mllib/DatasetExample.scala
index c95cca7d65..520893b26d 100644
--- a/examples/src/main/scala/org/apache/spark/examples/mllib/DatasetExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/mllib/DatasetExample.scala
@@ -103,7 +103,7 @@ object DatasetExample {
tmpDir.deleteOnExit()
val outputDir = new File(tmpDir, "dataset").toString
println(s"Saving to $outputDir as Parquet file.")
- df.saveAsParquetFile(outputDir)
+ df.write.parquet(outputDir)
println(s"Loading Parquet file with UDT from $outputDir.")
val newDataset = sqlContext.read.parquet(outputDir)
diff --git a/examples/src/main/scala/org/apache/spark/examples/sql/RDDRelation.scala b/examples/src/main/scala/org/apache/spark/examples/sql/RDDRelation.scala
index acc89199d5..b11e32047d 100644
--- a/examples/src/main/scala/org/apache/spark/examples/sql/RDDRelation.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/sql/RDDRelation.scala
@@ -58,7 +58,7 @@ object RDDRelation {
df.where($"key" === 1).orderBy($"value".asc).select($"key").collect().foreach(println)
// Write out an RDD as a parquet file.
- df.saveAsParquetFile("pair.parquet")
+ df.write.parquet("pair.parquet")
// Read in parquet file. Parquet files are self-describing so the schmema is preserved.
val parquetFile = sqlContext.read.parquet("pair.parquet")