about summary refs log tree commit diff
path: root/examples
diff options
context:
space:
mode:
authorSean Zhong <seanzhong@databricks.com>2016-05-12 15:51:53 +0800
committerCheng Lian <lian@databricks.com>2016-05-12 15:51:53 +0800
commit33c6eb5218ce3c31cc9f632a67fd2c7057569683 (patch)
treeb8c84c24107bf1ece596450ef3a3eec26df1f21d /examples
parent5207a005cc86618907b8f467abc03eacef485ecd (diff)
downloadspark-33c6eb5218ce3c31cc9f632a67fd2c7057569683.tar.gz
spark-33c6eb5218ce3c31cc9f632a67fd2c7057569683.tar.bz2
spark-33c6eb5218ce3c31cc9f632a67fd2c7057569683.zip
[SPARK-15171][SQL] Deprecate registerTempTable and add dataset.createTempView
## What changes were proposed in this pull request?

Deprecates registerTempTable and adds dataset.createTempView and dataset.createOrReplaceTempView.

## How was this patch tested?

Unit tests.

Author: Sean Zhong <seanzhong@databricks.com>

Closes #12945 from clockfly/spark-15171.
Diffstat (limited to 'examples')
-rw-r--r--examples/src/main/java/org/apache/spark/examples/sql/JavaSparkSQL.java8
-rw-r--r--examples/src/main/java/org/apache/spark/examples/streaming/JavaSqlNetworkWordCount.java2
-rw-r--r--examples/src/main/python/sql.py2
-rw-r--r--examples/src/main/python/streaming/sql_network_wordcount.py2
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/sql/RDDRelation.scala4
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/streaming/SqlNetworkWordCount.scala2
6 files changed, 10 insertions, 10 deletions
diff --git a/examples/src/main/java/org/apache/spark/examples/sql/JavaSparkSQL.java b/examples/src/main/java/org/apache/spark/examples/sql/JavaSparkSQL.java
index d956750dc7..cf0167f13a 100644
--- a/examples/src/main/java/org/apache/spark/examples/sql/JavaSparkSQL.java
+++ b/examples/src/main/java/org/apache/spark/examples/sql/JavaSparkSQL.java
@@ -75,7 +75,7 @@ public class JavaSparkSQL {
// Apply a schema to an RDD of Java Beans and register it as a table.
Dataset<Row> schemaPeople = spark.createDataFrame(people, Person.class);
- schemaPeople.registerTempTable("people");
+ schemaPeople.createOrReplaceTempView("people");
// SQL can be run over RDDs that have been registered as tables.
Dataset<Row> teenagers = spark.sql("SELECT name FROM people WHERE age >= 13 AND age <= 19");
@@ -102,7 +102,7 @@ public class JavaSparkSQL {
Dataset<Row> parquetFile = spark.read().parquet("people.parquet");
//Parquet files can also be registered as tables and then used in SQL statements.
- parquetFile.registerTempTable("parquetFile");
+ parquetFile.createOrReplaceTempView("parquetFile");
Dataset<Row> teenagers2 =
spark.sql("SELECT name FROM parquetFile WHERE age >= 13 AND age <= 19");
teenagerNames = teenagers2.toJavaRDD().map(new Function<Row, String>() {
@@ -131,7 +131,7 @@ public class JavaSparkSQL {
// |-- name: StringType
// Register this DataFrame as a table.
- peopleFromJsonFile.registerTempTable("people");
+ peopleFromJsonFile.createOrReplaceTempView("people");
// SQL statements can be run by using the sql methods provided by `spark`
Dataset<Row> teenagers3 = spark.sql("SELECT name FROM people WHERE age >= 13 AND age <= 19");
@@ -163,7 +163,7 @@ public class JavaSparkSQL {
// | |-- state: StringType
// |-- name: StringType
- peopleFromJsonRDD.registerTempTable("people2");
+ peopleFromJsonRDD.createOrReplaceTempView("people2");
Dataset<Row> peopleWithCity = spark.sql("SELECT name, address.city FROM people2");
List<String> nameAndCity = peopleWithCity.toJavaRDD().map(new Function<Row, String>() {
diff --git a/examples/src/main/java/org/apache/spark/examples/streaming/JavaSqlNetworkWordCount.java b/examples/src/main/java/org/apache/spark/examples/streaming/JavaSqlNetworkWordCount.java
index 57953ef74f..5130522770 100644
--- a/examples/src/main/java/org/apache/spark/examples/streaming/JavaSqlNetworkWordCount.java
+++ b/examples/src/main/java/org/apache/spark/examples/streaming/JavaSqlNetworkWordCount.java
@@ -95,7 +95,7 @@ public final class JavaSqlNetworkWordCount {
Dataset<Row> wordsDataFrame = spark.createDataFrame(rowRDD, JavaRecord.class);
// Register as table
- wordsDataFrame.registerTempTable("words");
+ wordsDataFrame.createOrReplaceTempView("words");
// Do word count on table using SQL and print it
Dataset<Row> wordCountsDataFrame =
diff --git a/examples/src/main/python/sql.py b/examples/src/main/python/sql.py
index d2e895d931..234024063f 100644
--- a/examples/src/main/python/sql.py
+++ b/examples/src/main/python/sql.py
@@ -67,7 +67,7 @@ if __name__ == "__main__":
# |-- name: string (nullable = true)
# Register this DataFrame as a temporary table.
- people.registerTempTable("people")
+ people.createOrReplaceTempView("people")
# SQL statements can be run by using the sql methods provided by `spark`
teenagers = spark.sql("SELECT name FROM people WHERE age >= 13 AND age <= 19")
diff --git a/examples/src/main/python/streaming/sql_network_wordcount.py b/examples/src/main/python/streaming/sql_network_wordcount.py
index f8801d4ea6..25e821534e 100644
--- a/examples/src/main/python/streaming/sql_network_wordcount.py
+++ b/examples/src/main/python/streaming/sql_network_wordcount.py
@@ -71,7 +71,7 @@ if __name__ == "__main__":
wordsDataFrame = spark.createDataFrame(rowRdd)
# Register as table
- wordsDataFrame.registerTempTable("words")
+ wordsDataFrame.createOrReplaceTempView("words")
# Do word count on table using SQL and print it
wordCountsDataFrame = \
diff --git a/examples/src/main/scala/org/apache/spark/examples/sql/RDDRelation.scala b/examples/src/main/scala/org/apache/spark/examples/sql/RDDRelation.scala
index 94c378ae4b..d1bda0ff84 100644
--- a/examples/src/main/scala/org/apache/spark/examples/sql/RDDRelation.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/sql/RDDRelation.scala
@@ -37,7 +37,7 @@ object RDDRelation {
val df = spark.createDataFrame((1 to 100).map(i => Record(i, s"val_$i")))
// Any RDD containing case classes can be registered as a table. The schema of the table is
// automatically inferred using scala reflection.
- df.registerTempTable("records")
+ df.createOrReplaceTempView("records")
// Once tables have been registered, you can run SQL queries over them.
println("Result of SELECT *:")
@@ -67,7 +67,7 @@ object RDDRelation {
parquetFile.where($"key" === 1).select($"value".as("a")).collect().foreach(println)
// These files can also be registered as tables.
- parquetFile.registerTempTable("parquetFile")
+ parquetFile.createOrReplaceTempView("parquetFile")
spark.sql("SELECT * FROM parquetFile").collect().foreach(println)
spark.stop()
diff --git a/examples/src/main/scala/org/apache/spark/examples/streaming/SqlNetworkWordCount.scala b/examples/src/main/scala/org/apache/spark/examples/streaming/SqlNetworkWordCount.scala
index 9aba4a05a8..688c5b23c2 100644
--- a/examples/src/main/scala/org/apache/spark/examples/streaming/SqlNetworkWordCount.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/streaming/SqlNetworkWordCount.scala
@@ -67,7 +67,7 @@ object SqlNetworkWordCount {
val wordsDataFrame = rdd.map(w => Record(w)).toDF()
// Register as table
- wordsDataFrame.registerTempTable("words")
+ wordsDataFrame.createOrReplaceTempView("words")
// Do word count on table using SQL and print it
val wordCountsDataFrame =