-rw-r--r--  docs/sql-programming-guide.md | 48
-rw-r--r--  docs/streaming-programming-guide.md | 12
-rw-r--r--  examples/src/main/java/org/apache/spark/examples/sql/JavaSparkSQL.java | 8
-rw-r--r--  examples/src/main/java/org/apache/spark/examples/streaming/JavaSqlNetworkWordCount.java | 2
-rw-r--r--  examples/src/main/python/sql.py | 2
-rw-r--r--  examples/src/main/python/streaming/sql_network_wordcount.py | 2
-rw-r--r--  examples/src/main/scala/org/apache/spark/examples/sql/RDDRelation.scala | 6
-rw-r--r--  examples/src/main/scala/org/apache/spark/examples/sql/hive/HiveFromSpark.scala | 4
-rw-r--r--  examples/src/main/scala/org/apache/spark/examples/streaming/SqlNetworkWordCount.scala | 2
-rw-r--r--  mllib/src/test/java/org/apache/spark/ml/JavaPipelineSuite.java | 2
-rw-r--r--  mllib/src/test/java/org/apache/spark/ml/classification/JavaLogisticRegressionSuite.java | 10
-rw-r--r--  mllib/src/test/java/org/apache/spark/ml/regression/JavaLinearRegressionSuite.java | 4
-rw-r--r--  python/pyspark/sql/context.py | 4
-rw-r--r--  python/pyspark/sql/readwriter.py | 2
-rw-r--r--  python/pyspark/sql/session.py | 2
-rw-r--r--  python/pyspark/sql/tests.py | 25
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala | 2
-rw-r--r--  sql/core/src/test/java/test/org/apache/spark/sql/JavaApplySchemaSuite.java | 8
-rw-r--r--  sql/core/src/test/java/test/org/apache/spark/sql/sources/JavaSaveLoadSuite.java | 2
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/CachedTableSuite.scala | 60
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/ColumnExpressionSuite.scala | 2
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/DataFrameTimeWindowingSuite.scala | 2
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/DataFrameWindowSuite.scala | 22
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/JoinSuite.scala | 4
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/ListTablesSuite.scala | 4
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/SQLContextSuite.scala | 2
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala | 103
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/ScalaReflectionRelationSuite.scala | 10
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/SubquerySuite.scala | 8
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/UDFSuite.scala | 12
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/UserDefinedTypeSuite.scala | 2
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/execution/PlannerSuite.scala | 10
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/AggregateBenchmark.scala | 3
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/InMemoryColumnarQuerySuite.scala | 8
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/PartitionBatchPruningSuite.scala | 2
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/JsonSuite.scala | 58
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetPartitionDiscoverySuite.scala | 10
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetQuerySuite.scala | 4
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetReadBenchmark.scala | 20
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/TPCDSBenchmark.scala | 2
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/execution/metric/SQLMetricsSuite.scala | 8
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala | 2
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/sources/CreateTableAsSelectSuite.scala | 2
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/sources/InsertSuite.scala | 6
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/sources/SaveLoadSuite.scala | 4
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamSuite.scala | 2
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/test/SQLTestData.scala | 46
-rw-r--r--  sql/hive/src/test/java/org/apache/spark/sql/hive/JavaDataFrameSuite.java | 2
-rw-r--r--  sql/hive/src/test/java/org/apache/spark/sql/hive/JavaMetastoreDataSourcesSuite.java | 2
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/ErrorPositionSuite.scala | 4
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveParquetSuite.scala | 4
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveSparkSubmitSuite.scala | 8
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertIntoHiveTableSuite.scala | 12
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala | 8
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/ParquetHiveCompatibilitySuite.scala | 2
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/QueryPartitionSuite.scala | 2
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/StatisticsSuite.scala | 2
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/UDFSuite.scala | 2
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/AggregationQuerySuite.scala | 8
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveExplainSuite.scala | 2
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveOperatorQueryableSuite.scala | 4
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HivePlanTest.scala | 2
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala | 18
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveResolutionSuite.scala | 10
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveTableScanSuite.scala | 2
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveUDFSuite.scala | 22
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala | 66
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLWindowFunctionSuite.scala | 16
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcPartitionDiscoverySuite.scala | 8
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcQuerySuite.scala | 12
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcSourceSuite.scala | 2
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/parquetSuites.scala | 9
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/sources/HadoopFsRelationTest.scala | 10
-rw-r--r--  sql/hivecontext-compatibility/src/test/scala/org/apache/spark/sql/hive/HiveContextCompatibilitySuite.scala | 4
74 files changed, 407 insertions(+), 401 deletions(-)
diff --git a/docs/sql-programming-guide.md b/docs/sql-programming-guide.md
index a16a6bb1d9..a9e1f9d5ce 100644
--- a/docs/sql-programming-guide.md
+++ b/docs/sql-programming-guide.md
@@ -529,7 +529,7 @@ case class Person(name: String, age: Int)
// Create an RDD of Person objects and register it as a table.
val people = sc.textFile("examples/src/main/resources/people.txt").map(_.split(",")).map(p => Person(p(0), p(1).trim.toInt)).toDF()
-people.registerTempTable("people")
+people.createOrReplaceTempView("people")
// SQL statements can be run by using the sql methods provided by sqlContext.
val teenagers = sqlContext.sql("SELECT name, age FROM people WHERE age >= 13 AND age <= 19")
@@ -605,7 +605,7 @@ JavaRDD<Person> people = sc.textFile("examples/src/main/resources/people.txt").m
// Apply a schema to an RDD of JavaBeans and register it as a table.
DataFrame schemaPeople = sqlContext.createDataFrame(people, Person.class);
-schemaPeople.registerTempTable("people");
+schemaPeople.createOrReplaceTempView("people");
// SQL can be run over RDDs that have been registered as tables.
DataFrame teenagers = sqlContext.sql("SELECT name FROM people WHERE age >= 13 AND age <= 19")
@@ -643,7 +643,7 @@ people = parts.map(lambda p: Row(name=p[0], age=int(p[1])))
# Infer the schema, and register the DataFrame as a table.
schemaPeople = sqlContext.createDataFrame(people)
-schemaPeople.registerTempTable("people")
+schemaPeople.createOrReplaceTempView("people")
# SQL can be run over DataFrames that have been registered as a table.
teenagers = sqlContext.sql("SELECT name FROM people WHERE age >= 13 AND age <= 19")
@@ -703,8 +703,8 @@ val rowRDD = people.map(_.split(",")).map(p => Row(p(0), p(1).trim))
// Apply the schema to the RDD.
val peopleDataFrame = sqlContext.createDataFrame(rowRDD, schema)
-// Register the DataFrames as a table.
-peopleDataFrame.registerTempTable("people")
+// Creates a temporary view using the DataFrame.
+peopleDataFrame.createOrReplaceTempView("people")
// SQL statements can be run by using the sql methods provided by sqlContext.
val results = sqlContext.sql("SELECT name FROM people")
@@ -771,10 +771,10 @@ JavaRDD<Row> rowRDD = people.map(
// Apply the schema to the RDD.
DataFrame peopleDataFrame = sqlContext.createDataFrame(rowRDD, schema);
-// Register the DataFrame as a table.
-peopleDataFrame.registerTempTable("people");
+// Creates a temporary view using the DataFrame.
+peopleDataFrame.createOrReplaceTempView("people");
-// SQL can be run over RDDs that have been registered as tables.
+// SQL can be run over a temporary view created using DataFrames.
DataFrame results = sqlContext.sql("SELECT name FROM people");
// The results of SQL queries are DataFrames and support all the normal RDD operations.
@@ -824,8 +824,8 @@ schema = StructType(fields)
# Apply the schema to the RDD.
schemaPeople = sqlContext.createDataFrame(people, schema)
-# Register the DataFrame as a table.
-schemaPeople.registerTempTable("people")
+# Creates a temporary view using the DataFrame
+schemaPeople.createOrReplaceTempView("people")
# SQL can be run over DataFrames that have been registered as a table.
results = sqlContext.sql("SELECT name FROM people")
@@ -844,7 +844,7 @@ for name in names.collect():
# Data Sources
Spark SQL supports operating on a variety of data sources through the `DataFrame` interface.
-A DataFrame can be operated on as normal RDDs and can also be registered as a temporary table.
+A DataFrame can be operated on as normal RDDs and can also be used to create a temporary view.
Registering a DataFrame as a table allows you to run SQL queries over its data. This section
describes the general methods for loading and saving data using the Spark Data Sources and then
goes into specific options that are available for the built-in data sources.
@@ -1072,8 +1072,8 @@ people.write.parquet("people.parquet")
// The result of loading a Parquet file is also a DataFrame.
val parquetFile = sqlContext.read.parquet("people.parquet")
-//Parquet files can also be registered as tables and then used in SQL statements.
-parquetFile.registerTempTable("parquetFile")
+// Parquet files can also be used to create a temporary view and then used in SQL statements.
+parquetFile.createOrReplaceTempView("parquetFile")
val teenagers = sqlContext.sql("SELECT name FROM parquetFile WHERE age >= 13 AND age <= 19")
teenagers.map(t => "Name: " + t(0)).collect().foreach(println)
{% endhighlight %}
@@ -1094,8 +1094,8 @@ schemaPeople.write().parquet("people.parquet");
// The result of loading a parquet file is also a DataFrame.
DataFrame parquetFile = sqlContext.read().parquet("people.parquet");
-// Parquet files can also be registered as tables and then used in SQL statements.
-parquetFile.registerTempTable("parquetFile");
+// Parquet files can also be used to create a temporary view and then used in SQL statements.
+parquetFile.createOrReplaceTempView("parquetFile");
DataFrame teenagers = sqlContext.sql("SELECT name FROM parquetFile WHERE age >= 13 AND age <= 19");
List<String> teenagerNames = teenagers.javaRDD().map(new Function<Row, String>() {
public String call(Row row) {
@@ -1120,8 +1120,8 @@ schemaPeople.write.parquet("people.parquet")
# The result of loading a parquet file is also a DataFrame.
parquetFile = sqlContext.read.parquet("people.parquet")
-# Parquet files can also be registered as tables and then used in SQL statements.
-parquetFile.registerTempTable("parquetFile");
+# Parquet files can also be used to create a temporary view and then used in SQL statements.
+parquetFile.createOrReplaceTempView("parquetFile");
teenagers = sqlContext.sql("SELECT name FROM parquetFile WHERE age >= 13 AND age <= 19")
teenNames = teenagers.map(lambda p: "Name: " + p.name)
for teenName in teenNames.collect():
@@ -1144,7 +1144,7 @@ write.parquet(schemaPeople, "people.parquet")
# The result of loading a parquet file is also a DataFrame.
parquetFile <- read.parquet(sqlContext, "people.parquet")
-# Parquet files can also be registered as tables and then used in SQL statements.
+# Parquet files can also be used to create a temporary view and then used in SQL statements.
registerTempTable(parquetFile, "parquetFile")
teenagers <- sql(sqlContext, "SELECT name FROM parquetFile WHERE age >= 13 AND age <= 19")
schema <- structType(structField("name", "string"))
@@ -1506,8 +1506,8 @@ people.printSchema()
// |-- age: long (nullable = true)
// |-- name: string (nullable = true)
-// Register this DataFrame as a table.
-people.registerTempTable("people")
+// Creates a temporary view using the DataFrame
+people.createOrReplaceTempView("people")
// SQL statements can be run by using the sql methods provided by sqlContext.
val teenagers = sqlContext.sql("SELECT name FROM people WHERE age >= 13 AND age <= 19")
@@ -1544,8 +1544,8 @@ people.printSchema();
// |-- age: long (nullable = true)
// |-- name: string (nullable = true)
-// Register this DataFrame as a table.
-people.registerTempTable("people");
+// Creates a temporary view using the DataFrame
+people.createOrReplaceTempView("people");
// SQL statements can be run by using the sql methods provided by sqlContext.
DataFrame teenagers = sqlContext.sql("SELECT name FROM people WHERE age >= 13 AND age <= 19");
@@ -1582,8 +1582,8 @@ people.printSchema()
# |-- age: long (nullable = true)
# |-- name: string (nullable = true)
-# Register this DataFrame as a table.
-people.registerTempTable("people")
+# Creates a temporary view using the DataFrame.
+people.createOrReplaceTempView("people")
# SQL statements can be run by using the sql methods provided by `sqlContext`.
teenagers = sqlContext.sql("SELECT name FROM people WHERE age >= 13 AND age <= 19")
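
The guide changes above all follow the same migration: registerTempTable(name) becomes createOrReplaceTempView(name), and the result is a session-scoped view rather than a "table". A minimal Scala sketch of the new pattern (not part of this commit; the app name, local master, and sample data are assumptions):

import org.apache.spark.sql.SparkSession

val spark = SparkSession.builder().appName("TempViewSketch").master("local[*]").getOrCreate()
import spark.implicits._

// Build a small DataFrame and expose it to SQL as a temporary view.
val people = Seq(("Justin", 19), ("Michael", 29)).toDF("name", "age")
people.createOrReplaceTempView("people")   // formerly people.registerTempTable("people")

// The view is queryable with ordinary SQL for the lifetime of the session.
spark.sql("SELECT name FROM people WHERE age >= 13 AND age <= 19").show()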
diff --git a/docs/streaming-programming-guide.md b/docs/streaming-programming-guide.md
index 9ca9b1844c..4d0a1122dc 100644
--- a/docs/streaming-programming-guide.md
+++ b/docs/streaming-programming-guide.md
@@ -1553,8 +1553,8 @@ words.foreachRDD { rdd =>
// Convert RDD[String] to DataFrame
val wordsDataFrame = rdd.toDF("word")
- // Register as table
- wordsDataFrame.registerTempTable("words")
+ // Create a temporary view
+ wordsDataFrame.createOrReplaceTempView("words")
// Do word count on DataFrame using SQL and print it
val wordCountsDataFrame =
@@ -1606,8 +1606,8 @@ words.foreachRDD(
});
DataFrame wordsDataFrame = sqlContext.createDataFrame(rowRDD, JavaRow.class);
- // Register as table
- wordsDataFrame.registerTempTable("words");
+ // Creates a temporary view using the DataFrame
+ wordsDataFrame.createOrReplaceTempView("words");
// Do word count on table using SQL and print it
DataFrame wordCountsDataFrame =
@@ -1646,8 +1646,8 @@ def process(time, rdd):
rowRdd = rdd.map(lambda w: Row(word=w))
wordsDataFrame = sqlContext.createDataFrame(rowRdd)
- # Register as table
- wordsDataFrame.registerTempTable("words")
+ # Creates a temporary view using the DataFrame
+ wordsDataFrame.createOrReplaceTempView("words")
# Do word count on table using SQL and print it
wordCountsDataFrame = sqlContext.sql("select word, count(*) as total from words group by word")
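
The streaming guide changes use the same call inside foreachRDD. A hypothetical per-batch handler in Scala sketching that pattern (the name processBatch and the column name "word" are assumptions, not part of this commit):

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession

def processBatch(rdd: RDD[String]): Unit = {
  // Reuse (or lazily create) the singleton SparkSession for this batch's RDD.
  val spark = SparkSession.builder().config(rdd.sparkContext.getConf).getOrCreate()
  import spark.implicits._

  // Convert RDD[String] to a DataFrame and publish it as a temporary view.
  val wordsDataFrame = rdd.toDF("word")
  wordsDataFrame.createOrReplaceTempView("words")   // was registerTempTable("words")

  // Word count over the view with SQL, printed once per batch.
  spark.sql("SELECT word, count(*) AS total FROM words GROUP BY word").show()
}

In a streaming job this handler would be wired up as words.foreachRDD(processBatch _).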
diff --git a/examples/src/main/java/org/apache/spark/examples/sql/JavaSparkSQL.java b/examples/src/main/java/org/apache/spark/examples/sql/JavaSparkSQL.java
index cf0167f13a..55e591d0ce 100644
--- a/examples/src/main/java/org/apache/spark/examples/sql/JavaSparkSQL.java
+++ b/examples/src/main/java/org/apache/spark/examples/sql/JavaSparkSQL.java
@@ -73,11 +73,11 @@ public class JavaSparkSQL {
}
});
- // Apply a schema to an RDD of Java Beans and register it as a table.
+ // Apply a schema to an RDD of Java Beans and create a temporary view
Dataset<Row> schemaPeople = spark.createDataFrame(people, Person.class);
schemaPeople.createOrReplaceTempView("people");
- // SQL can be run over RDDs that have been registered as tables.
+ // SQL can be run over RDDs that back a temporary view.
Dataset<Row> teenagers = spark.sql("SELECT name FROM people WHERE age >= 13 AND age <= 19");
// The results of SQL queries are DataFrames and support all the normal RDD operations.
@@ -101,7 +101,7 @@ public class JavaSparkSQL {
// The result of loading a parquet file is also a DataFrame.
Dataset<Row> parquetFile = spark.read().parquet("people.parquet");
- //Parquet files can also be registered as tables and then used in SQL statements.
+ // A temporary view can be created by using Parquet files and then used in SQL statements.
parquetFile.createOrReplaceTempView("parquetFile");
Dataset<Row> teenagers2 =
spark.sql("SELECT name FROM parquetFile WHERE age >= 13 AND age <= 19");
@@ -130,7 +130,7 @@ public class JavaSparkSQL {
// |-- age: IntegerType
// |-- name: StringType
- // Register this DataFrame as a table.
+ // Creates a temporary view using the DataFrame
peopleFromJsonFile.createOrReplaceTempView("people");
// SQL statements can be run by using the sql methods provided by `spark`
diff --git a/examples/src/main/java/org/apache/spark/examples/streaming/JavaSqlNetworkWordCount.java b/examples/src/main/java/org/apache/spark/examples/streaming/JavaSqlNetworkWordCount.java
index 5130522770..b8e9e125ba 100644
--- a/examples/src/main/java/org/apache/spark/examples/streaming/JavaSqlNetworkWordCount.java
+++ b/examples/src/main/java/org/apache/spark/examples/streaming/JavaSqlNetworkWordCount.java
@@ -94,7 +94,7 @@ public final class JavaSqlNetworkWordCount {
});
Dataset<Row> wordsDataFrame = spark.createDataFrame(rowRDD, JavaRecord.class);
- // Register as table
+ // Creates a temporary view using the DataFrame
wordsDataFrame.createOrReplaceTempView("words");
// Do word count on table using SQL and print it
diff --git a/examples/src/main/python/sql.py b/examples/src/main/python/sql.py
index 234024063f..ac7246938d 100644
--- a/examples/src/main/python/sql.py
+++ b/examples/src/main/python/sql.py
@@ -66,7 +66,7 @@ if __name__ == "__main__":
# |-- age: long (nullable = true)
# |-- name: string (nullable = true)
- # Register this DataFrame as a temporary table.
+ # Creates a temporary view using the DataFrame.
people.createOrReplaceTempView("people")
# SQL statements can be run by using the sql methods provided by `spark`
diff --git a/examples/src/main/python/streaming/sql_network_wordcount.py b/examples/src/main/python/streaming/sql_network_wordcount.py
index 25e821534e..398ac8d2d8 100644
--- a/examples/src/main/python/streaming/sql_network_wordcount.py
+++ b/examples/src/main/python/streaming/sql_network_wordcount.py
@@ -70,7 +70,7 @@ if __name__ == "__main__":
rowRdd = rdd.map(lambda w: Row(word=w))
wordsDataFrame = spark.createDataFrame(rowRdd)
- # Register as table
+ # Creates a temporary view using the DataFrame.
wordsDataFrame.createOrReplaceTempView("words")
# Do word count on table using SQL and print it
diff --git a/examples/src/main/scala/org/apache/spark/examples/sql/RDDRelation.scala b/examples/src/main/scala/org/apache/spark/examples/sql/RDDRelation.scala
index d1bda0ff84..1b019fbb51 100644
--- a/examples/src/main/scala/org/apache/spark/examples/sql/RDDRelation.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/sql/RDDRelation.scala
@@ -35,8 +35,8 @@ object RDDRelation {
import spark.implicits._
val df = spark.createDataFrame((1 to 100).map(i => Record(i, s"val_$i")))
- // Any RDD containing case classes can be registered as a table. The schema of the table is
- // automatically inferred using scala reflection.
+ // Any RDD containing case classes can be used to create a temporary view. The schema of the
+ // view is automatically inferred using scala reflection.
df.createOrReplaceTempView("records")
// Once tables have been registered, you can run SQL queries over them.
@@ -66,7 +66,7 @@ object RDDRelation {
// Queries can be run using the DSL on parquet files just like the original RDD.
parquetFile.where($"key" === 1).select($"value".as("a")).collect().foreach(println)
- // These files can also be registered as tables.
+ // These files can also be used to create a temporary view.
parquetFile.createOrReplaceTempView("parquetFile")
spark.sql("SELECT * FROM parquetFile").collect().foreach(println)
diff --git a/examples/src/main/scala/org/apache/spark/examples/sql/hive/HiveFromSpark.scala b/examples/src/main/scala/org/apache/spark/examples/sql/hive/HiveFromSpark.scala
index a15cf5ded0..7293cb51b2 100644
--- a/examples/src/main/scala/org/apache/spark/examples/sql/hive/HiveFromSpark.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/sql/hive/HiveFromSpark.scala
@@ -70,9 +70,9 @@ object HiveFromSpark {
case Row(key: Int, value: String) => s"Key: $key, Value: $value"
}
- // You can also register RDDs as temporary tables within a HiveContext.
+ // You can also use RDDs to create temporary views within a HiveContext.
val rdd = sc.parallelize((1 to 100).map(i => Record(i, s"val_$i")))
- rdd.toDF().registerTempTable("records")
+ rdd.toDF().createOrReplaceTempView("records")
// Queries can then join RDD data with data stored in Hive.
println("Result of SELECT *:")
diff --git a/examples/src/main/scala/org/apache/spark/examples/streaming/SqlNetworkWordCount.scala b/examples/src/main/scala/org/apache/spark/examples/streaming/SqlNetworkWordCount.scala
index 688c5b23c2..787bbec73b 100644
--- a/examples/src/main/scala/org/apache/spark/examples/streaming/SqlNetworkWordCount.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/streaming/SqlNetworkWordCount.scala
@@ -66,7 +66,7 @@ object SqlNetworkWordCount {
// Convert RDD[String] to RDD[case class] to DataFrame
val wordsDataFrame = rdd.map(w => Record(w)).toDF()
- // Register as table
+ // Creates a temporary view using the DataFrame
wordsDataFrame.createOrReplaceTempView("words")
// Do word count on table using SQL and print it
diff --git a/mllib/src/test/java/org/apache/spark/ml/JavaPipelineSuite.java b/mllib/src/test/java/org/apache/spark/ml/JavaPipelineSuite.java
index 46c26e8b92..a81a36d1b1 100644
--- a/mllib/src/test/java/org/apache/spark/ml/JavaPipelineSuite.java
+++ b/mllib/src/test/java/org/apache/spark/ml/JavaPipelineSuite.java
@@ -68,7 +68,7 @@ public class JavaPipelineSuite {
Pipeline pipeline = new Pipeline()
.setStages(new PipelineStage[]{scaler, lr});
PipelineModel model = pipeline.fit(dataset);
- model.transform(dataset).registerTempTable("prediction");
+ model.transform(dataset).createOrReplaceTempView("prediction");
Dataset<Row> predictions = spark.sql("SELECT label, probability, prediction FROM prediction");
predictions.collectAsList();
}
diff --git a/mllib/src/test/java/org/apache/spark/ml/classification/JavaLogisticRegressionSuite.java b/mllib/src/test/java/org/apache/spark/ml/classification/JavaLogisticRegressionSuite.java
index 98abca221c..b8da04c26a 100644
--- a/mllib/src/test/java/org/apache/spark/ml/classification/JavaLogisticRegressionSuite.java
+++ b/mllib/src/test/java/org/apache/spark/ml/classification/JavaLogisticRegressionSuite.java
@@ -54,7 +54,7 @@ public class JavaLogisticRegressionSuite implements Serializable {
List<LabeledPoint> points = generateLogisticInputAsList(1.0, 1.0, 100, 42);
datasetRDD = jsc.parallelize(points, 2);
dataset = spark.createDataFrame(datasetRDD, LabeledPoint.class);
- dataset.registerTempTable("dataset");
+ dataset.createOrReplaceTempView("dataset");
}
@After
@@ -68,7 +68,7 @@ public class JavaLogisticRegressionSuite implements Serializable {
LogisticRegression lr = new LogisticRegression();
Assert.assertEquals(lr.getLabelCol(), "label");
LogisticRegressionModel model = lr.fit(dataset);
- model.transform(dataset).registerTempTable("prediction");
+ model.transform(dataset).createOrReplaceTempView("prediction");
Dataset<Row> predictions = spark.sql("SELECT label, probability, prediction FROM prediction");
predictions.collectAsList();
// Check defaults
@@ -97,14 +97,14 @@ public class JavaLogisticRegressionSuite implements Serializable {
// Modify model params, and check that the params worked.
model.setThreshold(1.0);
- model.transform(dataset).registerTempTable("predAllZero");
+ model.transform(dataset).createOrReplaceTempView("predAllZero");
Dataset<Row> predAllZero = spark.sql("SELECT prediction, myProbability FROM predAllZero");
for (Row r : predAllZero.collectAsList()) {
Assert.assertEquals(0.0, r.getDouble(0), eps);
}
// Call transform with params, and check that the params worked.
model.transform(dataset, model.threshold().w(0.0), model.probabilityCol().w("myProb"))
- .registerTempTable("predNotAllZero");
+ .createOrReplaceTempView("predNotAllZero");
Dataset<Row> predNotAllZero = spark.sql("SELECT prediction, myProb FROM predNotAllZero");
boolean foundNonZero = false;
for (Row r : predNotAllZero.collectAsList()) {
@@ -130,7 +130,7 @@ public class JavaLogisticRegressionSuite implements Serializable {
LogisticRegressionModel model = lr.fit(dataset);
Assert.assertEquals(2, model.numClasses());
- model.transform(dataset).registerTempTable("transformed");
+ model.transform(dataset).createOrReplaceTempView("transformed");
Dataset<Row> trans1 = spark.sql("SELECT rawPrediction, probability FROM transformed");
for (Row row : trans1.collectAsList()) {
Vector raw = (Vector) row.get(0);
diff --git a/mllib/src/test/java/org/apache/spark/ml/regression/JavaLinearRegressionSuite.java b/mllib/src/test/java/org/apache/spark/ml/regression/JavaLinearRegressionSuite.java
index d3ef5f6fca..126aa6298f 100644
--- a/mllib/src/test/java/org/apache/spark/ml/regression/JavaLinearRegressionSuite.java
+++ b/mllib/src/test/java/org/apache/spark/ml/regression/JavaLinearRegressionSuite.java
@@ -50,7 +50,7 @@ public class JavaLinearRegressionSuite implements Serializable {
List<LabeledPoint> points = generateLogisticInputAsList(1.0, 1.0, 100, 42);
datasetRDD = jsc.parallelize(points, 2);
dataset = spark.createDataFrame(datasetRDD, LabeledPoint.class);
- dataset.registerTempTable("dataset");
+ dataset.createOrReplaceTempView("dataset");
}
@After
@@ -65,7 +65,7 @@ public class JavaLinearRegressionSuite implements Serializable {
assertEquals("label", lr.getLabelCol());
assertEquals("auto", lr.getSolver());
LinearRegressionModel model = lr.fit(dataset);
- model.transform(dataset).registerTempTable("prediction");
+ model.transform(dataset).createOrReplaceTempView("prediction");
Dataset<Row> predictions = spark.sql("SELECT label, prediction FROM prediction");
predictions.collect();
// Check defaults
diff --git a/python/pyspark/sql/context.py b/python/pyspark/sql/context.py
index ca111ae9bb..e8e60c6412 100644
--- a/python/pyspark/sql/context.py
+++ b/python/pyspark/sql/context.py
@@ -57,7 +57,7 @@ class SQLContext(object):
... b=True, list=[1, 2, 3], dict={"s": 0}, row=Row(a=1),
... time=datetime(2014, 8, 1, 14, 1, 5))])
>>> df = allTypes.toDF()
- >>> df.registerTempTable("allTypes")
+ >>> df.createOrReplaceTempView("allTypes")
>>> sqlContext.sql('select i+1, d+1, not b, list[1], dict["s"], time, row.a '
... 'from allTypes where b and i > 0').collect()
[Row((i + CAST(1 AS BIGINT))=2, (d + CAST(1 AS DOUBLE))=2.0, (NOT b)=False, list[1]=2, \
@@ -106,7 +106,7 @@ class SQLContext(object):
def newSession(self):
"""
Returns a new SQLContext as new session, that has separate SQLConf,
- registered temporary tables and UDFs, but shared SparkContext and
+ registered temporary views and UDFs, but shared SparkContext and
table cache.
"""
return self.__class__(self._sc, self.sparkSession.newSession())
diff --git a/python/pyspark/sql/readwriter.py b/python/pyspark/sql/readwriter.py
index c98aef1a0e..8e6bce9001 100644
--- a/python/pyspark/sql/readwriter.py
+++ b/python/pyspark/sql/readwriter.py
@@ -266,7 +266,7 @@ class DataFrameReader(object):
:param tableName: string, name of the table.
>>> df = spark.read.parquet('python/test_support/sql/parquet_partitioned')
- >>> df.registerTempTable('tmpTable')
+ >>> df.createOrReplaceTempView('tmpTable')
>>> spark.read.table('tmpTable').dtypes
[('name', 'string'), ('year', 'int'), ('month', 'int'), ('day', 'int')]
"""
diff --git a/python/pyspark/sql/session.py b/python/pyspark/sql/session.py
index 0781b442cb..257a239c8d 100644
--- a/python/pyspark/sql/session.py
+++ b/python/pyspark/sql/session.py
@@ -186,7 +186,7 @@ class SparkSession(object):
def newSession(self):
"""
Returns a new SparkSession as new session, that has separate SQLConf,
- registered temporary tables and UDFs, but shared SparkContext and
+ registered temporary views and UDFs, but shared SparkContext and
table cache.
"""
return self.__class__(self._sc, self._jsparkSession.newSession())
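
The docstring above says a new session keeps its own temporary views while sharing the SparkContext. A Scala sketch of that isolation (view and app names are assumptions; assumes the Spark 2.0 catalog API):

import org.apache.spark.sql.SparkSession

val spark = SparkSession.builder().appName("SessionIsolationSketch").master("local[*]").getOrCreate()
import spark.implicits._

Seq((1, "a"), (2, "b")).toDF("id", "tag").createOrReplaceTempView("t")

// A sibling session shares the SparkContext but starts with no temporary views.
val other = spark.newSession()
println(spark.catalog.listTables().collect().map(_.name).mkString(", "))  // contains "t"
println(other.catalog.listTables().collect().map(_.name).mkString(", "))  // empty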
diff --git a/python/pyspark/sql/tests.py b/python/pyspark/sql/tests.py
index 0977c43a39..e86f44281d 100644
--- a/python/pyspark/sql/tests.py
+++ b/python/pyspark/sql/tests.py
@@ -294,7 +294,8 @@ class SQLTests(ReusedPySparkTestCase):
def test_udf2(self):
self.spark.catalog.registerFunction("strlen", lambda string: len(string), IntegerType())
- self.spark.createDataFrame(self.sc.parallelize([Row(a="test")])).registerTempTable("test")
+ self.spark.createDataFrame(self.sc.parallelize([Row(a="test")]))\
+ .createOrReplaceTempView("test")
[res] = self.spark.sql("SELECT strlen(a) FROM test WHERE strlen(a) > 1").collect()
self.assertEqual(4, res[0])
@@ -320,7 +321,7 @@ class SQLTests(ReusedPySparkTestCase):
def test_udf_with_array_type(self):
d = [Row(l=list(range(3)), d={"key": list(range(5))})]
rdd = self.sc.parallelize(d)
- self.spark.createDataFrame(rdd).registerTempTable("test")
+ self.spark.createDataFrame(rdd).createOrReplaceTempView("test")
self.spark.catalog.registerFunction("copylist", lambda l: list(l), ArrayType(IntegerType()))
self.spark.catalog.registerFunction("maplen", lambda d: len(d), IntegerType())
[(l1, l2)] = self.spark.sql("select copylist(l), maplen(d) from test").collect()
@@ -360,7 +361,7 @@ class SQLTests(ReusedPySparkTestCase):
self.assertTrue(df.is_cached)
self.assertEqual(2, df.count())
- df.registerTempTable("temp")
+ df.createOrReplaceTempView("temp")
df = self.spark.sql("select foo from temp")
df.count()
df.collect()
@@ -420,7 +421,7 @@ class SQLTests(ReusedPySparkTestCase):
df = self.spark.createDataFrame(rdd)
self.assertEqual([], df.rdd.map(lambda r: r.l).first())
self.assertEqual([None, ""], df.rdd.map(lambda r: r.s).collect())
- df.registerTempTable("test")
+ df.createOrReplaceTempView("test")
result = self.spark.sql("SELECT l[0].a from test where d['key'].d = '2'")
self.assertEqual(1, result.head()[0])
@@ -428,7 +429,7 @@ class SQLTests(ReusedPySparkTestCase):
self.assertEqual(df.schema, df2.schema)
self.assertEqual({}, df2.rdd.map(lambda r: r.d).first())
self.assertEqual([None, ""], df2.rdd.map(lambda r: r.s).collect())
- df2.registerTempTable("test2")
+ df2.createOrReplaceTempView("test2")
result = self.spark.sql("SELECT l[0].a from test2 where d['key'].d = '2'")
self.assertEqual(1, result.head()[0])
@@ -487,7 +488,7 @@ class SQLTests(ReusedPySparkTestCase):
datetime(2010, 1, 1, 1, 1, 1), 1, 2, [1, 2, 3], None)
self.assertEqual(r, results.first())
- df.registerTempTable("table2")
+ df.createOrReplaceTempView("table2")
r = self.spark.sql("SELECT byte1 - 1 AS byte1, byte2 + 1 AS byte2, " +
"short1 + 1 AS short1, short2 - 1 AS short2, int1 - 1 AS int1, " +
"float1 + 1.5 as float1 FROM table2").first()
@@ -515,7 +516,7 @@ class SQLTests(ReusedPySparkTestCase):
row = Row(l=[Row(a=1, b='s')], d={"key": Row(c=1.0, d="2")})
self.assertEqual(1, row.asDict()['l'][0].a)
df = self.sc.parallelize([row]).toDF()
- df.registerTempTable("test")
+ df.createOrReplaceTempView("test")
row = self.spark.sql("select l, d from test").head()
self.assertEqual(1, row.asDict()["l"][0].a)
self.assertEqual(1.0, row.asDict()['d']['key'].c)
@@ -556,7 +557,7 @@ class SQLTests(ReusedPySparkTestCase):
schema = df.schema
field = [f for f in schema.fields if f.name == "point"][0]
self.assertEqual(type(field.dataType), ExamplePointUDT)
- df.registerTempTable("labeled_point")
+ df.createOrReplaceTempView("labeled_point")
point = self.spark.sql("SELECT point FROM labeled_point").head().point
self.assertEqual(point, ExamplePoint(1.0, 2.0))
@@ -565,7 +566,7 @@ class SQLTests(ReusedPySparkTestCase):
schema = df.schema
field = [f for f in schema.fields if f.name == "point"][0]
self.assertEqual(type(field.dataType), PythonOnlyUDT)
- df.registerTempTable("labeled_point")
+ df.createOrReplaceTempView("labeled_point")
point = self.spark.sql("SELECT point FROM labeled_point").head().point
self.assertEqual(point, PythonOnlyPoint(1.0, 2.0))
@@ -1427,7 +1428,7 @@ class SQLTests(ReusedPySparkTestCase):
spark.sql("CREATE DATABASE some_db")
self.assertEquals(spark.catalog.listTables(), [])
self.assertEquals(spark.catalog.listTables("some_db"), [])
- spark.createDataFrame([(1, 1)]).registerTempTable("temp_tab")
+ spark.createDataFrame([(1, 1)]).createOrReplaceTempView("temp_tab")
spark.sql("CREATE TABLE tab1 (name STRING, age INT)")
spark.sql("CREATE TABLE some_db.tab2 (name STRING, age INT)")
tables = sorted(spark.catalog.listTables(), key=lambda t: t.name)
@@ -1554,8 +1555,8 @@ class SQLTests(ReusedPySparkTestCase):
def test_cache(self):
spark = self.spark
- spark.createDataFrame([(2, 2), (3, 3)]).registerTempTable("tab1")
- spark.createDataFrame([(2, 2), (3, 3)]).registerTempTable("tab2")
+ spark.createDataFrame([(2, 2), (3, 3)]).createOrReplaceTempView("tab1")
+ spark.createDataFrame([(2, 2), (3, 3)]).createOrReplaceTempView("tab2")
self.assertFalse(spark.catalog.isCached("tab1"))
self.assertFalse(spark.catalog.isCached("tab2"))
spark.catalog.cacheTable("tab1")
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala b/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
index 44511885a7..a3e2b49556 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
@@ -376,7 +376,7 @@ class SQLContext private[sql](
* // |-- name: string (nullable = false)
* // |-- age: integer (nullable = true)
*
- * dataFrame.registerTempTable("people")
+ * dataFrame.createOrReplaceTempView("people")
* sqlContext.sql("select name from people").collect.foreach(println)
* }}}
*
diff --git a/sql/core/src/test/java/test/org/apache/spark/sql/JavaApplySchemaSuite.java b/sql/core/src/test/java/test/org/apache/spark/sql/JavaApplySchemaSuite.java
index f2ae40e644..573d0e3594 100644
--- a/sql/core/src/test/java/test/org/apache/spark/sql/JavaApplySchemaSuite.java
+++ b/sql/core/src/test/java/test/org/apache/spark/sql/JavaApplySchemaSuite.java
@@ -108,7 +108,7 @@ public class JavaApplySchemaSuite implements Serializable {
StructType schema = DataTypes.createStructType(fields);
Dataset<Row> df = spark.createDataFrame(rowRDD, schema);
- df.registerTempTable("people");
+ df.createOrReplaceTempView("people");
List<Row> actual = spark.sql("SELECT * FROM people").collectAsList();
List<Row> expected = new ArrayList<>(2);
@@ -144,7 +144,7 @@ public class JavaApplySchemaSuite implements Serializable {
StructType schema = DataTypes.createStructType(fields);
Dataset<Row> df = spark.createDataFrame(rowRDD, schema);
- df.registerTempTable("people");
+ df.createOrReplaceTempView("people");
List<String> actual = spark.sql("SELECT * FROM people").toJavaRDD()
.map(new Function<Row, String>() {
@Override
@@ -202,14 +202,14 @@ public class JavaApplySchemaSuite implements Serializable {
Dataset<Row> df1 = spark.read().json(jsonRDD);
StructType actualSchema1 = df1.schema();
Assert.assertEquals(expectedSchema, actualSchema1);
- df1.registerTempTable("jsonTable1");
+ df1.createOrReplaceTempView("jsonTable1");
List<Row> actual1 = spark.sql("select * from jsonTable1").collectAsList();
Assert.assertEquals(expectedResult, actual1);
Dataset<Row> df2 = spark.read().schema(expectedSchema).json(jsonRDD);
StructType actualSchema2 = df2.schema();
Assert.assertEquals(expectedSchema, actualSchema2);
- df2.registerTempTable("jsonTable2");
+ df2.createOrReplaceTempView("jsonTable2");
List<Row> actual2 = spark.sql("select * from jsonTable2").collectAsList();
Assert.assertEquals(expectedResult, actual2);
}
diff --git a/sql/core/src/test/java/test/org/apache/spark/sql/sources/JavaSaveLoadSuite.java b/sql/core/src/test/java/test/org/apache/spark/sql/sources/JavaSaveLoadSuite.java
index d0435e4d43..9840bc46f9 100644
--- a/sql/core/src/test/java/test/org/apache/spark/sql/sources/JavaSaveLoadSuite.java
+++ b/sql/core/src/test/java/test/org/apache/spark/sql/sources/JavaSaveLoadSuite.java
@@ -72,7 +72,7 @@ public class JavaSaveLoadSuite {
}
JavaRDD<String> rdd = jsc.parallelize(jsonObjects);
df = spark.read().json(rdd);
- df.registerTempTable("jsonTable");
+ df.createOrReplaceTempView("jsonTable");
}
@After
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/CachedTableSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/CachedTableSuite.scala
index 6d8de80a11..1c96bdc05c 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/CachedTableSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/CachedTableSuite.scala
@@ -71,7 +71,7 @@ class CachedTableSuite extends QueryTest with SQLTestUtils with SharedSQLContext
}
test("cache temp table") {
- testData.select('key).registerTempTable("tempTable")
+ testData.select('key).createOrReplaceTempView("tempTable")
assertCached(sql("SELECT COUNT(*) FROM tempTable"), 0)
spark.catalog.cacheTable("tempTable")
assertCached(sql("SELECT COUNT(*) FROM tempTable"))
@@ -99,8 +99,8 @@ class CachedTableSuite extends QueryTest with SQLTestUtils with SharedSQLContext
}
test("uncaching temp table") {
- testData.select('key).registerTempTable("tempTable1")
- testData.select('key).registerTempTable("tempTable2")
+ testData.select('key).createOrReplaceTempView("tempTable1")
+ testData.select('key).createOrReplaceTempView("tempTable2")
spark.catalog.cacheTable("tempTable1")
assertCached(sql("SELECT COUNT(*) FROM tempTable1"))
@@ -116,7 +116,7 @@ class CachedTableSuite extends QueryTest with SQLTestUtils with SharedSQLContext
test("too big for memory") {
val data = "*" * 1000
sparkContext.parallelize(1 to 200000, 1).map(_ => BigData(data)).toDF()
- .registerTempTable("bigData")
+ .createOrReplaceTempView("bigData")
spark.table("bigData").persist(StorageLevel.MEMORY_AND_DISK)
assert(spark.table("bigData").count() === 200000L)
spark.table("bigData").unpersist(blocking = true)
@@ -191,7 +191,7 @@ class CachedTableSuite extends QueryTest with SQLTestUtils with SharedSQLContext
}
test("SELECT star from cached table") {
- sql("SELECT * FROM testData").registerTempTable("selectStar")
+ sql("SELECT * FROM testData").createOrReplaceTempView("selectStar")
spark.catalog.cacheTable("selectStar")
checkAnswer(
sql("SELECT * FROM selectStar WHERE key = 1"),
@@ -286,15 +286,15 @@ class CachedTableSuite extends QueryTest with SQLTestUtils with SharedSQLContext
}
test("Drops temporary table") {
- testData.select('key).registerTempTable("t1")
+ testData.select('key).createOrReplaceTempView("t1")
spark.table("t1")
spark.catalog.dropTempView("t1")
intercept[AnalysisException](spark.table("t1"))
}
test("Drops cached temporary table") {
- testData.select('key).registerTempTable("t1")
- testData.select('key).registerTempTable("t2")
+ testData.select('key).createOrReplaceTempView("t1")
+ testData.select('key).createOrReplaceTempView("t2")
spark.catalog.cacheTable("t1")
assert(spark.catalog.isCached("t1"))
@@ -306,15 +306,15 @@ class CachedTableSuite extends QueryTest with SQLTestUtils with SharedSQLContext
}
test("Clear all cache") {
- sql("SELECT key FROM testData LIMIT 10").registerTempTable("t1")
- sql("SELECT key FROM testData LIMIT 5").registerTempTable("t2")
+ sql("SELECT key FROM testData LIMIT 10").createOrReplaceTempView("t1")
+ sql("SELECT key FROM testData LIMIT 5").createOrReplaceTempView("t2")
spark.catalog.cacheTable("t1")
spark.catalog.cacheTable("t2")
spark.catalog.clearCache()
assert(spark.cacheManager.isEmpty)
- sql("SELECT key FROM testData LIMIT 10").registerTempTable("t1")
- sql("SELECT key FROM testData LIMIT 5").registerTempTable("t2")
+ sql("SELECT key FROM testData LIMIT 10").createOrReplaceTempView("t1")
+ sql("SELECT key FROM testData LIMIT 5").createOrReplaceTempView("t2")
spark.catalog.cacheTable("t1")
spark.catalog.cacheTable("t2")
sql("Clear CACHE")
@@ -322,8 +322,8 @@ class CachedTableSuite extends QueryTest with SQLTestUtils with SharedSQLContext
}
test("Clear accumulators when uncacheTable to prevent memory leaking") {
- sql("SELECT key FROM testData LIMIT 10").registerTempTable("t1")
- sql("SELECT key FROM testData LIMIT 5").registerTempTable("t2")
+ sql("SELECT key FROM testData LIMIT 10").createOrReplaceTempView("t1")
+ sql("SELECT key FROM testData LIMIT 5").createOrReplaceTempView("t2")
spark.catalog.cacheTable("t1")
spark.catalog.cacheTable("t2")
@@ -350,7 +350,7 @@ class CachedTableSuite extends QueryTest with SQLTestUtils with SharedSQLContext
test("SPARK-10327 Cache Table is not working while subquery has alias in its project list") {
sparkContext.parallelize((1, 1) :: (2, 2) :: Nil)
- .toDF("key", "value").selectExpr("key", "value", "key+1").registerTempTable("abc")
+ .toDF("key", "value").selectExpr("key", "value", "key+1").createOrReplaceTempView("abc")
spark.catalog.cacheTable("abc")
val sparkPlan = sql(
@@ -371,9 +371,9 @@ class CachedTableSuite extends QueryTest with SQLTestUtils with SharedSQLContext
test("A cached table preserves the partitioning and ordering of its cached SparkPlan") {
val table3x = testData.union(testData).union(testData)
- table3x.registerTempTable("testData3x")
+ table3x.createOrReplaceTempView("testData3x")
- sql("SELECT key, value FROM testData3x ORDER BY key").registerTempTable("orderedTable")
+ sql("SELECT key, value FROM testData3x ORDER BY key").createOrReplaceTempView("orderedTable")
spark.catalog.cacheTable("orderedTable")
assertCached(spark.table("orderedTable"))
// Should not have an exchange as the query is already sorted on the group by key.
@@ -388,8 +388,8 @@ class CachedTableSuite extends QueryTest with SQLTestUtils with SharedSQLContext
// different number of partitions.
for (numPartitions <- 1 until 10 by 4) {
withTempTable("t1", "t2") {
- testData.repartition(numPartitions, $"key").registerTempTable("t1")
- testData2.repartition(numPartitions, $"a").registerTempTable("t2")
+ testData.repartition(numPartitions, $"key").createOrReplaceTempView("t1")
+ testData2.repartition(numPartitions, $"a").createOrReplaceTempView("t2")
spark.catalog.cacheTable("t1")
spark.catalog.cacheTable("t2")
@@ -410,8 +410,8 @@ class CachedTableSuite extends QueryTest with SQLTestUtils with SharedSQLContext
// Distribute the tables into non-matching number of partitions. Need to shuffle one side.
withTempTable("t1", "t2") {
- testData.repartition(6, $"key").registerTempTable("t1")
- testData2.repartition(3, $"a").registerTempTable("t2")
+ testData.repartition(6, $"key").createOrReplaceTempView("t1")
+ testData2.repartition(3, $"a").createOrReplaceTempView("t2")
spark.catalog.cacheTable("t1")
spark.catalog.cacheTable("t2")
@@ -427,8 +427,8 @@ class CachedTableSuite extends QueryTest with SQLTestUtils with SharedSQLContext
// One side of join is not partitioned in the desired way. Need to shuffle one side.
withTempTable("t1", "t2") {
- testData.repartition(6, $"value").registerTempTable("t1")
- testData2.repartition(6, $"a").registerTempTable("t2")
+ testData.repartition(6, $"value").createOrReplaceTempView("t1")
+ testData2.repartition(6, $"a").createOrReplaceTempView("t2")
spark.catalog.cacheTable("t1")
spark.catalog.cacheTable("t2")
@@ -443,8 +443,8 @@ class CachedTableSuite extends QueryTest with SQLTestUtils with SharedSQLContext
}
withTempTable("t1", "t2") {
- testData.repartition(6, $"value").registerTempTable("t1")
- testData2.repartition(12, $"a").registerTempTable("t2")
+ testData.repartition(6, $"value").createOrReplaceTempView("t1")
+ testData2.repartition(12, $"a").createOrReplaceTempView("t2")
spark.catalog.cacheTable("t1")
spark.catalog.cacheTable("t2")
@@ -462,8 +462,8 @@ class CachedTableSuite extends QueryTest with SQLTestUtils with SharedSQLContext
// the side that has already partitioned is smaller than the side that is not partitioned,
// we shuffle both side.
withTempTable("t1", "t2") {
- testData.repartition(6, $"value").registerTempTable("t1")
- testData2.repartition(3, $"a").registerTempTable("t2")
+ testData.repartition(6, $"value").createOrReplaceTempView("t1")
+ testData2.repartition(3, $"a").createOrReplaceTempView("t2")
spark.catalog.cacheTable("t1")
spark.catalog.cacheTable("t2")
@@ -479,7 +479,7 @@ class CachedTableSuite extends QueryTest with SQLTestUtils with SharedSQLContext
// repartition's column ordering is different from group by column ordering.
// But they use the same set of columns.
withTempTable("t1") {
- testData.repartition(6, $"value", $"key").registerTempTable("t1")
+ testData.repartition(6, $"value", $"key").createOrReplaceTempView("t1")
spark.catalog.cacheTable("t1")
val query = sql("SELECT value, key from t1 group by key, value")
@@ -496,9 +496,9 @@ class CachedTableSuite extends QueryTest with SQLTestUtils with SharedSQLContext
// See PartitioningSuite for more details.
withTempTable("t1", "t2") {
val df1 = testData
- df1.repartition(6, $"value", $"key").registerTempTable("t1")
+ df1.repartition(6, $"value", $"key").createOrReplaceTempView("t1")
val df2 = testData2.select($"a", $"b".cast("string"))
- df2.repartition(6, $"a", $"b").registerTempTable("t2")
+ df2.repartition(6, $"a", $"b").createOrReplaceTempView("t2")
spark.catalog.cacheTable("t1")
spark.catalog.cacheTable("t2")
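
Most of the test changes above cache and uncache views by name through the catalog. A compact Scala sketch of that flow (the view name "nums" is an assumption; not part of this commit):

import org.apache.spark.sql.SparkSession

val spark = SparkSession.builder().appName("CacheViewSketch").master("local[*]").getOrCreate()
import spark.implicits._

spark.range(0, 1000).toDF("id").createOrReplaceTempView("nums")

spark.catalog.cacheTable("nums")                  // cache the view's data by name
println(spark.catalog.isCached("nums"))           // true
spark.sql("SELECT COUNT(*) FROM nums").show()     // answered from the in-memory columnar cache
spark.catalog.uncacheTable("nums")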
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/ColumnExpressionSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/ColumnExpressionSuite.scala
index a5aecca13f..e89fa32b15 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/ColumnExpressionSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/ColumnExpressionSuite.scala
@@ -321,7 +321,7 @@ class ColumnExpressionSuite extends QueryTest with SharedSQLContext {
nanvl($"b", $"e"), nanvl($"e", $"f")),
Row(null, 3.0, 10.0, null, Double.PositiveInfinity, 3.0, 1.0)
)
- testData.registerTempTable("t")
+ testData.createOrReplaceTempView("t")
checkAnswer(
sql(
"select nanvl(a, 5), nanvl(b, 10), nanvl(10, b), nanvl(c, null), nanvl(d, 10), " +
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameTimeWindowingSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameTimeWindowingSuite.scala
index 4ee2006421..a15b4e1221 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameTimeWindowingSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameTimeWindowingSuite.scala
@@ -245,7 +245,7 @@ class DataFrameTimeWindowingSuite extends QueryTest with SharedSQLContext with B
Seq(
("2016-03-27 19:39:34", 1),
("2016-03-27 19:39:56", 2),
- ("2016-03-27 19:39:27", 4)).toDF("time", "value").registerTempTable(tableName)
+ ("2016-03-27 19:39:27", 4)).toDF("time", "value").createOrReplaceTempView(tableName)
try {
f(tableName)
} finally {
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameWindowSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameWindowSuite.scala
index 91095af0dd..07aad3c406 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameWindowSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameWindowSuite.scala
@@ -49,7 +49,7 @@ class DataFrameWindowSuite extends QueryTest with SharedSQLContext {
test("lead") {
val df = Seq((1, "1"), (2, "2"), (1, "1"), (2, "2")).toDF("key", "value")
- df.registerTempTable("window_table")
+ df.createOrReplaceTempView("window_table")
checkAnswer(
df.select(
@@ -59,7 +59,7 @@ class DataFrameWindowSuite extends QueryTest with SharedSQLContext {
test("lag") {
val df = Seq((1, "1"), (2, "2"), (1, "1"), (2, "2")).toDF("key", "value")
- df.registerTempTable("window_table")
+ df.createOrReplaceTempView("window_table")
checkAnswer(
df.select(
@@ -70,7 +70,7 @@ class DataFrameWindowSuite extends QueryTest with SharedSQLContext {
test("lead with default value") {
val df = Seq((1, "1"), (1, "1"), (2, "2"), (1, "1"),
(2, "2"), (1, "1"), (2, "2")).toDF("key", "value")
- df.registerTempTable("window_table")
+ df.createOrReplaceTempView("window_table")
checkAnswer(
df.select(
lead("value", 2, "n/a").over(Window.partitionBy("key").orderBy("value"))),
@@ -80,7 +80,7 @@ class DataFrameWindowSuite extends QueryTest with SharedSQLContext {
test("lag with default value") {
val df = Seq((1, "1"), (1, "1"), (2, "2"), (1, "1"),
(2, "2"), (1, "1"), (2, "2")).toDF("key", "value")
- df.registerTempTable("window_table")
+ df.createOrReplaceTempView("window_table")
checkAnswer(
df.select(
lag("value", 2, "n/a").over(Window.partitionBy($"key").orderBy($"value"))),
@@ -89,7 +89,7 @@ class DataFrameWindowSuite extends QueryTest with SharedSQLContext {
test("rank functions in unspecific window") {
val df = Seq((1, "1"), (2, "2"), (1, "2"), (2, "2")).toDF("key", "value")
- df.registerTempTable("window_table")
+ df.createOrReplaceTempView("window_table")
checkAnswer(
df.select(
$"key",
@@ -112,7 +112,7 @@ class DataFrameWindowSuite extends QueryTest with SharedSQLContext {
test("aggregation and rows between") {
val df = Seq((1, "1"), (2, "1"), (2, "2"), (1, "1"), (2, "2")).toDF("key", "value")
- df.registerTempTable("window_table")
+ df.createOrReplaceTempView("window_table")
checkAnswer(
df.select(
avg("key").over(Window.partitionBy($"value").orderBy($"key").rowsBetween(-1, 2))),
@@ -121,7 +121,7 @@ class DataFrameWindowSuite extends QueryTest with SharedSQLContext {
test("aggregation and range between") {
val df = Seq((1, "1"), (1, "1"), (3, "1"), (2, "2"), (2, "1"), (2, "2")).toDF("key", "value")
- df.registerTempTable("window_table")
+ df.createOrReplaceTempView("window_table")
checkAnswer(
df.select(
avg("key").over(Window.partitionBy($"value").orderBy($"key").rangeBetween(-1, 1))),
@@ -131,7 +131,7 @@ class DataFrameWindowSuite extends QueryTest with SharedSQLContext {
test("aggregation and rows between with unbounded") {
val df = Seq((1, "1"), (2, "2"), (2, "3"), (1, "3"), (3, "2"), (4, "3")).toDF("key", "value")
- df.registerTempTable("window_table")
+ df.createOrReplaceTempView("window_table")
checkAnswer(
df.select(
$"key",
@@ -146,7 +146,7 @@ class DataFrameWindowSuite extends QueryTest with SharedSQLContext {
test("aggregation and range between with unbounded") {
val df = Seq((5, "1"), (5, "2"), (4, "2"), (6, "2"), (3, "1"), (2, "2")).toDF("key", "value")
- df.registerTempTable("window_table")
+ df.createOrReplaceTempView("window_table")
checkAnswer(
df.select(
$"key",
@@ -357,7 +357,7 @@ class DataFrameWindowSuite extends QueryTest with SharedSQLContext {
test("aggregation and rows between with unbounded + predicate pushdown") {
val df = Seq((1, "1"), (2, "2"), (2, "3"), (1, "3"), (3, "2"), (4, "3")).toDF("key", "value")
- df.registerTempTable("window_table")
+ df.createOrReplaceTempView("window_table")
val selectList = Seq($"key", $"value",
last("key").over(
Window.partitionBy($"value").orderBy($"key").rowsBetween(0, Long.MaxValue)),
@@ -372,7 +372,7 @@ class DataFrameWindowSuite extends QueryTest with SharedSQLContext {
test("aggregation and range between with unbounded + predicate pushdown") {
val df = Seq((5, "1"), (5, "2"), (4, "2"), (6, "2"), (3, "1"), (2, "2")).toDF("key", "value")
- df.registerTempTable("window_table")
+ df.createOrReplaceTempView("window_table")
val selectList = Seq($"key", $"value",
last("value").over(
Window.partitionBy($"value").orderBy($"key").rangeBetween(-2, -1)).equalTo("2")
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/JoinSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/JoinSuite.scala
index da567db5ee..a6b83b3d07 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/JoinSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/JoinSuite.scala
@@ -344,8 +344,8 @@ class JoinSuite extends QueryTest with SharedSQLContext {
}
test("full outer join") {
- upperCaseData.where('N <= 4).registerTempTable("`left`")
- upperCaseData.where('N >= 3).registerTempTable("`right`")
+ upperCaseData.where('N <= 4).createOrReplaceTempView("`left`")
+ upperCaseData.where('N >= 3).createOrReplaceTempView("`right`")
val left = UnresolvedRelation(TableIdentifier("left"), None)
val right = UnresolvedRelation(TableIdentifier("right"), None)
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/ListTablesSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/ListTablesSuite.scala
index 1c6e6cc15d..65fe271b69 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/ListTablesSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/ListTablesSuite.scala
@@ -29,7 +29,7 @@ class ListTablesSuite extends QueryTest with BeforeAndAfter with SharedSQLContex
private lazy val df = (1 to 10).map(i => (i, s"str$i")).toDF("key", "value")
before {
- df.registerTempTable("listtablessuitetable")
+ df.createOrReplaceTempView("listtablessuitetable")
}
after {
@@ -74,7 +74,7 @@ class ListTablesSuite extends QueryTest with BeforeAndAfter with SharedSQLContex
case tableDF =>
assert(expectedSchema === tableDF.schema)
- tableDF.registerTempTable("tables")
+ tableDF.createOrReplaceTempView("tables")
checkAnswer(
sql(
"SELECT isTemporary, tableName from tables WHERE tableName = 'listtablessuitetable'"),
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/SQLContextSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/SQLContextSuite.scala
index 1d5fc570c6..38d7b6e25b 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/SQLContextSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/SQLContextSuite.scala
@@ -60,7 +60,7 @@ class SQLContextSuite extends SparkFunSuite with SharedSparkContext {
// temporary table should not be shared
val df = session1.range(10)
- df.registerTempTable("test1")
+ df.createOrReplaceTempView("test1")
assert(session1.tableNames().contains("test1"))
assert(!session2.tableNames().contains("test1"))
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
index b67e2bdeb3..010dea5b30 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
@@ -39,7 +39,8 @@ class SQLQuerySuite extends QueryTest with SharedSQLContext {
setupTestData()
test("having clause") {
- Seq(("one", 1), ("two", 2), ("three", 3), ("one", 5)).toDF("k", "v").registerTempTable("hav")
+ Seq(("one", 1), ("two", 2), ("three", 3), ("one", 5)).toDF("k", "v")
+ .createOrReplaceTempView("hav")
checkAnswer(
sql("SELECT k, sum(v) FROM hav GROUP BY k HAVING sum(v) > 2"),
Row("one", 6) :: Row("three", 3) :: Nil)
@@ -47,7 +48,7 @@ class SQLQuerySuite extends QueryTest with SharedSQLContext {
test("SPARK-8010: promote numeric to string") {
val df = Seq((1, 1)).toDF("key", "value")
- df.registerTempTable("src")
+ df.createOrReplaceTempView("src")
val queryCaseWhen = sql("select case when true then 1.0 else '1' end from src ")
val queryCoalesce = sql("select coalesce(null, 1, '1') from src ")
@@ -100,7 +101,7 @@ class SQLQuerySuite extends QueryTest with SharedSQLContext {
(83, 0, 38),
(26, 0, 79),
(43, 81, 24)
- ).toDF("a", "b", "c").registerTempTable("cachedData")
+ ).toDF("a", "b", "c").createOrReplaceTempView("cachedData")
spark.catalog.cacheTable("cachedData")
checkAnswer(
@@ -109,7 +110,7 @@ class SQLQuerySuite extends QueryTest with SharedSQLContext {
}
test("self join with aliases") {
- Seq(1, 2, 3).map(i => (i, i.toString)).toDF("int", "str").registerTempTable("df")
+ Seq(1, 2, 3).map(i => (i, i.toString)).toDF("int", "str").createOrReplaceTempView("df")
checkAnswer(
sql(
@@ -137,7 +138,7 @@ class SQLQuerySuite extends QueryTest with SharedSQLContext {
.toDF("int", "str")
.groupBy("str")
.agg($"str", count("str").as("strCount"))
- .registerTempTable("df")
+ .createOrReplaceTempView("df")
checkAnswer(
sql(
@@ -195,7 +196,7 @@ class SQLQuerySuite extends QueryTest with SharedSQLContext {
test("grouping on nested fields") {
spark.read.json(sparkContext.parallelize(
"""{"nested": {"attribute": 1}, "value": 2}""" :: Nil))
- .registerTempTable("rows")
+ .createOrReplaceTempView("rows")
checkAnswer(
sql(
@@ -214,7 +215,7 @@ class SQLQuerySuite extends QueryTest with SharedSQLContext {
spark.read.json(
sparkContext.parallelize(
Seq("{\"a\": \"1\"}}", "{\"a\": \"2\"}}", "{\"a\": \"3\"}}")))
- .registerTempTable("d")
+ .createOrReplaceTempView("d")
checkAnswer(
sql("select * from d where d.a in (1,2)"),
@@ -225,7 +226,7 @@ class SQLQuerySuite extends QueryTest with SharedSQLContext {
spark.read.json(
sparkContext.parallelize(
Seq("{\"a\": \"1\"}}", "{\"a\": \"2\"}}", "{\"a\": \"3\"}}", "")))
- .registerTempTable("d")
+ .createOrReplaceTempView("d")
checkAnswer(
sql("select count(1) from d"),
@@ -261,7 +262,7 @@ class SQLQuerySuite extends QueryTest with SharedSQLContext {
spark.table("testData")
.union(spark.table("testData"))
.union(spark.table("testData"))
- .registerTempTable("testData3x")
+ .createOrReplaceTempView("testData3x")
try {
// Just to group rows.
@@ -391,7 +392,7 @@ class SQLQuerySuite extends QueryTest with SharedSQLContext {
}
test("SPARK-3173 Timestamp support in the parser") {
- (0 to 3).map(i => Tuple1(new Timestamp(i))).toDF("time").registerTempTable("timestamps")
+ (0 to 3).map(i => Tuple1(new Timestamp(i))).toDF("time").createOrReplaceTempView("timestamps")
checkAnswer(sql(
"SELECT time FROM timestamps WHERE time='1969-12-31 16:00:00.0'"),
@@ -746,7 +747,7 @@ class SQLQuerySuite extends QueryTest with SharedSQLContext {
test("count of empty table") {
withTempTable("t") {
- Seq.empty[(Int, Int)].toDF("a", "b").registerTempTable("t")
+ Seq.empty[(Int, Int)].toDF("a", "b").createOrReplaceTempView("t")
checkAnswer(
sql("select count(a) from t"),
Row(0))
@@ -891,10 +892,10 @@ class SQLQuerySuite extends QueryTest with SharedSQLContext {
test("SPARK-3349 partitioning after limit") {
sql("SELECT DISTINCT n FROM lowerCaseData ORDER BY n DESC")
.limit(2)
- .registerTempTable("subset1")
+ .createOrReplaceTempView("subset1")
sql("SELECT DISTINCT n FROM lowerCaseData ORDER BY n ASC")
.limit(2)
- .registerTempTable("subset2")
+ .createOrReplaceTempView("subset2")
checkAnswer(
sql("SELECT * FROM lowerCaseData INNER JOIN subset1 ON subset1.n = lowerCaseData.n"),
Row(3, "c", 3) ::
@@ -1111,7 +1112,7 @@ class SQLQuerySuite extends QueryTest with SharedSQLContext {
}
val df1 = spark.createDataFrame(rowRDD1, schema1)
- df1.registerTempTable("applySchema1")
+ df1.createOrReplaceTempView("applySchema1")
checkAnswer(
sql("SELECT * FROM applySchema1"),
Row(1, "A1", true, null) ::
@@ -1141,7 +1142,7 @@ class SQLQuerySuite extends QueryTest with SharedSQLContext {
}
val df2 = spark.createDataFrame(rowRDD2, schema2)
- df2.registerTempTable("applySchema2")
+ df2.createOrReplaceTempView("applySchema2")
checkAnswer(
sql("SELECT * FROM applySchema2"),
Row(Row(1, true), Map("A1" -> null)) ::
@@ -1166,7 +1167,7 @@ class SQLQuerySuite extends QueryTest with SharedSQLContext {
}
val df3 = spark.createDataFrame(rowRDD3, schema2)
- df3.registerTempTable("applySchema3")
+ df3.createOrReplaceTempView("applySchema3")
checkAnswer(
sql("SELECT f1.f11, f2['D4'] FROM applySchema3"),
@@ -1214,7 +1215,7 @@ class SQLQuerySuite extends QueryTest with SharedSQLContext {
def validateMetadata(rdd: DataFrame): Unit = {
assert(rdd.schema("name").metadata.getString(docKey) == docValue)
}
- personWithMeta.registerTempTable("personWithMeta")
+ personWithMeta.createOrReplaceTempView("personWithMeta")
validateMetadata(personWithMeta.select($"name"))
validateMetadata(personWithMeta.select($"name"))
validateMetadata(personWithMeta.select($"id", $"name"))
@@ -1409,7 +1410,7 @@ class SQLQuerySuite extends QueryTest with SharedSQLContext {
test("SPARK-3483 Special chars in column names") {
val data = sparkContext.parallelize(
Seq("""{"key?number1": "value1", "key.number2": "value2"}"""))
- spark.read.json(data).registerTempTable("records")
+ spark.read.json(data).createOrReplaceTempView("records")
sql("SELECT `key?number1`, `key.number2` FROM records")
}
@@ -1451,12 +1452,12 @@ class SQLQuerySuite extends QueryTest with SharedSQLContext {
test("SPARK-4322 Grouping field with struct field as sub expression") {
spark.read.json(sparkContext.makeRDD("""{"a": {"b": [{"c": 1}]}}""" :: Nil))
- .registerTempTable("data")
+ .createOrReplaceTempView("data")
checkAnswer(sql("SELECT a.b[0].c FROM data GROUP BY a.b[0].c"), Row(1))
spark.catalog.dropTempView("data")
spark.read.json(
- sparkContext.makeRDD("""{"a": {"b": 1}}""" :: Nil)).registerTempTable("data")
+ sparkContext.makeRDD("""{"a": {"b": 1}}""" :: Nil)).createOrReplaceTempView("data")
checkAnswer(sql("SELECT a.b + 1 FROM data GROUP BY a.b + 1"), Row(2))
spark.catalog.dropTempView("data")
}
@@ -1478,10 +1479,10 @@ class SQLQuerySuite extends QueryTest with SharedSQLContext {
test("Supporting relational operator '<=>' in Spark SQL") {
val nullCheckData1 = TestData(1, "1") :: TestData(2, null) :: Nil
val rdd1 = sparkContext.parallelize((0 to 1).map(i => nullCheckData1(i)))
- rdd1.toDF().registerTempTable("nulldata1")
+ rdd1.toDF().createOrReplaceTempView("nulldata1")
val nullCheckData2 = TestData(1, "1") :: TestData(2, null) :: Nil
val rdd2 = sparkContext.parallelize((0 to 1).map(i => nullCheckData2(i)))
- rdd2.toDF().registerTempTable("nulldata2")
+ rdd2.toDF().createOrReplaceTempView("nulldata2")
checkAnswer(sql("SELECT nulldata1.key FROM nulldata1 join " +
"nulldata2 on nulldata1.value <=> nulldata2.value"),
(1 to 2).map(i => Row(i)))
@@ -1490,7 +1491,7 @@ class SQLQuerySuite extends QueryTest with SharedSQLContext {
test("Multi-column COUNT(DISTINCT ...)") {
val data = TestData(1, "val_1") :: TestData(2, "val_2") :: Nil
val rdd = sparkContext.parallelize((0 to 1).map(i => data(i)))
- rdd.toDF().registerTempTable("distinctData")
+ rdd.toDF().createOrReplaceTempView("distinctData")
checkAnswer(sql("SELECT COUNT(DISTINCT key,value) FROM distinctData"), Row(2))
}
@@ -1498,7 +1499,7 @@ class SQLQuerySuite extends QueryTest with SharedSQLContext {
withSQLConf(SQLConf.CASE_SENSITIVE.key -> "false") {
val data = TestData(1, "val_1") :: TestData(2, "val_2") :: Nil
val rdd = sparkContext.parallelize((0 to 1).map(i => data(i)))
- rdd.toDF().registerTempTable("testTable1")
+ rdd.toDF().createOrReplaceTempView("testTable1")
checkAnswer(sql("SELECT VALUE FROM TESTTABLE1 where KEY = 1"), Row("val_1"))
}
}
@@ -1506,7 +1507,7 @@ class SQLQuerySuite extends QueryTest with SharedSQLContext {
test("SPARK-6145: ORDER BY test for nested fields") {
spark.read.json(sparkContext.makeRDD(
"""{"a": {"b": 1, "a": {"a": 1}}, "c": [{"d": 1}]}""" :: Nil))
- .registerTempTable("nestedOrder")
+ .createOrReplaceTempView("nestedOrder")
checkAnswer(sql("SELECT 1 FROM nestedOrder ORDER BY a.b"), Row(1))
checkAnswer(sql("SELECT a.b FROM nestedOrder ORDER BY a.b"), Row(1))
@@ -1517,8 +1518,10 @@ class SQLQuerySuite extends QueryTest with SharedSQLContext {
}
test("SPARK-6145: special cases") {
- spark.read.json(sparkContext.makeRDD(
- """{"a": {"b": [1]}, "b": [{"a": 1}], "_c0": {"a": 1}}""" :: Nil)).registerTempTable("t")
+ spark.read
+ .json(sparkContext.makeRDD("""{"a": {"b": [1]}, "b": [{"a": 1}], "_c0": {"a": 1}}""" :: Nil))
+ .createOrReplaceTempView("t")
+
checkAnswer(sql("SELECT a.b[0] FROM t ORDER BY _c0.a"), Row(1))
checkAnswer(sql("SELECT b[0].a FROM t ORDER BY _c0.a"), Row(1))
}
@@ -1526,14 +1529,14 @@ class SQLQuerySuite extends QueryTest with SharedSQLContext {
test("SPARK-6898: complete support for special chars in column names") {
spark.read.json(sparkContext.makeRDD(
"""{"a": {"c.b": 1}, "b.$q": [{"a@!.q": 1}], "q.w": {"w.i&": [1]}}""" :: Nil))
- .registerTempTable("t")
+ .createOrReplaceTempView("t")
checkAnswer(sql("SELECT a.`c.b`, `b.$q`[0].`a@!.q`, `q.w`.`w.i&`[0] FROM t"), Row(1, 1, 1))
}
test("SPARK-6583 order by aggregated function") {
Seq("1" -> 3, "1" -> 4, "2" -> 7, "2" -> 8, "3" -> 5, "3" -> 6, "4" -> 1, "4" -> 2)
- .toDF("a", "b").registerTempTable("orderByData")
+ .toDF("a", "b").createOrReplaceTempView("orderByData")
checkAnswer(
sql(
@@ -1619,7 +1622,7 @@ class SQLQuerySuite extends QueryTest with SharedSQLContext {
(0, null, null, false),
(1, null, null, false),
(null, null, null, true)
- ).toDF("i", "b", "r1", "r2").registerTempTable("t")
+ ).toDF("i", "b", "r1", "r2").createOrReplaceTempView("t")
checkAnswer(sql("select i = b from t"), sql("select r1 from t"))
checkAnswer(sql("select i <=> b from t"), sql("select r2 from t"))
@@ -1629,14 +1632,14 @@ class SQLQuerySuite extends QueryTest with SharedSQLContext {
test("SPARK-7067: order by queries for complex ExtractValue chain") {
withTempTable("t") {
spark.read.json(sparkContext.makeRDD(
- """{"a": {"b": [{"c": 1}]}, "b": [{"d": 1}]}""" :: Nil)).registerTempTable("t")
+ """{"a": {"b": [{"c": 1}]}, "b": [{"d": 1}]}""" :: Nil)).createOrReplaceTempView("t")
checkAnswer(sql("SELECT a.b FROM t ORDER BY b[0].d"), Row(Seq(Row(1))))
}
}
test("SPARK-8782: ORDER BY NULL") {
withTempTable("t") {
- Seq((1, 2), (1, 2)).toDF("a", "b").registerTempTable("t")
+ Seq((1, 2), (1, 2)).toDF("a", "b").createOrReplaceTempView("t")
checkAnswer(sql("SELECT * FROM t ORDER BY NULL"), Seq(Row(1, 2), Row(1, 2)))
}
}
@@ -1645,7 +1648,7 @@ class SQLQuerySuite extends QueryTest with SharedSQLContext {
withTempTable("t") {
val df = Seq(1 -> "a").toDF("count", "sort")
checkAnswer(df.filter("count > 0"), Row(1, "a"))
- df.registerTempTable("t")
+ df.createOrReplaceTempView("t")
checkAnswer(sql("select count, sort from t"), Row(1, "a"))
}
}
@@ -1759,7 +1762,7 @@ class SQLQuerySuite extends QueryTest with SharedSQLContext {
withTempTable("1one") {
sparkContext.parallelize(1 to 10).map(i => (i, i.toString))
.toDF("num", "str")
- .registerTempTable("1one")
+ .createOrReplaceTempView("1one")
checkAnswer(sql("select count(num) from 1one"), Row(10))
}
}
@@ -1801,7 +1804,7 @@ class SQLQuerySuite extends QueryTest with SharedSQLContext {
test("SPARK-10130 type coercion for IF should have children resolved first") {
withTempTable("src") {
- Seq((1, 1), (-1, 1)).toDF("key", "value").registerTempTable("src")
+ Seq((1, 1), (-1, 1)).toDF("key", "value").createOrReplaceTempView("src")
checkAnswer(
sql("SELECT IF(a > 0, a, 0) FROM (SELECT key a FROM src) temp"), Seq(Row(1), Row(0)))
}
@@ -1809,7 +1812,7 @@ class SQLQuerySuite extends QueryTest with SharedSQLContext {
test("SPARK-10389: order by non-attribute grouping expression on Aggregate") {
withTempTable("src") {
- Seq((1, 1), (-1, 1)).toDF("key", "value").registerTempTable("src")
+ Seq((1, 1), (-1, 1)).toDF("key", "value").createOrReplaceTempView("src")
checkAnswer(sql("SELECT MAX(value) FROM src GROUP BY key + 1 ORDER BY key + 1"),
Seq(Row(1), Row(1)))
checkAnswer(sql("SELECT MAX(value) FROM src GROUP BY key + 1 ORDER BY (key + 1) * 2"),
@@ -1872,7 +1875,7 @@ class SQLQuerySuite extends QueryTest with SharedSQLContext {
test("SPARK-11032: resolve having correctly") {
withTempTable("src") {
- Seq(1 -> "a").toDF("i", "j").registerTempTable("src")
+ Seq(1 -> "a").toDF("i", "j").createOrReplaceTempView("src")
checkAnswer(
sql("SELECT MIN(t.i) FROM (SELECT * FROM src WHERE i > 0) t HAVING(COUNT(1) > 0)"),
Row(1))
@@ -1910,8 +1913,8 @@ class SQLQuerySuite extends QueryTest with SharedSQLContext {
Row(1, 1, 1, 1) :: Row(1, 2, 2, 1) :: Row(2, 1, 1, 2) :: Row(2, 2, 2, 2) ::
Row(3, 1, 1, 3) :: Row(3, 2, 2, 3) :: Nil)
- // Try with a registered table.
- sql("select struct(a, b) as record from testData2").registerTempTable("structTable")
+ // Try with a temporary view
+ sql("select struct(a, b) as record from testData2").createOrReplaceTempView("structTable")
checkAnswer(
sql("SELECT record.* FROM structTable"),
Row(1, 1) :: Row(1, 2) :: Row(2, 1) :: Row(2, 2) :: Row(3, 1) :: Row(3, 2) :: Nil)
@@ -1975,9 +1978,9 @@ class SQLQuerySuite extends QueryTest with SharedSQLContext {
nestedStructData.select($"record.r1.*"),
Row(1, 1) :: Row(1, 2) :: Row(2, 1) :: Row(2, 2) :: Row(3, 1) :: Row(3, 2) :: Nil)
- // Try with a registered table
+ // Try with a temporary view
withTempTable("nestedStructTable") {
- nestedStructData.registerTempTable("nestedStructTable")
+ nestedStructData.createOrReplaceTempView("nestedStructTable")
checkAnswer(
sql("SELECT record.* FROM nestedStructTable"),
nestedStructData.select($"record.*"))
@@ -2000,7 +2003,7 @@ class SQLQuerySuite extends QueryTest with SharedSQLContext {
| (SELECT struct(a, b) as `col$.a_`, struct(b, a) as `a.b.c.` FROM testData2) tmp
""".stripMargin)
withTempTable("specialCharacterTable") {
- specialCharacterPath.registerTempTable("specialCharacterTable")
+ specialCharacterPath.createOrReplaceTempView("specialCharacterTable")
checkAnswer(
specialCharacterPath.select($"`r&&b.c`.*"),
nestedStructData.select($"record.*"))
@@ -2024,7 +2027,7 @@ class SQLQuerySuite extends QueryTest with SharedSQLContext {
// Create a data set that contains a naming conflict
val nameConflict = sql("SELECT struct(a, b) as nameConflict, a as a FROM testData2")
withTempTable("nameConflict") {
- nameConflict.registerTempTable("nameConflict")
+ nameConflict.createOrReplaceTempView("nameConflict")
// Unqualified should resolve to table.
checkAnswer(sql("SELECT nameConflict.* FROM nameConflict"),
Row(Row(1, 1), 1) :: Row(Row(1, 2), 1) :: Row(Row(2, 1), 2) :: Row(Row(2, 2), 2) ::
@@ -2328,7 +2331,7 @@ class SQLQuerySuite extends QueryTest with SharedSQLContext {
test("SPARK-13056: Null in map value causes NPE") {
val df = Seq(1 -> Map("abc" -> "somestring", "cba" -> null)).toDF("key", "value")
withTempTable("maptest") {
- df.registerTempTable("maptest")
+ df.createOrReplaceTempView("maptest")
// local optimization will bypass codegen code, so we should keep the filter `key=1`
checkAnswer(sql("SELECT value['abc'] FROM maptest where key = 1"), Row("somestring"))
checkAnswer(sql("SELECT value['cba'] FROM maptest where key = 1"), Row(null))
@@ -2338,7 +2341,7 @@ class SQLQuerySuite extends QueryTest with SharedSQLContext {
test("hash function") {
val df = Seq(1 -> "a", 2 -> "b").toDF("i", "j")
withTempTable("tbl") {
- df.registerTempTable("tbl")
+ df.createOrReplaceTempView("tbl")
checkAnswer(
df.select(hash($"i", $"j")),
sql("SELECT hash(i, j) from tbl")
@@ -2390,8 +2393,8 @@ class SQLQuerySuite extends QueryTest with SharedSQLContext {
val df1 = Seq(("one", 1), ("two", 2), ("three", 3)).toDF("k", "v1")
val df2 = Seq(("one", 1), ("two", 22), ("one", 5)).toDF("k", "v2")
withTempTable("nt1", "nt2") {
- df1.registerTempTable("nt1")
- df2.registerTempTable("nt2")
+ df1.createOrReplaceTempView("nt1")
+ df2.createOrReplaceTempView("nt2")
checkAnswer(
sql("SELECT * FROM nt1 natural join nt2 where k = \"one\""),
Row("one", 1, 1) :: Row("one", 1, 5) :: Nil)
@@ -2418,9 +2421,9 @@ class SQLQuerySuite extends QueryTest with SharedSQLContext {
val df3 = Seq((null, "r1c2", "t3r1c3"),
("r2c1", "r2c2", "t3r2c3"), ("r3c1y", "r3c2", "t3r3c3")).toDF("c1", "c2", "c3")
withTempTable("t1", "t2", "t3") {
- df1.registerTempTable("t1")
- df2.registerTempTable("t2")
- df3.registerTempTable("t3")
+ df1.createOrReplaceTempView("t1")
+ df2.createOrReplaceTempView("t2")
+ df3.createOrReplaceTempView("t3")
// inner join with one using column
checkAnswer(
sql("SELECT * FROM t1 join t2 using (c1)"),
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/ScalaReflectionRelationSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/ScalaReflectionRelationSuite.scala
index 295f02f9a7..491bdb3ef9 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/ScalaReflectionRelationSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/ScalaReflectionRelationSuite.scala
@@ -78,7 +78,7 @@ class ScalaReflectionRelationSuite extends SparkFunSuite with SharedSQLContext {
test("query case class RDD") {
val data = ReflectData("a", 1, 1L, 1.toFloat, 1.toDouble, 1.toShort, 1.toByte, true,
new java.math.BigDecimal(1), Date.valueOf("1970-01-01"), new Timestamp(12345), Seq(1, 2, 3))
- Seq(data).toDF().registerTempTable("reflectData")
+ Seq(data).toDF().createOrReplaceTempView("reflectData")
assert(sql("SELECT * FROM reflectData").collect().head ===
Row("a", 1, 1L, 1.toFloat, 1.toDouble, 1.toShort, 1.toByte, true,
@@ -88,7 +88,7 @@ class ScalaReflectionRelationSuite extends SparkFunSuite with SharedSQLContext {
test("query case class RDD with nulls") {
val data = NullReflectData(null, null, null, null, null, null, null)
- Seq(data).toDF().registerTempTable("reflectNullData")
+ Seq(data).toDF().createOrReplaceTempView("reflectNullData")
assert(sql("SELECT * FROM reflectNullData").collect().head ===
Row.fromSeq(Seq.fill(7)(null)))
@@ -96,7 +96,7 @@ class ScalaReflectionRelationSuite extends SparkFunSuite with SharedSQLContext {
test("query case class RDD with Nones") {
val data = OptionalReflectData(None, None, None, None, None, None, None)
- Seq(data).toDF().registerTempTable("reflectOptionalData")
+ Seq(data).toDF().createOrReplaceTempView("reflectOptionalData")
assert(sql("SELECT * FROM reflectOptionalData").collect().head ===
Row.fromSeq(Seq.fill(7)(null)))
@@ -104,7 +104,7 @@ class ScalaReflectionRelationSuite extends SparkFunSuite with SharedSQLContext {
// Equality is broken for Arrays, so we test that separately.
test("query binary data") {
- Seq(ReflectBinary(Array[Byte](1))).toDF().registerTempTable("reflectBinary")
+ Seq(ReflectBinary(Array[Byte](1))).toDF().createOrReplaceTempView("reflectBinary")
val result = sql("SELECT data FROM reflectBinary")
.collect().head(0).asInstanceOf[Array[Byte]]
@@ -124,7 +124,7 @@ class ScalaReflectionRelationSuite extends SparkFunSuite with SharedSQLContext {
Map(10 -> Some(100L), 20 -> Some(200L), 30 -> None),
Nested(None, "abc")))
- Seq(data).toDF().registerTempTable("reflectComplexData")
+ Seq(data).toDF().createOrReplaceTempView("reflectComplexData")
assert(sql("SELECT * FROM reflectComplexData").collect().head ===
Row(
Seq(1, 2, 3),
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/SubquerySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/SubquerySuite.scala
index 17ac0c8c6e..4819692733 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/SubquerySuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/SubquerySuite.scala
@@ -49,9 +49,9 @@ class SubquerySuite extends QueryTest with SharedSQLContext {
protected override def beforeAll(): Unit = {
super.beforeAll()
- l.registerTempTable("l")
- r.registerTempTable("r")
- t.registerTempTable("t")
+ l.createOrReplaceTempView("l")
+ r.createOrReplaceTempView("r")
+ t.createOrReplaceTempView("t")
}
test("simple uncorrelated scalar subquery") {
@@ -99,7 +99,7 @@ class SubquerySuite extends QueryTest with SharedSQLContext {
test("uncorrelated scalar subquery on a DataFrame generated query") {
val df = Seq((1, "one"), (2, "two"), (3, "three")).toDF("key", "value")
- df.registerTempTable("subqueryData")
+ df.createOrReplaceTempView("subqueryData")
checkAnswer(
sql("select (select key from subqueryData where key > 2 order by key limit 1) + 1"),
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/UDFSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/UDFSuite.scala
index 922154320c..547d3c1abe 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/UDFSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/UDFSuite.scala
@@ -53,7 +53,7 @@ class UDFSuite extends QueryTest with SharedSQLContext {
test("SPARK-8003 spark_partition_id") {
val df = Seq((1, "Tearing down the walls that divide us")).toDF("id", "saying")
- df.registerTempTable("tmp_table")
+ df.createOrReplaceTempView("tmp_table")
checkAnswer(sql("select spark_partition_id() from tmp_table").toDF(), Row(0))
spark.catalog.dropTempView("tmp_table")
}
@@ -62,7 +62,7 @@ class UDFSuite extends QueryTest with SharedSQLContext {
withTempPath { dir =>
val data = sparkContext.parallelize(0 to 10, 2).toDF("id")
data.write.parquet(dir.getCanonicalPath)
- spark.read.parquet(dir.getCanonicalPath).registerTempTable("test_table")
+ spark.read.parquet(dir.getCanonicalPath).createOrReplaceTempView("test_table")
val answer = sql("select input_file_name() from test_table").head().getString(0)
assert(answer.contains(dir.getCanonicalPath))
assert(sql("select input_file_name() from test_table").distinct().collect().length >= 2)
@@ -107,7 +107,7 @@ class UDFSuite extends QueryTest with SharedSQLContext {
val df = sparkContext.parallelize(
(1 to 100).map(i => TestData(i, i.toString))).toDF()
- df.registerTempTable("integerData")
+ df.createOrReplaceTempView("integerData")
val result =
sql("SELECT * FROM integerData WHERE oneArgFilter(key)")
@@ -119,7 +119,7 @@ class UDFSuite extends QueryTest with SharedSQLContext {
val df = Seq(("red", 1), ("red", 2), ("blue", 10),
("green", 100), ("green", 200)).toDF("g", "v")
- df.registerTempTable("groupData")
+ df.createOrReplaceTempView("groupData")
val result =
sql(
@@ -138,7 +138,7 @@ class UDFSuite extends QueryTest with SharedSQLContext {
val df = Seq(("red", 1), ("red", 2), ("blue", 10),
("green", 100), ("green", 200)).toDF("g", "v")
- df.registerTempTable("groupData")
+ df.createOrReplaceTempView("groupData")
val result =
sql(
@@ -158,7 +158,7 @@ class UDFSuite extends QueryTest with SharedSQLContext {
val df = Seq(("red", 1), ("red", 2), ("blue", 10),
("green", 100), ("green", 200)).toDF("g", "v")
- df.registerTempTable("groupData")
+ df.createOrReplaceTempView("groupData")
val result =
sql(
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/UserDefinedTypeSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/UserDefinedTypeSuite.scala
index 3057e016c1..7d7b486530 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/UserDefinedTypeSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/UserDefinedTypeSuite.scala
@@ -95,7 +95,7 @@ class UserDefinedTypeSuite extends QueryTest with SharedSQLContext with ParquetT
test("UDTs and UDFs") {
spark.udf.register("testType", (d: UDT.MyDenseVector) => d.isInstanceOf[UDT.MyDenseVector])
- pointsRDD.registerTempTable("points")
+ pointsRDD.createOrReplaceTempView("points")
checkAnswer(
sql("SELECT testType(features) from points"),
Seq(Row(true), Row(true)))
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/PlannerSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/PlannerSuite.scala
index d2e1ea12fd..2a5295d0d2 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/PlannerSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/PlannerSuite.scala
@@ -78,7 +78,7 @@ class PlannerSuite extends SharedSQLContext {
val schema = StructType(fields)
val row = Row.fromSeq(Seq.fill(fields.size)(null))
val rowRDD = sparkContext.parallelize(row :: Nil)
- spark.createDataFrame(rowRDD, schema).registerTempTable("testLimit")
+ spark.createDataFrame(rowRDD, schema).createOrReplaceTempView("testLimit")
val planned = sql(
"""
@@ -132,7 +132,7 @@ class PlannerSuite extends SharedSQLContext {
test("InMemoryRelation statistics propagation") {
withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "81920") {
withTempTable("tiny") {
- testData.limit(3).registerTempTable("tiny")
+ testData.limit(3).createOrReplaceTempView("tiny")
sql("CACHE TABLE tiny")
val a = testData.as("a")
@@ -199,9 +199,9 @@ class PlannerSuite extends SharedSQLContext {
test("PartitioningCollection") {
withTempTable("normal", "small", "tiny") {
- testData.registerTempTable("normal")
- testData.limit(10).registerTempTable("small")
- testData.limit(3).registerTempTable("tiny")
+ testData.createOrReplaceTempView("normal")
+ testData.limit(10).createOrReplaceTempView("small")
+ testData.limit(3).createOrReplaceTempView("tiny")
// Disable broadcast join
withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") {
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/AggregateBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/AggregateBenchmark.scala
index b31338e827..bf3a39c84b 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/AggregateBenchmark.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/AggregateBenchmark.scala
@@ -134,7 +134,8 @@ class AggregateBenchmark extends BenchmarkBase {
val N = 20 << 22
val benchmark = new Benchmark("Aggregate w keys", N)
- sparkSession.range(N).selectExpr("id", "floor(rand() * 10000) as k").registerTempTable("test")
+ sparkSession.range(N).selectExpr("id", "floor(rand() * 10000) as k")
+ .createOrReplaceTempView("test")
def f(): Unit = sparkSession.sql("select k, k, sum(id) from test group by k, k").collect()
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/InMemoryColumnarQuerySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/InMemoryColumnarQuerySuite.scala
index 2099d4e1b3..e2fb91352d 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/InMemoryColumnarQuerySuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/InMemoryColumnarQuerySuite.scala
@@ -42,7 +42,7 @@ class InMemoryColumnarQuerySuite extends QueryTest with SharedSQLContext {
test("default size avoids broadcast") {
// TODO: Improve this test when we have better statistics
sparkContext.parallelize(1 to 10).map(i => TestData(i, i.toString))
- .toDF().registerTempTable("sizeTst")
+ .toDF().createOrReplaceTempView("sizeTst")
spark.catalog.cacheTable("sizeTst")
assert(
spark.table("sizeTst").queryExecution.analyzed.statistics.sizeInBytes >
@@ -92,7 +92,7 @@ class InMemoryColumnarQuerySuite extends QueryTest with SharedSQLContext {
test("SPARK-2729 regression: timestamp data type") {
val timestamps = (0 to 3).map(i => Tuple1(new Timestamp(i))).toDF("time")
- timestamps.registerTempTable("timestamps")
+ timestamps.createOrReplaceTempView("timestamps")
checkAnswer(
sql("SELECT time FROM timestamps"),
@@ -133,7 +133,7 @@ class InMemoryColumnarQuerySuite extends QueryTest with SharedSQLContext {
assert(df.schema.head.dataType === DecimalType(15, 10))
- df.cache().registerTempTable("test_fixed_decimal")
+ df.cache().createOrReplaceTempView("test_fixed_decimal")
checkAnswer(
sql("SELECT * FROM test_fixed_decimal"),
(1 to 10).map(i => Row(Decimal(i, 15, 10).toJavaBigDecimal)))
@@ -179,7 +179,7 @@ class InMemoryColumnarQuerySuite extends QueryTest with SharedSQLContext {
(i to i + 10).map(j => s"map_key_$j" -> (Long.MaxValue - j)).toMap,
Row((i - 0.25).toFloat, Seq(true, false, null)))
}
- spark.createDataFrame(rdd, schema).registerTempTable("InMemoryCache_different_data_types")
+ spark.createDataFrame(rdd, schema).createOrReplaceTempView("InMemoryCache_different_data_types")
// Cache the table.
sql("cache table InMemoryCache_different_data_types")
// Make sure the table is indeed cached.
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/PartitionBatchPruningSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/PartitionBatchPruningSuite.scala
index 48c798986b..a118cec0bb 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/PartitionBatchPruningSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/PartitionBatchPruningSuite.scala
@@ -63,7 +63,7 @@ class PartitionBatchPruningSuite
val string = if (((key - 1) / 10) % 2 == 0) null else key.toString
TestData(key, string)
}, 5).toDF()
- pruningData.registerTempTable("pruningData")
+ pruningData.createOrReplaceTempView("pruningData")
spark.catalog.cacheTable("pruningData")
}
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/JsonSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/JsonSuite.scala
index 63fe4658d6..46213a22ed 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/JsonSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/JsonSuite.scala
@@ -239,7 +239,7 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData {
StructField("nullstr", StringType, true):: Nil)
assert(expectedSchema === jsonDF.schema)
- jsonDF.registerTempTable("jsonTable")
+ jsonDF.createOrReplaceTempView("jsonTable")
checkAnswer(
sql("select nullstr, headers.Host from jsonTable"),
@@ -261,7 +261,7 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData {
assert(expectedSchema === jsonDF.schema)
- jsonDF.registerTempTable("jsonTable")
+ jsonDF.createOrReplaceTempView("jsonTable")
checkAnswer(
sql("select * from jsonTable"),
@@ -302,7 +302,7 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData {
assert(expectedSchema === jsonDF.schema)
- jsonDF.registerTempTable("jsonTable")
+ jsonDF.createOrReplaceTempView("jsonTable")
// Access elements of a primitive array.
checkAnswer(
@@ -376,7 +376,7 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData {
test("GetField operation on complex data type") {
val jsonDF = spark.read.json(complexFieldAndType1)
- jsonDF.registerTempTable("jsonTable")
+ jsonDF.createOrReplaceTempView("jsonTable")
checkAnswer(
sql("select arrayOfStruct[0].field1, arrayOfStruct[0].field2 from jsonTable"),
@@ -403,7 +403,7 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData {
assert(expectedSchema === jsonDF.schema)
- jsonDF.registerTempTable("jsonTable")
+ jsonDF.createOrReplaceTempView("jsonTable")
checkAnswer(
sql("select * from jsonTable"),
@@ -464,7 +464,7 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData {
ignore("Type conflict in primitive field values (Ignored)") {
val jsonDF = spark.read.json(primitiveFieldValueTypeConflict)
- jsonDF.registerTempTable("jsonTable")
+ jsonDF.createOrReplaceTempView("jsonTable")
// Right now, the analyzer does not promote strings in a boolean expression.
// Number and Boolean conflict: resolve the type as boolean in this query.
@@ -528,7 +528,7 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData {
assert(expectedSchema === jsonDF.schema)
- jsonDF.registerTempTable("jsonTable")
+ jsonDF.createOrReplaceTempView("jsonTable")
checkAnswer(
sql("select * from jsonTable"),
@@ -550,7 +550,7 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData {
assert(expectedSchema === jsonDF.schema)
- jsonDF.registerTempTable("jsonTable")
+ jsonDF.createOrReplaceTempView("jsonTable")
checkAnswer(
sql("select * from jsonTable"),
@@ -580,7 +580,7 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData {
assert(expectedSchema === jsonDF.schema)
- jsonDF.registerTempTable("jsonTable")
+ jsonDF.createOrReplaceTempView("jsonTable")
}
test("Loading a JSON dataset from a text file") {
@@ -601,7 +601,7 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData {
assert(expectedSchema === jsonDF.schema)
- jsonDF.registerTempTable("jsonTable")
+ jsonDF.createOrReplaceTempView("jsonTable")
checkAnswer(
sql("select * from jsonTable"),
@@ -633,7 +633,7 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData {
assert(expectedSchema === jsonDF.schema)
- jsonDF.registerTempTable("jsonTable")
+ jsonDF.createOrReplaceTempView("jsonTable")
checkAnswer(
sql("select * from jsonTable"),
@@ -674,7 +674,7 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData {
assert(expectedSchema === jsonDF.schema)
- jsonDF.registerTempTable("jsonTable")
+ jsonDF.createOrReplaceTempView("jsonTable")
// Access elements of a primitive array.
checkAnswer(
@@ -759,7 +759,7 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData {
assert(expectedSchema === jsonDF.schema)
- jsonDF.registerTempTable("jsonTable")
+ jsonDF.createOrReplaceTempView("jsonTable")
checkAnswer(
sql("select * from jsonTable"),
@@ -885,7 +885,7 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData {
assert(schema === jsonDF1.schema)
- jsonDF1.registerTempTable("jsonTable1")
+ jsonDF1.createOrReplaceTempView("jsonTable1")
checkAnswer(
sql("select * from jsonTable1"),
@@ -902,7 +902,7 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData {
assert(schema === jsonDF2.schema)
- jsonDF2.registerTempTable("jsonTable2")
+ jsonDF2.createOrReplaceTempView("jsonTable2")
checkAnswer(
sql("select * from jsonTable2"),
@@ -921,7 +921,7 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData {
StructField("map", MapType(StringType, IntegerType, true), false) :: Nil)
val jsonWithSimpleMap = spark.read.schema(schemaWithSimpleMap).json(mapType1)
- jsonWithSimpleMap.registerTempTable("jsonWithSimpleMap")
+ jsonWithSimpleMap.createOrReplaceTempView("jsonWithSimpleMap")
checkAnswer(
sql("select `map` from jsonWithSimpleMap"),
@@ -949,7 +949,7 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData {
val jsonWithComplexMap = spark.read.schema(schemaWithComplexMap).json(mapType2)
- jsonWithComplexMap.registerTempTable("jsonWithComplexMap")
+ jsonWithComplexMap.createOrReplaceTempView("jsonWithComplexMap")
checkAnswer(
sql("select `map` from jsonWithComplexMap"),
@@ -974,7 +974,7 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData {
test("SPARK-2096 Correctly parse dot notations") {
val jsonDF = spark.read.json(complexFieldAndType2)
- jsonDF.registerTempTable("jsonTable")
+ jsonDF.createOrReplaceTempView("jsonTable")
checkAnswer(
sql("select arrayOfStruct[0].field1, arrayOfStruct[0].field2 from jsonTable"),
@@ -992,7 +992,7 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData {
test("SPARK-3390 Complex arrays") {
val jsonDF = spark.read.json(complexFieldAndType2)
- jsonDF.registerTempTable("jsonTable")
+ jsonDF.createOrReplaceTempView("jsonTable")
checkAnswer(
sql(
@@ -1015,7 +1015,7 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData {
test("SPARK-3308 Read top level JSON arrays") {
val jsonDF = spark.read.json(jsonArray)
- jsonDF.registerTempTable("jsonTable")
+ jsonDF.createOrReplaceTempView("jsonTable")
checkAnswer(
sql(
@@ -1084,7 +1084,7 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData {
withSQLConf(SQLConf.COLUMN_NAME_OF_CORRUPT_RECORD.key -> "_unparsed") {
withTempTable("jsonTable") {
val jsonDF = spark.read.json(corruptRecords)
- jsonDF.registerTempTable("jsonTable")
+ jsonDF.createOrReplaceTempView("jsonTable")
val schema = StructType(
StructField("_unparsed", StringType, true) ::
StructField("a", StringType, true) ::
@@ -1156,7 +1156,7 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData {
test("SPARK-4068: nulls in arrays") {
val jsonDF = spark.read.json(nullsInArrays)
- jsonDF.registerTempTable("jsonTable")
+ jsonDF.createOrReplaceTempView("jsonTable")
val schema = StructType(
StructField("field1",
@@ -1202,7 +1202,7 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData {
}
val df1 = spark.createDataFrame(rowRDD1, schema1)
- df1.registerTempTable("applySchema1")
+ df1.createOrReplaceTempView("applySchema1")
val df2 = df1.toDF
val result = df2.toJSON.collect()
// scalastyle:off
@@ -1225,7 +1225,7 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData {
}
val df3 = spark.createDataFrame(rowRDD2, schema2)
- df3.registerTempTable("applySchema2")
+ df3.createOrReplaceTempView("applySchema2")
val df4 = df3.toDF
val result2 = df4.toJSON.collect()
@@ -1234,7 +1234,7 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData {
val jsonDF = spark.read.json(primitiveFieldAndType)
val primTable = spark.read.json(jsonDF.toJSON.rdd)
- primTable.registerTempTable("primitiveTable")
+ primTable.createOrReplaceTempView("primitiveTable")
checkAnswer(
sql("select * from primitiveTable"),
Row(new java.math.BigDecimal("92233720368547758070"),
@@ -1247,7 +1247,7 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData {
val complexJsonDF = spark.read.json(complexFieldAndType1)
val compTable = spark.read.json(complexJsonDF.toJSON.rdd)
- compTable.registerTempTable("complexTable")
+ compTable.createOrReplaceTempView("complexTable")
// Access elements of a primitive array.
checkAnswer(
sql("select arrayOfString[0], arrayOfString[1], arrayOfString[2] from complexTable"),
@@ -1387,7 +1387,7 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData {
"col1",
"abd")
- spark.read.json(root.getAbsolutePath).registerTempTable("test_myjson_with_part")
+ spark.read.json(root.getAbsolutePath).createOrReplaceTempView("test_myjson_with_part")
checkAnswer(sql(
"SELECT count(a) FROM test_myjson_with_part where d1 = 1 and col1='abc'"), Row(4))
checkAnswer(sql(
@@ -1531,7 +1531,7 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData {
{
val jsonDF = spark.read.schema(schema).json(additionalCorruptRecords)
- jsonDF.registerTempTable("jsonTable")
+ jsonDF.createOrReplaceTempView("jsonTable")
// In HiveContext, backticks should be used to access columns starting with an underscore.
checkAnswer(
@@ -1639,7 +1639,7 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData {
val schema = (new StructType).add("ts", TimestampType)
val jsonDF = spark.read.schema(schema).json(timestampAsLong)
- jsonDF.registerTempTable("jsonTable")
+ jsonDF.createOrReplaceTempView("jsonTable")
checkAnswer(
sql("select ts from jsonTable"),
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetPartitionDiscoverySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetPartitionDiscoverySuite.scala
index 8707e13461..847ea6bd52 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetPartitionDiscoverySuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetPartitionDiscoverySuite.scala
@@ -400,7 +400,7 @@ class ParquetPartitionDiscoverySuite extends QueryTest with ParquetTest with Sha
// Introduce _temporary dir to the base dir to test the robustness of the schema discovery process.
new File(base.getCanonicalPath, "_temporary").mkdir()
- spark.read.parquet(base.getCanonicalPath).registerTempTable("t")
+ spark.read.parquet(base.getCanonicalPath).createOrReplaceTempView("t")
withTempTable("t") {
checkAnswer(
@@ -484,7 +484,7 @@ class ParquetPartitionDiscoverySuite extends QueryTest with ParquetTest with Sha
makePartitionDir(base, defaultPartitionName, "pi" -> pi, "ps" -> ps))
}
- spark.read.parquet(base.getCanonicalPath).registerTempTable("t")
+ spark.read.parquet(base.getCanonicalPath).createOrReplaceTempView("t")
withTempTable("t") {
checkAnswer(
@@ -533,7 +533,7 @@ class ParquetPartitionDiscoverySuite extends QueryTest with ParquetTest with Sha
}
val parquetRelation = spark.read.format("parquet").load(base.getCanonicalPath)
- parquetRelation.registerTempTable("t")
+ parquetRelation.createOrReplaceTempView("t")
withTempTable("t") {
checkAnswer(
@@ -573,7 +573,7 @@ class ParquetPartitionDiscoverySuite extends QueryTest with ParquetTest with Sha
}
val parquetRelation = spark.read.format("parquet").load(base.getCanonicalPath)
- parquetRelation.registerTempTable("t")
+ parquetRelation.createOrReplaceTempView("t")
withTempTable("t") {
checkAnswer(
@@ -609,7 +609,7 @@ class ParquetPartitionDiscoverySuite extends QueryTest with ParquetTest with Sha
.option("mergeSchema", "true")
.format("parquet")
.load(base.getCanonicalPath)
- .registerTempTable("t")
+ .createOrReplaceTempView("t")
withTempTable("t") {
checkAnswer(
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetQuerySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetQuerySuite.scala
index f9f9f80352..725e14c0fb 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetQuerySuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetQuerySuite.scala
@@ -46,7 +46,7 @@ class ParquetQuerySuite extends QueryTest with ParquetTest with SharedSQLContext
test("appending") {
val data = (0 until 10).map(i => (i, i.toString))
- spark.createDataFrame(data).toDF("c1", "c2").registerTempTable("tmp")
+ spark.createDataFrame(data).toDF("c1", "c2").createOrReplaceTempView("tmp")
// Query appends, don't test with both read modes.
withParquetTable(data, "t", false) {
sql("INSERT INTO TABLE t SELECT * FROM tmp")
@@ -58,7 +58,7 @@ class ParquetQuerySuite extends QueryTest with ParquetTest with SharedSQLContext
test("overwriting") {
val data = (0 until 10).map(i => (i, i.toString))
- spark.createDataFrame(data).toDF("c1", "c2").registerTempTable("tmp")
+ spark.createDataFrame(data).toDF("c1", "c2").createOrReplaceTempView("tmp")
withParquetTable(data, "t") {
sql("INSERT OVERWRITE TABLE t SELECT * FROM tmp")
checkAnswer(spark.table("t"), data.map(Row.fromTuple))
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetReadBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetReadBenchmark.scala
index 69a600a55b..487d7a7e5a 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetReadBenchmark.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetReadBenchmark.scala
@@ -75,10 +75,10 @@ object ParquetReadBenchmark {
withTempPath { dir =>
withTempTable("t1", "tempTable") {
- spark.range(values).registerTempTable("t1")
+ spark.range(values).createOrReplaceTempView("t1")
spark.sql("select cast(id as INT) as id from t1")
.write.parquet(dir.getCanonicalPath)
- spark.read.parquet(dir.getCanonicalPath).registerTempTable("tempTable")
+ spark.read.parquet(dir.getCanonicalPath).createOrReplaceTempView("tempTable")
sqlBenchmark.addCase("SQL Parquet Vectorized") { iter =>
spark.sql("select sum(id) from tempTable").collect()
@@ -159,10 +159,10 @@ object ParquetReadBenchmark {
def intStringScanBenchmark(values: Int): Unit = {
withTempPath { dir =>
withTempTable("t1", "tempTable") {
- spark.range(values).registerTempTable("t1")
+ spark.range(values).createOrReplaceTempView("t1")
spark.sql("select cast(id as INT) as c1, cast(id as STRING) as c2 from t1")
.write.parquet(dir.getCanonicalPath)
- spark.read.parquet(dir.getCanonicalPath).registerTempTable("tempTable")
+ spark.read.parquet(dir.getCanonicalPath).createOrReplaceTempView("tempTable")
val benchmark = new Benchmark("Int and String Scan", values)
@@ -193,10 +193,10 @@ object ParquetReadBenchmark {
def stringDictionaryScanBenchmark(values: Int): Unit = {
withTempPath { dir =>
withTempTable("t1", "tempTable") {
- spark.range(values).registerTempTable("t1")
+ spark.range(values).createOrReplaceTempView("t1")
spark.sql("select cast((id % 200) + 10000 as STRING) as c1 from t1")
.write.parquet(dir.getCanonicalPath)
- spark.read.parquet(dir.getCanonicalPath).registerTempTable("tempTable")
+ spark.read.parquet(dir.getCanonicalPath).createOrReplaceTempView("tempTable")
val benchmark = new Benchmark("String Dictionary", values)
@@ -225,10 +225,10 @@ object ParquetReadBenchmark {
def partitionTableScanBenchmark(values: Int): Unit = {
withTempPath { dir =>
withTempTable("t1", "tempTable") {
- spark.range(values).registerTempTable("t1")
+ spark.range(values).createOrReplaceTempView("t1")
spark.sql("select id % 2 as p, cast(id as INT) as id from t1")
.write.partitionBy("p").parquet(dir.getCanonicalPath)
- spark.read.parquet(dir.getCanonicalPath).registerTempTable("tempTable")
+ spark.read.parquet(dir.getCanonicalPath).createOrReplaceTempView("tempTable")
val benchmark = new Benchmark("Partitioned Table", values)
@@ -260,11 +260,11 @@ object ParquetReadBenchmark {
def stringWithNullsScanBenchmark(values: Int, fractionOfNulls: Double): Unit = {
withTempPath { dir =>
withTempTable("t1", "tempTable") {
- spark.range(values).registerTempTable("t1")
+ spark.range(values).createOrReplaceTempView("t1")
spark.sql(s"select IF(rand(1) < $fractionOfNulls, NULL, cast(id as STRING)) as c1, " +
s"IF(rand(2) < $fractionOfNulls, NULL, cast(id as STRING)) as c2 from t1")
.write.parquet(dir.getCanonicalPath)
- spark.read.parquet(dir.getCanonicalPath).registerTempTable("tempTable")
+ spark.read.parquet(dir.getCanonicalPath).createOrReplaceTempView("tempTable")
val benchmark = new Benchmark("String with Nulls Scan", values)
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/TPCDSBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/TPCDSBenchmark.scala
index 08b7eb3cf7..228ae6f840 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/TPCDSBenchmark.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/TPCDSBenchmark.scala
@@ -1187,7 +1187,7 @@ object TPCDSBenchmark {
def setupTables(dataLocation: String): Map[String, Long] = {
tables.map { tableName =>
- spark.read.parquet(s"$dataLocation/$tableName").registerTempTable(tableName)
+ spark.read.parquet(s"$dataLocation/$tableName").createOrReplaceTempView(tableName)
tableName -> spark.table(tableName).count()
}.toMap
}
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/metric/SQLMetricsSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/metric/SQLMetricsSuite.scala
index 1b82769428..08f596f130 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/metric/SQLMetricsSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/metric/SQLMetricsSuite.scala
@@ -165,7 +165,7 @@ class SQLMetricsSuite extends SparkFunSuite with SharedSQLContext {
// Because SortMergeJoin may skip different rows if the number of partitions is different, this
// test should use the deterministic number of partitions.
val testDataForJoin = testData2.filter('a < 2) // TestData2(1, 1) :: TestData2(1, 2)
- testDataForJoin.registerTempTable("testDataForJoin")
+ testDataForJoin.createOrReplaceTempView("testDataForJoin")
withTempTable("testDataForJoin") {
// Assume the execution plan is
// ... -> SortMergeJoin(nodeId = 1) -> TungstenProject(nodeId = 0)
@@ -183,7 +183,7 @@ class SQLMetricsSuite extends SparkFunSuite with SharedSQLContext {
// Because SortMergeJoin may skip different rows if the number of partitions is different,
// this test should use the deterministic number of partitions.
val testDataForJoin = testData2.filter('a < 2) // TestData2(1, 1) :: TestData2(1, 2)
- testDataForJoin.registerTempTable("testDataForJoin")
+ testDataForJoin.createOrReplaceTempView("testDataForJoin")
withTempTable("testDataForJoin") {
// Assume the execution plan is
// ... -> SortMergeJoin(nodeId = 1) -> TungstenProject(nodeId = 0)
@@ -237,7 +237,7 @@ class SQLMetricsSuite extends SparkFunSuite with SharedSQLContext {
test("BroadcastNestedLoopJoin metrics") {
val testDataForJoin = testData2.filter('a < 2) // TestData2(1, 1) :: TestData2(1, 2)
- testDataForJoin.registerTempTable("testDataForJoin")
+ testDataForJoin.createOrReplaceTempView("testDataForJoin")
withTempTable("testDataForJoin") {
// Assume the execution plan is
// ... -> BroadcastNestedLoopJoin(nodeId = 1) -> TungstenProject(nodeId = 0)
@@ -265,7 +265,7 @@ class SQLMetricsSuite extends SparkFunSuite with SharedSQLContext {
test("CartesianProduct metrics") {
val testDataForJoin = testData2.filter('a < 2) // TestData2(1, 1) :: TestData2(1, 2)
- testDataForJoin.registerTempTable("testDataForJoin")
+ testDataForJoin.createOrReplaceTempView("testDataForJoin")
withTempTable("testDataForJoin") {
// Assume the execution plan is
// ... -> CartesianProduct(nodeId = 1) -> TungstenProject(nodeId = 0)
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala
index 44d1b9ddda..9c9abfeb2a 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala
@@ -441,7 +441,7 @@ class JDBCSuite extends SparkFunSuite
test("test DATE types in cache") {
val rows = spark.read.jdbc(urlWithUserAndPass, "TEST.TIMETYPES", new Properties).collect()
spark.read.jdbc(urlWithUserAndPass, "TEST.TIMETYPES", new Properties)
- .cache().registerTempTable("mycached_date")
+ .cache().createOrReplaceTempView("mycached_date")
val cachedRows = sql("select * from mycached_date").collect()
assert(rows(0).getAs[java.sql.Date](1) === java.sql.Date.valueOf("1996-01-01"))
assert(cachedRows(0).getAs[java.sql.Date](1) === java.sql.Date.valueOf("1996-01-01"))
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/sources/CreateTableAsSelectSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/sources/CreateTableAsSelectSuite.scala
index c1dc9b9834..03c18ad009 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/sources/CreateTableAsSelectSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/sources/CreateTableAsSelectSuite.scala
@@ -34,7 +34,7 @@ class CreateTableAsSelectSuite extends DataSourceTest with SharedSQLContext with
super.beforeAll()
path = Utils.createTempDir()
val rdd = sparkContext.parallelize((1 to 10).map(i => s"""{"a":$i, "b":"str${i}"}"""))
- caseInsensitiveContext.read.json(rdd).registerTempTable("jt")
+ caseInsensitiveContext.read.json(rdd).createOrReplaceTempView("jt")
}
override def afterAll(): Unit = {
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/sources/InsertSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/sources/InsertSuite.scala
index 5ac39f54b9..854fec5b22 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/sources/InsertSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/sources/InsertSuite.scala
@@ -31,7 +31,7 @@ class InsertSuite extends DataSourceTest with SharedSQLContext {
super.beforeAll()
path = Utils.createTempDir()
val rdd = sparkContext.parallelize((1 to 10).map(i => s"""{"a":$i, "b":"str$i"}"""))
- caseInsensitiveContext.read.json(rdd).registerTempTable("jt")
+ caseInsensitiveContext.read.json(rdd).createOrReplaceTempView("jt")
sql(
s"""
|CREATE TEMPORARY TABLE jsonTable (a int, b string)
@@ -111,7 +111,7 @@ class InsertSuite extends DataSourceTest with SharedSQLContext {
// Writing the table to less part files.
val rdd1 = sparkContext.parallelize((1 to 10).map(i => s"""{"a":$i, "b":"str$i"}"""), 5)
- caseInsensitiveContext.read.json(rdd1).registerTempTable("jt1")
+ caseInsensitiveContext.read.json(rdd1).createOrReplaceTempView("jt1")
sql(
s"""
|INSERT OVERWRITE TABLE jsonTable SELECT a, b FROM jt1
@@ -123,7 +123,7 @@ class InsertSuite extends DataSourceTest with SharedSQLContext {
// Writing the table to more part files.
val rdd2 = sparkContext.parallelize((1 to 10).map(i => s"""{"a":$i, "b":"str$i"}"""), 10)
- caseInsensitiveContext.read.json(rdd2).registerTempTable("jt2")
+ caseInsensitiveContext.read.json(rdd2).createOrReplaceTempView("jt2")
sql(
s"""
|INSERT OVERWRITE TABLE jsonTable SELECT a, b FROM jt2
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/sources/SaveLoadSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/sources/SaveLoadSuite.scala
index bb2c54aa64..7738e4107d 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/sources/SaveLoadSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/sources/SaveLoadSuite.scala
@@ -42,7 +42,7 @@ class SaveLoadSuite extends DataSourceTest with SharedSQLContext with BeforeAndA
val rdd = sparkContext.parallelize((1 to 10).map(i => s"""{"a":$i, "b":"str${i}"}"""))
df = caseInsensitiveContext.read.json(rdd)
- df.registerTempTable("jsonTable")
+ df.createOrReplaceTempView("jsonTable")
}
override def afterAll(): Unit = {
@@ -123,7 +123,7 @@ class SaveLoadSuite extends DataSourceTest with SharedSQLContext with BeforeAndA
// verify the append mode
df.write.mode(SaveMode.Append).json(path.toString)
val df2 = df.union(df)
- df2.registerTempTable("jsonTable2")
+ df2.createOrReplaceTempView("jsonTable2")
checkLoad(df2, "jsonTable2")
}
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamSuite.scala
index 013b731693..b742206b58 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamSuite.scala
@@ -77,7 +77,7 @@ class StreamSuite extends StreamTest with SharedSQLContext {
test("sql queries") {
val inputData = MemoryStream[Int]
- inputData.toDF().registerTempTable("stream")
+ inputData.toDF().createOrReplaceTempView("stream")
val evens = sql("SELECT * FROM stream WHERE value % 2 = 0")
testStream(evens)(
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/test/SQLTestData.scala b/sql/core/src/test/scala/org/apache/spark/sql/test/SQLTestData.scala
index 03369c5a48..421f6bca7f 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/test/SQLTestData.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/test/SQLTestData.scala
@@ -41,14 +41,14 @@ private[sql] trait SQLTestData { self =>
protected lazy val emptyTestData: DataFrame = {
val df = spark.sparkContext.parallelize(
Seq.empty[Int].map(i => TestData(i, i.toString))).toDF()
- df.registerTempTable("emptyTestData")
+ df.createOrReplaceTempView("emptyTestData")
df
}
protected lazy val testData: DataFrame = {
val df = spark.sparkContext.parallelize(
(1 to 100).map(i => TestData(i, i.toString))).toDF()
- df.registerTempTable("testData")
+ df.createOrReplaceTempView("testData")
df
}
@@ -60,7 +60,7 @@ private[sql] trait SQLTestData { self =>
TestData2(2, 2) ::
TestData2(3, 1) ::
TestData2(3, 2) :: Nil, 2).toDF()
- df.registerTempTable("testData2")
+ df.createOrReplaceTempView("testData2")
df
}
@@ -68,14 +68,14 @@ private[sql] trait SQLTestData { self =>
val df = spark.sparkContext.parallelize(
TestData3(1, None) ::
TestData3(2, Some(2)) :: Nil).toDF()
- df.registerTempTable("testData3")
+ df.createOrReplaceTempView("testData3")
df
}
protected lazy val negativeData: DataFrame = {
val df = spark.sparkContext.parallelize(
(1 to 100).map(i => TestData(-i, (-i).toString))).toDF()
- df.registerTempTable("negativeData")
+ df.createOrReplaceTempView("negativeData")
df
}
@@ -87,7 +87,7 @@ private[sql] trait SQLTestData { self =>
LargeAndSmallInts(2, 2) ::
LargeAndSmallInts(2147483646, 1) ::
LargeAndSmallInts(3, 2) :: Nil).toDF()
- df.registerTempTable("largeAndSmallInts")
+ df.createOrReplaceTempView("largeAndSmallInts")
df
}
@@ -99,7 +99,7 @@ private[sql] trait SQLTestData { self =>
DecimalData(2, 2) ::
DecimalData(3, 1) ::
DecimalData(3, 2) :: Nil).toDF()
- df.registerTempTable("decimalData")
+ df.createOrReplaceTempView("decimalData")
df
}
@@ -110,7 +110,7 @@ private[sql] trait SQLTestData { self =>
BinaryData("122".getBytes(StandardCharsets.UTF_8), 3) ::
BinaryData("121".getBytes(StandardCharsets.UTF_8), 2) ::
BinaryData("123".getBytes(StandardCharsets.UTF_8), 4) :: Nil).toDF()
- df.registerTempTable("binaryData")
+ df.createOrReplaceTempView("binaryData")
df
}
@@ -122,7 +122,7 @@ private[sql] trait SQLTestData { self =>
UpperCaseData(4, "D") ::
UpperCaseData(5, "E") ::
UpperCaseData(6, "F") :: Nil).toDF()
- df.registerTempTable("upperCaseData")
+ df.createOrReplaceTempView("upperCaseData")
df
}
@@ -132,7 +132,7 @@ private[sql] trait SQLTestData { self =>
LowerCaseData(2, "b") ::
LowerCaseData(3, "c") ::
LowerCaseData(4, "d") :: Nil).toDF()
- df.registerTempTable("lowerCaseData")
+ df.createOrReplaceTempView("lowerCaseData")
df
}
@@ -140,7 +140,7 @@ private[sql] trait SQLTestData { self =>
val rdd = spark.sparkContext.parallelize(
ArrayData(Seq(1, 2, 3), Seq(Seq(1, 2, 3))) ::
ArrayData(Seq(2, 3, 4), Seq(Seq(2, 3, 4))) :: Nil)
- rdd.toDF().registerTempTable("arrayData")
+ rdd.toDF().createOrReplaceTempView("arrayData")
rdd
}
@@ -151,13 +151,13 @@ private[sql] trait SQLTestData { self =>
MapData(Map(1 -> "a3", 2 -> "b3", 3 -> "c3")) ::
MapData(Map(1 -> "a4", 2 -> "b4")) ::
MapData(Map(1 -> "a5")) :: Nil)
- rdd.toDF().registerTempTable("mapData")
+ rdd.toDF().createOrReplaceTempView("mapData")
rdd
}
protected lazy val repeatedData: RDD[StringData] = {
val rdd = spark.sparkContext.parallelize(List.fill(2)(StringData("test")))
- rdd.toDF().registerTempTable("repeatedData")
+ rdd.toDF().createOrReplaceTempView("repeatedData")
rdd
}
@@ -165,7 +165,7 @@ private[sql] trait SQLTestData { self =>
val rdd = spark.sparkContext.parallelize(
List.fill(2)(StringData(null)) ++
List.fill(2)(StringData("test")))
- rdd.toDF().registerTempTable("nullableRepeatedData")
+ rdd.toDF().createOrReplaceTempView("nullableRepeatedData")
rdd
}
@@ -175,7 +175,7 @@ private[sql] trait SQLTestData { self =>
NullInts(2) ::
NullInts(3) ::
NullInts(null) :: Nil).toDF()
- df.registerTempTable("nullInts")
+ df.createOrReplaceTempView("nullInts")
df
}
@@ -185,7 +185,7 @@ private[sql] trait SQLTestData { self =>
NullInts(null) ::
NullInts(null) ::
NullInts(null) :: Nil).toDF()
- df.registerTempTable("allNulls")
+ df.createOrReplaceTempView("allNulls")
df
}
@@ -194,13 +194,13 @@ private[sql] trait SQLTestData { self =>
NullStrings(1, "abc") ::
NullStrings(2, "ABC") ::
NullStrings(3, null) :: Nil).toDF()
- df.registerTempTable("nullStrings")
+ df.createOrReplaceTempView("nullStrings")
df
}
protected lazy val tableName: DataFrame = {
val df = spark.sparkContext.parallelize(TableName("test") :: Nil).toDF()
- df.registerTempTable("tableName")
+ df.createOrReplaceTempView("tableName")
df
}
@@ -215,7 +215,7 @@ private[sql] trait SQLTestData { self =>
// An RDD with 4 elements and 8 partitions
protected lazy val withEmptyParts: RDD[IntField] = {
val rdd = spark.sparkContext.parallelize((1 to 4).map(IntField), 8)
- rdd.toDF().registerTempTable("withEmptyParts")
+ rdd.toDF().createOrReplaceTempView("withEmptyParts")
rdd
}
@@ -223,7 +223,7 @@ private[sql] trait SQLTestData { self =>
val df = spark.sparkContext.parallelize(
Person(0, "mike", 30) ::
Person(1, "jim", 20) :: Nil).toDF()
- df.registerTempTable("person")
+ df.createOrReplaceTempView("person")
df
}
@@ -231,7 +231,7 @@ private[sql] trait SQLTestData { self =>
val df = spark.sparkContext.parallelize(
Salary(0, 2000.0) ::
Salary(1, 1000.0) :: Nil).toDF()
- df.registerTempTable("salary")
+ df.createOrReplaceTempView("salary")
df
}
@@ -240,7 +240,7 @@ private[sql] trait SQLTestData { self =>
ComplexData(Map("1" -> 1), TestData(1, "1"), Seq(1, 1, 1), true) ::
ComplexData(Map("2" -> 2), TestData(2, "2"), Seq(2, 2, 2), false) ::
Nil).toDF()
- df.registerTempTable("complexData")
+ df.createOrReplaceTempView("complexData")
df
}
@@ -251,7 +251,7 @@ private[sql] trait SQLTestData { self =>
CourseSales("dotNET", 2012, 5000) ::
CourseSales("dotNET", 2013, 48000) ::
CourseSales("Java", 2013, 30000) :: Nil).toDF()
- df.registerTempTable("courseSales")
+ df.createOrReplaceTempView("courseSales")
df
}
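For readers skimming the patch: every hunk above makes the same mechanical substitution, replacing the deprecated registerTempTable call with createOrReplaceTempView. Both expose a DataFrame to SQL under a name; createOrReplaceTempView is the Spark 2.0 replacement for the deprecated call. A minimal sketch of the pattern, assuming a local SparkSession named spark (session setup and sample data are illustrative, not part of this patch):

    import org.apache.spark.sql.SparkSession

    val spark = SparkSession.builder().master("local[*]").appName("temp-view-sketch").getOrCreate()
    import spark.implicits._

    val df = Seq((1, "a"), (2, "b")).toDF("key", "value")
    // old API, deprecated in Spark 2.0: df.registerTempTable("kv")
    df.createOrReplaceTempView("kv")
    spark.sql("SELECT key FROM kv WHERE value = 'b'").show()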
diff --git a/sql/hive/src/test/java/org/apache/spark/sql/hive/JavaDataFrameSuite.java b/sql/hive/src/test/java/org/apache/spark/sql/hive/JavaDataFrameSuite.java
index 64f2ded447..f664d5a4cd 100644
--- a/sql/hive/src/test/java/org/apache/spark/sql/hive/JavaDataFrameSuite.java
+++ b/sql/hive/src/test/java/org/apache/spark/sql/hive/JavaDataFrameSuite.java
@@ -57,7 +57,7 @@ public class JavaDataFrameSuite {
jsonObjects.add("{\"key\":" + i + ", \"value\":\"str" + i + "\"}");
}
df = hc.read().json(sc.parallelize(jsonObjects));
- df.registerTempTable("window_table");
+ df.createOrReplaceTempView("window_table");
}
@After
diff --git a/sql/hive/src/test/java/org/apache/spark/sql/hive/JavaMetastoreDataSourcesSuite.java b/sql/hive/src/test/java/org/apache/spark/sql/hive/JavaMetastoreDataSourcesSuite.java
index f13c32db9d..e73117c814 100644
--- a/sql/hive/src/test/java/org/apache/spark/sql/hive/JavaMetastoreDataSourcesSuite.java
+++ b/sql/hive/src/test/java/org/apache/spark/sql/hive/JavaMetastoreDataSourcesSuite.java
@@ -85,7 +85,7 @@ public class JavaMetastoreDataSourcesSuite {
}
JavaRDD<String> rdd = sc.parallelize(jsonObjects);
df = sqlContext.read().json(rdd);
- df.registerTempTable("jsonTable");
+ df.createOrReplaceTempView("jsonTable");
}
@After
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/ErrorPositionSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/ErrorPositionSuite.scala
index d96eb0169e..d2cb62c617 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/ErrorPositionSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/ErrorPositionSuite.scala
@@ -33,8 +33,8 @@ class ErrorPositionSuite extends QueryTest with TestHiveSingleton with BeforeAnd
if (spark.wrapped.tableNames().contains("src")) {
spark.catalog.dropTempView("src")
}
- Seq((1, "")).toDF("key", "value").registerTempTable("src")
- Seq((1, 1, 1)).toDF("a", "a", "b").registerTempTable("dupAttributes")
+ Seq((1, "")).toDF("key", "value").createOrReplaceTempView("src")
+ Seq((1, 1, 1)).toDF("a", "a", "b").createOrReplaceTempView("dupAttributes")
}
override protected def afterEach(): Unit = {
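The "OrReplace" part of the new name matches how suites like ErrorPositionSuite use it: re-creating a view under an existing name replaces the previous definition rather than failing, and spark.catalog.dropTempView removes it, as the beforeEach above does for "src". A small illustration under the same assumptions as the sketch above, with hypothetical data:

    Seq((1, "")).toDF("key", "value").createOrReplaceTempView("src")
    Seq((2, "x"), (3, "y")).toDF("key", "value").createOrReplaceTempView("src")  // replaces the first view
    assert(spark.table("src").count() == 2)
    spark.catalog.dropTempView("src")  // "src" can no longer be resolved by name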
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveParquetSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveParquetSuite.scala
index b5af758a65..e2304b5397 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveParquetSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveParquetSuite.scala
@@ -51,7 +51,7 @@ class HiveParquetSuite extends QueryTest with ParquetTest with TestHiveSingleton
test("Converting Hive to Parquet Table via saveAsParquetFile") {
withTempPath { dir =>
sql("SELECT * FROM src").write.parquet(dir.getCanonicalPath)
- hiveContext.read.parquet(dir.getCanonicalPath).registerTempTable("p")
+ hiveContext.read.parquet(dir.getCanonicalPath).createOrReplaceTempView("p")
withTempTable("p") {
checkAnswer(
sql("SELECT * FROM src ORDER BY key"),
@@ -65,7 +65,7 @@ class HiveParquetSuite extends QueryTest with ParquetTest with TestHiveSingleton
withParquetTable((1 to 10).map(i => (i, s"val_$i")), "t", false) {
withTempPath { file =>
sql("SELECT * FROM t LIMIT 1").write.parquet(file.getCanonicalPath)
- hiveContext.read.parquet(file.getCanonicalPath).registerTempTable("p")
+ hiveContext.read.parquet(file.getCanonicalPath).createOrReplaceTempView("p")
withTempTable("p") {
// let's do three overwrites for good measure
sql("INSERT OVERWRITE TABLE p SELECT * FROM t")
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveSparkSubmitSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveSparkSubmitSuite.scala
index d05a3623ae..a4bbe96cf8 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveSparkSubmitSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveSparkSubmitSuite.scala
@@ -355,7 +355,7 @@ object TemporaryHiveUDFTest extends Logging {
""".stripMargin)
val source =
hiveContext.createDataFrame((1 to 10).map(i => (i, s"str$i"))).toDF("key", "val")
- source.registerTempTable("sourceTable")
+ source.createOrReplaceTempView("sourceTable")
// Actually use the loaded UDF.
logInfo("Using the UDF.")
val result = hiveContext.sql(
@@ -393,7 +393,7 @@ object PermanentHiveUDFTest1 extends Logging {
""".stripMargin)
val source =
hiveContext.createDataFrame((1 to 10).map(i => (i, s"str$i"))).toDF("key", "val")
- source.registerTempTable("sourceTable")
+ source.createOrReplaceTempView("sourceTable")
// Actually use the loaded UDF.
logInfo("Using the UDF.")
val result = hiveContext.sql(
@@ -429,7 +429,7 @@ object PermanentHiveUDFTest2 extends Logging {
hiveContext.sessionState.catalog.createFunction(function, ignoreIfExists = false)
val source =
hiveContext.createDataFrame((1 to 10).map(i => (i, s"str$i"))).toDF("key", "val")
- source.registerTempTable("sourceTable")
+ source.createOrReplaceTempView("sourceTable")
// Actually use the loaded UDF.
logInfo("Using the UDF.")
val result = hiveContext.sql(
@@ -491,7 +491,7 @@ object SparkSubmitClassLoaderTest extends Logging {
""".stripMargin)
val source =
hiveContext.createDataFrame((1 to 10).map(i => (i, s"str$i"))).toDF("key", "val")
- source.registerTempTable("sourceTable")
+ source.createOrReplaceTempView("sourceTable")
// Load a Hive SerDe from the jar.
logInfo("Creating a Hive table with a SerDe provided in a jar.")
hiveContext.sql(
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertIntoHiveTableSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertIntoHiveTableSuite.scala
index 883cdac110..b256845620 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertIntoHiveTableSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertIntoHiveTableSuite.scala
@@ -45,8 +45,8 @@ class InsertIntoHiveTableSuite extends QueryTest with TestHiveSingleton with Bef
// Since we are doing tests for DDL statements,
// it is better to reset before every test.
hiveContext.reset()
- // Register the testData, which will be used in every test.
- testData.registerTempTable("testData")
+ // Creates a temporary view with testData, which will be used in all tests.
+ testData.createOrReplaceTempView("testData")
}
test("insertInto() HiveTable") {
@@ -98,7 +98,7 @@ class InsertIntoHiveTableSuite extends QueryTest with TestHiveSingleton with Bef
val rowRDD = hiveContext.sparkContext.parallelize(
(1 to 100).map(i => Row(scala.collection.mutable.HashMap(s"key$i" -> s"value$i"))))
val df = hiveContext.createDataFrame(rowRDD, schema)
- df.registerTempTable("tableWithMapValue")
+ df.createOrReplaceTempView("tableWithMapValue")
sql("CREATE TABLE hiveTableWithMapValue(m MAP <STRING, STRING>)")
sql("INSERT OVERWRITE TABLE hiveTableWithMapValue SELECT m FROM tableWithMapValue")
@@ -171,7 +171,7 @@ class InsertIntoHiveTableSuite extends QueryTest with TestHiveSingleton with Bef
StructField("a", ArrayType(StringType, containsNull = false))))
val rowRDD = hiveContext.sparkContext.parallelize((1 to 100).map(i => Row(Seq(s"value$i"))))
val df = hiveContext.createDataFrame(rowRDD, schema)
- df.registerTempTable("tableWithArrayValue")
+ df.createOrReplaceTempView("tableWithArrayValue")
sql("CREATE TABLE hiveTableWithArrayValue(a Array <STRING>)")
sql("INSERT OVERWRITE TABLE hiveTableWithArrayValue SELECT a FROM tableWithArrayValue")
@@ -188,7 +188,7 @@ class InsertIntoHiveTableSuite extends QueryTest with TestHiveSingleton with Bef
val rowRDD = hiveContext.sparkContext.parallelize(
(1 to 100).map(i => Row(Map(s"key$i" -> s"value$i"))))
val df = hiveContext.createDataFrame(rowRDD, schema)
- df.registerTempTable("tableWithMapValue")
+ df.createOrReplaceTempView("tableWithMapValue")
sql("CREATE TABLE hiveTableWithMapValue(m Map <STRING, STRING>)")
sql("INSERT OVERWRITE TABLE hiveTableWithMapValue SELECT m FROM tableWithMapValue")
@@ -205,7 +205,7 @@ class InsertIntoHiveTableSuite extends QueryTest with TestHiveSingleton with Bef
val rowRDD = hiveContext.sparkContext.parallelize(
(1 to 100).map(i => Row(Row(s"value$i"))))
val df = hiveContext.createDataFrame(rowRDD, schema)
- df.registerTempTable("tableWithStructValue")
+ df.createOrReplaceTempView("tableWithStructValue")
sql("CREATE TABLE hiveTableWithStructValue(s Struct <f: STRING>)")
sql("INSERT OVERWRITE TABLE hiveTableWithStructValue SELECT s FROM tableWithStructValue")
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala
index b507018e58..00adb9a44b 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala
@@ -80,7 +80,7 @@ class MetastoreDataSourcesSuite extends QueryTest with SQLTestUtils with TestHiv
""".stripMargin)
withTempTable("expectedJsonTable") {
- read.json(jsonFilePath).registerTempTable("expectedJsonTable")
+ read.json(jsonFilePath).createOrReplaceTempView("expectedJsonTable")
checkAnswer(
sql("SELECT a, b, `c_!@(3)`, `<d>`.`d!`, `<d>`.`=` FROM jsonTable"),
sql("SELECT a, b, `c_!@(3)`, `<d>`.`d!`, `<d>`.`=` FROM expectedJsonTable"))
@@ -110,7 +110,7 @@ class MetastoreDataSourcesSuite extends QueryTest with SQLTestUtils with TestHiv
assert(expectedSchema === table("jsonTable").schema)
withTempTable("expectedJsonTable") {
- read.json(jsonFilePath).registerTempTable("expectedJsonTable")
+ read.json(jsonFilePath).createOrReplaceTempView("expectedJsonTable")
checkAnswer(
sql("SELECT b, `<d>`.`=` FROM jsonTable"),
sql("SELECT b, `<d>`.`=` FROM expectedJsonTable"))
@@ -248,7 +248,7 @@ class MetastoreDataSourcesSuite extends QueryTest with SQLTestUtils with TestHiv
""".stripMargin)
withTempTable("expectedJsonTable") {
- read.json(jsonFilePath).registerTempTable("expectedJsonTable")
+ read.json(jsonFilePath).createOrReplaceTempView("expectedJsonTable")
checkAnswer(
sql("SELECT * FROM jsonTable"),
@@ -554,7 +554,7 @@ class MetastoreDataSourcesSuite extends QueryTest with SQLTestUtils with TestHiv
test("scan a parquet table created through a CTAS statement") {
withSQLConf(HiveUtils.CONVERT_METASTORE_PARQUET.key -> "true") {
withTempTable("jt") {
- (1 to 10).map(i => i -> s"str$i").toDF("a", "b").registerTempTable("jt")
+ (1 to 10).map(i => i -> s"str$i").toDF("a", "b").createOrReplaceTempView("jt")
withTable("test_parquet_ctas") {
sql(
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/ParquetHiveCompatibilitySuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/ParquetHiveCompatibilitySuite.scala
index 3f6418cbe8..ac89bbbf8e 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/ParquetHiveCompatibilitySuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/ParquetHiveCompatibilitySuite.scala
@@ -74,7 +74,7 @@ class ParquetHiveCompatibilitySuite extends ParquetCompatibilityTest with TestHi
val schema = spark.table("parquet_compat").schema
val rowRDD = spark.sparkContext.parallelize(rows).coalesce(1)
- spark.createDataFrame(rowRDD, schema).registerTempTable("data")
+ spark.createDataFrame(rowRDD, schema).createOrReplaceTempView("data")
spark.sql("INSERT INTO TABLE parquet_compat SELECT * FROM data")
}
}
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/QueryPartitionSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/QueryPartitionSuite.scala
index 78569c5808..cc05e56d66 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/QueryPartitionSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/QueryPartitionSuite.scala
@@ -32,7 +32,7 @@ class QueryPartitionSuite extends QueryTest with SQLTestUtils with TestHiveSingl
withSQLConf((SQLConf.HIVE_VERIFY_PARTITION_PATH.key, "true")) {
val testData = sparkContext.parallelize(
(1 to 10).map(i => TestData(i, i.toString))).toDF()
- testData.registerTempTable("testData")
+ testData.createOrReplaceTempView("testData")
val tmpDir = Files.createTempDir()
// create the table for test
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/StatisticsSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/StatisticsSuite.scala
index 8060ef77e7..7011cd8122 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/StatisticsSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/StatisticsSuite.scala
@@ -115,7 +115,7 @@ class StatisticsSuite extends QueryTest with TestHiveSingleton {
sql("DROP TABLE analyzeTable_part").collect()
// Try to analyze a temp table
- sql("""SELECT * FROM src""").registerTempTable("tempTable")
+ sql("""SELECT * FROM src""").createOrReplaceTempView("tempTable")
intercept[UnsupportedOperationException] {
hiveContext.sql("ANALYZE TABLE tempTable COMPUTE STATISTICS")
}
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/UDFSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/UDFSuite.scala
index d1aa5aa931..d121bcbe15 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/UDFSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/UDFSuite.scala
@@ -53,7 +53,7 @@ class UDFSuite
sql("USE default")
testDF = (1 to 10).map(i => s"sTr$i").toDF("value")
- testDF.registerTempTable(testTableName)
+ testDF.createOrReplaceTempView(testTableName)
expectedDF = (1 to 10).map(i => s"STR$i").toDF("value")
super.beforeAll()
}
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/AggregationQuerySuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/AggregationQuerySuite.scala
index c97b3f3197..a2bae2e81f 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/AggregationQuerySuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/AggregationQuerySuite.scala
@@ -180,7 +180,7 @@ abstract class AggregationQuerySuite extends QueryTest with SQLTestUtils with Te
val emptyDF = spark.createDataFrame(
sparkContext.emptyRDD[Row],
StructType(StructField("key", StringType) :: StructField("value", IntegerType) :: Nil))
- emptyDF.registerTempTable("emptyTable")
+ emptyDF.createOrReplaceTempView("emptyTable")
// Register UDAFs
spark.udf.register("mydoublesum", new MyDoubleSum)
@@ -200,7 +200,7 @@ abstract class AggregationQuerySuite extends QueryTest with SQLTestUtils with Te
}
test("group by function") {
- Seq((1, 2)).toDF("a", "b").registerTempTable("data")
+ Seq((1, 2)).toDF("a", "b").createOrReplaceTempView("data")
checkAnswer(
sql("SELECT floor(a) AS a, collect_set(b) FROM data GROUP BY floor(a) ORDER BY a"),
@@ -783,7 +783,7 @@ abstract class AggregationQuerySuite extends QueryTest with SQLTestUtils with Te
(5, 8, 17),
(6, 2, 11)).toDF("a", "b", "c")
- covar_tab.registerTempTable("covar_tab")
+ covar_tab.createOrReplaceTempView("covar_tab")
checkAnswer(
spark.sql(
@@ -938,7 +938,7 @@ abstract class AggregationQuerySuite extends QueryTest with SQLTestUtils with Te
spark.createDataFrame(
sparkContext.parallelize(data, 2),
schema)
- .registerTempTable("noInputSchemaUDAF")
+ .createOrReplaceTempView("noInputSchemaUDAF")
checkAnswer(
spark.sql(
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveExplainSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveExplainSuite.scala
index 17422ca1a0..131b06aec8 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveExplainSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveExplainSuite.scala
@@ -80,7 +80,7 @@ class HiveExplainSuite extends QueryTest with SQLTestUtils with TestHiveSingleto
test("SPARK-6212: The EXPLAIN output of CTAS only shows the analyzed plan") {
withTempTable("jt") {
val rdd = sparkContext.parallelize((1 to 10).map(i => s"""{"a":$i, "b":"str$i"}"""))
- hiveContext.read.json(rdd).registerTempTable("jt")
+ hiveContext.read.json(rdd).createOrReplaceTempView("jt")
val outputs = sql(
s"""
|EXPLAIN EXTENDED
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveOperatorQueryableSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveOperatorQueryableSuite.scala
index b252c6ee2f..4d2f190b8e 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveOperatorQueryableSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveOperatorQueryableSuite.scala
@@ -29,8 +29,8 @@ class HiveOperatorQueryableSuite extends QueryTest with TestHiveSingleton {
test("SPARK-5324 query result of describe command") {
hiveContext.loadTestTable("src")
- // register a describe command to be a temp table
- sql("desc src").registerTempTable("mydesc")
+ // Creates a temporary view with the output of a describe command
+ sql("desc src").createOrReplaceTempView("mydesc")
checkAnswer(
sql("desc mydesc"),
Seq(
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HivePlanTest.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HivePlanTest.scala
index d8d3448add..78c0d1f97e 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HivePlanTest.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HivePlanTest.scala
@@ -28,7 +28,7 @@ class HivePlanTest extends QueryTest with TestHiveSingleton {
import hiveContext.implicits._
test("udf constant folding") {
- Seq.empty[Tuple1[Int]].toDF("a").registerTempTable("t")
+ Seq.empty[Tuple1[Int]].toDF("a").createOrReplaceTempView("t")
val optimized = sql("SELECT cos(null) AS c FROM t").queryExecution.optimizedPlan
val correctAnswer = sql("SELECT cast(null as double) AS c FROM t").queryExecution.optimizedPlan
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala
index 19f8cb3877..2aaaaadb6a 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala
@@ -685,12 +685,12 @@ class HiveQuerySuite extends HiveComparisonTest with BeforeAndAfter {
createQueryTest("case sensitivity when query Hive table",
"SELECT srcalias.KEY, SRCALIAS.value FROM sRc SrCAlias WHERE SrCAlias.kEy < 15")
- test("case sensitivity: registered table") {
+ test("case sensitivity: created temporary view") {
val testData =
TestHive.sparkContext.parallelize(
TestData(1, "str1") ::
TestData(2, "str2") :: Nil)
- testData.toDF().registerTempTable("REGisteredTABle")
+ testData.toDF().createOrReplaceTempView("REGisteredTABle")
assertResult(Array(Row(2, "str2"))) {
sql("SELECT tablealias.A, TABLEALIAS.b FROM reGisteredTABle TableAlias " +
@@ -715,7 +715,7 @@ class HiveQuerySuite extends HiveComparisonTest with BeforeAndAfter {
test("SPARK-2180: HAVING support in GROUP BY clauses (positive)") {
val fixture = List(("foo", 2), ("bar", 1), ("foo", 4), ("bar", 3))
.zipWithIndex.map {case ((value, attr), key) => HavingRow(key, value, attr)}
- TestHive.sparkContext.parallelize(fixture).toDF().registerTempTable("having_test")
+ TestHive.sparkContext.parallelize(fixture).toDF().createOrReplaceTempView("having_test")
val results =
sql("SELECT value, max(attr) AS attr FROM having_test GROUP BY value HAVING attr > 3")
.collect()
@@ -819,12 +819,12 @@ class HiveQuerySuite extends HiveComparisonTest with BeforeAndAfter {
.collect()
}
- // Describe a registered temporary table.
+ // Describe a temporary view.
val testData =
TestHive.sparkContext.parallelize(
TestData(1, "str1") ::
TestData(1, "str2") :: Nil)
- testData.toDF().registerTempTable("test_describe_commands2")
+ testData.toDF().createOrReplaceTempView("test_describe_commands2")
assertResult(
Array(
@@ -996,9 +996,9 @@ class HiveQuerySuite extends HiveComparisonTest with BeforeAndAfter {
}
}
- test("SPARK-3414 regression: should store analyzed logical plan when registering a temp table") {
- sparkContext.makeRDD(Seq.empty[LogEntry]).toDF().registerTempTable("rawLogs")
- sparkContext.makeRDD(Seq.empty[LogFile]).toDF().registerTempTable("logFiles")
+ test("SPARK-3414 regression: should store analyzed logical plan when creating a temporary view") {
+ sparkContext.makeRDD(Seq.empty[LogEntry]).toDF().createOrReplaceTempView("rawLogs")
+ sparkContext.makeRDD(Seq.empty[LogFile]).toDF().createOrReplaceTempView("logFiles")
sql(
"""
@@ -1009,7 +1009,7 @@ class HiveQuerySuite extends HiveComparisonTest with BeforeAndAfter {
FROM logFiles
) files
ON rawLogs.filename = files.name
- """).registerTempTable("boom")
+ """).createOrReplaceTempView("boom")
// This should be successfully analyzed
sql("SELECT * FROM boom").queryExecution.analyzed
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveResolutionSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveResolutionSuite.scala
index dd13b83928..b2f19d7753 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveResolutionSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveResolutionSuite.scala
@@ -32,14 +32,14 @@ class HiveResolutionSuite extends HiveComparisonTest {
test("SPARK-3698: case insensitive test for nested data") {
read.json(sparkContext.makeRDD(
- """{"a": [{"a": {"a": 1}}]}""" :: Nil)).registerTempTable("nested")
+ """{"a": [{"a": {"a": 1}}]}""" :: Nil)).createOrReplaceTempView("nested")
// This should be successfully analyzed
sql("SELECT a[0].A.A from nested").queryExecution.analyzed
}
test("SPARK-5278: check ambiguous reference to fields") {
read.json(sparkContext.makeRDD(
- """{"a": [{"b": 1, "B": 2}]}""" :: Nil)).registerTempTable("nested")
+ """{"a": [{"b": 1, "B": 2}]}""" :: Nil)).createOrReplaceTempView("nested")
// there are 2 fields matching field name "b"; we should report an ambiguous reference error
val exception = intercept[AnalysisException] {
@@ -78,7 +78,7 @@ class HiveResolutionSuite extends HiveComparisonTest {
test("case insensitivity with scala reflection") {
// Test resolution with Scala Reflection
sparkContext.parallelize(Data(1, 2, Nested(1, 2), Seq(Nested(1, 2))) :: Nil)
- .toDF().registerTempTable("caseSensitivityTest")
+ .toDF().createOrReplaceTempView("caseSensitivityTest")
val query = sql("SELECT a, b, A, B, n.a, n.b, n.A, n.B FROM caseSensitivityTest")
assert(query.schema.fields.map(_.name) === Seq("a", "b", "A", "B", "a", "b", "A", "B"),
@@ -89,14 +89,14 @@ class HiveResolutionSuite extends HiveComparisonTest {
ignore("case insensitivity with scala reflection joins") {
// Test resolution with Scala Reflection
sparkContext.parallelize(Data(1, 2, Nested(1, 2), Seq(Nested(1, 2))) :: Nil)
- .toDF().registerTempTable("caseSensitivityTest")
+ .toDF().createOrReplaceTempView("caseSensitivityTest")
sql("SELECT * FROM casesensitivitytest a JOIN casesensitivitytest b ON a.a = b.a").collect()
}
test("nested repeated resolution") {
sparkContext.parallelize(Data(1, 2, Nested(1, 2), Seq(Nested(1, 2))) :: Nil)
- .toDF().registerTempTable("nestedRepeatedTest")
+ .toDF().createOrReplaceTempView("nestedRepeatedTest")
assert(sql("SELECT nestedArray[0].a FROM nestedRepeatedTest").collect().head(0) === 1)
}
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveTableScanSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveTableScanSuite.scala
index 8c9c37fece..60f8be5e0e 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveTableScanSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveTableScanSuite.scala
@@ -84,7 +84,7 @@ class HiveTableScanSuite extends HiveComparisonTest {
sql("""insert into table spark_4959 select "hi" from src limit 1""")
table("spark_4959").select(
'col1.as("CaseSensitiveColName"),
- 'col1.as("CaseSensitiveColName2")).registerTempTable("spark_4959_2")
+ 'col1.as("CaseSensitiveColName2")).createOrReplaceTempView("spark_4959_2")
assert(sql("select CaseSensitiveColName from spark_4959_2").head() === Row("hi"))
assert(sql("select casesensitivecolname from spark_4959_2").head() === Row("hi"))
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveUDFSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveUDFSuite.scala
index 521964eb4e..23b7f6c75b 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveUDFSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveUDFSuite.scala
@@ -153,7 +153,7 @@ class HiveUDFSuite extends QueryTest with TestHiveSingleton with SQLTestUtils {
test("UDFIntegerToString") {
val testData = hiveContext.sparkContext.parallelize(
IntegerCaseClass(1) :: IntegerCaseClass(2) :: Nil).toDF()
- testData.registerTempTable("integerTable")
+ testData.createOrReplaceTempView("integerTable")
val udfName = classOf[UDFIntegerToString].getName
sql(s"CREATE TEMPORARY FUNCTION testUDFIntegerToString AS '$udfName'")
@@ -167,7 +167,7 @@ class HiveUDFSuite extends QueryTest with TestHiveSingleton with SQLTestUtils {
test("UDFToListString") {
val testData = hiveContext.sparkContext.parallelize(StringCaseClass("") :: Nil).toDF()
- testData.registerTempTable("inputTable")
+ testData.createOrReplaceTempView("inputTable")
sql(s"CREATE TEMPORARY FUNCTION testUDFToListString AS '${classOf[UDFToListString].getName}'")
val errMsg = intercept[AnalysisException] {
@@ -182,7 +182,7 @@ class HiveUDFSuite extends QueryTest with TestHiveSingleton with SQLTestUtils {
test("UDFToListInt") {
val testData = hiveContext.sparkContext.parallelize(StringCaseClass("") :: Nil).toDF()
- testData.registerTempTable("inputTable")
+ testData.createOrReplaceTempView("inputTable")
sql(s"CREATE TEMPORARY FUNCTION testUDFToListInt AS '${classOf[UDFToListInt].getName}'")
val errMsg = intercept[AnalysisException] {
@@ -197,7 +197,7 @@ class HiveUDFSuite extends QueryTest with TestHiveSingleton with SQLTestUtils {
test("UDFToStringIntMap") {
val testData = hiveContext.sparkContext.parallelize(StringCaseClass("") :: Nil).toDF()
- testData.registerTempTable("inputTable")
+ testData.createOrReplaceTempView("inputTable")
sql(s"CREATE TEMPORARY FUNCTION testUDFToStringIntMap " +
s"AS '${classOf[UDFToStringIntMap].getName}'")
@@ -213,7 +213,7 @@ class HiveUDFSuite extends QueryTest with TestHiveSingleton with SQLTestUtils {
test("UDFToIntIntMap") {
val testData = hiveContext.sparkContext.parallelize(StringCaseClass("") :: Nil).toDF()
- testData.registerTempTable("inputTable")
+ testData.createOrReplaceTempView("inputTable")
sql(s"CREATE TEMPORARY FUNCTION testUDFToIntIntMap " +
s"AS '${classOf[UDFToIntIntMap].getName}'")
@@ -232,7 +232,7 @@ class HiveUDFSuite extends QueryTest with TestHiveSingleton with SQLTestUtils {
ListListIntCaseClass(Nil) ::
ListListIntCaseClass(Seq((1, 2, 3))) ::
ListListIntCaseClass(Seq((4, 5, 6), (7, 8, 9))) :: Nil).toDF()
- testData.registerTempTable("listListIntTable")
+ testData.createOrReplaceTempView("listListIntTable")
sql(s"CREATE TEMPORARY FUNCTION testUDFListListInt AS '${classOf[UDFListListInt].getName}'")
checkAnswer(
@@ -247,7 +247,7 @@ class HiveUDFSuite extends QueryTest with TestHiveSingleton with SQLTestUtils {
val testData = hiveContext.sparkContext.parallelize(
ListStringCaseClass(Seq("a", "b", "c")) ::
ListStringCaseClass(Seq("d", "e")) :: Nil).toDF()
- testData.registerTempTable("listStringTable")
+ testData.createOrReplaceTempView("listStringTable")
sql(s"CREATE TEMPORARY FUNCTION testUDFListString AS '${classOf[UDFListString].getName}'")
checkAnswer(
@@ -261,7 +261,7 @@ class HiveUDFSuite extends QueryTest with TestHiveSingleton with SQLTestUtils {
test("UDFStringString") {
val testData = hiveContext.sparkContext.parallelize(
StringCaseClass("world") :: StringCaseClass("goodbye") :: Nil).toDF()
- testData.registerTempTable("stringTable")
+ testData.createOrReplaceTempView("stringTable")
sql(s"CREATE TEMPORARY FUNCTION testStringStringUDF AS '${classOf[UDFStringString].getName}'")
checkAnswer(
@@ -283,7 +283,7 @@ class HiveUDFSuite extends QueryTest with TestHiveSingleton with SQLTestUtils {
ListListIntCaseClass(Seq((1, 2, 3))) ::
ListListIntCaseClass(Seq((4, 5, 6), (7, 8, 9))) ::
Nil).toDF()
- testData.registerTempTable("TwoListTable")
+ testData.createOrReplaceTempView("TwoListTable")
sql(s"CREATE TEMPORARY FUNCTION testUDFTwoListList AS '${classOf[UDFTwoListList].getName}'")
checkAnswer(
@@ -295,7 +295,7 @@ class HiveUDFSuite extends QueryTest with TestHiveSingleton with SQLTestUtils {
}
test("Hive UDFs with insufficient number of input arguments should trigger an analysis error") {
- Seq((1, 2)).toDF("a", "b").registerTempTable("testUDF")
+ Seq((1, 2)).toDF("a", "b").createOrReplaceTempView("testUDF")
{
// HiveSimpleUDF
@@ -352,7 +352,7 @@ class HiveUDFSuite extends QueryTest with TestHiveSingleton with SQLTestUtils {
test("Hive UDF in group by") {
withTempTable("tab1") {
- Seq(Tuple1(1451400761)).toDF("test_date").registerTempTable("tab1")
+ Seq(Tuple1(1451400761)).toDF("test_date").createOrReplaceTempView("tab1")
sql(s"CREATE TEMPORARY FUNCTION testUDFToDate AS '${classOf[GenericUDFToDate].getName}'")
val count = sql("select testUDFToDate(cast(test_date as timestamp))" +
" from tab1 group by testUDFToDate(cast(test_date as timestamp))").count()
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala
index ac9a3930fd..81f3ea8a6e 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala
@@ -102,14 +102,14 @@ class SQLQuerySuite extends QueryTest with SQLTestUtils with TestHiveSingleton {
test("SPARK-6835: udtf in lateral view") {
val df = Seq((1, 1)).toDF("c1", "c2")
- df.registerTempTable("table1")
+ df.createOrReplaceTempView("table1")
val query = sql("SELECT c1, v FROM table1 LATERAL VIEW stack(3, 1, c1 + 1, c1 + 2) d AS v")
checkAnswer(query, Row(1, 1) :: Row(1, 2) :: Row(1, 3) :: Nil)
}
test("SPARK-13651: generator outputs shouldn't be resolved from its child's output") {
withTempTable("src") {
- Seq(("id1", "value1")).toDF("key", "value").registerTempTable("src")
+ Seq(("id1", "value1")).toDF("key", "value").createOrReplaceTempView("src")
val query =
sql("SELECT genoutput.* FROM src " +
"LATERAL VIEW explode(map('key1', 100, 'key2', 200)) genoutput AS key, value")
@@ -135,8 +135,8 @@ class SQLQuerySuite extends QueryTest with SQLTestUtils with TestHiveSingleton {
Order(1, "Atlas", "MTB", 434, "2015-01-07", "John D", "Pacifica", "CA", 20151),
Order(11, "Swift", "YFlikr", 137, "2015-01-23", "John D", "Hayward", "CA", 20151))
- orders.toDF.registerTempTable("orders1")
- orderUpdates.toDF.registerTempTable("orderupdates1")
+ orders.toDF.createOrReplaceTempView("orders1")
+ orderUpdates.toDF.createOrReplaceTempView("orderupdates1")
sql(
"""CREATE TABLE orders(
@@ -305,7 +305,7 @@ class SQLQuerySuite extends QueryTest with SQLTestUtils with TestHiveSingleton {
test("SPARK-5371: union with null and sum") {
val df = Seq((1, 1)).toDF("c1", "c2")
- df.registerTempTable("table1")
+ df.createOrReplaceTempView("table1")
val query = sql(
"""
@@ -329,7 +329,7 @@ class SQLQuerySuite extends QueryTest with SQLTestUtils with TestHiveSingleton {
test("CTAS with WITH clause") {
val df = Seq((1, 1)).toDF("c1", "c2")
- df.registerTempTable("table1")
+ df.createOrReplaceTempView("table1")
sql(
"""
@@ -346,7 +346,7 @@ class SQLQuerySuite extends QueryTest with SQLTestUtils with TestHiveSingleton {
}
test("explode nested Field") {
- Seq(NestedArray1(NestedArray2(Seq(1, 2, 3)))).toDF.registerTempTable("nestedArray")
+ Seq(NestedArray1(NestedArray2(Seq(1, 2, 3)))).toDF.createOrReplaceTempView("nestedArray")
checkAnswer(
sql("SELECT ints FROM nestedArray LATERAL VIEW explode(a.b) a AS ints"),
Row(1) :: Row(2) :: Row(3) :: Nil)
@@ -543,7 +543,7 @@ class SQLQuerySuite extends QueryTest with SQLTestUtils with TestHiveSingleton {
}
test("specifying the column list for CTAS") {
- Seq((1, "111111"), (2, "222222")).toDF("key", "value").registerTempTable("mytable1")
+ Seq((1, "111111"), (2, "222222")).toDF("key", "value").createOrReplaceTempView("mytable1")
sql("create table gen__tmp(a int, b string) as select key, value from mytable1")
checkAnswer(
@@ -598,7 +598,7 @@ class SQLQuerySuite extends QueryTest with SQLTestUtils with TestHiveSingleton {
test("double nested data") {
sparkContext.parallelize(Nested1(Nested2(Nested3(1))) :: Nil)
- .toDF().registerTempTable("nested")
+ .toDF().createOrReplaceTempView("nested")
checkAnswer(
sql("SELECT f1.f2.f3 FROM nested"),
Row(1))
@@ -682,7 +682,7 @@ class SQLQuerySuite extends QueryTest with SQLTestUtils with TestHiveSingleton {
test("SPARK-4963 DataFrame sample on mutable row return wrong result") {
sql("SELECT * FROM src WHERE key % 2 = 0")
.sample(withReplacement = false, fraction = 0.3)
- .registerTempTable("sampled")
+ .createOrReplaceTempView("sampled")
(1 to 10).foreach { i =>
checkAnswer(
sql("SELECT * FROM sampled WHERE key % 2 = 1"),
@@ -707,7 +707,7 @@ class SQLQuerySuite extends QueryTest with SQLTestUtils with TestHiveSingleton {
val rowRdd = sparkContext.parallelize(row :: Nil)
- hiveContext.createDataFrame(rowRdd, schema).registerTempTable("testTable")
+ hiveContext.createDataFrame(rowRdd, schema).createOrReplaceTempView("testTable")
sql(
"""CREATE TABLE nullValuesInInnerComplexTypes
@@ -733,14 +733,14 @@ class SQLQuerySuite extends QueryTest with SQLTestUtils with TestHiveSingleton {
test("SPARK-4296 Grouping field with Hive UDF as sub expression") {
val rdd = sparkContext.makeRDD( """{"a": "str", "b":"1", "c":"1970-01-01 00:00:00"}""" :: Nil)
- read.json(rdd).registerTempTable("data")
+ read.json(rdd).createOrReplaceTempView("data")
checkAnswer(
sql("SELECT concat(a, '-', b), year(c) FROM data GROUP BY concat(a, '-', b), year(c)"),
Row("str-1", 1970))
dropTempTable("data")
- read.json(rdd).registerTempTable("data")
+ read.json(rdd).createOrReplaceTempView("data")
checkAnswer(sql("SELECT year(c) + 1 FROM data GROUP BY year(c) + 1"), Row(1971))
dropTempTable("data")
@@ -748,14 +748,14 @@ class SQLQuerySuite extends QueryTest with SQLTestUtils with TestHiveSingleton {
test("resolve udtf in projection #1") {
val rdd = sparkContext.makeRDD((1 to 5).map(i => s"""{"a":[$i, ${i + 1}]}"""))
- read.json(rdd).registerTempTable("data")
+ read.json(rdd).createOrReplaceTempView("data")
val df = sql("SELECT explode(a) AS val FROM data")
val col = df("val")
}
test("resolve udtf in projection #2") {
val rdd = sparkContext.makeRDD((1 to 2).map(i => s"""{"a":[$i, ${i + 1}]}"""))
- read.json(rdd).registerTempTable("data")
+ read.json(rdd).createOrReplaceTempView("data")
checkAnswer(sql("SELECT explode(map(1, 1)) FROM data LIMIT 1"), Row(1, 1) :: Nil)
checkAnswer(sql("SELECT explode(map(1, 1)) as (k1, k2) FROM data LIMIT 1"), Row(1, 1) :: Nil)
intercept[AnalysisException] {
@@ -770,7 +770,7 @@ class SQLQuerySuite extends QueryTest with SQLTestUtils with TestHiveSingleton {
// TGF with non-TGF in project is allowed in Spark SQL, but not in Hive
test("TGF with non-TGF in projection") {
val rdd = sparkContext.makeRDD( """{"a": "1", "b":"1"}""" :: Nil)
- read.json(rdd).registerTempTable("data")
+ read.json(rdd).createOrReplaceTempView("data")
checkAnswer(
sql("SELECT explode(map(a, b)) as (k1, k2), a, b FROM data"),
Row("1", "1", "1", "1") :: Nil)
@@ -784,7 +784,7 @@ class SQLQuerySuite extends QueryTest with SQLTestUtils with TestHiveSingleton {
// PreInsertionCasts will actually start to work before ImplicitGenerate and then
// generates an invalid query plan.
val rdd = sparkContext.makeRDD((1 to 5).map(i => s"""{"a":[$i, ${i + 1}]}"""))
- read.json(rdd).registerTempTable("data")
+ read.json(rdd).createOrReplaceTempView("data")
val originalConf = sessionState.convertCTAS
setConf(HiveUtils.CONVERT_CTAS, false)
@@ -824,7 +824,7 @@ class SQLQuerySuite extends QueryTest with SQLTestUtils with TestHiveSingleton {
Seq.empty[(java.math.BigDecimal, java.math.BigDecimal)]
.toDF("d1", "d2")
.select($"d1".cast(DecimalType(10, 5)).as("d"))
- .registerTempTable("dn")
+ .createOrReplaceTempView("dn")
sql("select d from dn union all select d * 2 from dn")
.queryExecution.analyzed
@@ -832,27 +832,27 @@ class SQLQuerySuite extends QueryTest with SQLTestUtils with TestHiveSingleton {
test("Star Expansion - script transform") {
val data = (1 to 100000).map { i => (i, i, i) }
- data.toDF("d1", "d2", "d3").registerTempTable("script_trans")
+ data.toDF("d1", "d2", "d3").createOrReplaceTempView("script_trans")
assert(100000 === sql("SELECT TRANSFORM (*) USING 'cat' FROM script_trans").count())
}
test("test script transform for stdout") {
val data = (1 to 100000).map { i => (i, i, i) }
- data.toDF("d1", "d2", "d3").registerTempTable("script_trans")
+ data.toDF("d1", "d2", "d3").createOrReplaceTempView("script_trans")
assert(100000 ===
sql("SELECT TRANSFORM (d1, d2, d3) USING 'cat' AS (a,b,c) FROM script_trans").count())
}
test("test script transform for stderr") {
val data = (1 to 100000).map { i => (i, i, i) }
- data.toDF("d1", "d2", "d3").registerTempTable("script_trans")
+ data.toDF("d1", "d2", "d3").createOrReplaceTempView("script_trans")
assert(0 ===
sql("SELECT TRANSFORM (d1, d2, d3) USING 'cat 1>&2' AS (a,b,c) FROM script_trans").count())
}
test("test script transform data type") {
val data = (1 to 5).map { i => (i, i) }
- data.toDF("key", "value").registerTempTable("test")
+ data.toDF("key", "value").createOrReplaceTempView("test")
checkAnswer(
sql("""FROM
|(FROM test SELECT TRANSFORM(key, value) USING 'cat' AS (`thing1` int, thing2 string)) t
@@ -864,7 +864,7 @@ class SQLQuerySuite extends QueryTest with SQLTestUtils with TestHiveSingleton {
withTempTable("data") {
spark.range(1, 5)
.select(array($"id", $"id" + 1).as("a"), $"id".as("b"), (lit(10) - $"id").as("c"))
- .registerTempTable("data")
+ .createOrReplaceTempView("data")
// case 1: missing sort columns are resolvable if join is true
checkAnswer(
@@ -888,7 +888,7 @@ class SQLQuerySuite extends QueryTest with SQLTestUtils with TestHiveSingleton {
}
test("test case key when") {
- (1 to 5).map(i => (i, i.toString)).toDF("k", "v").registerTempTable("t")
+ (1 to 5).map(i => (i, i.toString)).toDF("k", "v").createOrReplaceTempView("t")
checkAnswer(
sql("SELECT CASE k WHEN 2 THEN 22 WHEN 4 THEN 44 ELSE 0 END, v FROM t"),
Row(0, "1") :: Row(22, "2") :: Row(0, "3") :: Row(44, "4") :: Row(0, "5") :: Nil)
@@ -897,7 +897,7 @@ class SQLQuerySuite extends QueryTest with SQLTestUtils with TestHiveSingleton {
test("SPARK-7269 Check analysis failed in case in-sensitive") {
Seq(1, 2, 3).map { i =>
(i.toString, i.toString)
- }.toDF("key", "value").registerTempTable("df_analysis")
+ }.toDF("key", "value").createOrReplaceTempView("df_analysis")
sql("SELECT kEy from df_analysis group by key").collect()
sql("SELECT kEy+3 from df_analysis group by key+3").collect()
sql("SELECT kEy+3, a.kEy, A.kEy from df_analysis A group by key").collect()
@@ -1031,7 +1031,7 @@ class SQLQuerySuite extends QueryTest with SQLTestUtils with TestHiveSingleton {
test("SPARK-8588 HiveTypeCoercion.inConversion fires too early") {
val df =
createDataFrame(Seq((1, "2014-01-01"), (2, "2015-01-01"), (3, "2016-01-01")))
- df.toDF("id", "datef").registerTempTable("test_SPARK8588")
+ df.toDF("id", "datef").createOrReplaceTempView("test_SPARK8588")
checkAnswer(
sql(
"""
@@ -1046,7 +1046,7 @@ class SQLQuerySuite extends QueryTest with SQLTestUtils with TestHiveSingleton {
test("SPARK-9371: fix the support for special chars in column names for hive context") {
read.json(sparkContext.makeRDD(
"""{"a": {"c.b": 1}, "b.$q": [{"a@!.q": 1}], "q.w": {"w.i&": [1]}}""" :: Nil))
- .registerTempTable("t")
+ .createOrReplaceTempView("t")
checkAnswer(sql("SELECT a.`c.b`, `b.$q`[0].`a@!.q`, `q.w`.`w.i&`[0] FROM t"), Row(1, 1, 1))
}
@@ -1125,7 +1125,7 @@ class SQLQuerySuite extends QueryTest with SQLTestUtils with TestHiveSingleton {
spark
.range(5)
.selectExpr("id AS a", "id AS b")
- .registerTempTable("test")
+ .createOrReplaceTempView("test")
checkAnswer(
sql(
@@ -1143,7 +1143,7 @@ class SQLQuerySuite extends QueryTest with SQLTestUtils with TestHiveSingleton {
spark
.range(5)
.selectExpr("id AS a", "id AS b")
- .registerTempTable("test")
+ .createOrReplaceTempView("test")
val df = sql(
"""FROM test
@@ -1162,7 +1162,7 @@ class SQLQuerySuite extends QueryTest with SQLTestUtils with TestHiveSingleton {
test("SPARK-10741: Sort on Aggregate using parquet") {
withTable("test10741") {
withTempTable("src") {
- Seq("a" -> 5, "a" -> 9, "b" -> 6).toDF().registerTempTable("src")
+ Seq("a" -> 5, "a" -> 9, "b" -> 6).toDF().createOrReplaceTempView("src")
sql("CREATE TABLE test10741(c1 STRING, c2 INT) STORED AS PARQUET AS SELECT * FROM src")
}
@@ -1374,7 +1374,7 @@ class SQLQuerySuite extends QueryTest with SQLTestUtils with TestHiveSingleton {
withTempTable("t1") {
spark.range(10)
.select(array($"id", $"id" + 1).as("arr"), $"id")
- .registerTempTable("source")
+ .createOrReplaceTempView("source")
withTable("dest1", "dest2") {
sql("CREATE TABLE dest1 (i INT)")
sql("CREATE TABLE dest2 (i INT)")
@@ -1407,7 +1407,7 @@ class SQLQuerySuite extends QueryTest with SQLTestUtils with TestHiveSingleton {
withTempTable("t1", "t2") {
val path = dir.getCanonicalPath
val ds = spark.range(10)
- ds.registerTempTable("t1")
+ ds.createOrReplaceTempView("t1")
sql(
s"""CREATE TEMPORARY TABLE t2
@@ -1431,7 +1431,7 @@ class SQLQuerySuite extends QueryTest with SQLTestUtils with TestHiveSingleton {
"shouldn always be used together with PATH data source option"
) {
withTempTable("t") {
- spark.range(10).registerTempTable("t")
+ spark.range(10).createOrReplaceTempView("t")
val message = intercept[IllegalArgumentException] {
sql(
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLWindowFunctionSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLWindowFunctionSuite.scala
index 4d284e1042..47ceefb88e 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLWindowFunctionSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLWindowFunctionSuite.scala
@@ -40,7 +40,7 @@ class SQLWindowFunctionSuite extends QueryTest with SQLTestUtils with TestHiveSi
WindowData(5, "c", 9),
WindowData(6, "c", 10)
)
- sparkContext.parallelize(data).toDF().registerTempTable("windowData")
+ sparkContext.parallelize(data).toDF().createOrReplaceTempView("windowData")
checkAnswer(
sql(
@@ -112,7 +112,7 @@ class SQLWindowFunctionSuite extends QueryTest with SQLTestUtils with TestHiveSi
WindowData(5, "c", 9),
WindowData(6, "c", 10)
)
- sparkContext.parallelize(data).toDF().registerTempTable("windowData")
+ sparkContext.parallelize(data).toDF().createOrReplaceTempView("windowData")
checkAnswer(
sql(
@@ -139,7 +139,7 @@ class SQLWindowFunctionSuite extends QueryTest with SQLTestUtils with TestHiveSi
WindowData(5, "c", 9),
WindowData(6, "c", 10)
)
- sparkContext.parallelize(data).toDF().registerTempTable("windowData")
+ sparkContext.parallelize(data).toDF().createOrReplaceTempView("windowData")
checkAnswer(
sql(
@@ -182,7 +182,7 @@ class SQLWindowFunctionSuite extends QueryTest with SQLTestUtils with TestHiveSi
WindowData(5, "c", 9),
WindowData(6, "c", 10)
)
- sparkContext.parallelize(data).toDF().registerTempTable("windowData")
+ sparkContext.parallelize(data).toDF().createOrReplaceTempView("windowData")
val e = intercept[AnalysisException] {
sql(
@@ -203,7 +203,7 @@ class SQLWindowFunctionSuite extends QueryTest with SQLTestUtils with TestHiveSi
WindowData(5, "c", 9),
WindowData(6, "c", 10)
)
- sparkContext.parallelize(data).toDF().registerTempTable("windowData")
+ sparkContext.parallelize(data).toDF().createOrReplaceTempView("windowData")
checkAnswer(
sql(
@@ -232,7 +232,7 @@ class SQLWindowFunctionSuite extends QueryTest with SQLTestUtils with TestHiveSi
WindowData(5, "c", 9),
WindowData(6, "c", 11)
)
- sparkContext.parallelize(data).toDF().registerTempTable("windowData")
+ sparkContext.parallelize(data).toDF().createOrReplaceTempView("windowData")
checkAnswer(
sql("select month, product, sum(product + 1) over() from windowData order by area"),
@@ -301,7 +301,7 @@ class SQLWindowFunctionSuite extends QueryTest with SQLTestUtils with TestHiveSi
WindowData(5, "c", 9),
WindowData(6, "c", 11)
)
- sparkContext.parallelize(data).toDF().registerTempTable("windowData")
+ sparkContext.parallelize(data).toDF().createOrReplaceTempView("windowData")
checkAnswer(
sql(
@@ -322,7 +322,7 @@ class SQLWindowFunctionSuite extends QueryTest with SQLTestUtils with TestHiveSi
test("window function: multiple window expressions in a single expression") {
val nums = sparkContext.parallelize(1 to 10).map(x => (x, x % 2)).toDF("x", "y")
- nums.registerTempTable("nums")
+ nums.createOrReplaceTempView("nums")
val expected =
Row(1, 1, 1, 55, 1, 57) ::
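SQLWindowFunctionSuite runs window expressions in SQL against these views; the same idea in miniature, with hypothetical data:

    Seq((1, 2), (2, 4), (3, 6)).toDF("x", "y").createOrReplaceTempView("nums")
    spark.sql("SELECT x, sum(y) OVER (ORDER BY x) AS running FROM nums").show()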
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcPartitionDiscoverySuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcPartitionDiscoverySuite.scala
index 6161412a49..fed0d11e9d 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcPartitionDiscoverySuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcPartitionDiscoverySuite.scala
@@ -90,7 +90,7 @@ class OrcPartitionDiscoverySuite extends QueryTest with TestHiveSingleton with B
makePartitionDir(base, defaultPartitionName, "pi" -> pi, "ps" -> ps))
}
- read.orc(base.getCanonicalPath).registerTempTable("t")
+ read.orc(base.getCanonicalPath).createOrReplaceTempView("t")
withTempTable("t") {
checkAnswer(
@@ -137,7 +137,7 @@ class OrcPartitionDiscoverySuite extends QueryTest with TestHiveSingleton with B
makePartitionDir(base, defaultPartitionName, "pi" -> pi, "ps" -> ps))
}
- read.orc(base.getCanonicalPath).registerTempTable("t")
+ read.orc(base.getCanonicalPath).createOrReplaceTempView("t")
withTempTable("t") {
checkAnswer(
@@ -189,7 +189,7 @@ class OrcPartitionDiscoverySuite extends QueryTest with TestHiveSingleton with B
read
.option(ConfVars.DEFAULTPARTITIONNAME.varname, defaultPartitionName)
.orc(base.getCanonicalPath)
- .registerTempTable("t")
+ .createOrReplaceTempView("t")
withTempTable("t") {
checkAnswer(
@@ -231,7 +231,7 @@ class OrcPartitionDiscoverySuite extends QueryTest with TestHiveSingleton with B
read
.option(ConfVars.DEFAULTPARTITIONNAME.varname, defaultPartitionName)
.orc(base.getCanonicalPath)
- .registerTempTable("t")
+ .createOrReplaceTempView("t")
withTempTable("t") {
checkAnswer(
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcQuerySuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcQuerySuite.scala
index 9a0885822b..f83b3a3de2 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcQuerySuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcQuerySuite.scala
@@ -98,7 +98,7 @@ class OrcQuerySuite extends QueryTest with BeforeAndAfterAll with OrcTest {
test("Creating case class RDD table") {
val data = (1 to 100).map(i => (i, s"val_$i"))
- sparkContext.parallelize(data).toDF().registerTempTable("t")
+ sparkContext.parallelize(data).toDF().createOrReplaceTempView("t")
withTempTable("t") {
checkAnswer(sql("SELECT * FROM t"), data.toDF().collect())
}
@@ -223,7 +223,7 @@ class OrcQuerySuite extends QueryTest with BeforeAndAfterAll with OrcTest {
test("appending") {
val data = (0 until 10).map(i => (i, i.toString))
- createDataFrame(data).toDF("c1", "c2").registerTempTable("tmp")
+ createDataFrame(data).toDF("c1", "c2").createOrReplaceTempView("tmp")
withOrcTable(data, "t") {
sql("INSERT INTO TABLE t SELECT * FROM tmp")
checkAnswer(table("t"), (data ++ data).map(Row.fromTuple))
@@ -233,7 +233,7 @@ class OrcQuerySuite extends QueryTest with BeforeAndAfterAll with OrcTest {
test("overwriting") {
val data = (0 until 10).map(i => (i, i.toString))
- createDataFrame(data).toDF("c1", "c2").registerTempTable("tmp")
+ createDataFrame(data).toDF("c1", "c2").createOrReplaceTempView("tmp")
withOrcTable(data, "t") {
sql("INSERT OVERWRITE TABLE t SELECT * FROM tmp")
checkAnswer(table("t"), data.map(Row.fromTuple))
@@ -324,7 +324,7 @@ class OrcQuerySuite extends QueryTest with BeforeAndAfterAll with OrcTest {
""".stripMargin)
val emptyDF = Seq.empty[(Int, String)].toDF("key", "value").coalesce(1)
- emptyDF.registerTempTable("empty")
+ emptyDF.createOrReplaceTempView("empty")
// This creates 1 empty ORC file with Hive ORC SerDe. We are using this trick because
// Spark SQL's ORC data source always avoids writing empty ORC files.
@@ -340,7 +340,7 @@ class OrcQuerySuite extends QueryTest with BeforeAndAfterAll with OrcTest {
assert(errorMessage.contains("Unable to infer schema for ORC"))
val singleRowDF = Seq((0, "foo")).toDF("key", "value").coalesce(1)
- singleRowDF.registerTempTable("single")
+ singleRowDF.createOrReplaceTempView("single")
spark.sql(
s"""INSERT INTO TABLE empty_orc
@@ -422,7 +422,7 @@ class OrcQuerySuite extends QueryTest with BeforeAndAfterAll with OrcTest {
""".stripMargin)
val singleRowDF = Seq((0, "foo")).toDF("key", "value").coalesce(1)
- singleRowDF.registerTempTable("single")
+ singleRowDF.createOrReplaceTempView("single")
spark.sql(
s"""INSERT INTO TABLE dummy_orc
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcSourceSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcSourceSuite.scala
index 96a7364437..6081d86f44 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcSourceSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcSourceSuite.scala
@@ -51,7 +51,7 @@ abstract class OrcSuite extends QueryTest with TestHiveSingleton with BeforeAndA
.makeRDD(1 to 10)
.map(i => OrcData(i, s"part-$i"))
.toDF()
- .registerTempTable(s"orc_temp_table")
+ .createOrReplaceTempView(s"orc_temp_table")
sql(
s"""CREATE EXTERNAL TABLE normal_orc(
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/parquetSuites.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/parquetSuites.scala
index f52c6e48c5..7fe158c218 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/parquetSuites.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/parquetSuites.scala
@@ -171,8 +171,9 @@ class ParquetMetastoreSuite extends ParquetPartitioningTest {
sql(s"ALTER TABLE partitioned_parquet_with_complextypes ADD PARTITION (p=$p)")
}
- (1 to 10).map(i => (i, s"str$i")).toDF("a", "b").registerTempTable("jt")
- (1 to 10).map(i => Tuple1(Seq(new Integer(i), null))).toDF("a").registerTempTable("jt_array")
+ (1 to 10).map(i => (i, s"str$i")).toDF("a", "b").createOrReplaceTempView("jt")
+ (1 to 10).map(i => Tuple1(Seq(new Integer(i), null))).toDF("a")
+ .createOrReplaceTempView("jt_array")
setConf(HiveUtils.CONVERT_METASTORE_PARQUET, true)
}
@@ -541,8 +542,8 @@ class ParquetMetastoreSuite extends ParquetPartitioningTest {
|STORED AS PARQUET
""".stripMargin)
- // Temp table to insert data into partitioned table
- Seq("foo", "bar").toDF("a").registerTempTable("test_temp")
+ // Temp view that is used to insert data into partitioned table
+ Seq("foo", "bar").toDF("a").createOrReplaceTempView("test_temp")
sql("INSERT INTO test_added_partitions PARTITION(b='0') SELECT a FROM test_temp")
checkAnswer(
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/sources/HadoopFsRelationTest.scala b/sql/hive/src/test/scala/org/apache/spark/sql/sources/HadoopFsRelationTest.scala
index 78d2dc28d6..a3183f2977 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/sources/HadoopFsRelationTest.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/sources/HadoopFsRelationTest.scala
@@ -91,7 +91,7 @@ abstract class HadoopFsRelationTest extends QueryTest with SQLTestUtils with Tes
yield Row(s"val_$i", s"val_$i", s"val_$i", s"val_$i", 1, 1, 1, 1))
// Self-join
- df.registerTempTable("t")
+ df.createOrReplaceTempView("t")
withTempTable("t") {
checkAnswer(
sql(
@@ -337,7 +337,7 @@ abstract class HadoopFsRelationTest extends QueryTest with SQLTestUtils with Tes
}
test("saveAsTable()/load() - non-partitioned table - ErrorIfExists") {
- Seq.empty[(Int, String)].toDF().registerTempTable("t")
+ Seq.empty[(Int, String)].toDF().createOrReplaceTempView("t")
withTempTable("t") {
intercept[AnalysisException] {
@@ -347,7 +347,7 @@ abstract class HadoopFsRelationTest extends QueryTest with SQLTestUtils with Tes
}
test("saveAsTable()/load() - non-partitioned table - Ignore") {
- Seq.empty[(Int, String)].toDF().registerTempTable("t")
+ Seq.empty[(Int, String)].toDF().createOrReplaceTempView("t")
withTempTable("t") {
testDF.write.format(dataSourceName).mode(SaveMode.Ignore).saveAsTable("t")
@@ -459,7 +459,7 @@ abstract class HadoopFsRelationTest extends QueryTest with SQLTestUtils with Tes
}
test("saveAsTable()/load() - partitioned table - ErrorIfExists") {
- Seq.empty[(Int, String)].toDF().registerTempTable("t")
+ Seq.empty[(Int, String)].toDF().createOrReplaceTempView("t")
withTempTable("t") {
intercept[AnalysisException] {
@@ -474,7 +474,7 @@ abstract class HadoopFsRelationTest extends QueryTest with SQLTestUtils with Tes
}
test("saveAsTable()/load() - partitioned table - Ignore") {
- Seq.empty[(Int, String)].toDF().registerTempTable("t")
+ Seq.empty[(Int, String)].toDF().createOrReplaceTempView("t")
withTempTable("t") {
partitionedTestDF.write
diff --git a/sql/hivecontext-compatibility/src/test/scala/org/apache/spark/sql/hive/HiveContextCompatibilitySuite.scala b/sql/hivecontext-compatibility/src/test/scala/org/apache/spark/sql/hive/HiveContextCompatibilitySuite.scala
index 5df674d60e..1c1db72e27 100644
--- a/sql/hivecontext-compatibility/src/test/scala/org/apache/spark/sql/hive/HiveContextCompatibilitySuite.scala
+++ b/sql/hivecontext-compatibility/src/test/scala/org/apache/spark/sql/hive/HiveContextCompatibilitySuite.scala
@@ -66,7 +66,7 @@ class HiveContextCompatibilitySuite extends SparkFunSuite with BeforeAndAfterEac
val res = df3.collect()
val expected = Seq((18, 18, 8)).toDF("a", "x", "b").collect()
assert(res.toSeq == expected.toSeq)
- df3.registerTempTable("mai_table")
+ df3.createOrReplaceTempView("mai_table")
val df4 = hc.table("mai_table")
val res2 = df4.collect()
assert(res2.toSeq == expected.toSeq)
@@ -82,7 +82,7 @@ class HiveContextCompatibilitySuite extends SparkFunSuite with BeforeAndAfterEac
val databases2 = hc.sql("SHOW DATABASES").collect().map(_.getString(0))
assert(databases2.toSet == Set("default", "mee_db"))
val df = (1 to 10).map { i => ("bob" + i.toString, i) }.toDF("name", "age")
- df.registerTempTable("mee_table")
+ df.createOrReplaceTempView("mee_table")
hc.sql("CREATE TABLE moo_table (name string, age int)")
hc.sql("INSERT INTO moo_table SELECT * FROM mee_table")
assert(