author     Cheng Hao <hao.cheng@intel.com>       2016-03-16 11:20:15 -0700
committer  Reynold Xin <rxin@databricks.com>     2016-03-16 11:20:15 -0700
commit     d9670f84739b0840501b19b8cb0e851850edb8c1 (patch)
tree       a664b7f0ca4e64b025408d770412dd8be8c07422 /sql/hive
parent     d9e8f26d0334f393e3b02d7a3b607be54a2a5efe (diff)
[SPARK-13894][SQL] SqlContext.range return type from DataFrame to DataSet
## What changes were proposed in this pull request?

https://issues.apache.org/jira/browse/SPARK-13894

Change the return type of the `SQLContext.range` API from `DataFrame` to `Dataset`.

## How was this patch tested?

No additional unit test required.

Author: Cheng Hao <hao.cheng@intel.com>

Closes #11730 from chenghao-intel/range.
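For context, a minimal sketch (not part of this patch) of what the change means for callers. It assumes the new return type is `Dataset[java.lang.Long]`, as in later Spark releases; the explicit `.toDF()` conversion is exactly what the updated test suites below now apply:

```scala
import org.apache.spark.sql.{DataFrame, Dataset, SQLContext}

object RangeExample {
  def demo(sqlContext: SQLContext): Unit = {
    // After this change, range(...) is typed: a Dataset of
    // java.lang.Long values in a single column named "id",
    // rather than an untyped DataFrame (Dataset[Row]).
    val ds: Dataset[java.lang.Long] = sqlContext.range(10)

    // Callers that need the untyped DataFrame API convert back
    // explicitly, which is the .toDF() added throughout this diff.
    val df: DataFrame = ds.toDF()

    df.filter("id > 5").show()
  }
}
```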
Diffstat (limited to 'sql/hive')
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/MultiDatabaseSuite.scala              | 2 +-
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala         | 4 ++--
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/sources/ParquetHadoopFsRelationSuite.scala | 2 +-
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/sources/hadoopFsRelationSuites.scala       | 2 +-
4 files changed, 5 insertions, 5 deletions
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/MultiDatabaseSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/MultiDatabaseSuite.scala
index e2effef0b9..d275190744 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/MultiDatabaseSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/MultiDatabaseSuite.scala
@@ -22,7 +22,7 @@ import org.apache.spark.sql.hive.test.TestHiveSingleton
 import org.apache.spark.sql.test.SQLTestUtils
 
 class MultiDatabaseSuite extends QueryTest with SQLTestUtils with TestHiveSingleton {
-  private lazy val df = sqlContext.range(10).coalesce(1)
+  private lazy val df = sqlContext.range(10).coalesce(1).toDF()
 
   private def checkTablePath(dbName: String, tableName: String): Unit = {
     val metastoreTable = hiveContext.sessionState.catalog.client.getTable(dbName, tableName)
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala
index d6c10d6ed9..9667b53e48 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala
@@ -1425,7 +1425,7 @@ class SQLQuerySuite extends QueryTest with SQLTestUtils with TestHiveSingleton {
   }
 
   test("run sql directly on files") {
-    val df = sqlContext.range(100)
+    val df = sqlContext.range(100).toDF()
     withTempPath(f => {
       df.write.parquet(f.getCanonicalPath)
       checkAnswer(sql(s"select id from parquet.`${f.getCanonicalPath}`"),
@@ -1582,7 +1582,7 @@ class SQLQuerySuite extends QueryTest with SQLTestUtils with TestHiveSingleton {
     withView("v") {
       sql("CREATE VIEW v AS SELECT * FROM add_col")
       sqlContext.range(10).select('id, 'id as 'a).write.mode("overwrite").saveAsTable("add_col")
-      checkAnswer(sql("SELECT * FROM v"), sqlContext.range(10))
+      checkAnswer(sql("SELECT * FROM v"), sqlContext.range(10).toDF())
    }
   }
 }
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/sources/ParquetHadoopFsRelationSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/sources/ParquetHadoopFsRelationSuite.scala
index 8856148a95..1e5dbd991e 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/sources/ParquetHadoopFsRelationSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/sources/ParquetHadoopFsRelationSuite.scala
@@ -126,7 +126,7 @@ class ParquetHadoopFsRelationSuite extends HadoopFsRelationTest {
   test("SPARK-8604: Parquet data source should write summary file while doing appending") {
     withTempPath { dir =>
       val path = dir.getCanonicalPath
-      val df = sqlContext.range(0, 5)
+      val df = sqlContext.range(0, 5).toDF()
       df.write.mode(SaveMode.Overwrite).parquet(path)
 
       val summaryPath = new Path(path, "_metadata")
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/sources/hadoopFsRelationSuites.scala b/sql/hive/src/test/scala/org/apache/spark/sql/sources/hadoopFsRelationSuites.scala
index 7e09616380..7e5506ee4a 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/sources/hadoopFsRelationSuites.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/sources/hadoopFsRelationSuites.scala
@@ -673,7 +673,7 @@ abstract class HadoopFsRelationTest extends QueryTest with SQLTestUtils with TestHiveSingleton {
         classOf[AlwaysFailOutputCommitter].getName)
 
       // Code below shouldn't throw since customized output committer should be disabled.
-      val df = sqlContext.range(10).coalesce(1)
+      val df = sqlContext.range(10).toDF().coalesce(1)
       df.write.format(dataSourceName).save(dir.getCanonicalPath)
       checkAnswer(
         sqlContext