Diffstat (limited to 'sql/hive')
 sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala | 48 ++++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 46 insertions(+), 2 deletions(-)
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala
index b5691450ca..24de223cf8 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala
@@ -1247,11 +1247,12 @@ class SQLQuerySuite extends QueryTest with SQLTestUtils with TestHiveSingleton {
}
}
- test("run sql directly on files") {
+ test("run sql directly on files - parquet") {
val df = spark.range(100).toDF()
withTempPath(f => {
df.write.parquet(f.getCanonicalPath)
- checkAnswer(sql(s"select id from parquet.`${f.getCanonicalPath}`"),
+ // data source type is case insensitive
+ checkAnswer(sql(s"select id from Parquet.`${f.getCanonicalPath}`"),
df)
checkAnswer(sql(s"select id from `org.apache.spark.sql.parquet`.`${f.getCanonicalPath}`"),
df)
@@ -1260,6 +1261,49 @@ class SQLQuerySuite extends QueryTest with SQLTestUtils with TestHiveSingleton {
})
}
+ test("run sql directly on files - orc") {
+ val df = spark.range(100).toDF()
+ withTempPath(f => {
+ df.write.orc(f.getCanonicalPath)
+ // data source type is case insensitive
+ checkAnswer(sql(s"select id from ORC.`${f.getCanonicalPath}`"),
+ df)
+ checkAnswer(sql(s"select id from `org.apache.spark.sql.hive.orc`.`${f.getCanonicalPath}`"),
+ df)
+ checkAnswer(sql(s"select a.id from orc.`${f.getCanonicalPath}` as a"),
+ df)
+ })
+ }
+
+ test("run sql directly on files - csv") {
+ val df = spark.range(100).toDF()
+ withTempPath(f => {
+ df.write.csv(f.getCanonicalPath)
+ // data source type is case insensitive
+ checkAnswer(sql(s"select cast(_c0 as int) id from CSV.`${f.getCanonicalPath}`"),
+ df)
+ checkAnswer(
+ sql(s"select cast(_c0 as int) id from `com.databricks.spark.csv`.`${f.getCanonicalPath}`"),
+ df)
+ checkAnswer(sql(s"select cast(a._c0 as int) id from csv.`${f.getCanonicalPath}` as a"),
+ df)
+ })
+ }
+
+ test("run sql directly on files - json") {
+ val df = spark.range(100).toDF()
+ withTempPath(f => {
+ df.write.json(f.getCanonicalPath)
+ // data source type is case insensitive
+ checkAnswer(sql(s"select id from jsoN.`${f.getCanonicalPath}`"),
+ df)
+ checkAnswer(sql(s"select id from `org.apache.spark.sql.json`.`${f.getCanonicalPath}`"),
+ df)
+ checkAnswer(sql(s"select a.id from json.`${f.getCanonicalPath}` as a"),
+ df)
+ })
+ }
+
test("SPARK-8976 Wrong Result for Rollup #1") {
checkAnswer(sql(
"SELECT count(*) AS cnt, key % 5, grouping_id() FROM src GROUP BY key%5 WITH ROLLUP"),