author    gatorsmile <gatorsmile@gmail.com>    2016-06-02 13:22:43 -0700
committer Shixiong Zhu <shixiong@databricks.com>    2016-06-02 13:22:43 -0700
commit    9aff6f3b1915523432b1921fdd30fa015ed5d670 (patch)
tree      10295f61fdeee607d38c5ba71126c358c5da44f1 /sql/hive
parent    8900c8d8ff1614b5ec5a2ce213832fa13462b4d4 (diff)
[SPARK-15515][SQL] Error Handling in Running SQL Directly On Files
#### What changes were proposed in this pull request?

This PR addresses the following issues:

- **ISSUE 1:** For the ORC source format, we report a confusing error message when Hive support is not enabled:
  ```SQL
  SQL Example:
    select id from `org.apache.spark.sql.hive.orc`.`file_path`
  Error Message:
    Table or view not found: `org.apache.spark.sql.hive.orc`.`file_path`
  ```
  Instead, we should issue an error message like:
  ```
  Expected Error Message:
    The ORC data source must be used with Hive support enabled
  ```

- **ISSUE 2:** For the Avro format, we also report a confusing error message:
  ```SQL
  SQL Example:
    select id from `avro`.`file_path`
    select id from `com.databricks.spark.avro`.`file_path`
  Error Message:
    Table or view not found: `com.databricks.spark.avro`.`file_path`
  ```
  The desired message should be like:
  ```
  Expected Error Message:
    Failed to find data source: avro. Please use Spark package
    http://spark-packages.org/package/databricks/spark-avro
  ```

- ~~**ISSUE 3:** Unable to detect incompatible libraries for Spark 2.0 in data source resolution. We report a confusing error message.~~

**Update:** The latest code changes contain:
- For the JDBC format, we added an extra check in the rule `ResolveRelations` of `Analyzer`. Without this PR, Spark returns an error message like `Option 'url' not specified`. Now we report `Unsupported data source type for direct query on files: jdbc`.
- Make the data source format name case insensitive so that error handling behaves consistently with the normal cases.
- Added test cases for all the supported formats.

#### How was this patch tested?

Added test cases to cover all the above issues.

Author: gatorsmile <gatorsmile@gmail.com>
Author: xiaoli <lixiao1983@gmail.com>
Author: Xiao Li <xiaoli@Xiaos-MacBook-Pro.local>

Closes #13283 from gatorsmile/runSQLAgainstFile.
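A minimal sketch of how the improved error messages could be asserted, assuming a `SparkSession` named `spark` built without Hive support or the spark-avro package, and ScalaTest's `intercept`; the file paths are placeholders and this test is illustrative, not part of the patch:

```scala
import org.apache.spark.sql.AnalysisException

// ISSUE 2: without the spark-avro package, a direct file query should
// point the user at the missing data source rather than report
// "Table or view not found".
val avroError = intercept[AnalysisException] {
  spark.sql("select id from `avro`.`file_path`")
}
assert(avroError.getMessage.contains("Failed to find data source: avro"))

// JDBC is not a file-based source, so the analyzer should reject a direct
// query on files instead of failing with "Option 'url' not specified".
val jdbcError = intercept[AnalysisException] {
  spark.sql("select id from `jdbc`.`file_path`")
}
assert(jdbcError.getMessage.contains(
  "Unsupported data source type for direct query on files: jdbc"))
```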
Diffstat (limited to 'sql/hive')
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala | 48
1 file changed, 46 insertions(+), 2 deletions(-)
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala
index b5691450ca..24de223cf8 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala
@@ -1247,11 +1247,12 @@ class SQLQuerySuite extends QueryTest with SQLTestUtils with TestHiveSingleton {
}
}
- test("run sql directly on files") {
+ test("run sql directly on files - parquet") {
val df = spark.range(100).toDF()
withTempPath(f => {
df.write.parquet(f.getCanonicalPath)
- checkAnswer(sql(s"select id from parquet.`${f.getCanonicalPath}`"),
+ // data source type is case insensitive
+ checkAnswer(sql(s"select id from Parquet.`${f.getCanonicalPath}`"),
df)
checkAnswer(sql(s"select id from `org.apache.spark.sql.parquet`.`${f.getCanonicalPath}`"),
df)
@@ -1260,6 +1261,49 @@ class SQLQuerySuite extends QueryTest with SQLTestUtils with TestHiveSingleton {
})
}
+ test("run sql directly on files - orc") {
+ val df = spark.range(100).toDF()
+ withTempPath(f => {
+ df.write.orc(f.getCanonicalPath)
+ // data source type is case insensitive
+ checkAnswer(sql(s"select id from ORC.`${f.getCanonicalPath}`"),
+ df)
+ checkAnswer(sql(s"select id from `org.apache.spark.sql.hive.orc`.`${f.getCanonicalPath}`"),
+ df)
+ checkAnswer(sql(s"select a.id from orc.`${f.getCanonicalPath}` as a"),
+ df)
+ })
+ }
+
+ test("run sql directly on files - csv") {
+ val df = spark.range(100).toDF()
+ withTempPath(f => {
+ df.write.csv(f.getCanonicalPath)
+ // data source type is case insensitive
+ checkAnswer(sql(s"select cast(_c0 as int) id from CSV.`${f.getCanonicalPath}`"),
+ df)
+ checkAnswer(
+ sql(s"select cast(_c0 as int) id from `com.databricks.spark.csv`.`${f.getCanonicalPath}`"),
+ df)
+ checkAnswer(sql(s"select cast(a._c0 as int) id from csv.`${f.getCanonicalPath}` as a"),
+ df)
+ })
+ }
+
+ test("run sql directly on files - json") {
+ val df = spark.range(100).toDF()
+ withTempPath(f => {
+ df.write.json(f.getCanonicalPath)
+ // data source type is case insensitive
+ checkAnswer(sql(s"select id from jsoN.`${f.getCanonicalPath}`"),
+ df)
+ checkAnswer(sql(s"select id from `org.apache.spark.sql.json`.`${f.getCanonicalPath}`"),
+ df)
+ checkAnswer(sql(s"select a.id from json.`${f.getCanonicalPath}` as a"),
+ df)
+ })
+ }
+
test("SPARK-8976 Wrong Result for Rollup #1") {
checkAnswer(sql(
"SELECT count(*) AS cnt, key % 5, grouping_id() FROM src GROUP BY key%5 WITH ROLLUP"),