 sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala                  |  3 ++-
 sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveDataFrameAnalyticsSuite.scala | 10 ++++++++++
 2 files changed, 12 insertions(+), 1 deletion(-)
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala
index 5872fbded3..dcb3737b70 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala
@@ -313,7 +313,8 @@ class DataFrameReader private[sql](sqlContext: SQLContext) extends Logging {
    * @since 1.4.0
    */
   def table(tableName: String): DataFrame = {
-    DataFrame(sqlContext, sqlContext.catalog.lookupRelation(TableIdentifier(tableName)))
+    DataFrame(sqlContext,
+      sqlContext.catalog.lookupRelation(SqlParser.parseTableIdentifier(tableName)))
   }
 
   /**
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveDataFrameAnalyticsSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveDataFrameAnalyticsSuite.scala
index 9864acf765..f19a74d4b3 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveDataFrameAnalyticsSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveDataFrameAnalyticsSuite.scala
@@ -34,10 +34,14 @@ class HiveDataFrameAnalyticsSuite extends QueryTest with TestHiveSingleton with
   override def beforeAll() {
     testData = Seq((1, 2), (2, 2), (3, 4)).toDF("a", "b")
     hiveContext.registerDataFrameAsTable(testData, "mytable")
+    hiveContext.sql("create schema usrdb")
+    hiveContext.sql("create table usrdb.test(c1 int)")
   }
 
   override def afterAll(): Unit = {
     hiveContext.dropTempTable("mytable")
+    hiveContext.sql("drop table usrdb.test")
+    hiveContext.sql("drop schema usrdb")
   }
 
   test("rollup") {
@@ -74,4 +78,10 @@ class HiveDataFrameAnalyticsSuite extends QueryTest with TestHiveSingleton with
       sql("select a, b, sum(b) from mytable group by a, b with cube").collect()
     )
   }
+
+  // DataFrameReader.table used to fail for table names qualified with a schema: before
+  // this fix it threw org.apache.spark.sql.catalyst.analysis.NoSuchTableException.
+  test("table name with schema") {
+    hiveContext.read.table("usrdb.test")
+  }
 }
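
For reference, a minimal usage sketch of the fixed path (not part of the patch; it assumes a running HiveContext named hiveContext and the usrdb.test table created in beforeAll() above, and the value name df is only for illustration):

    // With the fix, the table name passes through SqlParser.parseTableIdentifier,
    // which splits "usrdb.test" into the database part ("usrdb") and the table
    // part ("test") before the catalog lookup, instead of treating the whole
    // string as a single unqualified table name.
    val df = hiveContext.read.table("usrdb.test")
    df.printSchema()   // prints the single column c1: int defined in the test table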