path: root/sql/hive/src/main
author     Cheng Lian <lian.cs.zju@gmail.com>    2014-06-16 21:30:29 +0200
committer  Reynold Xin <rxin@apache.org>         2014-06-16 16:47:50 -0700
commit     9c675de992d29327436edf7143dba3e4ef5601a8 (patch)
tree       3681f700f513cd6e583b13ff8f94387fd5c5386f /sql/hive/src/main
parent     d7f94b9348c5289c3954023fc1593769851bcd36 (diff)
download   spark-9c675de992d29327436edf7143dba3e4ef5601a8.tar.gz
           spark-9c675de992d29327436edf7143dba3e4ef5601a8.tar.bz2
           spark-9c675de992d29327436edf7143dba3e4ef5601a8.zip
[SQL][SPARK-2094] Follow up of PR #1071 for Java API
Updated `JavaSQLContext` and `JavaHiveContext` similar to what we've done to `SQLContext` and `HiveContext` in PR #1071. Added a corresponding test case for the Spark SQL Java API.

Author: Cheng Lian <lian.cs.zju@gmail.com>

Closes #1085 from liancheng/spark-2094-java and squashes the following commits:

29b8a51 [Cheng Lian] Avoided instantiating JavaSparkContext & JavaHiveContext to work around a test failure
92bb4fb [Cheng Lian] Marked test cases in JavaHiveQLSuite with "ignore"
22aec97 [Cheng Lian] Follow up of PR #1071 for Java API

(cherry picked from commit 273afcb254fb5384204c56bdcb3b9b760bcfab3f)
Signed-off-by: Reynold Xin <rxin@apache.org>
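For reference, a minimal usage sketch of the updated Java API. Only `JavaHiveContext` and `hql()` come from the diff below; the local `JavaSparkContext` setup and the example query are illustrative assumptions, not part of this commit.

import org.apache.spark.api.java.JavaSparkContext
import org.apache.spark.sql.hive.api.java.JavaHiveContext

// Hypothetical setup; any existing JavaSparkContext would do.
val jsc = new JavaSparkContext("local", "JavaHiveContextSketch")
val hiveCtx = new JavaHiveContext(jsc)

// hql() now just parses the statement and wraps the resulting logical plan in a
// JavaSchemaRDD; for a plain SELECT like this, execution is deferred until an
// action such as collect() is called on the result. Eager handling of
// DDL/commands was addressed separately in PR #1071.
val rows = hiveCtx.hql("SELECT key, value FROM src")
rows.collect()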
Diffstat (limited to 'sql/hive/src/main')
-rw-r--r--  sql/hive/src/main/scala/org/apache/spark/sql/hive/api/java/JavaHiveContext.scala  |  10
1 file changed, 2 insertions(+), 8 deletions(-)
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/api/java/JavaHiveContext.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/api/java/JavaHiveContext.scala
index 6df76fa825..c9ee162191 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/api/java/JavaHiveContext.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/api/java/JavaHiveContext.scala
@@ -31,12 +31,6 @@ class JavaHiveContext(sparkContext: JavaSparkContext) extends JavaSQLContext(spa
/**
* Executes a query expressed in HiveQL, returning the result as a JavaSchemaRDD.
*/
- def hql(hqlQuery: String): JavaSchemaRDD = {
- val result = new JavaSchemaRDD(sqlContext, HiveQl.parseSql(hqlQuery))
- // We force query optimization to happen right away instead of letting it happen lazily like
- // when using the query DSL. This is so DDL commands behave as expected. This is only
- // generates the RDD lineage for DML queries, but do not perform any execution.
- result.queryExecution.toRdd
- result
- }
+ def hql(hqlQuery: String): JavaSchemaRDD =
+ new JavaSchemaRDD(sqlContext, HiveQl.parseSql(hqlQuery))
}