author     wangfei <wangfei1@huawei.com>              2014-11-07 12:55:11 -0800
committer  Michael Armbrust <michael@databricks.com>  2014-11-07 12:55:40 -0800
commit     47bd8f3020149a009f605e8390c2c28f3f835191 (patch)
tree       ee0e39397f50ea2fb82e2b2cb1faeadfb0d5104c /sql/hive-thriftserver/v0.13.1
parent     c96da3676c32579d0f97347d35d95353b1d2ef07 (diff)
download   spark-47bd8f3020149a009f605e8390c2c28f3f835191.tar.gz
           spark-47bd8f3020149a009f605e8390c2c28f3f835191.tar.bz2
           spark-47bd8f3020149a009f605e8390c2c28f3f835191.zip
[SPARK-4292][SQL] Result set iterator bug in JDBC/ODBC
Running `select * from src` returns a wrong result set, as follows:

```
...
| 309 | val_309 |
| 309 | val_309 |
| 309 | val_309 |
| 309 | val_309 |
| 309 | val_309 |
| 309 | val_309 |
| 309 | val_309 |
| 309 | val_309 |
| 309 | val_309 |
| 309 | val_309 |
| 97  | val_97  |
| 97  | val_97  |
| 97  | val_97  |
| 97  | val_97  |
| 97  | val_97  |
| 97  | val_97  |
| 97  | val_97  |
| 97  | val_97  |
| 97  | val_97  |
| 97  | val_97  |
| 97  | val_97  |
...
```

Author: wangfei <wangfei1@huawei.com>

Closes #3149 from scwf/SPARK-4292 and squashes the following commits:

1574a43 [wangfei] using result.collect
8b2d845 [wangfei] adding test
f64eddf [wangfei] result set iter bug

(cherry picked from commit d6e55524437026c0c76addeba8f99249a8316716)
Signed-off-by: Michael Armbrust <michael@databricks.com>
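A minimal sketch (not part of the patch) of one plausible mechanism behind the duplicated rows, under the assumption that the underlying iterator recycles a single mutable row object per element, so materializing it directly yields repeated references to the last value; the `Reused` class and sample values are hypothetical stand-ins:

```scala
// Illustrative sketch only; `Reused` and the sample values are hypothetical stand-ins
// for an iterator that recycles one mutable row object per element.
object ReusedRowPitfall {
  final class Reused(var value: Int)

  def main(args: Array[String]): Unit = {
    val shared = new Reused(0)

    // Materializing references to the SAME recycled object: every slot ends up
    // holding the last value written, which looks like duplicated rows.
    val reused = (1 to 3).iterator.map { i => shared.value = i; shared }.toArray
    println(reused.map(_.value).mkString(", "))   // 3, 3, 3

    // Copying each element before materializing preserves the distinct values,
    // which is what collecting through the higher-level API is expected to do here.
    val copied = (1 to 3).iterator.map { i => shared.value = i; new Reused(i) }.toArray
    println(copied.map(_.value).mkString(", "))   // 1, 2, 3
  }
}
```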
Diffstat (limited to 'sql/hive-thriftserver/v0.13.1')
-rw-r--r--  sql/hive-thriftserver/v0.13.1/src/main/scala/org/apache/spark/sql/hive/thriftserver/Shim13.scala | 5
1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/sql/hive-thriftserver/v0.13.1/src/main/scala/org/apache/spark/sql/hive/thriftserver/Shim13.scala b/sql/hive-thriftserver/v0.13.1/src/main/scala/org/apache/spark/sql/hive/thriftserver/Shim13.scala
index 2c1983de1d..f2ceba8282 100644
--- a/sql/hive-thriftserver/v0.13.1/src/main/scala/org/apache/spark/sql/hive/thriftserver/Shim13.scala
+++ b/sql/hive-thriftserver/v0.13.1/src/main/scala/org/apache/spark/sql/hive/thriftserver/Shim13.scala
@@ -87,13 +87,12 @@ private[hive] class SparkExecuteStatementOperation(
val groupId = round(random * 1000000).toString
hiveContext.sparkContext.setJobGroup(groupId, statement)
iter = {
- val resultRdd = result.queryExecution.toRdd
val useIncrementalCollect =
hiveContext.getConf("spark.sql.thriftServer.incrementalCollect", "false").toBoolean
if (useIncrementalCollect) {
- resultRdd.toLocalIterator
+ result.toLocalIterator
} else {
- resultRdd.collect().iterator
+ result.collect().iterator
}
}
dataTypes = result.queryExecution.analyzed.output.map(_.dataType).toArray
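For context, the `spark.sql.thriftServer.incrementalCollect` switch read a few lines above still decides between streaming partitions and a full collect. A hedged usage sketch against the Spark 1.x API this shim targets; the local master, app name, and the `src` table are assumptions for illustration, not part of the patch:

```scala
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.hive.HiveContext

// Sketch only: assumes a local Spark 1.x setup and an existing Hive table `src`.
val sc = new SparkContext(new SparkConf().setAppName("incremental-collect-sketch").setMaster("local[*]"))
val hiveContext = new HiveContext(sc)

// true  -> the operation above iterates with result.toLocalIterator (one partition at a time)
// false -> it materializes everything with result.collect() (the default)
hiveContext.setConf("spark.sql.thriftServer.incrementalCollect", "true")

val result = hiveContext.sql("SELECT * FROM src")
result.toLocalIterator.take(5).foreach(println)
```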