aboutsummaryrefslogtreecommitdiff
path: root/sql/hive-thriftserver/src
diff options
context:
space:
mode:
authorguowei <guowei@upyoo.com>2014-08-13 17:45:24 -0700
committerMichael Armbrust <michael@databricks.com>2014-08-13 17:45:24 -0700
commit63d6777737ca8559d4344d1661500b8ad868bb47 (patch)
tree5d96e86e4bb409d356face8862d6aea12325ad4d /sql/hive-thriftserver/src
parent905dc4b405e679feb145f5e6b35e952db2442e0d (diff)
downloadspark-63d6777737ca8559d4344d1661500b8ad868bb47.tar.gz
spark-63d6777737ca8559d4344d1661500b8ad868bb47.tar.bz2
spark-63d6777737ca8559d4344d1661500b8ad868bb47.zip
[SPARK-2986] [SQL] fixed: setting properties does not take effect
It seems that the SET command is not run by SparkSQLDriver; it runs through the Hive API instead. As a result, users cannot change the reducer count by setting spark.sql.shuffle.partitions. Setting Hive properties, however, should also be routed through Spark SQL. Author: guowei <guowei@upyoo.com> Closes #1904 from guowei2/temp-branch and squashes the following commits: 7d47dde [guowei] fixed: setting properties like spark.sql.shuffle.partitions does not take effect
Diffstat (limited to 'sql/hive-thriftserver/src')
-rwxr-xr-xsql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLCLIDriver.scala4
1 file changed, 2 insertions, 2 deletions
diff --git a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLCLIDriver.scala b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLCLIDriver.scala
index 4ed0f58ebc..c16a7d3661 100755
--- a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLCLIDriver.scala
+++ b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLCLIDriver.scala
@@ -34,7 +34,7 @@ import org.apache.hadoop.hive.common.{HiveInterruptCallback, HiveInterruptUtils,
import org.apache.hadoop.hive.conf.HiveConf
import org.apache.hadoop.hive.ql.Driver
import org.apache.hadoop.hive.ql.exec.Utilities
-import org.apache.hadoop.hive.ql.processors.{CommandProcessor, CommandProcessorFactory}
+import org.apache.hadoop.hive.ql.processors.{SetProcessor, CommandProcessor, CommandProcessorFactory}
import org.apache.hadoop.hive.ql.session.SessionState
import org.apache.hadoop.hive.shims.ShimLoader
import org.apache.thrift.transport.TSocket
@@ -284,7 +284,7 @@ private[hive] class SparkSQLCLIDriver extends CliDriver with Logging {
val proc: CommandProcessor = CommandProcessorFactory.get(tokens(0), hconf)
if (proc != null) {
- if (proc.isInstanceOf[Driver]) {
+ if (proc.isInstanceOf[Driver] || proc.isInstanceOf[SetProcessor]) {
val driver = new SparkSQLDriver
driver.init()