about | summary | refs | log | tree | commit | diff
path: root/sql/hive-thriftserver
diff options
context:
space:
mode:
author: guowei <guowei@upyoo.com>	2014-08-13 17:45:24 -0700
committer: Michael Armbrust <michael@databricks.com>	2014-08-13 17:45:32 -0700
commit: a8d2649719b3d8fdb1eed29ef179a6a896b3e37a (patch)
tree: bc86b69ac7f87c3b19e3b42809afb5fdfe60a9d8 /sql/hive-thriftserver
parent: b5b632c8cd02fd1e65ebd22216d20ec76715fc5d (diff)
download: spark-a8d2649719b3d8fdb1eed29ef179a6a896b3e37a.tar.gz
spark-a8d2649719b3d8fdb1eed29ef179a6a896b3e37a.tar.bz2
spark-a8d2649719b3d8fdb1eed29ef179a6a896b3e37a.zip
[SPARK-2986] [SQL] fixed: setting properties does not take effect
It seems that the SET command is not run by SparkSQLDriver; it runs through the Hive API instead. As a result, a user cannot change the reduce number by setting spark.sql.shuffle.partitions. Setting Hive properties should be routed to Spark SQL as well. Author: guowei <guowei@upyoo.com> Closes #1904 from guowei2/temp-branch and squashes the following commits: 7d47dde [guowei] fixed: setting properties like spark.sql.shuffle.partitions does not take effect (cherry picked from commit 63d6777737ca8559d4344d1661500b8ad868bb47) Signed-off-by: Michael Armbrust <michael@databricks.com>
Diffstat (limited to 'sql/hive-thriftserver')
-rwxr-xr-x  sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLCLIDriver.scala | 4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLCLIDriver.scala b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLCLIDriver.scala
index 4ed0f58ebc..c16a7d3661 100755
--- a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLCLIDriver.scala
+++ b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLCLIDriver.scala
@@ -34,7 +34,7 @@ import org.apache.hadoop.hive.common.{HiveInterruptCallback, HiveInterruptUtils,
import org.apache.hadoop.hive.conf.HiveConf
import org.apache.hadoop.hive.ql.Driver
import org.apache.hadoop.hive.ql.exec.Utilities
-import org.apache.hadoop.hive.ql.processors.{CommandProcessor, CommandProcessorFactory}
+import org.apache.hadoop.hive.ql.processors.{SetProcessor, CommandProcessor, CommandProcessorFactory}
import org.apache.hadoop.hive.ql.session.SessionState
import org.apache.hadoop.hive.shims.ShimLoader
import org.apache.thrift.transport.TSocket
@@ -284,7 +284,7 @@ private[hive] class SparkSQLCLIDriver extends CliDriver with Logging {
val proc: CommandProcessor = CommandProcessorFactory.get(tokens(0), hconf)
if (proc != null) {
- if (proc.isInstanceOf[Driver]) {
+ if (proc.isInstanceOf[Driver] || proc.isInstanceOf[SetProcessor]) {
val driver = new SparkSQLDriver
driver.init()