aboutsummaryrefslogtreecommitdiff
path: root/sql
diff options
context:
space:
mode:
authorYin Huai <yhuai@databricks.com>2016-04-29 14:54:40 -0700
committerReynold Xin <rxin@databricks.com>2016-04-29 14:54:40 -0700
commitaf32f4aed650ba7acb381b98f3487e889e96f8c9 (patch)
treedb316103f24defc3c8f2862c91ead65efb1b3b2a /sql
parenta04b1de5faa5270f48ef0ca1fbaf630ed72c3918 (diff)
downloadspark-af32f4aed650ba7acb381b98f3487e889e96f8c9.tar.gz
spark-af32f4aed650ba7acb381b98f3487e889e96f8c9.tar.bz2
spark-af32f4aed650ba7acb381b98f3487e889e96f8c9.zip
[SPARK-15013][SQL] Remove hiveConf from HiveSessionState
## What changes were proposed in this pull request? The hiveConf in HiveSessionState is not actually used anymore. Let's remove it. ## How was this patch tested? Existing tests. Author: Yin Huai <yhuai@databricks.com> Closes #12786 from yhuai/removeHiveConf.
Diffstat (limited to 'sql')
-rw-r--r--sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionState.scala26
-rw-r--r--sql/hive/src/main/scala/org/apache/spark/sql/hive/test/TestHive.scala5
2 files changed, 1 insertion, 30 deletions
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionState.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionState.scala
index 9608f0b4ef..b17a88b2ef 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionState.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionState.scala
@@ -45,31 +45,6 @@ private[hive] class HiveSessionState(sparkSession: SparkSession)
*/
lazy val metadataHive: HiveClient = sharedState.metadataHive.newSession()
- /**
- * SQLConf and HiveConf contracts:
- *
- * 1. create a new o.a.h.hive.ql.session.SessionState for each HiveContext
- * 2. when the Hive session is first initialized, params in HiveConf will get picked up by the
- * SQLConf. Additionally, any properties set by set() or a SET command inside sql() will be
- * set in the SQLConf *as well as* in the HiveConf.
- */
- lazy val hiveconf: HiveConf = {
- val initialConf = new HiveConf(
- sparkSession.sparkContext.hadoopConfiguration,
- classOf[org.apache.hadoop.hive.ql.session.SessionState])
-
- // HiveConf is a Hadoop Configuration, which has a field of classLoader and
- // the initial value will be the current thread's context class loader
- // (i.e. initClassLoader at here).
- // We call initialConf.setClassLoader(initClassLoader) at here to make
- // this action explicit.
- initialConf.setClassLoader(sparkSession.sharedState.jarClassLoader)
- sparkSession.sparkContext.conf.getAll.foreach { case (k, v) =>
- initialConf.set(k, v)
- }
- initialConf
- }
-
setDefaultOverrideConfs()
/**
@@ -145,7 +120,6 @@ private[hive] class HiveSessionState(sparkSession: SparkSession)
override def setConf(key: String, value: String): Unit = {
super.setConf(key, value)
metadataHive.runSqlHive(s"SET $key=$value")
- hiveconf.set(key, value)
}
override def addJar(path: String): Unit = {
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/test/TestHive.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/test/TestHive.scala
index c4a3a74b9b..e763b63380 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/test/TestHive.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/test/TestHive.scala
@@ -153,9 +153,6 @@ private[hive] class TestHiveSparkSession(
// By clearing the port we force Spark to pick a new one. This allows us to rerun tests
// without restarting the JVM.
System.clearProperty("spark.hostPort")
- CommandProcessorFactory.clean(sessionState.hiveconf)
-
- sessionState.hiveconf.set("hive.plan.serialization.format", "javaXML")
// For some hive test case which contain ${system:test.tmp.dir}
System.setProperty("test.tmp.dir", Utils.createTempDir().getCanonicalPath)
@@ -423,7 +420,7 @@ private[hive] class TestHiveSparkSession(
foreach { udfName => FunctionRegistry.unregisterTemporaryUDF(udfName) }
// Some tests corrupt this value on purpose, which breaks the RESET call below.
- sessionState.hiveconf.set("fs.default.name", new File(".").toURI.toString)
+ sessionState.conf.setConfString("fs.default.name", new File(".").toURI.toString)
// It is important that we RESET first as broken hooks that might have been set could break
// other sql exec here.
sessionState.metadataHive.runSqlHive("RESET")