author    Reynold Xin <rxin@databricks.com>    2016-04-26 22:02:28 -0700
committer Reynold Xin <rxin@databricks.com>    2016-04-26 22:02:28 -0700
commit    8fda5a73dc165fda2229a27c5a9e148b43b91c3a (patch)
tree      da0fe0131f4ff26eafbec226f1668debfb60dab4 /sql/hive/src/main
parent    d8a83a564ff3fd0281007adbf8aa3757da8a2c2b (diff)
[SPARK-14913][SQL] Simplify configuration API
## What changes were proposed in this pull request?

We currently expose both the Hadoop configuration and the Spark SQL configuration in RuntimeConfig. I think we can remove the Hadoop configuration part and simply generate a Hadoop Configuration on the fly by passing all the SQL configurations into it. This way, there is a single interface (in Java/Scala/Python/SQL) for end-users.

As part of this patch, I also removed some config options deprecated in Spark 1.x.

## How was this patch tested?

Updated relevant tests.

Author: Reynold Xin <rxin@databricks.com>

Closes #12689 from rxin/SPARK-14913.
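As an illustration of the approach described above, here is a minimal sketch of generating a Hadoop Configuration on the fly from the SQL configurations. The signature and names (`newHadoopConf`, `baseHadoopConf`, `sqlConfEntries`) are assumptions for illustration, not a verbatim excerpt of the patch:

```scala
import org.apache.hadoop.conf.Configuration

// Hypothetical sketch: build a fresh Hadoop Configuration by overlaying
// all current SQL config entries on top of the session's base Hadoop conf.
// The names baseHadoopConf and sqlConfEntries are illustrative assumptions.
def newHadoopConf(
    baseHadoopConf: Configuration,
    sqlConfEntries: Map[String, String]): Configuration = {
  // Copy the base conf so each caller gets its own mutable instance.
  val hadoopConf = new Configuration(baseHadoopConf)
  // Pass every SQL configuration into the Hadoop conf; Hadoop components
  // simply ignore keys they do not recognize.
  sqlConfEntries.foreach { case (key, value) => hadoopConf.set(key, value) }
  hadoopConf
}
```

Because every call returns a fresh copy, the callers in the diff below switch from reading a shared `sessionState.hadoopConf` field to calling `sessionState.newHadoopConf()`.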
Diffstat (limited to 'sql/hive/src/main')
-rw-r--r-- sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala | 2
-rw-r--r-- sql/hive/src/main/scala/org/apache/spark/sql/hive/orc/OrcRelation.scala | 4
-rw-r--r-- sql/hive/src/main/scala/org/apache/spark/sql/hive/test/TestHive.scala | 4
3 files changed, 5 insertions, 5 deletions
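For a usage-level view of the single configuration interface the commit message describes, the sketch below assumes a Spark 2.x `SparkSession`; the same setting flows through both the programmatic API and SQL:

```scala
import org.apache.spark.sql.SparkSession

// Hypothetical usage sketch, assuming a Spark 2.x SparkSession.
val spark = SparkSession.builder()
  .master("local[*]")
  .appName("single-conf-interface-demo")
  .getOrCreate()

// One interface for configuration from Scala/Java/Python...
spark.conf.set("spark.sql.shuffle.partitions", "4")

// ...and the same setting via SQL.
spark.sql("SET spark.sql.shuffle.partitions=4")
```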
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala
index c4db4f307c..58c10b7b1d 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala
@@ -549,7 +549,7 @@ private[hive] class MetaStoreFileCatalog(
Some(partitionSpecFromHive.partitionColumns)) {
override def getStatus(path: Path): Array[FileStatus] = {
- val fs = path.getFileSystem(sparkSession.sessionState.hadoopConf)
+ val fs = path.getFileSystem(sparkSession.sessionState.newHadoopConf())
fs.listStatus(path)
}
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/orc/OrcRelation.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/orc/OrcRelation.scala
index 4f81967a5b..d6a847f3ba 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/orc/OrcRelation.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/orc/OrcRelation.scala
@@ -57,7 +57,7 @@ private[sql] class DefaultSource
files: Seq[FileStatus]): Option[StructType] = {
OrcFileOperator.readSchema(
files.map(_.getPath.toUri.toString),
- Some(new Configuration(sparkSession.sessionState.hadoopConf))
+ Some(sparkSession.sessionState.newHadoopConf())
)
}
@@ -278,7 +278,7 @@ private[orc] case class OrcTableScan(
with HiveInspectors {
def execute(): RDD[InternalRow] = {
- val job = Job.getInstance(new Configuration(sparkSession.sessionState.hadoopConf))
+ val job = Job.getInstance(sparkSession.sessionState.newHadoopConf())
val conf = job.getConfiguration
// Tries to push down filters if ORC filter push-down is enabled
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/test/TestHive.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/test/TestHive.scala
index f74e5cd6f5..1d8f24cb27 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/test/TestHive.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/test/TestHive.scala
@@ -34,7 +34,7 @@ import org.apache.hadoop.hive.serde2.`lazy`.LazySimpleSerDe
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config.CATALOG_IMPLEMENTATION
-import org.apache.spark.sql.{RuntimeConfig, SparkSession, SQLContext}
+import org.apache.spark.sql.{SparkSession, SQLContext}
import org.apache.spark.sql.catalyst.analysis._
import org.apache.spark.sql.catalyst.analysis.FunctionRegistry.FunctionBuilder
import org.apache.spark.sql.catalyst.expressions.ExpressionInfo
@@ -43,7 +43,7 @@ import org.apache.spark.sql.execution.QueryExecution
import org.apache.spark.sql.execution.command.{CacheTableCommand, HiveNativeCommand}
import org.apache.spark.sql.hive._
import org.apache.spark.sql.hive.client.HiveClient
-import org.apache.spark.sql.internal.{RuntimeConfigImpl, SQLConf}
+import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.util.{ShutdownHookManager, Utils}
// SPARK-3729: Test key required to check for initialization errors with config.