about summary refs log tree commit diff
path: root/core/src/main/scala/org/apache
diff options
context:
space:
mode:
author Sital Kedia <skedia@fb.com> 2016-04-02 19:17:25 -0700
committer Sean Owen <sowen@cloudera.com> 2016-04-02 19:17:25 -0700
commit1cf70183423b938ec064925b20fd4a5b9e355991 (patch)
tree789b7d4db27290e4000093c567177dfef27f9a2a /core/src/main/scala/org/apache
parent03d130f9734be66e8aefc4ffaa207ee13e837629 (diff)
downloadspark-1cf70183423b938ec064925b20fd4a5b9e355991.tar.gz
spark-1cf70183423b938ec064925b20fd4a5b9e355991.tar.bz2
spark-1cf70183423b938ec064925b20fd4a5b9e355991.zip
[SPARK-14056] Appends s3 specific configurations and spark.hadoop configurations to hive configuration
## What changes were proposed in this pull request? Appends s3 specific configurations and spark.hadoop configurations to hive configuration. ## How was this patch tested? Tested by running a job on cluster. Author: Sital Kedia <skedia@fb.com> Closes #11876 from sitalkedia/hiveConf.
Diffstat (limited to 'core/src/main/scala/org/apache')
-rw-r--r-- core/src/main/scala/org/apache/spark/deploy/SparkHadoopUtil.scala | 19
1 file changed, 13 insertions(+), 6 deletions(-)
diff --git a/core/src/main/scala/org/apache/spark/deploy/SparkHadoopUtil.scala b/core/src/main/scala/org/apache/spark/deploy/SparkHadoopUtil.scala
index 06b7b388ca..4e8e363635 100644
--- a/core/src/main/scala/org/apache/spark/deploy/SparkHadoopUtil.scala
+++ b/core/src/main/scala/org/apache/spark/deploy/SparkHadoopUtil.scala
@@ -74,13 +74,12 @@ class SparkHadoopUtil extends Logging {
}
}
- /**
- * Return an appropriate (subclass) of Configuration. Creating config can initializes some Hadoop
- * subsystems.
- */
- def newConfiguration(conf: SparkConf): Configuration = {
- val hadoopConf = new Configuration()
+ /**
+ * Appends S3-specific, spark.hadoop.*, and spark.buffer.size configurations to a Hadoop
+ * configuration.
+ */
+ def appendS3AndSparkHadoopConfigurations(conf: SparkConf, hadoopConf: Configuration): Unit = {
// Note: this null check is around more than just access to the "conf" object to maintain
// the behavior of the old implementation of this code, for backwards compatibility.
if (conf != null) {
@@ -106,7 +105,15 @@ class SparkHadoopUtil extends Logging {
val bufferSize = conf.get("spark.buffer.size", "65536")
hadoopConf.set("io.file.buffer.size", bufferSize)
}
+ }
+ /**
+ * Return an appropriate (subclass) of Configuration. Creating config can initializes some Hadoop
+ * subsystems.
+ */
+ def newConfiguration(conf: SparkConf): Configuration = {
+ val hadoopConf = new Configuration()
+ appendS3AndSparkHadoopConfigurations(conf, hadoopConf)
hadoopConf
}