author     Kousuke Saruta <sarutak@oss.nttdata.co.jp>   2014-12-23 19:14:34 -0800
committer  Josh Rosen <joshrosen@databricks.com>        2014-12-23 19:14:34 -0800
commit     199e59aacd540e17b31f38e0e32a3618870e9055 (patch)
tree       ea9d7af5eed21a2779d2f2a2009f8a7298a7c44b
parent     fd41eb9574280b5cfee9b94b4f92e4c44363fb14 (diff)
[SPARK-4881][Minor] Use SparkConf#getBoolean instead of get().toBoolean
It's really a minor issue.
In ApplicationMaster, there is code like the following:
val preserveFiles = sparkConf.get("spark.yarn.preserve.staging.files", "false").toBoolean
I think the code can be simplified as follows:
val preserveFiles = sparkConf.getBoolean("spark.yarn.preserve.staging.files", false)
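For reference, SparkConf#getBoolean is essentially the get-then-parse pattern folded into one call; a minimal sketch of its behavior (not the exact Spark source) is:

    def getBoolean(key: String, defaultValue: Boolean): Boolean =
      getOption(key).map(_.toBoolean).getOrElse(defaultValue)

so the two forms behave the same, including throwing for values that are not "true" or "false".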
Author: Kousuke Saruta <sarutak@oss.nttdata.co.jp>
Closes #3733 from sarutak/SPARK-4881 and squashes the following commits:
1771430 [Kousuke Saruta] Modified the code like sparkConf.get(...).toBoolean to sparkConf.getBoolean(...)
c63daa0 [Kousuke Saruta] Simplified code
4 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/core/src/main/scala/org/apache/spark/SecurityManager.scala b/core/src/main/scala/org/apache/spark/SecurityManager.scala
index 49dae5231a..ec82d09cd0 100644
--- a/core/src/main/scala/org/apache/spark/SecurityManager.scala
+++ b/core/src/main/scala/org/apache/spark/SecurityManager.scala
@@ -151,8 +151,8 @@ private[spark] class SecurityManager(sparkConf: SparkConf) extends Logging with
   private val authOn = sparkConf.getBoolean("spark.authenticate", false)
   // keep spark.ui.acls.enable for backwards compatibility with 1.0
-  private var aclsOn = sparkConf.getOption("spark.acls.enable").getOrElse(
-    sparkConf.get("spark.ui.acls.enable", "false")).toBoolean
+  private var aclsOn =
+    sparkConf.getBoolean("spark.acls.enable", sparkConf.getBoolean("spark.ui.acls.enable", false))
 
   // admin acls should be set before view or modify acls
   private var adminAcls: Set[String] =
diff --git a/core/src/main/scala/org/apache/spark/rdd/HadoopRDD.scala b/core/src/main/scala/org/apache/spark/rdd/HadoopRDD.scala
index a157e36e22..0001c2329c 100644
--- a/core/src/main/scala/org/apache/spark/rdd/HadoopRDD.scala
+++ b/core/src/main/scala/org/apache/spark/rdd/HadoopRDD.scala
@@ -131,7 +131,7 @@ class HadoopRDD[K, V](
   // used to build JobTracker ID
   private val createTime = new Date()
 
-  private val shouldCloneJobConf = sc.conf.get("spark.hadoop.cloneConf", "false").toBoolean
+  private val shouldCloneJobConf = sc.conf.getBoolean("spark.hadoop.cloneConf", false)
 
   // Returns a JobConf that will be used on slaves to obtain input splits for Hadoop reads.
   protected def getJobConf(): JobConf = {
diff --git a/yarn/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMaster.scala b/yarn/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMaster.scala
index b2e45435c4..9c77dff48d 100644
--- a/yarn/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMaster.scala
+++ b/yarn/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMaster.scala
@@ -311,7 +311,7 @@ private[spark] class ApplicationMaster(args: ApplicationMasterArguments,
   private def cleanupStagingDir(fs: FileSystem) {
     var stagingDirPath: Path = null
     try {
-      val preserveFiles = sparkConf.get("spark.yarn.preserve.staging.files", "false").toBoolean
+      val preserveFiles = sparkConf.getBoolean("spark.yarn.preserve.staging.files", false)
       if (!preserveFiles) {
         stagingDirPath = new Path(System.getenv("SPARK_YARN_STAGING_DIR"))
         if (stagingDirPath == null) {
diff --git a/yarn/src/main/scala/org/apache/spark/deploy/yarn/ClientBase.scala b/yarn/src/main/scala/org/apache/spark/deploy/yarn/ClientBase.scala
index 5f0c67f05c..eb97a7b3c5 100644
--- a/yarn/src/main/scala/org/apache/spark/deploy/yarn/ClientBase.scala
+++ b/yarn/src/main/scala/org/apache/spark/deploy/yarn/ClientBase.scala
@@ -693,7 +693,7 @@ private[spark] object ClientBase extends Logging {
     addClasspathEntry(Environment.PWD.$(), env)
 
     // Normally the users app.jar is last in case conflicts with spark jars
-    if (sparkConf.get("spark.yarn.user.classpath.first", "false").toBoolean) {
+    if (sparkConf.getBoolean("spark.yarn.user.classpath.first", false)) {
       addUserClasspath(args, sparkConf, env)
       addFileToClasspath(sparkJar(sparkConf), SPARK_JAR, env)
       populateHadoopClasspath(conf, env)
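Of the four hunks, only the SecurityManager change is more than a one-line substitution: nesting one getBoolean inside another keeps spark.acls.enable taking precedence over the legacy spark.ui.acls.enable key. A minimal sketch of the resulting lookup order, with hypothetical conf values:

    import org.apache.spark.SparkConf

    // Hypothetical example: only the legacy key is set.
    val conf = new SparkConf(loadDefaults = false).set("spark.ui.acls.enable", "true")

    // The inner getBoolean supplies the fallback, so the legacy key is
    // honored whenever the new key is absent.
    val aclsOn = conf.getBoolean("spark.acls.enable",
      conf.getBoolean("spark.ui.acls.enable", false))  // => true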