path: root/yarn/src
author     Marcelo Vanzin <vanzin@cloudera.com>    2016-04-05 15:19:51 -0700
committer  Marcelo Vanzin <vanzin@cloudera.com>    2016-04-05 15:19:51 -0700
commit     d5ee9d5c240fca5c15b21efc4a760b06a1f39fd6 (patch)
tree       b0c0d55466cdd5678849cc32d914408f9dd84472 /yarn/src
parent     7329fe272d3ead7db9bc3e1e32adb7329dabc607 (diff)
[SPARK-529][SQL] Modify SQLConf to use new config API from core.
Because SQL keeps track of all known configs, some customization was needed in SQLConf to allow that, since the core API does not have that feature.

Tested via existing (and slightly updated) unit tests.

Author: Marcelo Vanzin <vanzin@cloudera.com>

Closes #11570 from vanzin/SPARK-529-sql.
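For context, every rename in the yarn/src portion of this diff follows one mechanical pattern: the terminal methods on ConfigBuilder gained a "create" prefix, and internal became an explicit method call. A minimal sketch of the new API, assuming the core ConfigBuilder from org.apache.spark.internal.config; the "example.*" keys below are illustrative, not part of this patch:

    import java.util.concurrent.TimeUnit
    import org.apache.spark.internal.config.ConfigBuilder

    object ConfigApiSketch {
      // was: .optional
      val maxAttempts = ConfigBuilder("example.maxAppAttempts")
        .intConf
        .createOptional

      // was: .withDefault("default")
      val queue = ConfigBuilder("example.queue")
        .stringConf
        .createWithDefault("default")

      // was: .withDefaultString("1s")
      val reportInterval = ConfigBuilder("example.report.interval")
        .timeConf(TimeUnit.MILLISECONDS)
        .createWithDefaultString("1s")

      // was: .internal (no parentheses)
      val credentialsFile = ConfigBuilder("example.credentials.file")
        .internal()
        .stringConf
        .createOptional
    }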
Diffstat (limited to 'yarn/src')
-rw-r--r--  yarn/src/main/scala/org/apache/spark/deploy/yarn/config.scala | 92
1 file changed, 46 insertions(+), 46 deletions(-)
diff --git a/yarn/src/main/scala/org/apache/spark/deploy/yarn/config.scala b/yarn/src/main/scala/org/apache/spark/deploy/yarn/config.scala
index 5188a3e229..8d576bebb0 100644
--- a/yarn/src/main/scala/org/apache/spark/deploy/yarn/config.scala
+++ b/yarn/src/main/scala/org/apache/spark/deploy/yarn/config.scala
@@ -31,82 +31,82 @@ package object config {
"in YARN Application Reports, which can be used for filtering when querying YARN.")
.stringConf
.toSequence
- .optional
+ .createOptional
private[spark] val ATTEMPT_FAILURE_VALIDITY_INTERVAL_MS =
ConfigBuilder("spark.yarn.am.attemptFailuresValidityInterval")
.doc("Interval after which AM failures will be considered independent and " +
"not accumulate towards the attempt count.")
.timeConf(TimeUnit.MILLISECONDS)
- .optional
+ .createOptional
private[spark] val MAX_APP_ATTEMPTS = ConfigBuilder("spark.yarn.maxAppAttempts")
.doc("Maximum number of AM attempts before failing the app.")
.intConf
- .optional
+ .createOptional
private[spark] val USER_CLASS_PATH_FIRST = ConfigBuilder("spark.yarn.user.classpath.first")
.doc("Whether to place user jars in front of Spark's classpath.")
.booleanConf
- .withDefault(false)
+ .createWithDefault(false)
private[spark] val GATEWAY_ROOT_PATH = ConfigBuilder("spark.yarn.config.gatewayPath")
.doc("Root of configuration paths that is present on gateway nodes, and will be replaced " +
"with the corresponding path in cluster machines.")
.stringConf
- .withDefault(null)
+ .createWithDefault(null)
private[spark] val REPLACEMENT_ROOT_PATH = ConfigBuilder("spark.yarn.config.replacementPath")
.doc(s"Path to use as a replacement for ${GATEWAY_ROOT_PATH.key} when launching processes " +
"in the YARN cluster.")
.stringConf
- .withDefault(null)
+ .createWithDefault(null)
private[spark] val QUEUE_NAME = ConfigBuilder("spark.yarn.queue")
.stringConf
- .withDefault("default")
+ .createWithDefault("default")
private[spark] val HISTORY_SERVER_ADDRESS = ConfigBuilder("spark.yarn.historyServer.address")
.stringConf
- .optional
+ .createOptional
/* File distribution. */
private[spark] val SPARK_ARCHIVE = ConfigBuilder("spark.yarn.archive")
.doc("Location of archive containing jars files with Spark classes.")
.stringConf
- .optional
+ .createOptional
private[spark] val SPARK_JARS = ConfigBuilder("spark.yarn.jars")
.doc("Location of jars containing Spark classes.")
.stringConf
.toSequence
- .optional
+ .createOptional
private[spark] val ARCHIVES_TO_DISTRIBUTE = ConfigBuilder("spark.yarn.dist.archives")
.stringConf
.toSequence
- .withDefault(Nil)
+ .createWithDefault(Nil)
private[spark] val FILES_TO_DISTRIBUTE = ConfigBuilder("spark.yarn.dist.files")
.stringConf
.toSequence
- .withDefault(Nil)
+ .createWithDefault(Nil)
private[spark] val JARS_TO_DISTRIBUTE = ConfigBuilder("spark.yarn.dist.jars")
.stringConf
.toSequence
- .withDefault(Nil)
+ .createWithDefault(Nil)
private[spark] val PRESERVE_STAGING_FILES = ConfigBuilder("spark.yarn.preserve.staging.files")
.doc("Whether to preserve temporary files created by the job in HDFS.")
.booleanConf
- .withDefault(false)
+ .createWithDefault(false)
private[spark] val STAGING_FILE_REPLICATION = ConfigBuilder("spark.yarn.submit.file.replication")
.doc("Replication factor for files uploaded by Spark to HDFS.")
.intConf
- .optional
+ .createOptional
private[spark] val STAGING_DIR = ConfigBuilder("spark.yarn.stagingDir")
.doc("Staging directory used while submitting applications.")
@@ -119,146 +119,146 @@ package object config {
.doc("In cluster mode, whether to wait for the application to finish before exiting the " +
"launcher process.")
.booleanConf
- .withDefault(true)
+ .createWithDefault(true)
private[spark] val REPORT_INTERVAL = ConfigBuilder("spark.yarn.report.interval")
.doc("Interval between reports of the current app status in cluster mode.")
.timeConf(TimeUnit.MILLISECONDS)
- .withDefaultString("1s")
+ .createWithDefaultString("1s")
/* Shared Client-mode AM / Driver configuration. */
private[spark] val AM_MAX_WAIT_TIME = ConfigBuilder("spark.yarn.am.waitTime")
.timeConf(TimeUnit.MILLISECONDS)
- .withDefaultString("100s")
+ .createWithDefaultString("100s")
private[spark] val AM_NODE_LABEL_EXPRESSION = ConfigBuilder("spark.yarn.am.nodeLabelExpression")
.doc("Node label expression for the AM.")
.stringConf
- .optional
+ .createOptional
private[spark] val CONTAINER_LAUNCH_MAX_THREADS =
ConfigBuilder("spark.yarn.containerLauncherMaxThreads")
.intConf
- .withDefault(25)
+ .createWithDefault(25)
private[spark] val MAX_EXECUTOR_FAILURES = ConfigBuilder("spark.yarn.max.executor.failures")
.intConf
- .optional
+ .createOptional
private[spark] val MAX_REPORTER_THREAD_FAILURES =
ConfigBuilder("spark.yarn.scheduler.reporterThread.maxFailures")
.intConf
- .withDefault(5)
+ .createWithDefault(5)
private[spark] val RM_HEARTBEAT_INTERVAL =
ConfigBuilder("spark.yarn.scheduler.heartbeat.interval-ms")
.timeConf(TimeUnit.MILLISECONDS)
- .withDefaultString("3s")
+ .createWithDefaultString("3s")
private[spark] val INITIAL_HEARTBEAT_INTERVAL =
ConfigBuilder("spark.yarn.scheduler.initial-allocation.interval")
.timeConf(TimeUnit.MILLISECONDS)
- .withDefaultString("200ms")
+ .createWithDefaultString("200ms")
private[spark] val SCHEDULER_SERVICES = ConfigBuilder("spark.yarn.services")
.doc("A comma-separated list of class names of services to add to the scheduler.")
.stringConf
.toSequence
- .withDefault(Nil)
+ .createWithDefault(Nil)
/* Client-mode AM configuration. */
private[spark] val AM_CORES = ConfigBuilder("spark.yarn.am.cores")
.intConf
- .withDefault(1)
+ .createWithDefault(1)
private[spark] val AM_JAVA_OPTIONS = ConfigBuilder("spark.yarn.am.extraJavaOptions")
.doc("Extra Java options for the client-mode AM.")
.stringConf
- .optional
+ .createOptional
private[spark] val AM_LIBRARY_PATH = ConfigBuilder("spark.yarn.am.extraLibraryPath")
.doc("Extra native library path for the client-mode AM.")
.stringConf
- .optional
+ .createOptional
private[spark] val AM_MEMORY_OVERHEAD = ConfigBuilder("spark.yarn.am.memoryOverhead")
.bytesConf(ByteUnit.MiB)
- .optional
+ .createOptional
private[spark] val AM_MEMORY = ConfigBuilder("spark.yarn.am.memory")
.bytesConf(ByteUnit.MiB)
- .withDefaultString("512m")
+ .createWithDefaultString("512m")
/* Driver configuration. */
private[spark] val DRIVER_CORES = ConfigBuilder("spark.driver.cores")
.intConf
- .withDefault(1)
+ .createWithDefault(1)
private[spark] val DRIVER_MEMORY_OVERHEAD = ConfigBuilder("spark.yarn.driver.memoryOverhead")
.bytesConf(ByteUnit.MiB)
- .optional
+ .createOptional
/* Executor configuration. */
private[spark] val EXECUTOR_CORES = ConfigBuilder("spark.executor.cores")
.intConf
- .withDefault(1)
+ .createWithDefault(1)
private[spark] val EXECUTOR_MEMORY_OVERHEAD = ConfigBuilder("spark.yarn.executor.memoryOverhead")
.bytesConf(ByteUnit.MiB)
- .optional
+ .createOptional
private[spark] val EXECUTOR_NODE_LABEL_EXPRESSION =
ConfigBuilder("spark.yarn.executor.nodeLabelExpression")
.doc("Node label expression for executors.")
.stringConf
- .optional
+ .createOptional
/* Security configuration. */
private[spark] val CREDENTIAL_FILE_MAX_COUNT =
ConfigBuilder("spark.yarn.credentials.file.retention.count")
.intConf
- .withDefault(5)
+ .createWithDefault(5)
private[spark] val CREDENTIALS_FILE_MAX_RETENTION =
ConfigBuilder("spark.yarn.credentials.file.retention.days")
.intConf
- .withDefault(5)
+ .createWithDefault(5)
private[spark] val NAMENODES_TO_ACCESS = ConfigBuilder("spark.yarn.access.namenodes")
.doc("Extra NameNode URLs for which to request delegation tokens. The NameNode that hosts " +
"fs.defaultFS does not need to be listed here.")
.stringConf
.toSequence
- .withDefault(Nil)
+ .createWithDefault(Nil)
private[spark] val TOKEN_RENEWAL_INTERVAL = ConfigBuilder("spark.yarn.token.renewal.interval")
- .internal
+ .internal()
.timeConf(TimeUnit.MILLISECONDS)
- .optional
+ .createOptional
/* Private configs. */
private[spark] val CREDENTIALS_FILE_PATH = ConfigBuilder("spark.yarn.credentials.file")
- .internal
+ .internal()
.stringConf
- .withDefault(null)
+ .createWithDefault(null)
// Internal config to propagate the location of the user's jar to the driver/executors
private[spark] val APP_JAR = ConfigBuilder("spark.yarn.user.jar")
- .internal
+ .internal()
.stringConf
- .optional
+ .createOptional
// Internal config to propagate the locations of any extra jars to add to the classpath
// of the executors
private[spark] val SECONDARY_JARS = ConfigBuilder("spark.yarn.secondary.jars")
- .internal
+ .internal()
.stringConf
.toSequence
- .optional
+ .createOptional
}
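As a usage note, the practical difference between the two terminal methods shows up at read time: an entry built with createWithDefault yields the value type directly, while a createOptional entry yields an Option. A hedged sketch, assuming SparkConf's private[spark] get(ConfigEntry) accessor from the core config API (so it only compiles inside Spark's own packages); the helper object is illustrative, not part of this patch:

    package org.apache.spark.deploy.yarn

    import org.apache.spark.SparkConf

    // Illustrative helper, not part of this patch.
    object ConfigReadSketch {
      def sketch(sparkConf: SparkConf): Unit = {
        // createWithDefault: returns the value type, falling back to "default".
        val queue: String = sparkConf.get(config.QUEUE_NAME)

        // createOptional: returns None when the key is unset.
        val maxAttempts: Option[Int] = sparkConf.get(config.MAX_APP_ATTEMPTS)
      }
    }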