author    Xiangrui Meng <meng@apache.org>    2014-07-23 22:08:20 +0000
committer Xiangrui Meng <meng@apache.org>    2014-07-23 22:08:20 +0000
commit    300a95cc9c7b141fd243fee8275e47d8b2afd5c3 (patch)
tree      ba4a76b7c5d5dc9dffd2e6f63fe8c5b4f5604d63 /site/docs/0.9.2/configuration.html
parent    f8b391a470bae7a7d29195c61a79c73a78eb43b5 (diff)
add spark 0.9.2
Diffstat (limited to 'site/docs/0.9.2/configuration.html')
-rw-r--r--  site/docs/0.9.2/configuration.html  706
1 file changed, 706 insertions(+), 0 deletions(-)
diff --git a/site/docs/0.9.2/configuration.html b/site/docs/0.9.2/configuration.html
new file mode 100644
index 000000000..4bdaeacf5
--- /dev/null
+++ b/site/docs/0.9.2/configuration.html
@@ -0,0 +1,706 @@
+<!DOCTYPE html>
+<!--[if lt IE 7]> <html class="no-js lt-ie9 lt-ie8 lt-ie7"> <![endif]-->
+<!--[if IE 7]> <html class="no-js lt-ie9 lt-ie8"> <![endif]-->
+<!--[if IE 8]> <html class="no-js lt-ie9"> <![endif]-->
+<!--[if gt IE 8]><!--> <html class="no-js"> <!--<![endif]-->
+ <head>
+ <meta charset="utf-8">
+ <meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
+ <title>Spark Configuration - Spark 0.9.2 Documentation</title>
+ <meta name="description" content="">
+
+ <link rel="stylesheet" href="css/bootstrap.min.css">
+ <style>
+ body {
+ padding-top: 60px;
+ padding-bottom: 40px;
+ }
+ </style>
+ <meta name="viewport" content="width=device-width">
+ <link rel="stylesheet" href="css/bootstrap-responsive.min.css">
+ <link rel="stylesheet" href="css/main.css">
+
+ <script src="js/vendor/modernizr-2.6.1-respond-1.1.0.min.js"></script>
+
+ <link rel="stylesheet" href="css/pygments-default.css">
+
+
+ <!-- Google analytics script -->
+ <script type="text/javascript">
+ var _gaq = _gaq || [];
+ _gaq.push(['_setAccount', 'UA-32518208-1']);
+ _gaq.push(['_trackPageview']);
+
+ (function() {
+ var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
+ ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
+ var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
+ })();
+ </script>
+
+
+ </head>
+ <body>
+ <!--[if lt IE 7]>
+ <p class="chromeframe">You are using an outdated browser. <a href="http://browsehappy.com/">Upgrade your browser today</a> or <a href="http://www.google.com/chromeframe/?redirect=true">install Google Chrome Frame</a> to better experience this site.</p>
+ <![endif]-->
+
+ <!-- This code is taken from http://twitter.github.com/bootstrap/examples/hero.html -->
+
+ <div class="navbar navbar-fixed-top" id="topbar">
+ <div class="navbar-inner">
+ <div class="container">
+ <div class="brand"><a href="index.html">
+ <img src="img/spark-logo-hd.png" style="height:50px;"/></a><span class="version">0.9.2</span>
+ </div>
+ <ul class="nav">
+ <!--TODO(andyk): Add class="active" attribute to li somehow.-->
+ <li><a href="index.html">Overview</a></li>
+
+ <li class="dropdown">
+ <a href="#" class="dropdown-toggle" data-toggle="dropdown">Programming Guides<b class="caret"></b></a>
+ <ul class="dropdown-menu">
+ <li><a href="quick-start.html">Quick Start</a></li>
+ <li><a href="scala-programming-guide.html">Spark in Scala</a></li>
+ <li><a href="java-programming-guide.html">Spark in Java</a></li>
+ <li><a href="python-programming-guide.html">Spark in Python</a></li>
+ <li class="divider"></li>
+ <li><a href="streaming-programming-guide.html">Spark Streaming</a></li>
+ <li><a href="mllib-guide.html">MLlib (Machine Learning)</a></li>
+ <li><a href="bagel-programming-guide.html">Bagel (Pregel on Spark)</a></li>
+ <li><a href="graphx-programming-guide.html">GraphX (Graph Processing)</a></li>
+ </ul>
+ </li>
+
+ <li class="dropdown">
+ <a href="#" class="dropdown-toggle" data-toggle="dropdown">API Docs<b class="caret"></b></a>
+ <ul class="dropdown-menu">
+ <li><a href="api/core/index.html#org.apache.spark.package">Spark Core for Java/Scala</a></li>
+ <li><a href="api/pyspark/index.html">Spark Core for Python</a></li>
+ <li class="divider"></li>
+ <li><a href="api/streaming/index.html#org.apache.spark.streaming.package">Spark Streaming</a></li>
+ <li><a href="api/mllib/index.html#org.apache.spark.mllib.package">MLlib (Machine Learning)</a></li>
+ <li><a href="api/bagel/index.html#org.apache.spark.bagel.package">Bagel (Pregel on Spark)</a></li>
+ <li><a href="api/graphx/index.html#org.apache.spark.graphx.package">GraphX (Graph Processing)</a></li>
+ <li class="divider"></li>
+ <li class="dropdown-submenu">
+ <a tabindex="-1" href="#">External Data Sources</a>
+ <ul class="dropdown-menu">
+ <li><a href="api/external/kafka/index.html#org.apache.spark.streaming.kafka.KafkaUtils$">Kafka</a></li>
+ <li><a href="api/external/flume/index.html#org.apache.spark.streaming.flume.FlumeUtils$">Flume</a></li>
+ <li><a href="api/external/twitter/index.html#org.apache.spark.streaming.twitter.TwitterUtils$">Twitter</a></li>
+ <li><a href="api/external/zeromq/index.html#org.apache.spark.streaming.zeromq.ZeroMQUtils$">ZeroMQ</a></li>
+ <li><a href="api/external/mqtt/index.html#org.apache.spark.streaming.mqtt.MQTTUtils$">MQTT</a></li>
+ </ul>
+ </li>
+ </ul>
+ </li>
+
+ <li class="dropdown">
+ <a href="#" class="dropdown-toggle" data-toggle="dropdown">Deploying<b class="caret"></b></a>
+ <ul class="dropdown-menu">
+ <li><a href="cluster-overview.html">Overview</a></li>
+ <li><a href="ec2-scripts.html">Amazon EC2</a></li>
+ <li><a href="spark-standalone.html">Standalone Mode</a></li>
+ <li><a href="running-on-mesos.html">Mesos</a></li>
+ <li><a href="running-on-yarn.html">YARN</a></li>
+ </ul>
+ </li>
+
+ <li class="dropdown">
+ <a href="api.html" class="dropdown-toggle" data-toggle="dropdown">More<b class="caret"></b></a>
+ <ul class="dropdown-menu">
+ <li><a href="configuration.html">Configuration</a></li>
+ <li><a href="monitoring.html">Monitoring</a></li>
+ <li><a href="tuning.html">Tuning Guide</a></li>
+ <li><a href="hadoop-third-party-distributions.html">Running with CDH/HDP</a></li>
+ <li><a href="hardware-provisioning.html">Hardware Provisioning</a></li>
+ <li><a href="job-scheduling.html">Job Scheduling</a></li>
+ <li class="divider"></li>
+ <li><a href="building-with-maven.html">Building Spark with Maven</a></li>
+ <li><a href="https://cwiki.apache.org/confluence/display/SPARK/Contributing+to+Spark">Contributing to Spark</a></li>
+ </ul>
+ </li>
+ </ul>
+ <!--<p class="navbar-text pull-right"><span class="version-text">v0.9.2</span></p>-->
+ </div>
+ </div>
+ </div>
+
+ <div class="container" id="content">
+ <h1 class="title">Spark Configuration</h1>
+
+ <p>Spark provides three locations to configure the system:</p>
+
+<ul>
+ <li><a href="#spark-properties">Spark properties</a> control most application parameters and can be set by passing
+a <a href="api/core/index.html#org.apache.spark.SparkConf">SparkConf</a> object to SparkContext, or through Java
+system properties.</li>
+ <li><a href="#environment-variables">Environment variables</a> can be used to set per-machine settings, such as
+the IP address, through the <code>conf/spark-env.sh</code> script on each node.</li>
+ <li><a href="#configuring-logging">Logging</a> can be configured through <code>log4j.properties</code>.</li>
+</ul>
+
+<h1 id="spark-properties">Spark Properties</h1>
+
+<p>Spark properties control most application settings and are configured separately for each application.
+The preferred way to set them is by passing a <a href="api/core/index.html#org.apache.spark.SparkConf">SparkConf</a>
+object to your SparkContext constructor.
+Alternatively, Spark will also load them from Java system properties, for compatibility with older versions
+of Spark.</p>
+
+<p>SparkConf lets you configure most of the common properties to initialize a cluster (e.g., master URL and
+application name), as well as arbitrary key-value pairs through the <code>set()</code> method. For example, we could
+initialize an application as follows:</p>
+
+<div class="highlight"><pre><code class="scala"><span class="k">val</span> <span class="n">conf</span> <span class="k">=</span> <span class="k">new</span> <span class="nc">SparkConf</span><span class="o">()</span>
+ <span class="o">.</span><span class="n">setMaster</span><span class="o">(</span><span class="s">&quot;local&quot;</span><span class="o">)</span>
+ <span class="o">.</span><span class="n">setAppName</span><span class="o">(</span><span class="s">&quot;My application&quot;</span><span class="o">)</span>
+ <span class="o">.</span><span class="n">set</span><span class="o">(</span><span class="s">&quot;spark.executor.memory&quot;</span><span class="o">,</span> <span class="s">&quot;1g&quot;</span><span class="o">)</span>
+<span class="k">val</span> <span class="n">sc</span> <span class="k">=</span> <span class="k">new</span> <span class="nc">SparkContext</span><span class="o">(</span><span class="n">conf</span><span class="o">)</span>
+</code></pre></div>
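+
+<p>For the legacy system-property mode mentioned above, the same settings can be supplied as JVM system
+properties before the SparkContext is created. A minimal sketch (the property value is illustrative):</p>
+
+<pre><code class="scala">// Set properties programmatically before constructing the context...
+System.setProperty("spark.executor.memory", "1g")
+val sc = new SparkContext("local", "My application")
+
+// ...or pass them as JVM flags, e.g. via SPARK_JAVA_OPTS in conf/spark-env.sh:
+//   -Dspark.executor.memory=1g</code></pre>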
+
+<p>Most of the properties control internal settings that have reasonable default values. However,
+there are five properties that you will commonly want to control:</p>
+
+<table class="table">
+<tr><th>Property Name</th><th>Default</th><th>Meaning</th></tr>
+<tr>
+ <td>spark.executor.memory</td>
+ <td>512m</td>
+ <td>
+ Amount of memory to use per executor process, in the same format as JVM memory strings (e.g. <code>512m</code>, <code>2g</code>).
+ </td>
+</tr>
+<tr>
+ <td>spark.serializer</td>
+ <td>org.apache.spark.serializer.<br />JavaSerializer</td>
+ <td>
+ Class to use for serializing objects that will be sent over the network or need to be cached
+ in serialized form. The default of Java serialization works with any Serializable Java object but is
+ quite slow, so we recommend <a href="tuning.html">using <code>org.apache.spark.serializer.KryoSerializer</code>
+ and configuring Kryo serialization</a> when speed is necessary. Can be any subclass of
+ <a href="api/core/index.html#org.apache.spark.serializer.Serializer"><code>org.apache.spark.Serializer</code></a>.
+ </td>
+</tr>
+<tr>
+ <td>spark.kryo.registrator</td>
+ <td>(none)</td>
+ <td>
+ If you use Kryo serialization, set this class to register your custom classes with Kryo.
+ It should be set to a class that extends
+ <a href="api/core/index.html#org.apache.spark.serializer.KryoRegistrator"><code>KryoRegistrator</code></a>.
+ See the <a href="tuning.html#data-serialization">tuning guide</a> for more details, and the short sketch after this table.
+ </td>
+</tr>
+<tr>
+ <td>spark.local.dir</td>
+ <td>/tmp</td>
+ <td>
+ Directory to use for "scratch" space in Spark, including map output files and RDDs that get stored
+ on disk. This should be on a fast, local disk in your system. It can also be a comma-separated
+ list of multiple directories on different disks.
+ </td>
+</tr>
+<tr>
+ <td>spark.cores.max</td>
+ <td>(not set)</td>
+ <td>
+ When running on a <a href="spark-standalone.html">standalone deploy cluster</a> or a
+ <a href="running-on-mesos.html#mesos-run-modes">Mesos cluster in "coarse-grained"
+ sharing mode</a>, the maximum amount of CPU cores to request for the application from
+ across the cluster (not from each machine). If not set, the default will be
+ <code>spark.deploy.defaultCores</code> on Spark's standalone cluster manager, or
+ infinite (all available cores) on Mesos.
+ </td>
+</tr>
+</table>
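+
+<p>To illustrate the Kryo-related properties above, the sketch below wires a custom registrator into an
+application, following the pattern in the <a href="tuning.html#data-serialization">tuning guide</a>; the
+<code>MyClass</code> names and the <code>mypackage</code> prefix are illustrative:</p>
+
+<pre><code class="scala">import com.esotericsoftware.kryo.Kryo
+import org.apache.spark.SparkConf
+import org.apache.spark.serializer.KryoRegistrator
+
+// Hypothetical application classes that will be shipped over the network or cached
+class MyClass1
+class MyClass2
+
+class MyRegistrator extends KryoRegistrator {
+  override def registerClasses(kryo: Kryo) {
+    kryo.register(classOf[MyClass1])
+    kryo.register(classOf[MyClass2])
+  }
+}
+
+val conf = new SparkConf()
+  .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
+  .set("spark.kryo.registrator", "mypackage.MyRegistrator") // fully-qualified name of the registrator</code></pre>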
+
+<p>Apart from these, the following properties are also available, and may be useful in some situations:</p>
+
+<table class="table">
+<tr><th>Property Name</th><th>Default</th><th>Meaning</th></tr>
+<tr>
+ <td>spark.default.parallelism</td>
+ <td>8</td>
+ <td>
+ Default number of tasks to use across the cluster for distributed shuffle operations (<code>groupByKey</code>,
+ <code>reduceByKey</code>, etc) when not set by the user.
+ </td>
+</tr>
+<tr>
+ <td>spark.storage.memoryFraction</td>
+ <td>0.6</td>
+ <td>
+ Fraction of Java heap to use for Spark's memory cache. This should not be larger than the "old"
+ generation of objects in the JVM, which by default is given 0.6 of the heap, but you can increase
+ it if you configure your own old generation size.
+ </td>
+</tr>
+<tr>
+ <td>spark.shuffle.memoryFraction</td>
+ <td>0.3</td>
+ <td>
+ Fraction of Java heap to use for aggregation and cogroups during shuffles, if
+ <code>spark.shuffle.spill</code> is true. At any given time, the collective size of
+ all in-memory maps used for shuffles is bounded by this limit, beyond which the contents will
+ begin to spill to disk. If spills happen often, consider increasing this value at the expense of
+ <code>spark.storage.memoryFraction</code>.
+ </td>
+</tr>
+<tr>
+ <td>spark.storage.memoryMapThreshold</td>
+ <td>8192</td>
+ <td>
+ Size of a block, in bytes, above which Spark memory maps when reading a block from disk.
+ This prevents Spark from memory mapping very small blocks. In general, memory
+ mapping has high overhead for blocks close to or below the page size of the operating system.
+ </td>
+</tr>
+<tr>
+ <td>spark.mesos.coarse</td>
+ <td>false</td>
+ <td>
+ If set to "true", runs over Mesos clusters in
+ <a href="running-on-mesos.html#mesos-run-modes">"coarse-grained" sharing mode</a>,
+ where Spark acquires one long-lived Mesos task on each machine instead of one Mesos task per Spark task.
+ This gives lower-latency scheduling for short queries, but leaves resources in use for the whole
+ duration of the Spark job.
+ </td>
+</tr>
+<tr>
+ <td>spark.ui.port</td>
+ <td>4040</td>
+ <td>
+ Port for your application's dashboard, which shows memory and workload data.
+ </td>
+</tr>
+<tr>
+ <td>spark.ui.retainedStages</td>
+ <td>1000</td>
+ <td>
+ How many stages the Spark UI remembers before garbage collecting.
+ </td>
+</tr>
+<tr>
+ <td>spark.shuffle.compress</td>
+ <td>true</td>
+ <td>
+ Whether to compress map output files. Generally a good idea.
+ </td>
+</tr>
+<tr>
+ <td>spark.shuffle.spill.compress</td>
+ <td>true</td>
+ <td>
+ Whether to compress data spilled during shuffles.
+ </td>
+</tr>
+<tr>
+ <td>spark.broadcast.compress</td>
+ <td>true</td>
+ <td>
+ Whether to compress broadcast variables before sending them. Generally a good idea.
+ </td>
+</tr>
+<tr>
+ <td>spark.rdd.compress</td>
+ <td>false</td>
+ <td>
+ Whether to compress serialized RDD partitions (e.g. for <code>StorageLevel.MEMORY_ONLY_SER</code>).
+ Can save substantial space at the cost of some extra CPU time.
+ </td>
+</tr>
+<tr>
+ <td>spark.io.compression.codec</td>
+ <td>org.apache.spark.io.<br />LZFCompressionCodec</td>
+ <td>
+ The codec used to compress internal data such as RDD partitions and shuffle outputs. By default, Spark provides two
+ codecs: <code>org.apache.spark.io.LZFCompressionCodec</code> and <code>org.apache.spark.io.SnappyCompressionCodec</code>.
+ </td>
+</tr>
+<tr>
+ <td>spark.io.compression.snappy.block.size</td>
+ <td>32768</td>
+ <td>
+ Block size (in bytes) used in Snappy compression, when the Snappy compression codec is used.
+ </td>
+</tr>
+<tr>
+ <td>spark.scheduler.mode</td>
+ <td>FIFO</td>
+ <td>
+ The <a href="job-scheduling.html#scheduling-within-an-application">scheduling mode</a> between
+ jobs submitted to the same SparkContext. Can be set to <code>FAIR</code>
+ to use fair sharing instead of queueing jobs one after another. Useful for
+ multi-user services.
+ </td>
+</tr>
+<tr>
+ <td>spark.reducer.maxMbInFlight</td>
+ <td>48</td>
+ <td>
+ Maximum size (in megabytes) of map outputs to fetch simultaneously from each reduce task. Since
+ each output requires us to create a buffer to receive it, this represents a fixed memory overhead
+ per reduce task, so keep it small unless you have a large amount of memory.
+ </td>
+</tr>
+<tr>
+ <td>spark.closure.serializer</td>
+ <td>org.apache.spark.serializer.<br />JavaSerializer</td>
+ <td>
+ Serializer class to use for closures. Generally Java is fine unless your distributed functions
+ (e.g. map functions) reference large objects in the driver program.
+ </td>
+</tr>
+<tr>
+ <td>spark.kryo.referenceTracking</td>
+ <td>true</td>
+ <td>
+ Whether to track references to the same object when serializing data with Kryo, which is
+ necessary if your object graphs have loops and useful for efficiency if they contain multiple
+ copies of the same object. Can be disabled to improve performance if you know this is not the
+ case.
+ </td>
+</tr>
+<tr>
+ <td>spark.kryoserializer.buffer.mb</td>
+ <td>2</td>
+ <td>
+ Maximum object size to allow within Kryo (the library needs to create a buffer at least as
+ large as the largest single object you'll serialize). Increase this if you get a "buffer limit
+ exceeded" exception inside Kryo. Note that there will be one buffer <i>per core</i> on each worker.
+ </td>
+</tr>
+<tr>
+ <td>spark.broadcast.factory</td>
+ <td>org.apache.spark.broadcast.<br />HttpBroadcastFactory</td>
+ <td>
+ Which broadcast implementation to use.
+ </td>
+</tr>
+<tr>
+ <td>spark.locality.wait</td>
+ <td>3000</td>
+ <td>
+ Number of milliseconds to wait to launch a data-local task before giving up and launching it
+ on a less-local node. The same wait will be used to step through multiple locality levels
+ (process-local, node-local, rack-local and then any). It is also possible to customize the
+ waiting time for each level by setting <code>spark.locality.wait.node</code>, etc.
+ You should increase this setting if your tasks are long and you see poor locality, but the
+ default usually works well.
+ </td>
+</tr>
+<tr>
+ <td>spark.locality.wait.process</td>
+ <td>spark.locality.wait</td>
+ <td>
+ Customize the locality wait for process locality. This affects tasks that attempt to access
+ cached data in a particular executor process.
+ </td>
+</tr>
+<tr>
+ <td>spark.locality.wait.node</td>
+ <td>spark.locality.wait</td>
+ <td>
+ Customize the locality wait for node locality. For example, you can set this to 0 to skip
+ node locality and search immediately for rack locality (if your cluster has rack information).
+ </td>
+</tr>
+<tr>
+ <td>spark.locality.wait.rack</td>
+ <td>spark.locality.wait</td>
+ <td>
+ Customize the locality wait for rack locality.
+ </td>
+</tr>
+<tr>
+ <td>spark.worker.timeout</td>
+ <td>60</td>
+ <td>
+ Number of seconds after which the standalone deploy master considers a worker lost if it
+ receives no heartbeats.
+ </td>
+</tr>
+<tr>
+ <td>spark.akka.frameSize</td>
+ <td>10</td>
+ <td>
+ Maximum message size to allow in "control plane" communication (for serialized tasks and task
+ results), in MB. Increase this if your tasks need to send back large results to the driver
+ (e.g. using <code>collect()</code> on a large dataset).
+ </td>
+</tr>
+<tr>
+ <td>spark.akka.threads</td>
+ <td>4</td>
+ <td>
+ Number of actor threads to use for communication. Can be useful to increase on large clusters
+ when the driver has a lot of CPU cores.
+ </td>
+</tr>
+<tr>
+ <td>spark.akka.timeout</td>
+ <td>100</td>
+ <td>
+ Communication timeout between Spark nodes, in seconds.
+ </td>
+</tr>
+<tr>
+ <td>spark.akka.heartbeat.pauses</td>
+ <td>600</td>
+ <td>
+ This is set to a large value by default, effectively disabling Akka's built-in failure detector. It can be enabled again if you plan to use this feature (not recommended). This is the acceptable heartbeat pause, in seconds, for Akka, and can be used to control sensitivity to GC pauses. Tune it in combination with <code>spark.akka.heartbeat.interval</code> and <code>spark.akka.failure-detector.threshold</code> if you need to.
+ </td>
+</tr>
+<tr>
+ <td>spark.akka.failure-detector.threshold</td>
+ <td>300.0</td>
+ <td>
+ This is set to a large value by default, effectively disabling Akka's built-in failure detector. It can be enabled again if you plan to use this feature (not recommended). This maps to Akka's <code>akka.remote.transport-failure-detector.threshold</code>. Tune it in combination with <code>spark.akka.heartbeat.pauses</code> and <code>spark.akka.heartbeat.interval</code> if you need to.
+ </td>
+</tr>
+<tr>
+ <td>spark.akka.heartbeat.interval</td>
+ <td>1000</td>
+ <td>
+ This is set to a large value by default, effectively disabling Akka's built-in failure detector. It can be enabled again if you plan to use this feature (not recommended). A larger interval value, in seconds, reduces network overhead, while a smaller value (~1 s) may be more informative for Akka's failure detector. Tune it in combination with <code>spark.akka.heartbeat.pauses</code> and <code>spark.akka.failure-detector.threshold</code> if you need to. The one positive use case for the failure detector is that a sensitive detector can help evict rogue executors quickly. However, this is usually not worthwhile, as GC pauses and network lags are expected in a real Spark cluster; moreover, enabling it leads to frequent heartbeat exchanges between nodes, flooding the network.
+ </td>
+</tr>
+<tr>
+ <td>spark.driver.host</td>
+ <td>(local hostname)</td>
+ <td>
+ Hostname or IP address for the driver to listen on.
+ </td>
+</tr>
+<tr>
+ <td>spark.driver.port</td>
+ <td>(random)</td>
+ <td>
+ Port for the driver to listen on.
+ </td>
+</tr>
+<tr>
+ <td>spark.cleaner.ttl</td>
+ <td>(infinite)</td>
+ <td>
+ Duration (in seconds) for which Spark will remember any metadata (stages generated, tasks generated, etc.).
+ Periodic cleanups will ensure that metadata older than this duration is forgotten. This is
+ useful when running Spark for many hours / days (for example, running 24/7 in the case of Spark Streaming
+ applications). Note that any RDD that persists in memory for more than this duration will be cleared as well.
+ </td>
+</tr>
+<tr>
+ <td>spark.streaming.blockInterval</td>
+ <td>200</td>
+ <td>
+ Duration (in milliseconds) over which to batch new objects coming from the network receivers used
+ in Spark Streaming.
+ </td>
+</tr>
+<tr>
+ <td>spark.streaming.unpersist</td>
+ <td>false</td>
+ <td>
+ Force RDDs generated and persisted by Spark Streaming to be automatically unpersisted from
+ Spark's memory. Setting this to true is likely to reduce Spark's RDD memory usage.
+ </td>
+</tr>
+<tr>
+ <td>spark.task.maxFailures</td>
+ <td>4</td>
+ <td>
+ Number of individual task failures before giving up on the job.
+ Should be greater than or equal to 1. Number of allowed retries = this value - 1.
+ </td>
+</tr>
+<tr>
+ <td>spark.broadcast.blockSize</td>
+ <td>4096</td>
+ <td>
+ Size of each piece of a block in kilobytes for <code>TorrentBroadcastFactory</code>.
+ Too large a value decreases parallelism during broadcast (makes it slower); however, if it is too small, <code>BlockManager</code> might take a performance hit.
+ </td>
+</tr>
+
+<tr>
+ <td>spark.shuffle.consolidateFiles</td>
+ <td>false</td>
+ <td>
+ If set to "true", consolidates intermediate files created during a shuffle. Creating fewer files can improve filesystem performance for shuffles with large numbers of reduce tasks. It is recommended to set this to "true" when using ext4 or xfs filesystems. On ext3, this option might degrade performance on machines with many (&gt;8) cores due to filesystem limitations.
+ </td>
+</tr>
+<tr>
+ <td>spark.shuffle.file.buffer.kb</td>
+ <td>100</td>
+ <td>
+ Size of the in-memory buffer for each shuffle file output stream, in kilobytes. These buffers
+ reduce the number of disk seeks and system calls made in creating intermediate shuffle files.
+ </td>
+</tr>
+<tr>
+ <td>spark.shuffle.spill</td>
+ <td>true</td>
+ <td>
+ If set to "true", limits the amount of memory used during reduces by spilling data out to disk. This spilling
+ threshold is specified by <code>spark.shuffle.memoryFraction</code>.
+ </td>
+</tr>
+<tr>
+ <td>spark.speculation</td>
+ <td>false</td>
+ <td>
+ If set to "true", performs speculative execution of tasks. This means if one or more tasks are running slowly in a stage, they will be re-launched.
+ </td>
+</tr>
+<tr>
+ <td>spark.speculation.interval</td>
+ <td>100</td>
+ <td>
+ How often Spark will check for tasks to speculate, in milliseconds.
+ </td>
+</tr>
+<tr>
+ <td>spark.speculation.quantile</td>
+ <td>0.75</td>
+ <td>
+ Fraction of tasks (between 0 and 1) which must be complete before speculation is enabled for a particular stage.
+ </td>
+</tr>
+<tr>
+ <td>spark.speculation.multiplier</td>
+ <td>1.5</td>
+ <td>
+ How many times slower than the median a task must be before it is considered for speculation.
+ </td>
+</tr>
+<tr>
+ <td>spark.logConf</td>
+ <td>false</td>
+ <td>
+ Log the supplied SparkConf as INFO when a SparkContext is started.
+ </td>
+</tr>
+<tr>
+ <td>spark.deploy.spreadOut</td>
+ <td>true</td>
+ <td>
+ Whether the standalone cluster manager should spread applications out across nodes or try
+ to consolidate them onto as few nodes as possible. Spreading out is usually better for
+ data locality in HDFS, but consolidating is more efficient for compute-intensive workloads. <br />
+ <b>Note:</b> this setting needs to be configured in the standalone cluster master, not in individual
+ applications; you can set it through <code>SPARK_JAVA_OPTS</code> in <code>spark-env.sh</code>.
+ </td>
+</tr>
+<tr>
+ <td>spark.deploy.defaultCores</td>
+ <td>(infinite)</td>
+ <td>
+ Default number of cores to give to applications in Spark's standalone mode if they don't
+ set <code>spark.cores.max</code>. If not set, applications always get all available
+ cores unless they configure <code>spark.cores.max</code> themselves.
+ Set this lower on a shared cluster to prevent users from grabbing
+ the whole cluster by default. <br />
+ <b>Note:</b> this setting needs to be configured in the standalone cluster master, not in individual
+ applications; you can set it through <code>SPARK_JAVA_OPTS</code> in <code>spark-env.sh</code>.
+ </td>
+</tr>
+</table>
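+
+<p>For the two master-side properties above, a minimal sketch of the relevant line in the standalone master's
+<code>conf/spark-env.sh</code> (the values are illustrative):</p>
+
+<pre><code>SPARK_JAVA_OPTS="-Dspark.deploy.defaultCores=16 -Dspark.deploy.spreadOut=false"</code></pre>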
+
+<h2 id="viewing-spark-properties">Viewing Spark Properties</h2>
+
+<p>The application web UI at <code>http://&lt;driver&gt;:4040</code> lists Spark properties in the &#8220;Environment&#8221; tab.
+This is a useful place to check to make sure that your properties have been set correctly.</p>
+
+<h1 id="environment-variables">Environment Variables</h1>
+
+<p>Certain Spark settings can be configured through environment variables, which are read from the <code>conf/spark-env.sh</code>
+script in the directory where Spark is installed (or <code>conf/spark-env.cmd</code> on Windows). These variables are meant
+for machine-specific settings, such as library search paths. While Spark properties can also be set there through
+<code>SPARK_JAVA_OPTS</code>, for per-application settings we recommend setting
+those properties within the application itself instead of in <code>spark-env.sh</code>, so that different applications
+can use different settings.</p>
+
+<p>Note that <code>conf/spark-env.sh</code> does not exist by default when Spark is installed. However, you can copy
+<code>conf/spark-env.sh.template</code> to create it. Make sure you make the copy executable.</p>
+
+<p>The following variables can be set in <code>spark-env.sh</code>:</p>
+
+<ul>
+ <li><code>JAVA_HOME</code>, the location where Java is installed (if it&#8217;s not on your default <code>PATH</code>)</li>
+ <li><code>PYSPARK_PYTHON</code>, the Python binary to use for PySpark</li>
+ <li><code>SPARK_LOCAL_IP</code>, to configure which IP address of the machine to bind to.</li>
+ <li><code>SPARK_LIBRARY_PATH</code>, to add search directories for native libraries.</li>
+ <li><code>SPARK_CLASSPATH</code>, to add elements to Spark&#8217;s classpath that you want to be present for <em>all</em> applications.
+ Note that applications can also add dependencies for themselves through <code>SparkContext.addJar</code> &#8211; we recommend
+ doing that when possible.</li>
+ <li><code>SPARK_JAVA_OPTS</code>, to add JVM options. This includes Java options like garbage collector settings and any system
+ properties that you&#8217;d like to pass with <code>-D</code>. One use case is to set some Spark properties differently on this
+ machine, e.g., <code>-Dspark.local.dir=/disk1,/disk2</code>.</li>
+ <li>Options for the Spark <a href="spark-standalone.html#cluster-launch-scripts">standalone cluster scripts</a>, such as number of cores
+to use on each machine and maximum memory.</li>
+</ul>
+
+<p>Since <code>spark-env.sh</code> is a shell script, some of these can be set programmatically &#8211; for example, you might
+compute <code>SPARK_LOCAL_IP</code> by looking up the IP of a specific network interface, as in the sketch below.</p>
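+
+<p>Putting these together, a minimal <code>spark-env.sh</code> might look like the following sketch. The interface
+name, paths, and memory size are illustrative, and the IP lookup assumes GNU/Linux <code>ifconfig</code> output:</p>
+
+<pre><code>#!/usr/bin/env bash
+# Machine-specific settings; create this file by copying conf/spark-env.sh.template.
+export JAVA_HOME=/usr/lib/jvm/java-7-oracle
+export SPARK_WORKER_MEMORY=4g
+# Bind to the IP of a specific network interface (eth0 here)
+export SPARK_LOCAL_IP=$(/sbin/ifconfig eth0 | grep 'inet addr:' | cut -d: -f2 | awk '{print $1}')</code></pre>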
+
+<h1 id="configuring-logging">Configuring Logging</h1>
+
+<p>Spark uses <a href="http://logging.apache.org/log4j/">log4j</a> for logging. You can configure it by adding a <code>log4j.properties</code>
+file in the <code>conf</code> directory. One way to start is to copy the existing <code>log4j.properties.template</code> located there.</p>
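+
+<p>For reference, here is a minimal <code>log4j.properties</code> in the spirit of that template, raising the root
+logger to WARN to quiet Spark's verbose INFO output (a common tweak; the pattern layout shown is illustrative):</p>
+
+<pre><code># Log everything to the console
+log4j.rootCategory=WARN, console
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{1}: %m%n</code></pre>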
+
+
+ </div> <!-- /container -->
+
+ <script src="js/vendor/jquery-1.8.0.min.js"></script>
+ <script src="js/vendor/bootstrap.min.js"></script>
+ <script src="js/main.js"></script>
+
+ <!-- A script to fix internal hash links because we have an overlapping top bar.
+ Based on https://github.com/twitter/bootstrap/issues/193#issuecomment-2281510 -->
+ <script>
+ $(function() {
+ function maybeScrollToHash() {
+ if (window.location.hash && $(window.location.hash).length) {
+ var newTop = $(window.location.hash).offset().top - $('#topbar').height() - 5;
+ $(window).scrollTop(newTop);
+ }
+ }
+ $(window).bind('hashchange', function() {
+ maybeScrollToHash();
+ });
+ // Scroll now too in case we had opened the page on a hash, but wait 1 ms because some browsers
+ // will try to do *their* initial scroll after running the onReady handler.
+ setTimeout(function() { maybeScrollToHash(); }, 1);
+ });
+ </script>
+
+ </body>
+</html>