author    Patrick Wendell <pwendell@apache.org>  2014-07-11 17:23:23 +0000
committer Patrick Wendell <pwendell@apache.org>  2014-07-11 17:23:23 +0000
commit    0beac4e243f85e71554fe04093b09eb1745fea82 (patch)
tree      bc20d10426c5d57e2f189305865dc2bbec447923 /site/docs/1.0.1/cluster-overview.html
parent    ddec2123ba6ab95543d1b250d4f20fb811c48f09 (diff)
Updating docs for 1.0.1 release
Diffstat (limited to 'site/docs/1.0.1/cluster-overview.html')
-rw-r--r--  site/docs/1.0.1/cluster-overview.html | 290
1 file changed, 290 insertions(+), 0 deletions(-)
diff --git a/site/docs/1.0.1/cluster-overview.html b/site/docs/1.0.1/cluster-overview.html
new file mode 100644
index 000000000..0a2d43216
--- /dev/null
+++ b/site/docs/1.0.1/cluster-overview.html
@@ -0,0 +1,290 @@
+<!DOCTYPE html>
+<!--[if lt IE 7]> <html class="no-js lt-ie9 lt-ie8 lt-ie7"> <![endif]-->
+<!--[if IE 7]> <html class="no-js lt-ie9 lt-ie8"> <![endif]-->
+<!--[if IE 8]> <html class="no-js lt-ie9"> <![endif]-->
+<!--[if gt IE 8]><!--> <html class="no-js"> <!--<![endif]-->
+ <head>
+ <meta charset="utf-8">
+ <meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
+ <title>Cluster Mode Overview - Spark 1.0.1 Documentation</title>
+ <meta name="description" content="">
+
+
+
+ <link rel="stylesheet" href="css/bootstrap.min.css">
+ <style>
+ body {
+ padding-top: 60px;
+ padding-bottom: 40px;
+ }
+ </style>
+ <meta name="viewport" content="width=device-width">
+ <link rel="stylesheet" href="css/bootstrap-responsive.min.css">
+ <link rel="stylesheet" href="css/main.css">
+
+ <script src="js/vendor/modernizr-2.6.1-respond-1.1.0.min.js"></script>
+
+ <link rel="stylesheet" href="css/pygments-default.css">
+
+
+ <!-- Google analytics script -->
+ <script type="text/javascript">
+ var _gaq = _gaq || [];
+ _gaq.push(['_setAccount', 'UA-32518208-1']);
+ _gaq.push(['_trackPageview']);
+
+ (function() {
+ var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
+ ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
+ var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
+ })();
+ </script>
+
+
+ </head>
+ <body>
+ <!--[if lt IE 7]>
+ <p class="chromeframe">You are using an outdated browser. <a href="http://browsehappy.com/">Upgrade your browser today</a> or <a href="http://www.google.com/chromeframe/?redirect=true">install Google Chrome Frame</a> to better experience this site.</p>
+ <![endif]-->
+
+ <!-- This code is taken from http://twitter.github.com/bootstrap/examples/hero.html -->
+
+ <div class="navbar navbar-fixed-top" id="topbar">
+ <div class="navbar-inner">
+ <div class="container">
+ <div class="brand"><a href="index.html">
+ <img src="img/spark-logo-hd.png" style="height:50px;"/></a><span class="version">1.0.1</span>
+ </div>
+ <ul class="nav">
+                        <!--TODO(andyk): Add class="active" attribute to li somehow.-->
+ <li><a href="index.html">Overview</a></li>
+
+ <li class="dropdown">
+ <a href="#" class="dropdown-toggle" data-toggle="dropdown">Programming Guides<b class="caret"></b></a>
+ <ul class="dropdown-menu">
+ <li><a href="quick-start.html">Quick Start</a></li>
+ <li><a href="programming-guide.html">Spark Programming Guide</a></li>
+ <li class="divider"></li>
+ <li><a href="streaming-programming-guide.html">Spark Streaming</a></li>
+ <li><a href="sql-programming-guide.html">Spark SQL</a></li>
+ <li><a href="mllib-guide.html">MLlib (Machine Learning)</a></li>
+ <li><a href="graphx-programming-guide.html">GraphX (Graph Processing)</a></li>
+ <li><a href="bagel-programming-guide.html">Bagel (Pregel on Spark)</a></li>
+ </ul>
+ </li>
+
+ <li class="dropdown">
+ <a href="#" class="dropdown-toggle" data-toggle="dropdown">API Docs<b class="caret"></b></a>
+ <ul class="dropdown-menu">
+ <li><a href="api/scala/index.html#org.apache.spark.package">Scaladoc</a></li>
+ <li><a href="api/java/index.html">Javadoc</a></li>
+ <li><a href="api/python/index.html">Python API</a></li>
+ </ul>
+ </li>
+
+ <li class="dropdown">
+ <a href="#" class="dropdown-toggle" data-toggle="dropdown">Deploying<b class="caret"></b></a>
+ <ul class="dropdown-menu">
+ <li><a href="cluster-overview.html">Overview</a></li>
+ <li><a href="submitting-applications.html">Submitting Applications</a></li>
+ <li class="divider"></li>
+ <li><a href="ec2-scripts.html">Amazon EC2</a></li>
+ <li><a href="spark-standalone.html">Standalone Mode</a></li>
+ <li><a href="running-on-mesos.html">Mesos</a></li>
+ <li><a href="running-on-yarn.html">YARN</a></li>
+ </ul>
+ </li>
+
+ <li class="dropdown">
+ <a href="api.html" class="dropdown-toggle" data-toggle="dropdown">More<b class="caret"></b></a>
+ <ul class="dropdown-menu">
+ <li><a href="configuration.html">Configuration</a></li>
+ <li><a href="monitoring.html">Monitoring</a></li>
+ <li><a href="tuning.html">Tuning Guide</a></li>
+ <li><a href="job-scheduling.html">Job Scheduling</a></li>
+ <li><a href="security.html">Security</a></li>
+ <li><a href="hardware-provisioning.html">Hardware Provisioning</a></li>
+ <li><a href="hadoop-third-party-distributions.html">3<sup>rd</sup>-Party Hadoop Distros</a></li>
+ <li class="divider"></li>
+ <li><a href="building-with-maven.html">Building Spark with Maven</a></li>
+ <li><a href="https://cwiki.apache.org/confluence/display/SPARK/Contributing+to+Spark">Contributing to Spark</a></li>
+ </ul>
+ </li>
+ </ul>
+ <!--<p class="navbar-text pull-right"><span class="version-text">v1.0.1</span></p>-->
+ </div>
+ </div>
+ </div>
+
+ <div class="container" id="content">
+
+ <h1 class="title">Cluster Mode Overview</h1>
+
+
+<p>This document gives a short overview of how Spark runs on clusters, to make it easier to understand
+the components involved. Read through the <a href="submitting-applications.html">application submission guide</a>
+to learn how to submit applications to a cluster.</p>
+
+<h1 id="components">Components</h1>
+
+<p>Spark applications run as independent sets of processes on a cluster, coordinated by the SparkContext
+object in your main program (called the <em>driver program</em>).
+Specifically, to run on a cluster, the SparkContext can connect to several types of <em>cluster managers</em>
+(either Spark&#8217;s own standalone cluster manager or Mesos/YARN), which allocate resources across
+applications. Once connected, Spark acquires <em>executors</em> on nodes in the cluster, which are
+processes that run computations and store data for your application.
+Next, it sends your application code (defined by JAR or Python files passed to SparkContext) to
+the executors. Finally, SparkContext sends <em>tasks</em> for the executors to run.</p>
+
+<p style="text-align: center;">
+ <img src="img/cluster-overview.png" title="Spark cluster components" alt="Spark cluster components" />
+</p>
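+
+<p>As a concrete illustration, the driver program constructs a <code>SparkContext</code> roughly as
+follows (a minimal Scala sketch; the application name and master URL are placeholders, not values
+this guide prescribes):</p>
+
+<pre><code>import org.apache.spark.{SparkConf, SparkContext}
+
+// Describe the application and name the cluster manager to connect to.
+// "spark://master-host:7077" is a placeholder standalone master URL.
+val conf = new SparkConf()
+  .setAppName("MyApp")
+  .setMaster("spark://master-host:7077")
+
+// Creating the context connects to the cluster manager, which then
+// allocates executors for this application on worker nodes.
+val sc = new SparkContext(conf)
+
+// ... define and run computations on sc ...
+
+sc.stop()
+</code></pre>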
+
+<p>There are several useful things to note about this architecture:</p>
+
+<ol>
+ <li>Each application gets its own executor processes, which stay up for the duration of the whole
+application and run tasks in multiple threads. This has the benefit of isolating applications
+from each other, on both the scheduling side (each driver schedules its own tasks) and executor
+side (tasks from different applications run in different JVMs). However, it also means that
+data cannot be shared across different Spark applications (instances of SparkContext) without
+writing it to an external storage system.</li>
+  <li>Spark is agnostic to the underlying cluster manager. As long as it can acquire executor
+processes, and these communicate with each other, it is relatively easy to run Spark even on a
+cluster manager that also supports other applications (e.g. Mesos/YARN).</li>
+ <li>Because the driver schedules tasks on the cluster, it should be run close to the worker
+nodes, preferably on the same local area network. If you&#8217;d like to send requests to the
+cluster remotely, it&#8217;s better to open an RPC to the driver and have it submit operations
+from nearby than to run a driver far away from the worker nodes.</li>
+</ol>
+
+<h1 id="cluster-manager-types">Cluster Manager Types</h1>
+
+<p>The system currently supports three cluster managers:</p>
+
+<ul>
+ <li><a href="spark-standalone.html">Standalone</a> &#8211; a simple cluster manager included with Spark that makes it
+easy to set up a cluster.</li>
+ <li><a href="running-on-mesos.html">Apache Mesos</a> &#8211; a general cluster manager that can also run Hadoop MapReduce
+and service applications.</li>
+ <li><a href="running-on-yarn.html">Hadoop YARN</a> &#8211; the resource manager in Hadoop 2.</li>
+</ul>
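+
+<p>The manager to use is identified by the <em>master URL</em> passed to Spark when the context is
+created (or to <code>spark-submit</code>). For illustration only (host names and ports below are
+placeholders; 7077 and 5050 are the usual standalone and Mesos defaults):</p>
+
+<pre><code>spark://host:7077    connect to a standalone cluster master
+mesos://host:5050    connect to a Mesos master
+yarn-client          run on YARN with the driver outside the cluster
+yarn-cluster         run on YARN with the driver inside the cluster
+local[4]             no cluster manager; run locally with 4 threads
+</code></pre>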
+
+<p>In addition, Spark&#8217;s <a href="ec2-scripts.html">EC2 launch scripts</a> make it easy to launch a standalone
+cluster on Amazon EC2.</p>
+
+<h1 id="submitting-applications">Submitting Applications</h1>
+
+<p>Applications can be submitted to a cluster of any type using the <code>spark-submit</code> script.
+The <a href="submitting-applications.html">application submission guide</a> describes how to do this.</p>
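+
+<p>For a sense of what a submission looks like, here is a sketch of a typical invocation (the
+class name, master URL, and jar path are placeholders for your own application):</p>
+
+<pre><code>./bin/spark-submit \
+  --class com.example.MyApp \
+  --master spark://master-host:7077 \
+  --deploy-mode cluster \
+  --executor-memory 2G \
+  /path/to/my-app.jar \
+  arg1 arg2
+</code></pre>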
+
+<h1 id="monitoring">Monitoring</h1>
+
+<p>Each driver program has a web UI, typically on port 4040, that displays information about running
+tasks, executors, and storage usage. Simply go to <code>http://&lt;driver-node&gt;:4040</code> in a web browser to
+access this UI. The <a href="monitoring.html">monitoring guide</a> also describes other monitoring options.</p>
+
+<h1 id="job-scheduling">Job Scheduling</h1>
+
+<p>Spark gives control over resource allocation both <em>across</em> applications (at the level of the cluster
+manager) and <em>within</em> applications (if multiple computations are happening on the same SparkContext).
+The <a href="job-scheduling.html">job scheduling overview</a> describes this in more detail.</p>
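+
+<p>For example, fair sharing among jobs inside one application, and a cap on the total cores an
+application claims (on the standalone and Mesos managers), can be requested through configuration
+properties when the context is created (an illustrative sketch; the values are placeholders):</p>
+
+<pre><code>import org.apache.spark.SparkConf
+
+val conf = new SparkConf()
+  .setAppName("MyApp")
+  .set("spark.scheduler.mode", "FAIR")  // fair scheduling of jobs within this application
+  .set("spark.cores.max", "48")         // placeholder cap on total cores used across the cluster
+</code></pre>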
+
+<h1 id="glossary">Glossary</h1>
+
+<p>The following table summarizes terms you&#8217;ll see used to refer to cluster concepts:</p>
+
+<table class="table">
+ <thead>
+ <tr><th style="width: 130px;">Term</th><th>Meaning</th></tr>
+ </thead>
+ <tbody>
+ <tr>
+ <td>Application</td>
+ <td>User program built on Spark. Consists of a <em>driver program</em> and <em>executors</em> on the cluster.</td>
+ </tr>
+ <tr>
+ <td>Application jar</td>
+ <td>
+        A jar containing the user's Spark application. In some cases users will want to create
+        an "uber jar" containing their application along with its dependencies. The user's jar
+        should never include Hadoop or Spark libraries; these are added at runtime.
+ </td>
+ </tr>
+ <tr>
+ <td>Driver program</td>
+      <td>The process running the main() function of the application and creating the SparkContext.</td>
+ </tr>
+ <tr>
+ <td>Cluster manager</td>
+      <td>An external service for acquiring resources on the cluster (e.g. standalone manager, Mesos, YARN).</td>
+ </tr>
+ <tr>
+ <td>Deploy mode</td>
+ <td>Distinguishes where the driver process runs. In "cluster" mode, the framework launches
+ the driver inside of the cluster. In "client" mode, the submitter launches the driver
+ outside of the cluster.</td>
+ </tr>
+ <tr>
+ <td>Worker node</td>
+      <td>Any node that can run application code in the cluster.</td>
+ </tr>
+ <tr>
+ <td>Executor</td>
+      <td>A process launched for an application on a worker node, which runs tasks and keeps data in memory
+      or disk storage across them. Each application has its own executors.</td>
+ </tr>
+ <tr>
+ <td>Task</td>
+      <td>A unit of work that will be sent to one executor.</td>
+ </tr>
+ <tr>
+ <td>Job</td>
+ <td>A parallel computation consisting of multiple tasks that gets spawned in response to a Spark action
+ (e.g. <code>save</code>, <code>collect</code>); you'll see this term used in the driver's logs.</td>
+ </tr>
+ <tr>
+ <td>Stage</td>
+ <td>Each job gets divided into smaller sets of tasks called <em>stages</em> that depend on each other
+ (similar to the map and reduce stages in MapReduce); you'll see this term used in the driver's logs.</td>
+ </tr>
+ </tbody>
+</table>
+
+
+ </div> <!-- /container -->
+
+ <script src="js/vendor/jquery-1.8.0.min.js"></script>
+ <script src="js/vendor/bootstrap.min.js"></script>
+ <script src="js/main.js"></script>
+
+ <!-- MathJax Section -->
+ <script type="text/x-mathjax-config">
+ MathJax.Hub.Config({
+ TeX: { equationNumbers: { autoNumber: "AMS" } }
+ });
+ </script>
+ <script>
+ // Note that we load MathJax this way to work with local file (file://), HTTP and HTTPS.
+ // We could use "//cdn.mathjax...", but that won't support "file://".
+ (function(d, script) {
+ script = d.createElement('script');
+ script.type = 'text/javascript';
+ script.async = true;
+ script.onload = function(){
+ MathJax.Hub.Config({
+ tex2jax: {
+ inlineMath: [ ["$", "$"], ["\\\\(","\\\\)"] ],
+ displayMath: [ ["$$","$$"], ["\\[", "\\]"] ],
+ processEscapes: true,
+ skipTags: ['script', 'noscript', 'style', 'textarea', 'pre']
+ }
+ });
+ };
+ script.src = ('https:' == document.location.protocol ? 'https://' : 'http://') +
+ 'cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS-MML_HTMLorMML';
+ d.getElementsByTagName('head')[0].appendChild(script);
+ }(document));
+ </script>
+ </body>
+</html>