author     Patrick Wendell <pwendell@apache.org>  2015-03-13 02:30:55 +0000
committer  Patrick Wendell <pwendell@apache.org>  2015-03-13 02:30:55 +0000
commit     aaf670598f15d47784d8b91b34dd614d0f8162af (patch)
tree       d86a329b056b3e9e9e79591d5b5bc3ee1781386b /site/docs/1.3.0/quick-start.html
parent     0d484ac791630a4f0ca7e82002113d94c931a1e7 (diff)
Spark 1.3.0 docs
Diffstat (limited to 'site/docs/1.3.0/quick-start.html')
-rw-r--r--  site/docs/1.3.0/quick-start.html  558
1 file changed, 558 insertions(+), 0 deletions(-)
diff --git a/site/docs/1.3.0/quick-start.html b/site/docs/1.3.0/quick-start.html
new file mode 100644
index 000000000..14799333c
--- /dev/null
+++ b/site/docs/1.3.0/quick-start.html
@@ -0,0 +1,558 @@
+<!DOCTYPE html>
+<!--[if lt IE 7]> <html class="no-js lt-ie9 lt-ie8 lt-ie7"> <![endif]-->
+<!--[if IE 7]> <html class="no-js lt-ie9 lt-ie8"> <![endif]-->
+<!--[if IE 8]> <html class="no-js lt-ie9"> <![endif]-->
+<!--[if gt IE 8]><!--> <html class="no-js"> <!--<![endif]-->
+ <head>
+ <meta charset="utf-8">
+ <meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
+ <title>Quick Start - Spark 1.3.0 Documentation</title>
+
+ <meta name="description" content="Quick start tutorial for Spark 1.3.0">
+
+
+
+
+ <link rel="stylesheet" href="css/bootstrap.min.css">
+ <style>
+ body {
+ padding-top: 60px;
+ padding-bottom: 40px;
+ }
+ </style>
+ <meta name="viewport" content="width=device-width">
+ <link rel="stylesheet" href="css/bootstrap-responsive.min.css">
+ <link rel="stylesheet" href="css/main.css">
+
+ <script src="js/vendor/modernizr-2.6.1-respond-1.1.0.min.js"></script>
+
+ <link rel="stylesheet" href="css/pygments-default.css">
+
+
+
+ </head>
+ <body>
+ <!--[if lt IE 7]>
+ <p class="chromeframe">You are using an outdated browser. <a href="http://browsehappy.com/">Upgrade your browser today</a> or <a href="http://www.google.com/chromeframe/?redirect=true">install Google Chrome Frame</a> to better experience this site.</p>
+ <![endif]-->
+
+ <!-- This code is taken from http://twitter.github.com/bootstrap/examples/hero.html -->
+
+ <div class="navbar navbar-fixed-top" id="topbar">
+ <div class="navbar-inner">
+ <div class="container">
+ <div class="brand"><a href="index.html">
+ <img src="img/spark-logo-hd.png" style="height:50px;"/></a><span class="version">1.3.0</span>
+ </div>
+ <ul class="nav">
+          <!--TODO(andyk): Add class="active" attribute to li somehow.-->
+ <li><a href="index.html">Overview</a></li>
+
+ <li class="dropdown">
+ <a href="#" class="dropdown-toggle" data-toggle="dropdown">Programming Guides<b class="caret"></b></a>
+ <ul class="dropdown-menu">
+ <li><a href="quick-start.html">Quick Start</a></li>
+ <li><a href="programming-guide.html">Spark Programming Guide</a></li>
+ <li class="divider"></li>
+ <li><a href="streaming-programming-guide.html">Spark Streaming</a></li>
+ <li><a href="sql-programming-guide.html">DataFrames and SQL</a></li>
+ <li><a href="mllib-guide.html">MLlib (Machine Learning)</a></li>
+ <li><a href="graphx-programming-guide.html">GraphX (Graph Processing)</a></li>
+ <li><a href="bagel-programming-guide.html">Bagel (Pregel on Spark)</a></li>
+ </ul>
+ </li>
+
+ <li class="dropdown">
+ <a href="#" class="dropdown-toggle" data-toggle="dropdown">API Docs<b class="caret"></b></a>
+ <ul class="dropdown-menu">
+ <li><a href="api/scala/index.html#org.apache.spark.package">Scala</a></li>
+ <li><a href="api/java/index.html">Java</a></li>
+ <li><a href="api/python/index.html">Python</a></li>
+ </ul>
+ </li>
+
+ <li class="dropdown">
+ <a href="#" class="dropdown-toggle" data-toggle="dropdown">Deploying<b class="caret"></b></a>
+ <ul class="dropdown-menu">
+ <li><a href="cluster-overview.html">Overview</a></li>
+ <li><a href="submitting-applications.html">Submitting Applications</a></li>
+ <li class="divider"></li>
+ <li><a href="spark-standalone.html">Spark Standalone</a></li>
+ <li><a href="running-on-mesos.html">Mesos</a></li>
+ <li><a href="running-on-yarn.html">YARN</a></li>
+ <li class="divider"></li>
+ <li><a href="ec2-scripts.html">Amazon EC2</a></li>
+ </ul>
+ </li>
+
+ <li class="dropdown">
+ <a href="api.html" class="dropdown-toggle" data-toggle="dropdown">More<b class="caret"></b></a>
+ <ul class="dropdown-menu">
+ <li><a href="configuration.html">Configuration</a></li>
+ <li><a href="monitoring.html">Monitoring</a></li>
+ <li><a href="tuning.html">Tuning Guide</a></li>
+ <li><a href="job-scheduling.html">Job Scheduling</a></li>
+ <li><a href="security.html">Security</a></li>
+ <li><a href="hardware-provisioning.html">Hardware Provisioning</a></li>
+ <li><a href="hadoop-third-party-distributions.html">3<sup>rd</sup>-Party Hadoop Distros</a></li>
+ <li class="divider"></li>
+ <li><a href="building-spark.html">Building Spark</a></li>
+ <li><a href="https://cwiki.apache.org/confluence/display/SPARK/Contributing+to+Spark">Contributing to Spark</a></li>
+ <li><a href="https://cwiki.apache.org/confluence/display/SPARK/Supplemental+Spark+Projects">Supplemental Projects</a></li>
+ </ul>
+ </li>
+ </ul>
+ <!--<p class="navbar-text pull-right"><span class="version-text">v1.3.0</span></p>-->
+ </div>
+ </div>
+ </div>
+
+ <div class="container" id="content">
+
+ <h1 class="title">Quick Start</h1>
+
+
+ <ul id="markdown-toc">
+ <li><a href="#interactive-analysis-with-the-spark-shell">Interactive Analysis with the Spark Shell</a> <ul>
+ <li><a href="#basics">Basics</a></li>
+ <li><a href="#more-on-rdd-operations">More on RDD Operations</a></li>
+ <li><a href="#caching">Caching</a></li>
+ </ul>
+ </li>
+ <li><a href="#self-contained-applications">Self-Contained Applications</a></li>
+ <li><a href="#where-to-go-from-here">Where to Go from Here</a></li>
+</ul>
+
+<p>This tutorial provides a quick introduction to using Spark. We will first introduce the API through Spark&#8217;s
+interactive shell (in Python or Scala),
+then show how to write applications in Java, Scala, and Python.
+See the <a href="programming-guide.html">programming guide</a> for a more complete reference.</p>
+
+<p>To follow along with this guide, first download a packaged release of Spark from the
+<a href="http://spark.apache.org/downloads.html">Spark website</a>. Since we won&#8217;t be using HDFS,
+you can download a package for any version of Hadoop.</p>
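+
+<p>For example, if you chose the pre-built package for Hadoop 2.4 (the exact file name depends on the package you selected), you would unpack it and run everything below from the unpacked directory:</p>
+
+<pre><code>tar -xzf spark-1.3.0-bin-hadoop2.4.tgz
+cd spark-1.3.0-bin-hadoop2.4
+</code></pre>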
+
+<h1 id="interactive-analysis-with-the-spark-shell">Interactive Analysis with the Spark Shell</h1>
+
+<h2 id="basics">Basics</h2>
+
+<p>Spark&#8217;s shell provides a simple way to learn the API, as well as a powerful tool to analyze data interactively.
+It is available in either Scala (which runs on the Java VM and is thus a good way to use existing Java libraries)
+or Python. Start it by running the following in the Spark directory:</p>
+
+<div class="codetabs">
+<div data-lang="scala">
+
+ <pre><code>./bin/spark-shell
+</code></pre>
+
+ <p>Spark&#8217;s primary abstraction is a distributed collection of items called a Resilient Distributed Dataset (RDD). RDDs can be created from Hadoop InputFormats (such as HDFS files) or by transforming other RDDs. Let&#8217;s make a new RDD from the text of the README file in the Spark source directory:</p>
+
+ <div class="highlight"><pre><code class="language-scala" data-lang="scala"><span class="n">scala</span><span class="o">&gt;</span> <span class="k">val</span> <span class="n">textFile</span> <span class="k">=</span> <span class="n">sc</span><span class="o">.</span><span class="n">textFile</span><span class="o">(</span><span class="s">&quot;README.md&quot;</span><span class="o">)</span>
+<span class="n">textFile</span><span class="k">:</span> <span class="kt">spark.RDD</span><span class="o">[</span><span class="kt">String</span><span class="o">]</span> <span class="k">=</span> <span class="n">spark</span><span class="o">.</span><span class="nc">MappedRDD</span><span class="k">@</span><span class="mi">2</span><span class="n">ee9b6e3</span></code></pre></div>
+
+ <p>RDDs have <em><a href="programming-guide.html#actions">actions</a></em>, which return values, and <em><a href="programming-guide.html#transformations">transformations</a></em>, which return pointers to new RDDs. Let&#8217;s start with a few actions:</p>
+
+ <div class="highlight"><pre><code class="language-scala" data-lang="scala"><span class="n">scala</span><span class="o">&gt;</span> <span class="n">textFile</span><span class="o">.</span><span class="n">count</span><span class="o">()</span> <span class="c1">// Number of items in this RDD</span>
+<span class="n">res0</span><span class="k">:</span> <span class="kt">Long</span> <span class="o">=</span> <span class="mi">126</span>
+
+<span class="n">scala</span><span class="o">&gt;</span> <span class="n">textFile</span><span class="o">.</span><span class="n">first</span><span class="o">()</span> <span class="c1">// First item in this RDD</span>
+<span class="n">res1</span><span class="k">:</span> <span class="kt">String</span> <span class="o">=</span> <span class="k">#</span> <span class="nc">Apache</span> <span class="nc">Spark</span></code></pre></div>
+
+ <p>Now let&#8217;s use a transformation. We will use the <a href="programming-guide.html#transformations"><code>filter</code></a> transformation to return a new RDD with a subset of the items in the file.</p>
+
+ <div class="highlight"><pre><code class="language-scala" data-lang="scala"><span class="n">scala</span><span class="o">&gt;</span> <span class="k">val</span> <span class="n">linesWithSpark</span> <span class="k">=</span> <span class="n">textFile</span><span class="o">.</span><span class="n">filter</span><span class="o">(</span><span class="n">line</span> <span class="k">=&gt;</span> <span class="n">line</span><span class="o">.</span><span class="n">contains</span><span class="o">(</span><span class="s">&quot;Spark&quot;</span><span class="o">))</span>
+<span class="n">linesWithSpark</span><span class="k">:</span> <span class="kt">spark.RDD</span><span class="o">[</span><span class="kt">String</span><span class="o">]</span> <span class="k">=</span> <span class="n">spark</span><span class="o">.</span><span class="nc">FilteredRDD</span><span class="k">@</span><span class="mi">7</span><span class="n">dd4af09</span></code></pre></div>
+
+ <p>We can chain together transformations and actions:</p>
+
+ <div class="highlight"><pre><code class="language-scala" data-lang="scala"><span class="n">scala</span><span class="o">&gt;</span> <span class="n">textFile</span><span class="o">.</span><span class="n">filter</span><span class="o">(</span><span class="n">line</span> <span class="k">=&gt;</span> <span class="n">line</span><span class="o">.</span><span class="n">contains</span><span class="o">(</span><span class="s">&quot;Spark&quot;</span><span class="o">)).</span><span class="n">count</span><span class="o">()</span> <span class="c1">// How many lines contain &quot;Spark&quot;?</span>
+<span class="n">res3</span><span class="k">:</span> <span class="kt">Long</span> <span class="o">=</span> <span class="mi">15</span></code></pre></div>
+
+ </div>
+<div data-lang="python">
+
+ <pre><code>./bin/pyspark
+</code></pre>
+
+ <p>Spark&#8217;s primary abstraction is a distributed collection of items called a Resilient Distributed Dataset (RDD). RDDs can be created from Hadoop InputFormats (such as HDFS files) or by transforming other RDDs. Let&#8217;s make a new RDD from the text of the README file in the Spark source directory:</p>
+
+ <div class="highlight"><pre><code class="language-python" data-lang="python"><span class="o">&gt;&gt;&gt;</span> <span class="n">textFile</span> <span class="o">=</span> <span class="n">sc</span><span class="o">.</span><span class="n">textFile</span><span class="p">(</span><span class="s">&quot;README.md&quot;</span><span class="p">)</span></code></pre></div>
+
+ <p>RDDs have <em><a href="programming-guide.html#actions">actions</a></em>, which return values, and <em><a href="programming-guide.html#transformations">transformations</a></em>, which return pointers to new RDDs. Let&#8217;s start with a few actions:</p>
+
+ <div class="highlight"><pre><code class="language-python" data-lang="python"><span class="o">&gt;&gt;&gt;</span> <span class="n">textFile</span><span class="o">.</span><span class="n">count</span><span class="p">()</span> <span class="c"># Number of items in this RDD</span>
+<span class="mi">126</span>
+
+<span class="o">&gt;&gt;&gt;</span> <span class="n">textFile</span><span class="o">.</span><span class="n">first</span><span class="p">()</span> <span class="c"># First item in this RDD</span>
+<span class="s">u&#39;# Apache Spark&#39;</span></code></pre></div>
+
+ <p>Now let&#8217;s use a transformation. We will use the <a href="programming-guide.html#transformations"><code>filter</code></a> transformation to return a new RDD with a subset of the items in the file.</p>
+
+ <div class="highlight"><pre><code class="language-python" data-lang="python"><span class="o">&gt;&gt;&gt;</span> <span class="n">linesWithSpark</span> <span class="o">=</span> <span class="n">textFile</span><span class="o">.</span><span class="n">filter</span><span class="p">(</span><span class="k">lambda</span> <span class="n">line</span><span class="p">:</span> <span class="s">&quot;Spark&quot;</span> <span class="ow">in</span> <span class="n">line</span><span class="p">)</span></code></pre></div>
+
+ <p>We can chain together transformations and actions:</p>
+
+ <div class="highlight"><pre><code class="language-python" data-lang="python"><span class="o">&gt;&gt;&gt;</span> <span class="n">textFile</span><span class="o">.</span><span class="n">filter</span><span class="p">(</span><span class="k">lambda</span> <span class="n">line</span><span class="p">:</span> <span class="s">&quot;Spark&quot;</span> <span class="ow">in</span> <span class="n">line</span><span class="p">)</span><span class="o">.</span><span class="n">count</span><span class="p">()</span> <span class="c"># How many lines contain &quot;Spark&quot;?</span>
+<span class="mi">15</span></code></pre></div>
+
+ </div>
+</div>
+
+<h2 id="more-on-rdd-operations">More on RDD Operations</h2>
+<p>RDD actions and transformations can be used for more complex computations. Let&#8217;s say we want to find the line with the most words:</p>
+
+<div class="codetabs">
+<div data-lang="scala">
+
+ <div class="highlight"><pre><code class="language-scala" data-lang="scala"><span class="n">scala</span><span class="o">&gt;</span> <span class="n">textFile</span><span class="o">.</span><span class="n">map</span><span class="o">(</span><span class="n">line</span> <span class="k">=&gt;</span> <span class="n">line</span><span class="o">.</span><span class="n">split</span><span class="o">(</span><span class="s">&quot; &quot;</span><span class="o">).</span><span class="n">size</span><span class="o">).</span><span class="n">reduce</span><span class="o">((</span><span class="n">a</span><span class="o">,</span> <span class="n">b</span><span class="o">)</span> <span class="k">=&gt;</span> <span class="k">if</span> <span class="o">(</span><span class="n">a</span> <span class="o">&gt;</span> <span class="n">b</span><span class="o">)</span> <span class="n">a</span> <span class="k">else</span> <span class="n">b</span><span class="o">)</span>
+<span class="n">res4</span><span class="k">:</span> <span class="kt">Long</span> <span class="o">=</span> <span class="mi">15</span></code></pre></div>
+
+  <p>This first maps a line to an integer value, creating a new RDD. <code>reduce</code> is called on that RDD to find the largest line count. The arguments to <code>map</code> and <code>reduce</code> are Scala function literals (closures), and can use any language feature or Scala/Java library. For example, we can easily call functions declared elsewhere. We&#8217;ll use the <code>Math.max()</code> function to make this code easier to understand:</p>
+
+ <div class="highlight"><pre><code class="language-scala" data-lang="scala"><span class="n">scala</span><span class="o">&gt;</span> <span class="k">import</span> <span class="nn">java.lang.Math</span>
+<span class="k">import</span> <span class="nn">java.lang.Math</span>
+
+<span class="n">scala</span><span class="o">&gt;</span> <span class="n">textFile</span><span class="o">.</span><span class="n">map</span><span class="o">(</span><span class="n">line</span> <span class="k">=&gt;</span> <span class="n">line</span><span class="o">.</span><span class="n">split</span><span class="o">(</span><span class="s">&quot; &quot;</span><span class="o">).</span><span class="n">size</span><span class="o">).</span><span class="n">reduce</span><span class="o">((</span><span class="n">a</span><span class="o">,</span> <span class="n">b</span><span class="o">)</span> <span class="k">=&gt;</span> <span class="nc">Math</span><span class="o">.</span><span class="n">max</span><span class="o">(</span><span class="n">a</span><span class="o">,</span> <span class="n">b</span><span class="o">))</span>
+<span class="n">res5</span><span class="k">:</span> <span class="kt">Int</span> <span class="o">=</span> <span class="mi">15</span></code></pre></div>
+
+ <p>One common data flow pattern is MapReduce, as popularized by Hadoop. Spark can implement MapReduce flows easily:</p>
+
+ <div class="highlight"><pre><code class="language-scala" data-lang="scala"><span class="n">scala</span><span class="o">&gt;</span> <span class="k">val</span> <span class="n">wordCounts</span> <span class="k">=</span> <span class="n">textFile</span><span class="o">.</span><span class="n">flatMap</span><span class="o">(</span><span class="n">line</span> <span class="k">=&gt;</span> <span class="n">line</span><span class="o">.</span><span class="n">split</span><span class="o">(</span><span class="s">&quot; &quot;</span><span class="o">)).</span><span class="n">map</span><span class="o">(</span><span class="n">word</span> <span class="k">=&gt;</span> <span class="o">(</span><span class="n">word</span><span class="o">,</span> <span class="mi">1</span><span class="o">)).</span><span class="n">reduceByKey</span><span class="o">((</span><span class="n">a</span><span class="o">,</span> <span class="n">b</span><span class="o">)</span> <span class="k">=&gt;</span> <span class="n">a</span> <span class="o">+</span> <span class="n">b</span><span class="o">)</span>
+<span class="n">wordCounts</span><span class="k">:</span> <span class="kt">spark.RDD</span><span class="o">[(</span><span class="kt">String</span>, <span class="kt">Int</span><span class="o">)]</span> <span class="k">=</span> <span class="n">spark</span><span class="o">.</span><span class="nc">ShuffledAggregatedRDD</span><span class="k">@</span><span class="mi">71</span><span class="n">f027b8</span></code></pre></div>
+
+ <p>Here, we combined the <a href="programming-guide.html#transformations"><code>flatMap</code></a>, <a href="programming-guide.html#transformations"><code>map</code></a> and <a href="programming-guide.html#transformations"><code>reduceByKey</code></a> transformations to compute the per-word counts in the file as an RDD of (String, Int) pairs. To collect the word counts in our shell, we can use the <a href="programming-guide.html#actions"><code>collect</code></a> action:</p>
+
+ <div class="highlight"><pre><code class="language-scala" data-lang="scala"><span class="n">scala</span><span class="o">&gt;</span> <span class="n">wordCounts</span><span class="o">.</span><span class="n">collect</span><span class="o">()</span>
+<span class="n">res6</span><span class="k">:</span> <span class="kt">Array</span><span class="o">[(</span><span class="kt">String</span>, <span class="kt">Int</span><span class="o">)]</span> <span class="k">=</span> <span class="nc">Array</span><span class="o">((</span><span class="n">means</span><span class="o">,</span><span class="mi">1</span><span class="o">),</span> <span class="o">(</span><span class="n">under</span><span class="o">,</span><span class="mi">2</span><span class="o">),</span> <span class="o">(</span><span class="k">this</span><span class="o">,</span><span class="mi">3</span><span class="o">),</span> <span class="o">(</span><span class="nc">Because</span><span class="o">,</span><span class="mi">1</span><span class="o">),</span> <span class="o">(</span><span class="nc">Python</span><span class="o">,</span><span class="mi">2</span><span class="o">),</span> <span class="o">(</span><span class="n">agree</span><span class="o">,</span><span class="mi">1</span><span class="o">),</span> <span class="o">(</span><span class="n">cluster</span><span class="o">.,</span><span class="mi">1</span><span class="o">),</span> <span class="o">...)</span></code></pre></div>
+
+ </div>
+<div data-lang="python">
+
+ <div class="highlight"><pre><code class="language-python" data-lang="python"><span class="o">&gt;&gt;&gt;</span> <span class="n">textFile</span><span class="o">.</span><span class="n">map</span><span class="p">(</span><span class="k">lambda</span> <span class="n">line</span><span class="p">:</span> <span class="nb">len</span><span class="p">(</span><span class="n">line</span><span class="o">.</span><span class="n">split</span><span class="p">()))</span><span class="o">.</span><span class="n">reduce</span><span class="p">(</span><span class="k">lambda</span> <span class="n">a</span><span class="p">,</span> <span class="n">b</span><span class="p">:</span> <span class="n">a</span> <span class="k">if</span> <span class="p">(</span><span class="n">a</span> <span class="o">&gt;</span> <span class="n">b</span><span class="p">)</span> <span class="k">else</span> <span class="n">b</span><span class="p">)</span>
+<span class="mi">15</span></code></pre></div>
+
+ <p>This first maps a line to an integer value, creating a new RDD. <code>reduce</code> is called on that RDD to find the largest line count. The arguments to <code>map</code> and <code>reduce</code> are Python <a href="https://docs.python.org/2/reference/expressions.html#lambda">anonymous functions (lambdas)</a>,
+but we can also pass any top-level Python function we want.
+For example, we&#8217;ll define a <code>max</code> function to make this code easier to understand:</p>
+
+ <div class="highlight"><pre><code class="language-python" data-lang="python"><span class="o">&gt;&gt;&gt;</span> <span class="k">def</span> <span class="nf">max</span><span class="p">(</span><span class="n">a</span><span class="p">,</span> <span class="n">b</span><span class="p">):</span>
+<span class="o">...</span> <span class="k">if</span> <span class="n">a</span> <span class="o">&gt;</span> <span class="n">b</span><span class="p">:</span>
+<span class="o">...</span> <span class="k">return</span> <span class="n">a</span>
+<span class="o">...</span> <span class="k">else</span><span class="p">:</span>
+<span class="o">...</span> <span class="k">return</span> <span class="n">b</span>
+<span class="o">...</span>
+
+<span class="o">&gt;&gt;&gt;</span> <span class="n">textFile</span><span class="o">.</span><span class="n">map</span><span class="p">(</span><span class="k">lambda</span> <span class="n">line</span><span class="p">:</span> <span class="nb">len</span><span class="p">(</span><span class="n">line</span><span class="o">.</span><span class="n">split</span><span class="p">()))</span><span class="o">.</span><span class="n">reduce</span><span class="p">(</span><span class="nb">max</span><span class="p">)</span>
+<span class="mi">15</span></code></pre></div>
+
+ <p>One common data flow pattern is MapReduce, as popularized by Hadoop. Spark can implement MapReduce flows easily:</p>
+
+ <div class="highlight"><pre><code class="language-python" data-lang="python"><span class="o">&gt;&gt;&gt;</span> <span class="n">wordCounts</span> <span class="o">=</span> <span class="n">textFile</span><span class="o">.</span><span class="n">flatMap</span><span class="p">(</span><span class="k">lambda</span> <span class="n">line</span><span class="p">:</span> <span class="n">line</span><span class="o">.</span><span class="n">split</span><span class="p">())</span><span class="o">.</span><span class="n">map</span><span class="p">(</span><span class="k">lambda</span> <span class="n">word</span><span class="p">:</span> <span class="p">(</span><span class="n">word</span><span class="p">,</span> <span class="mi">1</span><span class="p">))</span><span class="o">.</span><span class="n">reduceByKey</span><span class="p">(</span><span class="k">lambda</span> <span class="n">a</span><span class="p">,</span> <span class="n">b</span><span class="p">:</span> <span class="n">a</span><span class="o">+</span><span class="n">b</span><span class="p">)</span></code></pre></div>
+
+ <p>Here, we combined the <a href="programming-guide.html#transformations"><code>flatMap</code></a>, <a href="programming-guide.html#transformations"><code>map</code></a> and <a href="programming-guide.html#transformations"><code>reduceByKey</code></a> transformations to compute the per-word counts in the file as an RDD of (string, int) pairs. To collect the word counts in our shell, we can use the <a href="programming-guide.html#actions"><code>collect</code></a> action:</p>
+
+ <div class="highlight"><pre><code class="language-python" data-lang="python"><span class="o">&gt;&gt;&gt;</span> <span class="n">wordCounts</span><span class="o">.</span><span class="n">collect</span><span class="p">()</span>
+<span class="p">[(</span><span class="s">u&#39;and&#39;</span><span class="p">,</span> <span class="mi">9</span><span class="p">),</span> <span class="p">(</span><span class="s">u&#39;A&#39;</span><span class="p">,</span> <span class="mi">1</span><span class="p">),</span> <span class="p">(</span><span class="s">u&#39;webpage&#39;</span><span class="p">,</span> <span class="mi">1</span><span class="p">),</span> <span class="p">(</span><span class="s">u&#39;README&#39;</span><span class="p">,</span> <span class="mi">1</span><span class="p">),</span> <span class="p">(</span><span class="s">u&#39;Note&#39;</span><span class="p">,</span> <span class="mi">1</span><span class="p">),</span> <span class="p">(</span><span class="s">u&#39;&quot;local&quot;&#39;</span><span class="p">,</span> <span class="mi">1</span><span class="p">),</span> <span class="p">(</span><span class="s">u&#39;variable&#39;</span><span class="p">,</span> <span class="mi">1</span><span class="p">),</span> <span class="o">...</span><span class="p">]</span></code></pre></div>
+
+ </div>
+</div>
+
+<h2 id="caching">Caching</h2>
+<p>Spark also supports pulling data sets into a cluster-wide in-memory cache. This is very useful when data is accessed repeatedly, such as when querying a small &#8220;hot&#8221; dataset or when running an iterative algorithm like PageRank. As a simple example, let&#8217;s mark our <code>linesWithSpark</code> dataset to be cached:</p>
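+
+<p>(As an aside: <code>cache()</code> is shorthand for persisting at the default memory-only storage level. A minimal Scala sketch of the equivalent explicit call, using the <code>linesWithSpark</code> RDD from above:)</p>
+
+<pre><code>import org.apache.spark.storage.StorageLevel
+linesWithSpark.persist(StorageLevel.MEMORY_ONLY)  // same effect as linesWithSpark.cache()
+</code></pre>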
+
+<div class="codetabs">
+<div data-lang="scala">
+
+ <div class="highlight"><pre><code class="language-scala" data-lang="scala"><span class="n">scala</span><span class="o">&gt;</span> <span class="n">linesWithSpark</span><span class="o">.</span><span class="n">cache</span><span class="o">()</span>
+<span class="n">res7</span><span class="k">:</span> <span class="kt">spark.RDD</span><span class="o">[</span><span class="kt">String</span><span class="o">]</span> <span class="k">=</span> <span class="n">spark</span><span class="o">.</span><span class="nc">FilteredRDD</span><span class="k">@</span><span class="mi">17</span><span class="n">e51082</span>
+
+<span class="n">scala</span><span class="o">&gt;</span> <span class="n">linesWithSpark</span><span class="o">.</span><span class="n">count</span><span class="o">()</span>
+<span class="n">res8</span><span class="k">:</span> <span class="kt">Long</span> <span class="o">=</span> <span class="mi">15</span>
+
+<span class="n">scala</span><span class="o">&gt;</span> <span class="n">linesWithSpark</span><span class="o">.</span><span class="n">count</span><span class="o">()</span>
+<span class="n">res9</span><span class="k">:</span> <span class="kt">Long</span> <span class="o">=</span> <span class="mi">15</span></code></pre></div>
+
+ <p>It may seem silly to use Spark to explore and cache a 100-line text file. The interesting part is
+that these same functions can be used on very large data sets, even when they are striped across
+tens or hundreds of nodes. You can also do this interactively by connecting <code>bin/spark-shell</code> to
+a cluster, as described in the <a href="programming-guide.html#initializing-spark">programming guide</a>.</p>
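+
+  <p>A sketch of what that looks like against a hypothetical standalone cluster (the master URL below is a placeholder; see <a href="submitting-applications.html">submitting applications</a> for the supported URL formats):</p>
+
+  <pre><code>./bin/spark-shell --master spark://&lt;master-host&gt;:7077
+</code></pre>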
+
+ </div>
+<div data-lang="python">
+
+ <div class="highlight"><pre><code class="language-python" data-lang="python"><span class="o">&gt;&gt;&gt;</span> <span class="n">linesWithSpark</span><span class="o">.</span><span class="n">cache</span><span class="p">()</span>
+
+<span class="o">&gt;&gt;&gt;</span> <span class="n">linesWithSpark</span><span class="o">.</span><span class="n">count</span><span class="p">()</span>
+<span class="mi">15</span>
+
+<span class="o">&gt;&gt;&gt;</span> <span class="n">linesWithSpark</span><span class="o">.</span><span class="n">count</span><span class="p">()</span>
+<span class="mi">15</span></code></pre></div>
+
+ <p>It may seem silly to use Spark to explore and cache a 100-line text file. The interesting part is
+that these same functions can be used on very large data sets, even when they are striped across
+tens or hundreds of nodes. You can also do this interactively by connecting <code>bin/pyspark</code> to
+a cluster, as described in the <a href="programming-guide.html#initializing-spark">programming guide</a>.</p>
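+
+  <p>Likewise, a sketch of launching the Python shell against a hypothetical standalone cluster (the master URL is a placeholder):</p>
+
+  <pre><code>./bin/pyspark --master spark://&lt;master-host&gt;:7077
+</code></pre>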
+
+ </div>
+</div>
+
+<h1 id="self-contained-applications">Self-Contained Applications</h1>
+<p>Now say we wanted to write a self-contained application using the Spark API. We will walk through a
+simple application in Scala (with sbt), Java (with Maven), and Python.</p>
+
+<div class="codetabs">
+<div data-lang="scala">
+
+ <p>We&#8217;ll create a very simple Spark application in Scala. So simple, in fact, that it&#8217;s
+named <code>SimpleApp.scala</code>:</p>
+
+ <div class="highlight"><pre><code class="language-scala" data-lang="scala"><span class="cm">/* SimpleApp.scala */</span>
+<span class="k">import</span> <span class="nn">org.apache.spark.SparkContext</span>
+<span class="k">import</span> <span class="nn">org.apache.spark.SparkContext._</span>
+<span class="k">import</span> <span class="nn">org.apache.spark.SparkConf</span>
+
+<span class="k">object</span> <span class="nc">SimpleApp</span> <span class="o">{</span>
+ <span class="k">def</span> <span class="n">main</span><span class="o">(</span><span class="n">args</span><span class="k">:</span> <span class="kt">Array</span><span class="o">[</span><span class="kt">String</span><span class="o">])</span> <span class="o">{</span>
+ <span class="k">val</span> <span class="n">logFile</span> <span class="k">=</span> <span class="s">&quot;YOUR_SPARK_HOME/README.md&quot;</span> <span class="c1">// Should be some file on your system</span>
+ <span class="k">val</span> <span class="n">conf</span> <span class="k">=</span> <span class="k">new</span> <span class="nc">SparkConf</span><span class="o">().</span><span class="n">setAppName</span><span class="o">(</span><span class="s">&quot;Simple Application&quot;</span><span class="o">)</span>
+ <span class="k">val</span> <span class="n">sc</span> <span class="k">=</span> <span class="k">new</span> <span class="nc">SparkContext</span><span class="o">(</span><span class="n">conf</span><span class="o">)</span>
+ <span class="k">val</span> <span class="n">logData</span> <span class="k">=</span> <span class="n">sc</span><span class="o">.</span><span class="n">textFile</span><span class="o">(</span><span class="n">logFile</span><span class="o">,</span> <span class="mi">2</span><span class="o">).</span><span class="n">cache</span><span class="o">()</span>
+ <span class="k">val</span> <span class="n">numAs</span> <span class="k">=</span> <span class="n">logData</span><span class="o">.</span><span class="n">filter</span><span class="o">(</span><span class="n">line</span> <span class="k">=&gt;</span> <span class="n">line</span><span class="o">.</span><span class="n">contains</span><span class="o">(</span><span class="s">&quot;a&quot;</span><span class="o">)).</span><span class="n">count</span><span class="o">()</span>
+ <span class="k">val</span> <span class="n">numBs</span> <span class="k">=</span> <span class="n">logData</span><span class="o">.</span><span class="n">filter</span><span class="o">(</span><span class="n">line</span> <span class="k">=&gt;</span> <span class="n">line</span><span class="o">.</span><span class="n">contains</span><span class="o">(</span><span class="s">&quot;b&quot;</span><span class="o">)).</span><span class="n">count</span><span class="o">()</span>
+ <span class="n">println</span><span class="o">(</span><span class="s">&quot;Lines with a: %s, Lines with b: %s&quot;</span><span class="o">.</span><span class="n">format</span><span class="o">(</span><span class="n">numAs</span><span class="o">,</span> <span class="n">numBs</span><span class="o">))</span>
+ <span class="o">}</span>
+<span class="o">}</span></code></pre></div>
+
+ <p>Note that applications should define a <code>main()</code> method instead of extending <code>scala.App</code>.
+Subclasses of <code>scala.App</code> may not work correctly.</p>
+
+ <p>This program just counts the number of lines containing &#8216;a&#8217; and the number containing &#8216;b&#8217; in the
+Spark README. Note that you&#8217;ll need to replace YOUR_SPARK_HOME with the location where Spark is
+installed. Unlike the earlier examples with the Spark shell, which initializes its own SparkContext,
+we initialize a SparkContext as part of the program.</p>
+
+ <p>We pass the SparkContext constructor a
+<a href="api/scala/index.html#org.apache.spark.SparkConf">SparkConf</a>
+object which contains information about our
+application. </p>
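+
+  <p>A <code>SparkConf</code> can also carry other settings, such as the master URL. A minimal sketch (setting the master in code is optional, and is usually left to <code>spark-submit</code> instead):</p>
+
+  <pre><code>val conf = new SparkConf()
+  .setAppName("Simple Application")
+  .setMaster("local[4]") // only for local testing; prefer passing --master to spark-submit
+</code></pre>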
+
+  <p>Our application depends on the Spark API, so we&#8217;ll also include an sbt configuration file,
+<code>simple.sbt</code>, which declares Spark as a dependency:</p>
+
+ <div class="highlight"><pre><code class="language-scala" data-lang="scala"><span class="n">name</span> <span class="o">:=</span> <span class="s">&quot;Simple Project&quot;</span>
+
+<span class="n">version</span> <span class="o">:=</span> <span class="s">&quot;1.0&quot;</span>
+
+<span class="n">scalaVersion</span> <span class="o">:=</span> <span class="s">&quot;2.10.4&quot;</span>
+
+<span class="n">libraryDependencies</span> <span class="o">+=</span> <span class="s">&quot;org.apache.spark&quot;</span> <span class="o">%%</span> <span class="s">&quot;spark-core&quot;</span> <span class="o">%</span> <span class="s">&quot;1.3.0&quot;</span></code></pre></div>
+
+  <p>For sbt to work correctly, we&#8217;ll need to lay out <code>SimpleApp.scala</code> and <code>simple.sbt</code>
+according to the typical directory structure. Once that is in place, we can create a JAR package
+containing the application&#8217;s code, then use the <code>spark-submit</code> script to run our program.</p>
+
+ <div class="highlight"><pre><code class="language-bash" data-lang="bash"><span class="c"># Your directory layout should look like this</span>
+<span class="nv">$ </span>find .
+.
+./simple.sbt
+./src
+./src/main
+./src/main/scala
+./src/main/scala/SimpleApp.scala
+
+<span class="c"># Package a jar containing your application</span>
+<span class="nv">$ </span>sbt package
+...
+<span class="o">[</span>info<span class="o">]</span> Packaging <span class="o">{</span>..<span class="o">}</span>/<span class="o">{</span>..<span class="o">}</span>/target/scala-2.10/simple-project_2.10-1.0.jar
+
+<span class="c"># Use spark-submit to run your application</span>
+<span class="nv">$ </span>YOUR_SPARK_HOME/bin/spark-submit <span class="se">\</span>
+ --class <span class="s2">&quot;SimpleApp&quot;</span> <span class="se">\</span>
+ --master <span class="nb">local</span><span class="o">[</span>4<span class="o">]</span> <span class="se">\</span>
+ target/scala-2.10/simple-project_2.10-1.0.jar
+...
+Lines with a: 46, Lines with b: 23</code></pre></div>
+
+ </div>
+<div data-lang="java">
+ <p>This example will use Maven to compile an application jar, but any similar build system will work.</p>
+
+ <p>We&#8217;ll create a very simple Spark application, <code>SimpleApp.java</code>:</p>
+
+ <div class="highlight"><pre><code class="language-java" data-lang="java"><span class="cm">/* SimpleApp.java */</span>
+<span class="kn">import</span> <span class="nn">org.apache.spark.api.java.*</span><span class="o">;</span>
+<span class="kn">import</span> <span class="nn">org.apache.spark.SparkConf</span><span class="o">;</span>
+<span class="kn">import</span> <span class="nn">org.apache.spark.api.java.function.Function</span><span class="o">;</span>
+
+<span class="kd">public</span> <span class="kd">class</span> <span class="nc">SimpleApp</span> <span class="o">{</span>
+ <span class="kd">public</span> <span class="kd">static</span> <span class="kt">void</span> <span class="nf">main</span><span class="o">(</span><span class="n">String</span><span class="o">[]</span> <span class="n">args</span><span class="o">)</span> <span class="o">{</span>
+ <span class="n">String</span> <span class="n">logFile</span> <span class="o">=</span> <span class="s">&quot;YOUR_SPARK_HOME/README.md&quot;</span><span class="o">;</span> <span class="c1">// Should be some file on your system</span>
+ <span class="n">SparkConf</span> <span class="n">conf</span> <span class="o">=</span> <span class="k">new</span> <span class="nf">SparkConf</span><span class="o">().</span><span class="na">setAppName</span><span class="o">(</span><span class="s">&quot;Simple Application&quot;</span><span class="o">);</span>
+ <span class="n">JavaSparkContext</span> <span class="n">sc</span> <span class="o">=</span> <span class="k">new</span> <span class="nf">JavaSparkContext</span><span class="o">(</span><span class="n">conf</span><span class="o">);</span>
+ <span class="n">JavaRDD</span><span class="o">&lt;</span><span class="n">String</span><span class="o">&gt;</span> <span class="n">logData</span> <span class="o">=</span> <span class="n">sc</span><span class="o">.</span><span class="na">textFile</span><span class="o">(</span><span class="n">logFile</span><span class="o">).</span><span class="na">cache</span><span class="o">();</span>
+
+ <span class="kt">long</span> <span class="n">numAs</span> <span class="o">=</span> <span class="n">logData</span><span class="o">.</span><span class="na">filter</span><span class="o">(</span><span class="k">new</span> <span class="n">Function</span><span class="o">&lt;</span><span class="n">String</span><span class="o">,</span> <span class="n">Boolean</span><span class="o">&gt;()</span> <span class="o">{</span>
+ <span class="kd">public</span> <span class="n">Boolean</span> <span class="nf">call</span><span class="o">(</span><span class="n">String</span> <span class="n">s</span><span class="o">)</span> <span class="o">{</span> <span class="k">return</span> <span class="n">s</span><span class="o">.</span><span class="na">contains</span><span class="o">(</span><span class="s">&quot;a&quot;</span><span class="o">);</span> <span class="o">}</span>
+ <span class="o">}).</span><span class="na">count</span><span class="o">();</span>
+
+ <span class="kt">long</span> <span class="n">numBs</span> <span class="o">=</span> <span class="n">logData</span><span class="o">.</span><span class="na">filter</span><span class="o">(</span><span class="k">new</span> <span class="n">Function</span><span class="o">&lt;</span><span class="n">String</span><span class="o">,</span> <span class="n">Boolean</span><span class="o">&gt;()</span> <span class="o">{</span>
+ <span class="kd">public</span> <span class="n">Boolean</span> <span class="nf">call</span><span class="o">(</span><span class="n">String</span> <span class="n">s</span><span class="o">)</span> <span class="o">{</span> <span class="k">return</span> <span class="n">s</span><span class="o">.</span><span class="na">contains</span><span class="o">(</span><span class="s">&quot;b&quot;</span><span class="o">);</span> <span class="o">}</span>
+ <span class="o">}).</span><span class="na">count</span><span class="o">();</span>
+
+ <span class="n">System</span><span class="o">.</span><span class="na">out</span><span class="o">.</span><span class="na">println</span><span class="o">(</span><span class="s">&quot;Lines with a: &quot;</span> <span class="o">+</span> <span class="n">numAs</span> <span class="o">+</span> <span class="s">&quot;, lines with b: &quot;</span> <span class="o">+</span> <span class="n">numBs</span><span class="o">);</span>
+ <span class="o">}</span>
+<span class="o">}</span></code></pre></div>
+
+ <p>This program just counts the number of lines containing &#8216;a&#8217; and the number containing &#8216;b&#8217; in a text
+file. Note that you&#8217;ll need to replace YOUR_SPARK_HOME with the location where Spark is installed.
+As with the Scala example, we initialize a SparkContext, though we use the special
+<code>JavaSparkContext</code> class to get a Java-friendly one. We also create RDDs (represented by
+<code>JavaRDD</code>) and run transformations on them. Finally, we pass functions to Spark by creating classes
+that implement <code>org.apache.spark.api.java.function.Function</code>. The
+<a href="programming-guide.html">Spark programming guide</a> describes these differences in more detail.</p>
+
+ <p>To build the program, we also write a Maven <code>pom.xml</code> file that lists Spark as a dependency.
+Note that Spark artifacts are tagged with a Scala version.</p>
+
+ <div class="highlight"><pre><code class="language-xml" data-lang="xml"><span class="nt">&lt;project&gt;</span>
+ <span class="nt">&lt;groupId&gt;</span>edu.berkeley<span class="nt">&lt;/groupId&gt;</span>
+ <span class="nt">&lt;artifactId&gt;</span>simple-project<span class="nt">&lt;/artifactId&gt;</span>
+ <span class="nt">&lt;modelVersion&gt;</span>4.0.0<span class="nt">&lt;/modelVersion&gt;</span>
+ <span class="nt">&lt;name&gt;</span>Simple Project<span class="nt">&lt;/name&gt;</span>
+ <span class="nt">&lt;packaging&gt;</span>jar<span class="nt">&lt;/packaging&gt;</span>
+ <span class="nt">&lt;version&gt;</span>1.0<span class="nt">&lt;/version&gt;</span>
+ <span class="nt">&lt;dependencies&gt;</span>
+ <span class="nt">&lt;dependency&gt;</span> <span class="c">&lt;!-- Spark dependency --&gt;</span>
+ <span class="nt">&lt;groupId&gt;</span>org.apache.spark<span class="nt">&lt;/groupId&gt;</span>
+ <span class="nt">&lt;artifactId&gt;</span>spark-core_2.10<span class="nt">&lt;/artifactId&gt;</span>
+ <span class="nt">&lt;version&gt;</span>1.3.0<span class="nt">&lt;/version&gt;</span>
+ <span class="nt">&lt;/dependency&gt;</span>
+ <span class="nt">&lt;/dependencies&gt;</span>
+<span class="nt">&lt;/project&gt;</span></code></pre></div>
+
+ <p>We lay out these files according to the canonical Maven directory structure:</p>
+
+ <div class="highlight"><pre><code class="language-bash" data-lang="bash"><span class="nv">$ </span>find .
+./pom.xml
+./src
+./src/main
+./src/main/java
+./src/main/java/SimpleApp.java</code></pre></div>
+
+ <p>Now, we can package the application using Maven and execute it with <code>./bin/spark-submit</code>.</p>
+
+ <div class="highlight"><pre><code class="language-bash" data-lang="bash"><span class="c"># Package a jar containing your application</span>
+<span class="nv">$ </span>mvn package
+...
+<span class="o">[</span>INFO<span class="o">]</span> Building jar: <span class="o">{</span>..<span class="o">}</span>/<span class="o">{</span>..<span class="o">}</span>/target/simple-project-1.0.jar
+
+<span class="c"># Use spark-submit to run your application</span>
+<span class="nv">$ </span>YOUR_SPARK_HOME/bin/spark-submit <span class="se">\</span>
+ --class <span class="s2">&quot;SimpleApp&quot;</span> <span class="se">\</span>
+ --master <span class="nb">local</span><span class="o">[</span>4<span class="o">]</span> <span class="se">\</span>
+ target/simple-project-1.0.jar
+...
+Lines with a: 46, Lines with b: 23</code></pre></div>
+
+ </div>
+<div data-lang="python">
+
+ <p>Now we will show how to write an application using the Python API (PySpark).</p>
+
+ <p>As an example, we&#8217;ll create a simple Spark application, <code>SimpleApp.py</code>:</p>
+
+ <div class="highlight"><pre><code class="language-python" data-lang="python"><span class="sd">&quot;&quot;&quot;SimpleApp.py&quot;&quot;&quot;</span>
+<span class="kn">from</span> <span class="nn">pyspark</span> <span class="kn">import</span> <span class="n">SparkContext</span>
+
+<span class="n">logFile</span> <span class="o">=</span> <span class="s">&quot;YOUR_SPARK_HOME/README.md&quot;</span> <span class="c"># Should be some file on your system</span>
+<span class="n">sc</span> <span class="o">=</span> <span class="n">SparkContext</span><span class="p">(</span><span class="s">&quot;local&quot;</span><span class="p">,</span> <span class="s">&quot;Simple App&quot;</span><span class="p">)</span>
+<span class="n">logData</span> <span class="o">=</span> <span class="n">sc</span><span class="o">.</span><span class="n">textFile</span><span class="p">(</span><span class="n">logFile</span><span class="p">)</span><span class="o">.</span><span class="n">cache</span><span class="p">()</span>
+
+<span class="n">numAs</span> <span class="o">=</span> <span class="n">logData</span><span class="o">.</span><span class="n">filter</span><span class="p">(</span><span class="k">lambda</span> <span class="n">s</span><span class="p">:</span> <span class="s">&#39;a&#39;</span> <span class="ow">in</span> <span class="n">s</span><span class="p">)</span><span class="o">.</span><span class="n">count</span><span class="p">()</span>
+<span class="n">numBs</span> <span class="o">=</span> <span class="n">logData</span><span class="o">.</span><span class="n">filter</span><span class="p">(</span><span class="k">lambda</span> <span class="n">s</span><span class="p">:</span> <span class="s">&#39;b&#39;</span> <span class="ow">in</span> <span class="n">s</span><span class="p">)</span><span class="o">.</span><span class="n">count</span><span class="p">()</span>
+
+<span class="k">print</span> <span class="s">&quot;Lines with a: </span><span class="si">%i</span><span class="s">, lines with b: </span><span class="si">%i</span><span class="s">&quot;</span> <span class="o">%</span> <span class="p">(</span><span class="n">numAs</span><span class="p">,</span> <span class="n">numBs</span><span class="p">)</span></code></pre></div>
+
+ <p>This program just counts the number of lines containing &#8216;a&#8217; and the number containing &#8216;b&#8217; in a
+text file.
+Note that you&#8217;ll need to replace YOUR_SPARK_HOME with the location where Spark is installed.
+As with the Scala and Java examples, we use a SparkContext to create RDDs.
+We can pass Python functions to Spark, which are automatically serialized along with any variables
+that they reference.
+For applications that use custom classes or third-party libraries, we can also add code
+dependencies to <code>spark-submit</code> through its <code>--py-files</code> argument by packaging them into a
+.zip file (see <code>spark-submit --help</code> for details).
+<code>SimpleApp</code> is simple enough that we do not need to specify any code dependencies.</p>
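+
+  <p>For instance, if this application&#8217;s helpers had been packaged into a hypothetical <code>deps.zip</code>, it could be shipped like this:</p>
+
+  <pre><code>./bin/spark-submit --master local[4] --py-files deps.zip SimpleApp.py
+</code></pre>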
+
+ <p>We can run this application using the <code>bin/spark-submit</code> script:</p>
+
+  <div class="highlight"><pre><code class="language-bash" data-lang="bash"><span class="c"># Use spark-submit to run your application</span>
+<span class="nv">$ </span>YOUR_SPARK_HOME/bin/spark-submit <span class="se">\</span>
+  --master <span class="nb">local</span><span class="o">[</span>4<span class="o">]</span> <span class="se">\</span>
+  SimpleApp.py
+...
+Lines with a: 46, Lines with b: 23</code></pre></div>
+
+ </div>
+</div>
+
+<h1 id="where-to-go-from-here">Where to Go from Here</h1>
+<p>Congratulations on running your first Spark application!</p>
+
+<ul>
+ <li>For an in-depth overview of the API, start with the <a href="programming-guide.html">Spark programming guide</a>,
+or see the &#8220;Programming Guides&#8221; menu for other components.</li>
+ <li>For running applications on a cluster, head to the <a href="cluster-overview.html">deployment overview</a>.</li>
+ <li>Finally, Spark includes several samples in the <code>examples</code> directory
+(<a href="https://github.com/apache/spark/tree/master/examples/src/main/scala/org/apache/spark/examples">Scala</a>,
+ <a href="https://github.com/apache/spark/tree/master/examples/src/main/java/org/apache/spark/examples">Java</a>,
+ <a href="https://github.com/apache/spark/tree/master/examples/src/main/python">Python</a>).
+You can run them as follows:</li>
+</ul>
+
+<div class="highlight"><pre><code class="language-bash" data-lang="bash"><span class="c"># For Scala and Java, use run-example:</span>
+./bin/run-example SparkPi
+
+<span class="c"># For Python examples, use spark-submit directly:</span>
+./bin/spark-submit examples/src/main/python/pi.py</code></pre></div>
+
+
+
+ </div> <!-- /container -->
+
+ <script src="js/vendor/jquery-1.8.0.min.js"></script>
+ <script src="js/vendor/bootstrap.min.js"></script>
+ <script src="js/main.js"></script>
+
+ <!-- MathJax Section -->
+ <script type="text/x-mathjax-config">
+ MathJax.Hub.Config({
+ TeX: { equationNumbers: { autoNumber: "AMS" } }
+ });
+ </script>
+ <script>
+ // Note that we load MathJax this way to work with local file (file://), HTTP and HTTPS.
+ // We could use "//cdn.mathjax...", but that won't support "file://".
+ (function(d, script) {
+ script = d.createElement('script');
+ script.type = 'text/javascript';
+ script.async = true;
+ script.onload = function(){
+ MathJax.Hub.Config({
+ tex2jax: {
+ inlineMath: [ ["$", "$"], ["\\\\(","\\\\)"] ],
+ displayMath: [ ["$$","$$"], ["\\[", "\\]"] ],
+ processEscapes: true,
+ skipTags: ['script', 'noscript', 'style', 'textarea', 'pre']
+ }
+ });
+ };
+ script.src = ('https:' == document.location.protocol ? 'https://' : 'http://') +
+ 'cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS-MML_HTMLorMML';
+ d.getElementsByTagName('head')[0].appendChild(script);
+ }(document));
+ </script>
+ </body>
+</html>