
Overview - Spark 3.5.3 Documentation

<a href="https://browsehappy.com/">Upgrade your browser today</a> or <a href="http://www.google.com/chromeframe/?redirect=true">install Google Chrome Frame</a> to better experience this site.</p> <![endif]--> <!-- This code is taken from http://twitter.github.com/bootstrap/examples/hero.html --> <nav class="navbar navbar-expand-lg navbar-dark p-0 px-4 fixed-top" style="background: #1d6890;" id="topbar"> <div class="navbar-brand"><a href="index.html"> <img src="img/spark-logo-rev.svg" width="141" height="72"/></a><span class="version">3.5.3</span> </div> <button class="navbar-toggler" type="button" data-toggle="collapse" data-target="#navbarCollapse" aria-controls="navbarCollapse" aria-expanded="false" aria-label="Toggle navigation"> <span class="navbar-toggler-icon"></span> </button> <div class="collapse navbar-collapse" id="navbarCollapse"> <ul class="navbar-nav me-auto"> <li class="nav-item"><a href="index.html" class="nav-link">Overview</a></li> <li class="nav-item dropdown"> <a href="#" class="nav-link dropdown-toggle" id="navbarQuickStart" role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">Programming Guides</a> <div class="dropdown-menu" aria-labelledby="navbarQuickStart"> <a class="dropdown-item" href="quick-start.html">Quick Start</a> <a class="dropdown-item" href="rdd-programming-guide.html">RDDs, Accumulators, Broadcasts Vars</a> <a class="dropdown-item" href="sql-programming-guide.html">SQL, DataFrames, and Datasets</a> <a class="dropdown-item" href="structured-streaming-programming-guide.html">Structured Streaming</a> <a class="dropdown-item" href="streaming-programming-guide.html">Spark Streaming (DStreams)</a> <a class="dropdown-item" href="ml-guide.html">MLlib (Machine Learning)</a> <a class="dropdown-item" href="graphx-programming-guide.html">GraphX (Graph Processing)</a> <a class="dropdown-item" href="sparkr.html">SparkR (R on Spark)</a> <a class="dropdown-item" href="api/python/getting_started/index.html">PySpark (Python on Spark)</a> </div> </li> <li class="nav-item dropdown"> <a href="#" class="nav-link dropdown-toggle" id="navbarAPIDocs" role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">API Docs</a> <div class="dropdown-menu" aria-labelledby="navbarAPIDocs"> <a class="dropdown-item" href="api/scala/org/apache/spark/index.html">Scala</a> <a class="dropdown-item" href="api/java/index.html">Java</a> <a class="dropdown-item" href="api/python/index.html">Python</a> <a class="dropdown-item" href="api/R/index.html">R</a> <a class="dropdown-item" href="api/sql/index.html">SQL, Built-in Functions</a> </div> </li> <li class="nav-item dropdown"> <a href="#" class="nav-link dropdown-toggle" id="navbarDeploying" role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">Deploying</a> <div class="dropdown-menu" aria-labelledby="navbarDeploying"> <a class="dropdown-item" href="cluster-overview.html">Overview</a> <a class="dropdown-item" href="submitting-applications.html">Submitting Applications</a> <div class="dropdown-divider"></div> <a class="dropdown-item" href="spark-standalone.html">Spark Standalone</a> <a class="dropdown-item" href="running-on-mesos.html">Mesos</a> <a class="dropdown-item" href="running-on-yarn.html">YARN</a> <a class="dropdown-item" href="running-on-kubernetes.html">Kubernetes</a> </div> </li> <li class="nav-item dropdown"> <a href="#" class="nav-link dropdown-toggle" id="navbarMore" role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">More</a> <div 
class="dropdown-menu" aria-labelledby="navbarMore"> <a class="dropdown-item" href="configuration.html">Configuration</a> <a class="dropdown-item" href="monitoring.html">Monitoring</a> <a class="dropdown-item" href="tuning.html">Tuning Guide</a> <a class="dropdown-item" href="job-scheduling.html">Job Scheduling</a> <a class="dropdown-item" href="security.html">Security</a> <a class="dropdown-item" href="hardware-provisioning.html">Hardware Provisioning</a> <a class="dropdown-item" href="migration-guide.html">Migration Guide</a> <div class="dropdown-divider"></div> <a class="dropdown-item" href="building-spark.html">Building Spark</a> <a class="dropdown-item" href="https://spark.apache.org/contributing.html">Contributing to Spark</a> <a class="dropdown-item" href="https://spark.apache.org/third-party-projects.html">Third Party Projects</a> </div> </li> <li class="nav-item"> <input type="text" id="docsearch-input" placeholder="Search the docs…"> </li> </ul> <!--<span class="navbar-text navbar-right"><span class="version-text">v3.5.3</span></span>--> </div> </nav> <section class="hero-banner position-relative"> <div class="container position-relative"> <div class="row"> <h1 style="max-width: 680px;">Apache Spark - A Unified engine for large-scale data analytics</h1> </div> <div class="content mr-3"> Apache Spark is a unified analytics engine for large-scale data processing. It provides high-level APIs in Java, Scala, Python and R, and an optimized engine that supports general execution graphs. It also supports a rich set of higher-level tools including <a href="sql-programming-guide.html">Spark SQL</a> for SQL and structured data processing, <a href="api/python/getting_started/quickstart_ps.html">pandas API on Spark</a> for pandas workloads, <a href="ml-guide.html">MLlib</a> for machine learning, <a href="graphx-programming-guide.html">GraphX</a> for graph processing, and <a href="structured-streaming-programming-guide.html">Structured Streaming</a> for incremental computation and stream processing. </div> </div> </section> <div class="container"> <div class="content mr-3" id="content"> <h1 id="downloading">Downloading</h1> <p>Get Spark from the <a href="https://spark.apache.org/downloads.html">downloads page</a> of the project website. This documentation is for Spark version 3.5.3. Spark uses Hadoop&#8217;s client libraries for HDFS and YARN. Downloads are pre-packaged for a handful of popular Hadoop versions. Users can also download a &#8220;Hadoop free&#8221; binary and run Spark with any Hadoop version <a href="hadoop-provided.html">by augmenting Spark&#8217;s classpath</a>. Scala and Java users can include Spark in their projects using its Maven coordinates and Python users can install Spark from PyPI.</p> <p>If you&#8217;d like to build Spark from source, visit <a href="building-spark.html">Building Spark</a>.</p> <p>Spark runs on both Windows and UNIX-like systems (e.g. Linux, Mac OS), and it should run on any platform that runs a supported version of Java. This should include JVMs on x86_64 and ARM64. It&#8217;s easy to run locally on one machine &#8212; all you need is to have <code class="language-plaintext highlighter-rouge">java</code> installed on your system <code class="language-plaintext highlighter-rouge">PATH</code>, or the <code class="language-plaintext highlighter-rouge">JAVA_HOME</code> environment variable pointing to a Java installation.</p> <p>Spark runs on Java 8/11/17, Scala 2.12/2.13, Python 3.8+, and R 3.5+. 
If you'd like to build Spark from source, visit [Building Spark](building-spark.html).

Spark runs on both Windows and UNIX-like systems (e.g. Linux, macOS), and it should run on any platform that runs a supported version of Java. This should include JVMs on x86_64 and ARM64. It's easy to run locally on one machine: all you need is `java` installed on your system `PATH`, or the `JAVA_HOME` environment variable pointing to a Java installation.

Spark runs on Java 8/11/17, Scala 2.12/2.13, Python 3.8+, and R 3.5+. Support for Java 8 versions prior to 8u371 is deprecated as of Spark 3.5.0. When using the Scala API, applications must use the same version of Scala that Spark was compiled for. For example, when using Scala 2.13, use a Spark distribution compiled for 2.13, and compile your code/applications for Scala 2.13 as well.

For Java 11, setting `-Dio.netty.tryReflectionSetAccessible=true` is required for the Apache Arrow library. This prevents the `java.lang.UnsupportedOperationException: sun.misc.Unsafe or java.nio.DirectByteBuffer.(long, int) not available` error when Apache Arrow uses Netty internally.
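A minimal sketch of one way to pass that flag, using the standard `spark.driver.extraJavaOptions` and `spark.executor.extraJavaOptions` configuration properties on the `spark-submit` command line:

```
# Java 11: forward the Netty/Arrow flag to both the driver and executor JVMs
./bin/spark-submit \
  --conf "spark.driver.extraJavaOptions=-Dio.netty.tryReflectionSetAccessible=true" \
  --conf "spark.executor.extraJavaOptions=-Dio.netty.tryReflectionSetAccessible=true" \
  examples/src/main/python/pi.py 10
```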
# Running the Examples and Shell

Spark comes with several sample programs. Python, Scala, Java, and R examples are in the `examples/src/main` directory.

To run Spark interactively in a Python interpreter, use `bin/pyspark`:

```
./bin/pyspark --master "local[2]"
```

Sample applications are provided in Python. For example:

```
./bin/spark-submit examples/src/main/python/pi.py 10
```

To run one of the Scala or Java sample programs, use `bin/run-example <class> [params]` in the top-level Spark directory. (Behind the scenes, this invokes the more general [`spark-submit` script](submitting-applications.html) for launching applications.) For example:

```
./bin/run-example SparkPi 10
```

You can also run Spark interactively through a modified version of the Scala shell. This is a great way to learn the framework:

```
./bin/spark-shell --master "local[2]"
```

The `--master` option specifies the [master URL for a distributed cluster](submitting-applications.html#master-urls), `local` to run locally with one thread, or `local[N]` to run locally with N threads. You should start by using `local` for testing. For a full list of options, run the Spark shell with the `--help` option.
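For reference, a few common `--master` values; this is a sketch, and `host:7077` is a placeholder for a real standalone master address:

```
./bin/spark-shell --master local              # run locally with one thread
./bin/spark-shell --master "local[4]"         # run locally with four threads
./bin/spark-shell --master spark://host:7077  # connect to a standalone cluster master
./bin/spark-shell --master yarn               # connect to YARN (HADOOP_CONF_DIR must be set)
```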
Since version 1.4, Spark has provided an [R API](sparkr.html) (only the DataFrame APIs are included). To run Spark interactively in an R interpreter, use `bin/sparkR`:

```
./bin/sparkR --master "local[2]"
```

Example applications are also provided in R. For example:

```
./bin/spark-submit examples/src/main/r/dataframe.R
```

## Running Spark Client Applications Anywhere with Spark Connect

Spark Connect is a new client-server architecture, introduced in Spark 3.4, that decouples client applications from the Spark cluster and allows remote connectivity to it. This separation between client and server allows Spark and its open ecosystem to be leveraged from anywhere, embedded in any application. In Spark 3.4, Spark Connect provides DataFrame API coverage for PySpark and DataFrame/Dataset API support in Scala.

To learn more about Spark Connect and how to use it, see [Spark Connect Overview](spark-connect-overview.html).
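As a quick sketch of the workflow (the `--packages` coordinate assumes a Scala 2.12 build, and `localhost` stands in for a real server address):

```
# On the cluster side: start a Spark Connect server
./sbin/start-connect-server.sh --packages org.apache.spark:spark-connect_2.12:3.5.3

# From any client machine: connect a PySpark shell to it over the sc:// protocol
./bin/pyspark --remote "sc://localhost"
```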
<a href="https://www.youtube.com/user/BerkeleyAMPLab/search?query=amp%20camp">Videos</a>, are available online for free.</li> <li><a href="https://spark.apache.org/examples.html">Code Examples</a>: more are also available in the <code class="language-plaintext highlighter-rouge">examples</code> subfolder of Spark (<a href="https://github.com/apache/spark/tree/master/examples/src/main/scala/org/apache/spark/examples">Scala</a>, <a href="https://github.com/apache/spark/tree/master/examples/src/main/java/org/apache/spark/examples">Java</a>, <a href="https://github.com/apache/spark/tree/master/examples/src/main/python">Python</a>, <a href="https://github.com/apache/spark/tree/master/examples/src/main/r">R</a>)</li> </ul> </div> <!-- /container --> </div> <script src="https://cdn.jsdelivr.net/npm/bootstrap@5.0.2/dist/js/bootstrap.bundle.min.js" integrity="sha384-MrcW6ZMFYlzcLA8Nl+NtUVF0sA7MsXsP1UyJoMp4YLEuNSfAP+JcXn/tWtIaxVXM" crossorigin="anonymous"></script> <script src="https://code.jquery.com/jquery.js"></script> <script src="js/vendor/anchor.min.js"></script> <script src="js/main.js"></script> <script type="text/javascript" src="https://cdn.jsdelivr.net/npm/docsearch.js@2/dist/cdn/docsearch.min.js"></script> <script type="text/javascript"> // DocSearch is entirely free and automated. DocSearch is built in two parts: // 1. a crawler which we run on our own infrastructure every 24 hours. It follows every link // in your website and extract content from every page it traverses. It then pushes this // content to an Algolia index. // 2. a JavaScript snippet to be inserted in your website that will bind this Algolia index // to your search input and display its results in a dropdown UI. If you want to find more // details on how works DocSearch, check the docs of DocSearch. docsearch({ apiKey: 'd62f962a82bc9abb53471cb7b89da35e', appId: 'RAI69RXRSK', indexName: 'apache_spark', inputSelector: '#docsearch-input', enhancedSearchInput: true, algoliaOptions: { 'facetFilters': ["version:3.5.3"] }, debug: false // Set debug to true if you want to inspect the dropdown }); </script> <!-- MathJax Section --> <script type="text/x-mathjax-config"> MathJax.Hub.Config({ TeX: { equationNumbers: { autoNumber: "AMS" } } }); </script> <script> // Note that we load MathJax this way to work with local file (file://), HTTP and HTTPS. // We could use "//cdn.mathjax...", but that won't support "file://". (function(d, script) { script = d.createElement('script'); script.type = 'text/javascript'; script.async = true; script.onload = function(){ MathJax.Hub.Config({ tex2jax: { inlineMath: [ ["$", "$"], ["\\\\(","\\\\)"] ], displayMath: [ ["$$","$$"], ["\\[", "\\]"] ], processEscapes: true, skipTags: ['script', 'noscript', 'style', 'textarea', 'pre'] } }); }; script.src = ('https:' == document.location.protocol ? 'https://' : 'http://') + 'cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.1/MathJax.js' + '?config=TeX-AMS-MML_HTMLorMML'; d.getElementsByTagName('head')[0].appendChild(script); }(document)); </script> </body> </html>

# Where to Go from Here

**Programming Guides:**

- [Quick Start](quick-start.html): a quick introduction to the Spark API; start here!
- [RDD Programming Guide](rdd-programming-guide.html): overview of Spark basics: RDDs (core but old API), accumulators, and broadcast variables
- [Spark SQL, Datasets, and DataFrames](sql-programming-guide.html): processing structured data with relational queries (newer API than RDDs)
- [Structured Streaming](structured-streaming-programming-guide.html): processing structured data streams with relational queries (using Datasets and DataFrames, newer API than DStreams)
- [Spark Streaming](streaming-programming-guide.html): processing data streams using DStreams (old API)
- [MLlib](ml-guide.html): applying machine learning algorithms
- [GraphX](graphx-programming-guide.html): processing graphs
- [SparkR](sparkr.html): processing data with Spark in R
- [PySpark](api/python/getting_started/index.html): processing data with Spark in Python
- [Spark SQL CLI](sql-distributed-sql-engine-spark-sql-cli.html): processing data with SQL on the command line

**API Docs:**

- [Spark Scala API (Scaladoc)](api/scala/org/apache/spark/index.html)
- [Spark Java API (Javadoc)](api/java/index.html)
- [Spark Python API (Sphinx)](api/python/index.html)
- [Spark R API (Roxygen2)](api/R/index.html)
- [Spark SQL, Built-in Functions (MkDocs)](api/sql/index.html)

**Deployment Guides:**

- [Cluster Overview](cluster-overview.html): overview of concepts and components when running on a cluster
- [Submitting Applications](submitting-applications.html): packaging and deploying applications
- Deployment modes:
  - [Amazon EC2](https://github.com/amplab/spark-ec2): scripts that let you launch a cluster on EC2 in about 5 minutes
  - [Standalone Deploy Mode](spark-standalone.html): launch a standalone cluster quickly without a third-party cluster manager
  - [Mesos](running-on-mesos.html): deploy a private cluster using [Apache Mesos](https://mesos.apache.org)
  - [YARN](running-on-yarn.html): deploy Spark on top of Hadoop NextGen (YARN)
  - [Kubernetes](running-on-kubernetes.html): deploy Spark on top of Kubernetes

**Other Documents:**

- [Configuration](configuration.html): customize Spark via its configuration system
- [Monitoring](monitoring.html): track the behavior of your applications
- [Tuning Guide](tuning.html): best practices to optimize performance and memory use
- [Job Scheduling](job-scheduling.html): scheduling resources across and within Spark applications
- [Security](security.html): Spark security support
- [Hardware Provisioning](hardware-provisioning.html): recommendations for cluster hardware
- Integration with other storage systems:
  - [Cloud Infrastructures](cloud-integration.html)
  - [OpenStack Swift](storage-openstack-swift.html)
- [Migration Guide](migration-guide.html): migration guides for Spark components
- [Building Spark](building-spark.html): build Spark using the Maven system
- [Contributing to Spark](https://spark.apache.org/contributing.html)
- [Third Party Projects](https://spark.apache.org/third-party-projects.html): related third-party Spark projects

**External Resources:**

- [Spark Homepage](https://spark.apache.org)
- [Spark Community](https://spark.apache.org/community.html) resources, including local meetups
- [StackOverflow tag `apache-spark`](http://stackoverflow.com/questions/tagged/apache-spark)
- [Mailing Lists](https://spark.apache.org/mailing-lists.html): ask questions about Spark here
- AMP Camps: a series of training camps at UC Berkeley that featured talks and exercises about Spark, Spark Streaming, Mesos, and more. [Videos](https://www.youtube.com/user/BerkeleyAMPLab/search?query=amp%20camp) are available online for free.
- [Code Examples](https://spark.apache.org/examples.html): more are also available in the `examples` subfolder of Spark ([Scala](https://github.com/apache/spark/tree/master/examples/src/main/scala/org/apache/spark/examples), [Java](https://github.com/apache/spark/tree/master/examples/src/main/java/org/apache/spark/examples), [Python](https://github.com/apache/spark/tree/master/examples/src/main/python), [R](https://github.com/apache/spark/tree/master/examples/src/main/r))