 docs/building-spark.md                                                           | 6 +++---
 docs/streaming-flume-integration.md                                              | 2 +-
 external/flume/src/main/scala/org/apache/spark/streaming/flume/FlumeUtils.scala  | 8 --------
 3 files changed, 4 insertions(+), 12 deletions(-)
diff --git a/docs/building-spark.md b/docs/building-spark.md
index 088da7da49..4c3988e819 100644
--- a/docs/building-spark.md
+++ b/docs/building-spark.md
@@ -111,9 +111,9 @@ To produce a Spark package compiled with Scala 2.11, use the `-Dscala-2.11` prop
dev/change-version-to-2.11.sh
mvn -Pyarn -Phadoop-2.4 -Dscala-2.11 -DskipTests clean package
-Scala 2.11 support in Spark is experimental and does not support a few features.
-Specifically, Spark's external Kafka library and JDBC component are not yet
-supported in Scala 2.11 builds.
+Scala 2.11 builds of Spark are missing a few features because some of Spark's
+dependencies are not yet Scala 2.11 ready. Specifically, Spark's external
+Kafka library and JDBC component are not yet supported in Scala 2.11 builds.
# Spark Tests in Maven
diff --git a/docs/streaming-flume-integration.md b/docs/streaming-flume-integration.md
index ac01dd3d80..40e17246fe 100644
--- a/docs/streaming-flume-integration.md
+++ b/docs/streaming-flume-integration.md
@@ -64,7 +64,7 @@ configuring Flume agents.
3. **Deploying:** Package `spark-streaming-flume_{{site.SCALA_BINARY_VERSION}}` and its dependencies (except `spark-core_{{site.SCALA_BINARY_VERSION}}` and `spark-streaming_{{site.SCALA_BINARY_VERSION}}` which are provided by `spark-submit`) into the application JAR. Then use `spark-submit` to launch your application (see [Deploying section](streaming-programming-guide.html#deploying-applications) in the main programming guide).
-## Approach 2 (Experimental): Pull-based Approach using a Custom Sink
+## Approach 2: Pull-based Approach using a Custom Sink
Instead of Flume pushing data directly to Spark Streaming, this approach runs a custom Flume sink that allows the following.
 - Flume pushes data into the sink, and the data stays buffered.
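As a hedged sketch of how an application consumes data in this pull-based approach (the host name `sink-host`, port `9999`, and the 10-second batch interval are placeholder assumptions, not part of this change):

    import org.apache.spark.SparkConf
    import org.apache.spark.streaming.{Seconds, StreamingContext}
    import org.apache.spark.streaming.flume.FlumeUtils

    object FlumePollingSketch {
      def main(args: Array[String]): Unit = {
        val conf = new SparkConf().setAppName("FlumePollingSketch")
        val ssc = new StreamingContext(conf, Seconds(10))
        // Pull events from the custom Spark Sink configured in the Flume agent
        // (placeholder host/port; see the Approach 2 description above).
        val stream = FlumeUtils.createPollingStream(ssc, "sink-host", 9999)
        // Report how many events each batch pulled from the sink.
        stream.count().map(cnt => "Received " + cnt + " flume events.").print()
        ssc.start()
        ssc.awaitTermination()
      }
    }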
diff --git a/external/flume/src/main/scala/org/apache/spark/streaming/flume/FlumeUtils.scala b/external/flume/src/main/scala/org/apache/spark/streaming/flume/FlumeUtils.scala
index 4b732c1592..44dec45c22 100644
--- a/external/flume/src/main/scala/org/apache/spark/streaming/flume/FlumeUtils.scala
+++ b/external/flume/src/main/scala/org/apache/spark/streaming/flume/FlumeUtils.scala
@@ -19,7 +19,6 @@ package org.apache.spark.streaming.flume
import java.net.InetSocketAddress
-import org.apache.spark.annotation.Experimental
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.api.java.{JavaReceiverInputDStream, JavaStreamingContext}
@@ -121,7 +120,6 @@ object FlumeUtils {
* @param port Port of the host at which the Spark Sink is listening
* @param storageLevel Storage level to use for storing the received objects
*/
- @Experimental
def createPollingStream(
ssc: StreamingContext,
hostname: String,
@@ -138,7 +136,6 @@ object FlumeUtils {
* @param addresses List of InetSocketAddresses representing the hosts to connect to.
* @param storageLevel Storage level to use for storing the received objects
*/
- @Experimental
def createPollingStream(
ssc: StreamingContext,
addresses: Seq[InetSocketAddress],
@@ -159,7 +156,6 @@ object FlumeUtils {
* result in this stream using more threads
* @param storageLevel Storage level to use for storing the received objects
*/
- @Experimental
def createPollingStream(
ssc: StreamingContext,
addresses: Seq[InetSocketAddress],
@@ -178,7 +174,6 @@ object FlumeUtils {
* @param hostname Hostname of the host on which the Spark Sink is running
* @param port Port of the host at which the Spark Sink is listening
*/
- @Experimental
def createPollingStream(
jssc: JavaStreamingContext,
hostname: String,
@@ -195,7 +190,6 @@ object FlumeUtils {
* @param port Port of the host at which the Spark Sink is listening
* @param storageLevel Storage level to use for storing the received objects
*/
- @Experimental
def createPollingStream(
jssc: JavaStreamingContext,
hostname: String,
@@ -212,7 +206,6 @@ object FlumeUtils {
* @param addresses List of InetSocketAddresses on which the Spark Sink is running.
* @param storageLevel Storage level to use for storing the received objects
*/
- @Experimental
def createPollingStream(
jssc: JavaStreamingContext,
addresses: Array[InetSocketAddress],
@@ -233,7 +226,6 @@ object FlumeUtils {
* result in this stream using more threads
* @param storageLevel Storage level to use for storing the received objects
*/
- @Experimental
def createPollingStream(
jssc: JavaStreamingContext,
addresses: Array[InetSocketAddress],
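For completeness, a hedged sketch of the multi-address Scala overload of `createPollingStream` whose docs are touched above, assuming an existing `StreamingContext` named `ssc` and placeholder sink hosts:

    import java.net.InetSocketAddress
    import org.apache.spark.storage.StorageLevel
    import org.apache.spark.streaming.flume.FlumeUtils

    // Placeholder addresses; each host must run the custom Spark Sink.
    val addresses = Seq(
      new InetSocketAddress("sink-host-1", 9999),
      new InetSocketAddress("sink-host-2", 9999))
    // Poll all sinks in one stream, replicating received blocks.
    val events = FlumeUtils.createPollingStream(
      ssc, addresses, StorageLevel.MEMORY_AND_DISK_SER_2)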