diff options
author     Sean Owen <sowen@cloudera.com>  2015-11-05 09:08:53 +0000
committer  Sean Owen <sowen@cloudera.com>  2015-11-05 09:08:53 +0000
commit     6f81eae24f83df51a99d4bb2629dd7daadc01519 (patch)
tree       79b7d20c8381b97afb48cfd92ce940297a7f6ea5 /streaming/src/main/scala
parent     81498dd5c86ca51d2fb351c8ef52cbb28e6844f4 (diff)
download   spark-6f81eae24f83df51a99d4bb2629dd7daadc01519.tar.gz
           spark-6f81eae24f83df51a99d4bb2629dd7daadc01519.tar.bz2
           spark-6f81eae24f83df51a99d4bb2629dd7daadc01519.zip
[SPARK-11440][CORE][STREAMING][BUILD] Declare rest of @Experimental items non-experimental if they've existed since 1.2.0
Remove `Experimental` annotations in core, streaming for items that existed in 1.2.0 or before. The changes are:
* SparkContext
* binary{Files,Records} : 1.2.0
* submitJob : 1.0.0
* JavaSparkContext
* binary{Files,Records} : 1.2.0
* DoubleRDDFunctions, JavaDoubleRDD
* {mean,sum}Approx : 1.0.0
* PairRDDFunctions, JavaPairRDD
* sampleByKeyExact : 1.2.0
* countByKeyApprox : 1.0.0
* PairRDDFunctions
* countApproxDistinctByKey : 1.1.0
* RDD
* countApprox, countByValueApprox, countApproxDistinct : 1.0.0
* JavaRDDLike
* countApprox : 1.0.0
* PythonHadoopUtil.Converter : 1.1.0
* PortableDataStream : 1.2.0 (related to binaryFiles)
* BoundedDouble : 1.0.0
* PartialResult : 1.0.0
* StreamingContext, JavaStreamingContext
* binaryRecordsStream : 1.2.0
* HiveContext
* analyze : 1.2.0
Author: Sean Owen <sowen@cloudera.com>
Closes #9396 from srowen/SPARK-11440.
Diffstat (limited to 'streaming/src/main/scala')
 streaming/src/main/scala/org/apache/spark/streaming/StreamingContext.scala              | 3 ---
 streaming/src/main/scala/org/apache/spark/streaming/api/java/JavaStreamingContext.scala | 3 ---
 2 files changed, 0 insertions(+), 6 deletions(-)
diff --git a/streaming/src/main/scala/org/apache/spark/streaming/StreamingContext.scala b/streaming/src/main/scala/org/apache/spark/streaming/StreamingContext.scala
index 051f53de64..97113835f3 100644
--- a/streaming/src/main/scala/org/apache/spark/streaming/StreamingContext.scala
+++ b/streaming/src/main/scala/org/apache/spark/streaming/StreamingContext.scala
@@ -445,8 +445,6 @@ class StreamingContext private[streaming] (
   }
 
   /**
-   * :: Experimental ::
-   *
    * Create an input stream that monitors a Hadoop-compatible filesystem
    * for new files and reads them as flat binary files, assuming a fixed length per record,
    * generating one byte array per record. Files must be written to the monitored directory
@@ -459,7 +457,6 @@ class StreamingContext private[streaming] (
    * @param directory HDFS directory to monitor for new file
    * @param recordLength length of each record in bytes
    */
-  @Experimental
   def binaryRecordsStream(
       directory: String,
       recordLength: Int): DStream[Array[Byte]] = withNamedScope("binary records stream") {
diff --git a/streaming/src/main/scala/org/apache/spark/streaming/api/java/JavaStreamingContext.scala b/streaming/src/main/scala/org/apache/spark/streaming/api/java/JavaStreamingContext.scala
index 13f371f296..8f21c79a76 100644
--- a/streaming/src/main/scala/org/apache/spark/streaming/api/java/JavaStreamingContext.scala
+++ b/streaming/src/main/scala/org/apache/spark/streaming/api/java/JavaStreamingContext.scala
@@ -222,8 +222,6 @@ class JavaStreamingContext(val ssc: StreamingContext) extends Closeable {
   }
 
   /**
-   * :: Experimental ::
-   *
    * Create an input stream that monitors a Hadoop-compatible filesystem
    * for new files and reads them as flat binary files with fixed record lengths,
    * yielding byte arrays
@@ -234,7 +232,6 @@ class JavaStreamingContext(val ssc: StreamingContext) extends Closeable {
    * @param directory HDFS directory to monitor for new files
    * @param recordLength The length at which to split the records
    */
-  @Experimental
   def binaryRecordsStream(directory: String, recordLength: Int): JavaDStream[Array[Byte]] = {
     ssc.binaryRecordsStream(directory, recordLength)
   }