Diffstat (limited to 'sql/core/src/main/scala/org/apache/spark/sql/streaming/DataStreamWriter.scala')
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/streaming/DataStreamWriter.scala  29
1 file changed, 0 insertions(+), 29 deletions(-)
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/streaming/DataStreamWriter.scala b/sql/core/src/main/scala/org/apache/spark/sql/streaming/DataStreamWriter.scala
index f70c7d08a6..b959444b49 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/streaming/DataStreamWriter.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/streaming/DataStreamWriter.scala
@@ -37,7 +37,6 @@ final class DataStreamWriter[T] private[sql](ds: Dataset[T]) {
private val df = ds.toDF()
/**
- * :: Experimental ::
* Specifies how data of a streaming DataFrame/Dataset is written to a streaming sink.
* - `OutputMode.Append()`: only the new rows in the streaming DataFrame/Dataset will be
* written to the sink
@@ -46,15 +45,12 @@ final class DataStreamWriter[T] private[sql](ds: Dataset[T]) {
*
* @since 2.0.0
*/
- @Experimental
def outputMode(outputMode: OutputMode): DataStreamWriter[T] = {
this.outputMode = outputMode
this
}
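
A minimal usage sketch of the typed overload above, assuming a streaming DataFrame `df` and hypothetical output/checkpoint paths:

import org.apache.spark.sql.streaming.OutputMode

val query = df.writeStream
  .outputMode(OutputMode.Append())                   // only newly arriving rows go to the sink
  .format("parquet")
  .option("checkpointLocation", "/tmp/checkpoints")  // hypothetical path
  .start("/tmp/output")                              // hypothetical path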
-
/**
- * :: Experimental ::
* Specifies how data of a streaming DataFrame/Dataset is written to a streaming sink.
* - `append`: only the new rows in the streaming DataFrame/Dataset will be written to
* the sink
@@ -63,7 +59,6 @@ final class DataStreamWriter[T] private[sql](ds: Dataset[T]) {
*
* @since 2.0.0
*/
- @Experimental
def outputMode(outputMode: String): DataStreamWriter[T] = {
this.outputMode = outputMode.toLowerCase match {
case "append" =>
@@ -78,7 +73,6 @@ final class DataStreamWriter[T] private[sql](ds: Dataset[T]) {
}
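
The string overload is equivalent; "append" and "complete" are the accepted values, matched case-insensitively via the toLowerCase shown above:

val writer = df.writeStream.outputMode("append")   // same effect as OutputMode.Append()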
/**
- * :: Experimental ::
* Set the trigger for the stream query. The default value is `ProcessingTime(0)` and it will run
* the query as fast as possible.
*
@@ -100,7 +94,6 @@ final class DataStreamWriter[T] private[sql](ds: Dataset[T]) {
*
* @since 2.0.0
*/
- @Experimental
def trigger(trigger: Trigger): DataStreamWriter[T] = {
this.trigger = trigger
this
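
A short sketch, assuming the `ProcessingTime` trigger from this package (later Spark releases expose `Trigger.ProcessingTime` instead):

import org.apache.spark.sql.streaming.ProcessingTime

df.writeStream
  .trigger(ProcessingTime("10 seconds"))  // fire a micro-batch every 10s; default is ProcessingTime(0)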
@@ -108,25 +101,21 @@ final class DataStreamWriter[T] private[sql](ds: Dataset[T]) {
/**
- * :: Experimental ::
* Specifies the name of the [[StreamingQuery]] that can be started with `start()`.
* This name must be unique among all the currently active queries in the associated SQLContext.
*
* @since 2.0.0
*/
- @Experimental
def queryName(queryName: String): DataStreamWriter[T] = {
this.extraOptions += ("queryName" -> queryName)
this
}
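
For illustration, with a hypothetical name; the name must be unique among active queries and also serves as the table name when writing to the memory sink:

df.writeStream.queryName("aggregated_events")   // hypothetical query name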
/**
- * :: Experimental ::
* Specifies the underlying output data source. Built-in options include "parquet" for now.
*
* @since 2.0.0
*/
- @Experimental
def format(source: String): DataStreamWriter[T] = {
this.source = source
this
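
A one-line sketch; "parquet" is the built-in file sink mentioned above, and sinks such as "console" and "memory" are also resolvable by name in this version:

df.writeStream.format("parquet")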
@@ -156,90 +145,74 @@ final class DataStreamWriter[T] private[sql](ds: Dataset[T]) {
}
/**
- * :: Experimental ::
* Adds an output option for the underlying data source.
*
* @since 2.0.0
*/
- @Experimental
def option(key: String, value: String): DataStreamWriter[T] = {
this.extraOptions += (key -> value)
this
}
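
For example, with hypothetical paths; "path" and "checkpointLocation" are the options the file sinks consume:

df.writeStream
  .option("checkpointLocation", "/tmp/ckpt")   // hypothetical path
  .option("path", "/tmp/out")                  // hypothetical path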
/**
- * :: Experimental ::
* Adds an output option for the underlying data source.
*
* @since 2.0.0
*/
- @Experimental
def option(key: String, value: Boolean): DataStreamWriter[T] = option(key, value.toString)
/**
- * :: Experimental ::
* Adds an output option for the underlying data source.
*
* @since 2.0.0
*/
- @Experimental
def option(key: String, value: Long): DataStreamWriter[T] = option(key, value.toString)
/**
- * :: Experimental ::
* Adds an output option for the underlying data source.
*
* @since 2.0.0
*/
- @Experimental
def option(key: String, value: Double): DataStreamWriter[T] = option(key, value.toString)
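
The Boolean/Long/Double overloads above simply call .toString and delegate to option(key, value: String), so these two lines are equivalent (the key is hypothetical):

df.writeStream.option("flushEagerly", true)     // hypothetical key
df.writeStream.option("flushEagerly", "true")   // identical effect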
/**
- * :: Experimental ::
* (Scala-specific) Adds output options for the underlying data source.
*
* @since 2.0.0
*/
- @Experimental
def options(options: scala.collection.Map[String, String]): DataStreamWriter[T] = {
this.extraOptions ++= options
this
}
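
A sketch of the Scala bulk form, with hypothetical keys:

df.writeStream.options(Map(
  "path" -> "/tmp/out",                  // hypothetical path
  "checkpointLocation" -> "/tmp/ckpt"))  // hypothetical path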
/**
- * :: Experimental ::
* Adds output options for the underlying data source.
*
* @since 2.0.0
*/
- @Experimental
def options(options: java.util.Map[String, String]): DataStreamWriter[T] = {
this.options(options.asScala)
this
}
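
The Java overload converts the map with asScala and delegates to the Scala one; a sketch for Java-style callers, again with hypothetical keys:

val opts = new java.util.HashMap[String, String]()
opts.put("path", "/tmp/out")   // hypothetical path
df.writeStream.options(opts)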
/**
- * :: Experimental ::
* Starts the execution of the streaming query, which will continually output results to the given
* path as new data arrives. The returned [[StreamingQuery]] object can be used to interact with
* the stream.
*
* @since 2.0.0
*/
- @Experimental
def start(path: String): StreamingQuery = {
option("path", path).start()
}
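
Since start(path) is just option("path", path).start(), these two forms are interchangeable (hypothetical paths; a checkpointLocation option is also needed in practice):

df.writeStream.format("parquet").start("/tmp/out")
df.writeStream.format("parquet").option("path", "/tmp/out").start()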
/**
- * :: Experimental ::
* Starts the execution of the streaming query, which will continually output results to the given
* path as new data arrives. The returned [[StreamingQuery]] object can be used to interact with
* the stream.
*
* @since 2.0.0
*/
- @Experimental
def start(): StreamingQuery = {
if (source == "memory") {
assertNotPartitioned("memory")
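
The "memory" branch above registers the results as an in-memory table named by queryName; a sketch assuming a SparkSession `spark` and hypothetical names:

val q = df.writeStream
  .format("memory")
  .queryName("events")   // hypothetical table name
  .start()
spark.sql("SELECT * FROM events").show()   // query the sink as a table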
@@ -297,7 +270,6 @@ final class DataStreamWriter[T] private[sql](ds: Dataset[T]) {
}
/**
- * :: Experimental ::
* Starts the execution of the streaming query, which will continually send results to the given
* [[ForeachWriter]] as new data arrives. The [[ForeachWriter]] can be used to send the data
* generated by the [[DataFrame]]/[[Dataset]] to an external system.
@@ -343,7 +315,6 @@ final class DataStreamWriter[T] private[sql](ds: Dataset[T]) {
*
* @since 2.0.0
*/
- @Experimental
def foreach(writer: ForeachWriter[T]): DataStreamWriter[T] = {
this.source = "foreach"
this.foreachWriter = if (writer != null) {
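
A minimal ForeachWriter sketch, assuming a streaming Dataset[String] `ds`; open/process/close run once per partition per trigger, and returning true from open accepts the partition:

import org.apache.spark.sql.ForeachWriter

val sink = new ForeachWriter[String] {
  override def open(partitionId: Long, version: Long): Boolean = true
  override def process(value: String): Unit = println(value)   // send to the external system here
  override def close(errorOrNull: Throwable): Unit = ()        // release any resources
}
ds.writeStream.foreach(sink).start()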