From 5545b791096756b07b3207fb3de13b68b9a37b00 Mon Sep 17 00:00:00 2001 From: Burak Yavuz Date: Tue, 28 Jun 2016 17:02:16 -0700 Subject: [MINOR][DOCS][STRUCTURED STREAMING] Minor doc fixes around `DataFrameWriter` and `DataStreamWriter` ## What changes were proposed in this pull request? Fixes a couple of old references from `DataFrameWriter.startStream` to `DataStreamWriter.start`. Author: Burak Yavuz Closes #13952 from brkyvz/minor-doc-fix. --- python/pyspark/sql/dataframe.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'python') diff --git a/python/pyspark/sql/dataframe.py b/python/pyspark/sql/dataframe.py index a2443ed3d6..4f13307820 100644 --- a/python/pyspark/sql/dataframe.py +++ b/python/pyspark/sql/dataframe.py @@ -257,8 +257,8 @@ class DataFrame(object): def isStreaming(self): """Returns true if this :class:`Dataset` contains one or more sources that continuously return data as it arrives. A :class:`Dataset` that reads data from a streaming source - must be executed as a :class:`StreamingQuery` using the :func:`startStream` method in - :class:`DataFrameWriter`. Methods that return a single answer, (e.g., :func:`count` or + must be executed as a :class:`StreamingQuery` using the :func:`start` method in + :class:`DataStreamWriter`. Methods that return a single answer, (e.g., :func:`count` or :func:`collect`) will throw an :class:`AnalysisException` when there is a streaming source present. -- cgit v1.2.3