aboutsummaryrefslogtreecommitdiff
path: root/python/pyspark/sql/dataframe.py
diff options
context:
space:
mode:
Diffstat (limited to 'python/pyspark/sql/dataframe.py')
-rw-r--r-- python/pyspark/sql/dataframe.py | 12
1 file changed, 12 insertions, 0 deletions
diff --git a/python/pyspark/sql/dataframe.py b/python/pyspark/sql/dataframe.py
index 328bda6601..bbe15f5f90 100644
--- a/python/pyspark/sql/dataframe.py
+++ b/python/pyspark/sql/dataframe.py
@@ -197,6 +197,18 @@ class DataFrame(object):
"""
return self._jdf.isLocal()
+ @property
+ @since(2.0)
+ def isStreaming(self):
+ """Returns true if this :class:`Dataset` contains one or more sources that continuously
+ return data as it arrives. A :class:`Dataset` that reads data from a streaming source
+ must be executed as a :class:`ContinuousQuery` using the :func:`startStream` method in
+ :class:`DataFrameWriter`. Methods that return a single answer (e.g., :func:`count` or
+ :func:`collect`) will throw an :class:`AnalysisException` when there is a streaming
+ source present.
+ """
+ return self._jdf.isStreaming()
+
@since(1.3)
def show(self, n=20, truncate=True):
"""Prints the first ``n`` rows to the console.