Diffstat (limited to 'python/pyspark/sql')
-rw-r--r--  python/pyspark/sql/session.py    | 2
-rw-r--r--  python/pyspark/sql/streaming.py  | 2
-rw-r--r--  python/pyspark/sql/types.py      | 2
3 files changed, 3 insertions, 3 deletions
diff --git a/python/pyspark/sql/session.py b/python/pyspark/sql/session.py
index 8f7dcb54a7..7c9f532f94 100644
--- a/python/pyspark/sql/session.py
+++ b/python/pyspark/sql/session.py
@@ -360,7 +360,7 @@ class SparkSession(object):
def _createFromLocal(self, data, schema):
"""
- Create an RDD for DataFrame from an list or pandas.DataFrame, returns
+ Create an RDD for DataFrame from a list or pandas.DataFrame, returns
the RDD and schema.
"""
# make sure data could consumed multiple times
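_createFromLocal is the internal path taken by SparkSession.createDataFrame when the input is a local list or a pandas.DataFrame rather than an RDD. A minimal sketch of that public entry point (the app name, column names, and sample data below are illustrative, not taken from this patch):

from pyspark.sql import SparkSession
import pandas as pd

spark = SparkSession.builder.appName("local-data-example").getOrCreate()

# From a local list of tuples; column names are supplied explicitly.
df_from_list = spark.createDataFrame([(1, "a"), (2, "b")], ["id", "value"])

# From a pandas.DataFrame; the schema is inferred from the pandas dtypes.
pdf = pd.DataFrame({"id": [1, 2], "value": ["a", "b"]})
df_from_pandas = spark.createDataFrame(pdf)

df_from_list.show()
df_from_pandas.show()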
diff --git a/python/pyspark/sql/streaming.py b/python/pyspark/sql/streaming.py
index cd75622ced..580aba651f 100644
--- a/python/pyspark/sql/streaming.py
+++ b/python/pyspark/sql/streaming.py
@@ -72,7 +72,7 @@ class ContinuousQuery(object):
@since(2.0)
def processAllAvailable(self):
- """Blocks until all available data in the source has been processed an committed to the
+ """Blocks until all available data in the source has been processed and committed to the
sink. This method is intended for testing. Note that in the case of continually arriving
data, this method may block forever. Additionally, this method is only guaranteed to block
until data that has been synchronously appended data to a stream source prior to invocation.
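processAllAvailable is mostly useful in tests, where a query should drain whatever data the source currently holds before assertions run. A hedged sketch of that test-style usage (the input path, source format, and query name are assumptions for illustration):

from pyspark.sql import SparkSession

spark = SparkSession.builder.appName("process-all-available-example").getOrCreate()

# Stream text files from a directory into an in-memory table named "test_sink".
stream_df = spark.readStream.format("text").load("/tmp/stream-input")
query = stream_df.writeStream.format("memory").queryName("test_sink").start()

# Block until everything currently available in the source has been processed
# and committed to the sink; with continually arriving data this could block forever.
query.processAllAvailable()

spark.sql("SELECT * FROM test_sink").show()
query.stop()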
diff --git a/python/pyspark/sql/types.py b/python/pyspark/sql/types.py
index 7d8d0230b4..bb2b95404a 100644
--- a/python/pyspark/sql/types.py
+++ b/python/pyspark/sql/types.py
@@ -1046,7 +1046,7 @@ def _need_converter(dataType):
def _create_converter(dataType):
- """Create an converter to drop the names of fields in obj """
+ """Create a converter to drop the names of fields in obj """
if not _need_converter(dataType):
return lambda x: x
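_create_converter is a private helper: for struct-like records (Row, dict, or objects with __dict__) it returns a function that drops the field names and yields a plain tuple ordered to match the target schema. A hedged sketch of how that converter behaves, relying on pyspark internals as of this version (the schema and sample records are illustrative):

from pyspark.sql import Row
from pyspark.sql.types import (StructType, StructField, IntegerType,
                               StringType, _create_converter)

schema = StructType([
    StructField("id", IntegerType()),
    StructField("value", StringType()),
])

# The converter strips field names, leaving a positional tuple that matches the schema.
convert = _create_converter(schema)
print(convert(Row(id=1, value="a")))      # expected: (1, 'a')
print(convert({"id": 2, "value": "b"}))   # expected: (2, 'b')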