about summary refs log tree commit diff
path: root/python/pyspark/sql
diff options
context:
space:
mode:
author    Zheng RuiFeng <ruifengz@foxmail.com>  2016-06-06 09:35:47 +0100
committer Sean Owen <sowen@cloudera.com>        2016-06-06 09:35:47 +0100
commitfd8af397132fa1415a4c19d7f5cb5a41aa6ddb27 (patch)
treea653b3542d0671c8cb8b3ff7fa3755525c0606a4 /python/pyspark/sql
parent32f2f95dbdfb21491e46d4b608fd4e8ac7ab8973 (diff)
downloadspark-fd8af397132fa1415a4c19d7f5cb5a41aa6ddb27.tar.gz
spark-fd8af397132fa1415a4c19d7f5cb5a41aa6ddb27.tar.bz2
spark-fd8af397132fa1415a4c19d7f5cb5a41aa6ddb27.zip
[MINOR] Fix Typos 'an -> a'
## What changes were proposed in this pull request? `an -> a` Use cmds like `find . -name '*.R' | xargs -i sh -c "grep -in ' an [^aeiou]' {} && echo {}"` to generate candidates, and review them one by one. ## How was this patch tested? manual tests Author: Zheng RuiFeng <ruifengz@foxmail.com> Closes #13515 from zhengruifeng/an_a.
Diffstat (limited to 'python/pyspark/sql')
-rw-r--r--  python/pyspark/sql/session.py    | 2
-rw-r--r--  python/pyspark/sql/streaming.py  | 2
-rw-r--r--  python/pyspark/sql/types.py      | 2
3 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/python/pyspark/sql/session.py b/python/pyspark/sql/session.py
index 8f7dcb54a7..7c9f532f94 100644
--- a/python/pyspark/sql/session.py
+++ b/python/pyspark/sql/session.py
@@ -360,7 +360,7 @@ class SparkSession(object):
def _createFromLocal(self, data, schema):
"""
- Create an RDD for DataFrame from an list or pandas.DataFrame, returns
+ Create an RDD for DataFrame from a list or pandas.DataFrame, returns
the RDD and schema.
"""
# make sure data could consumed multiple times
diff --git a/python/pyspark/sql/streaming.py b/python/pyspark/sql/streaming.py
index cd75622ced..580aba651f 100644
--- a/python/pyspark/sql/streaming.py
+++ b/python/pyspark/sql/streaming.py
@@ -72,7 +72,7 @@ class ContinuousQuery(object):
@since(2.0)
def processAllAvailable(self):
- """Blocks until all available data in the source has been processed an committed to the
+ """Blocks until all available data in the source has been processed and committed to the
sink. This method is intended for testing. Note that in the case of continually arriving
data, this method may block forever. Additionally, this method is only guaranteed to block
until data that has been synchronously appended data to a stream source prior to invocation.
diff --git a/python/pyspark/sql/types.py b/python/pyspark/sql/types.py
index 7d8d0230b4..bb2b95404a 100644
--- a/python/pyspark/sql/types.py
+++ b/python/pyspark/sql/types.py
@@ -1046,7 +1046,7 @@ def _need_converter(dataType):
def _create_converter(dataType):
- """Create an converter to drop the names of fields in obj """
+ """Create a converter to drop the names of fields in obj """
if not _need_converter(dataType):
return lambda x: x