author    Nicholas Chammas <nicholas.chammas@gmail.com>  2016-07-29 14:07:03 -0700
committer Reynold Xin <rxin@databricks.com>              2016-07-29 14:07:03 -0700
commit    2182e4322da6ba732f99ae75dce00f76f1cdc4d9 (patch)
tree      1fbb2ec619487844e174e56dcc8c16533fb29b99 /python/pyspark/sql
parent    2c15323ad026da64caa68787c5d103a8595f63a0 (diff)
[SPARK-16772][PYTHON][DOCS] Restore "datatype string" to Python API docstrings
## What changes were proposed in this pull request?

This PR corrects [an error made in an earlier PR](https://github.com/apache/spark/pull/14393/files#r72843069).

## How was this patch tested?

```sh
$ ./dev/lint-python
PEP8 checks passed.
rm -rf _build/*
pydoc checks passed.
```

I also built the docs and confirmed that they looked good in my browser.

Author: Nicholas Chammas <nicholas.chammas@gmail.com>

Closes #14408 from nchammas/SPARK-16772.
Diffstat (limited to 'python/pyspark/sql')
-rw-r--r--  python/pyspark/sql/context.py | 10
-rw-r--r--  python/pyspark/sql/session.py | 10
2 files changed, 8 insertions(+), 12 deletions(-)
diff --git a/python/pyspark/sql/context.py b/python/pyspark/sql/context.py
index f7009fe589..4085f165f4 100644
--- a/python/pyspark/sql/context.py
+++ b/python/pyspark/sql/context.py
@@ -226,9 +226,8 @@ class SQLContext(object):
from ``data``, which should be an RDD of :class:`Row`,
or :class:`namedtuple`, or :class:`dict`.
- When ``schema`` is :class:`pyspark.sql.types.DataType` or
- :class:`pyspark.sql.types.StringType`, it must match the
- real data, or an exception will be thrown at runtime. If the given schema is not
+ When ``schema`` is :class:`pyspark.sql.types.DataType` or a datatype string it must match
+ the real data, or an exception will be thrown at runtime. If the given schema is not
:class:`pyspark.sql.types.StructType`, it will be wrapped into a
:class:`pyspark.sql.types.StructType` as its only field, and the field name will be "value",
each record will also be wrapped into a tuple, which can be converted to row later.
@@ -239,8 +238,7 @@ class SQLContext(object):
:param data: an RDD of any kind of SQL data representation(e.g. :class:`Row`,
:class:`tuple`, ``int``, ``boolean``, etc.), or :class:`list`, or
:class:`pandas.DataFrame`.
- :param schema: a :class:`pyspark.sql.types.DataType` or a
- :class:`pyspark.sql.types.StringType` or a list of
+ :param schema: a :class:`pyspark.sql.types.DataType` or a datatype string or a list of
column names, default is None. The data type string format equals to
:class:`pyspark.sql.types.DataType.simpleString`, except that top level struct type can
omit the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use
@@ -251,7 +249,7 @@ class SQLContext(object):
.. versionchanged:: 2.0
The ``schema`` parameter can be a :class:`pyspark.sql.types.DataType` or a
- :class:`pyspark.sql.types.StringType` after 2.0.
+ datatype string after 2.0.
If it's not a :class:`pyspark.sql.types.StructType`, it will be wrapped into a
:class:`pyspark.sql.types.StructType` and each record will also be wrapped into a tuple.
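For reference, a minimal sketch of the `SQLContext.createDataFrame` behavior the corrected docstring describes, assuming Spark 2.0 run locally; the master setting, app name, column names, and data values here are hypothetical, not part of the commit:

```python
from pyspark import SparkContext
from pyspark.sql import SQLContext

# Hypothetical local setup for illustration only.
sc = SparkContext("local[2]", "schema-string-sketch")
sqlContext = SQLContext(sc)

rdd = sc.parallelize([("Alice", 1), ("Bob", 2)])

# The schema is a datatype string in DataType.simpleString format;
# a top-level struct type may omit the ``struct<>`` wrapper.
df = sqlContext.createDataFrame(rdd, "name: string, age: int")
df.show()

# A non-struct datatype string (atomic types use typeName(), e.g. "byte"
# rather than "tinyint") is wrapped into a StructType whose only field is
# named "value", and each record is wrapped into a tuple.
values = sqlContext.createDataFrame(sc.parallelize([1, 2, 3]), "int")
values.show()
```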
diff --git a/python/pyspark/sql/session.py b/python/pyspark/sql/session.py
index 10bd89b03f..2dacf483fc 100644
--- a/python/pyspark/sql/session.py
+++ b/python/pyspark/sql/session.py
@@ -414,9 +414,8 @@ class SparkSession(object):
from ``data``, which should be an RDD of :class:`Row`,
or :class:`namedtuple`, or :class:`dict`.
- When ``schema`` is :class:`pyspark.sql.types.DataType` or
- :class:`pyspark.sql.types.StringType`, it must match the
- real data, or an exception will be thrown at runtime. If the given schema is not
+ When ``schema`` is :class:`pyspark.sql.types.DataType` or a datatype string, it must match
+ the real data, or an exception will be thrown at runtime. If the given schema is not
:class:`pyspark.sql.types.StructType`, it will be wrapped into a
:class:`pyspark.sql.types.StructType` as its only field, and the field name will be "value",
each record will also be wrapped into a tuple, which can be converted to row later.
@@ -426,8 +425,7 @@ class SparkSession(object):
:param data: an RDD of any kind of SQL data representation(e.g. row, tuple, int, boolean,
etc.), or :class:`list`, or :class:`pandas.DataFrame`.
- :param schema: a :class:`pyspark.sql.types.DataType` or a
- :class:`pyspark.sql.types.StringType` or a list of
+ :param schema: a :class:`pyspark.sql.types.DataType` or a datatype string or a list of
column names, default is ``None``. The data type string format equals to
:class:`pyspark.sql.types.DataType.simpleString`, except that top level struct type can
omit the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use
@@ -438,7 +436,7 @@ class SparkSession(object):
.. versionchanged:: 2.0
The ``schema`` parameter can be a :class:`pyspark.sql.types.DataType` or a
- :class:`pyspark.sql.types.StringType` after 2.0. If it's not a
+ datatype string after 2.0. If it's not a
:class:`pyspark.sql.types.StructType`, it will be wrapped into a
:class:`pyspark.sql.types.StructType` and each record will also be wrapped into a tuple.
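And the `SparkSession` counterpart touched by the second hunk, again a hedged sketch assuming Spark 2.0 with hypothetical names and data:

```python
from pyspark.sql import SparkSession

# Hypothetical local session for illustration only.
spark = (SparkSession.builder
         .master("local[2]")
         .appName("schema-string-sketch")
         .getOrCreate())

# A datatype string in place of a pyspark.sql.types.DataType instance;
# it must match the real data or an exception is thrown at runtime.
df = spark.createDataFrame([("Alice", 1), ("Bob", 2)], "name: string, age: int")
df.show()

# A non-struct datatype string is wrapped into a StructType with a single
# field named "value", and each record is wrapped into a tuple.
values = spark.createDataFrame([1, 2, 3], "int")
values.show()

spark.stop()
```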