Diffstat:
 python/pyspark/sql/context.py | 44 +++++++++++++++++++++++++-------------------
 1 file changed, 25 insertions(+), 19 deletions(-)
diff --git a/python/pyspark/sql/context.py b/python/pyspark/sql/context.py
index 0debcf1de4..f7009fe589 100644
--- a/python/pyspark/sql/context.py
+++ b/python/pyspark/sql/context.py
@@ -152,9 +152,9 @@ class SQLContext(object):
@since(1.4)
def range(self, start, end=None, step=1, numPartitions=None):
"""
- Create a :class:`DataFrame` with single LongType column named `id`,
- containing elements in a range from `start` to `end` (exclusive) with
- step value `step`.
+ Create a :class:`DataFrame` with single :class:`pyspark.sql.types.LongType` column named
+ ``id``, containing elements in a range from ``start`` to ``end`` (exclusive) with
+ step value ``step``.
:param start: the start value
:param end: the end value (exclusive)
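A minimal usage sketch of the ``range`` call documented in this hunk, assuming a local master and an app name chosen purely for illustration:

    from pyspark import SparkContext
    from pyspark.sql import SQLContext

    sc = SparkContext("local[2]", "range_example")   # app name is a placeholder
    sqlContext = SQLContext(sc)

    # single LongType column named "id": values from start to end (exclusive) in steps of `step`
    df = sqlContext.range(1, 7, 2)
    print(df.collect())                              # [Row(id=1), Row(id=3), Row(id=5)]

    sc.stop()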
@@ -184,7 +184,7 @@ class SQLContext(object):
:param name: name of the UDF
:param f: python function
- :param returnType: a :class:`DataType` object
+ :param returnType: a :class:`pyspark.sql.types.DataType` object
>>> sqlContext.registerFunction("stringLengthString", lambda x: len(x))
>>> sqlContext.sql("SELECT stringLengthString('test')").collect()
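A hedged sketch of ``registerFunction`` with an explicit ``returnType`` (the UDF name ``strLen`` and the app name are illustrative assumptions; ``returnType`` defaults to ``StringType()``):

    from pyspark import SparkContext
    from pyspark.sql import SQLContext
    from pyspark.sql.types import IntegerType

    sc = SparkContext("local[2]", "udf_example")     # app name is a placeholder
    sqlContext = SQLContext(sc)

    # register a Python lambda as a SQL function with an IntegerType return type
    sqlContext.registerFunction("strLen", lambda s: len(s), IntegerType())
    print(sqlContext.sql("SELECT strLen('test')").collect())   # a single Row containing 4

    sc.stop()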
@@ -209,7 +209,7 @@ class SQLContext(object):
:param rdd: an RDD of Row or tuple
:param samplingRatio: sampling ratio, or no sampling (default)
- :return: StructType
+ :return: :class:`pyspark.sql.types.StructType`
"""
return self.sparkSession._inferSchema(rdd, samplingRatio)
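``_inferSchema`` itself is private; a rough sketch of the same inference through the public ``createDataFrame`` path (the RDD contents and names are assumptions), where ``df.schema`` is the resulting :class:`pyspark.sql.types.StructType`:

    from pyspark import SparkContext
    from pyspark.sql import Row, SQLContext

    sc = SparkContext("local[2]", "infer_schema_example")   # app name is a placeholder
    sqlContext = SQLContext(sc)

    rdd = sc.parallelize([Row(name="Alice", age=1), Row(name="Bob", age=2)])
    # samplingRatio is left as None, so only the first row is inspected for inference
    df = sqlContext.createDataFrame(rdd)
    print(df.schema)   # StructType with LongType 'age' and StringType 'name' fields

    sc.stop()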
@@ -226,28 +226,34 @@ class SQLContext(object):
from ``data``, which should be an RDD of :class:`Row`,
or :class:`namedtuple`, or :class:`dict`.
- When ``schema`` is :class:`DataType` or datatype string, it must match the real data, or
- exception will be thrown at runtime. If the given schema is not StructType, it will be
- wrapped into a StructType as its only field, and the field name will be "value", each record
- will also be wrapped into a tuple, which can be converted to row later.
+ When ``schema`` is :class:`pyspark.sql.types.DataType` or
+ :class:`pyspark.sql.types.StringType`, it must match the
+ real data, or an exception will be thrown at runtime. If the given schema is not
+ :class:`pyspark.sql.types.StructType`, it will be wrapped into a
+ :class:`pyspark.sql.types.StructType` as its only field, and the field name will be "value",
+ each record will also be wrapped into a tuple, which can be converted to row later.
If schema inference is needed, ``samplingRatio`` is used to determined the ratio of
rows used for schema inference. The first row will be used if ``samplingRatio`` is ``None``.
- :param data: an RDD of any kind of SQL data representation(e.g. row, tuple, int, boolean,
- etc.), or :class:`list`, or :class:`pandas.DataFrame`.
- :param schema: a :class:`DataType` or a datatype string or a list of column names, default
- is None. The data type string format equals to `DataType.simpleString`, except that
- top level struct type can omit the `struct<>` and atomic types use `typeName()` as
- their format, e.g. use `byte` instead of `tinyint` for ByteType. We can also use `int`
- as a short name for IntegerType.
+ :param data: an RDD of any kind of SQL data representation(e.g. :class:`Row`,
+ :class:`tuple`, ``int``, ``boolean``, etc.), or :class:`list`, or
+ :class:`pandas.DataFrame`.
+ :param schema: a :class:`pyspark.sql.types.DataType` or a
+ :class:`pyspark.sql.types.StringType` or a list of
+ column names, default is None. The data type string format equals to
+ :class:`pyspark.sql.types.DataType.simpleString`, except that top level struct type can
+ omit the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use
+ ``byte`` instead of ``tinyint`` for :class:`pyspark.sql.types.ByteType`.
+ We can also use ``int`` as a short name for :class:`pyspark.sql.types.IntegerType`.
:param samplingRatio: the sample ratio of rows used for inferring
:return: :class:`DataFrame`
.. versionchanged:: 2.0
- The schema parameter can be a DataType or a datatype string after 2.0. If it's not a
- StructType, it will be wrapped into a StructType and each record will also be wrapped
- into a tuple.
+ The ``schema`` parameter can be a :class:`pyspark.sql.types.DataType` or a
+ :class:`pyspark.sql.types.StringType` after 2.0.
+ If it's not a :class:`pyspark.sql.types.StructType`, it will be wrapped into a
+ :class:`pyspark.sql.types.StructType` and each record will also be wrapped into a tuple.
>>> l = [('Alice', 1)]
>>> sqlContext.createDataFrame(l).collect()
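To illustrate the wrapping behaviour described in this hunk, a sketch that passes a datatype string as ``schema`` (the input data and names are assumptions):

    from pyspark import SparkContext
    from pyspark.sql import SQLContext

    sc = SparkContext("local[2]", "schema_string_example")  # app name is a placeholder
    sqlContext = SQLContext(sc)

    # a non-struct schema such as "int" is wrapped into a StructType whose single
    # field is named "value", so each element becomes Row(value=...)
    df = sqlContext.createDataFrame([1, 2, 3], "int")
    print(df.collect())                                      # [Row(value=1), Row(value=2), Row(value=3)]

    sc.stop()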