about summary refs log tree commit diff
path: root/python/pyspark/sql/readwriter.py
diff options
context:
space:
mode:
Diffstat (limited to 'python/pyspark/sql/readwriter.py')
-rw-r--r-- python/pyspark/sql/readwriter.py | 8
1 files changed, 4 insertions, 4 deletions
diff --git a/python/pyspark/sql/readwriter.py b/python/pyspark/sql/readwriter.py
index f7c354f513..4020bb3fa4 100644
--- a/python/pyspark/sql/readwriter.py
+++ b/python/pyspark/sql/readwriter.py
@@ -96,7 +96,7 @@ class DataFrameReader(OptionUtils):
By specifying the schema here, the underlying data source can skip the schema
inference step, and thus speed up data loading.
- :param schema: a StructType object
+ :param schema: a :class:`pyspark.sql.types.StructType` object
"""
if not isinstance(schema, StructType):
raise TypeError("schema should be StructType")
@@ -125,7 +125,7 @@ class DataFrameReader(OptionUtils):
:param path: optional string or a list of string for file-system backed data sources.
:param format: optional string for format of the data source. Default to 'parquet'.
- :param schema: optional :class:`StructType` for the input schema.
+ :param schema: optional :class:`pyspark.sql.types.StructType` for the input schema.
:param options: all other string options
>>> df = spark.read.load('python/test_support/sql/parquet_partitioned', opt1=True,
@@ -166,7 +166,7 @@ class DataFrameReader(OptionUtils):
:param path: string represents path to the JSON dataset,
or RDD of Strings storing JSON objects.
- :param schema: an optional :class:`StructType` for the input schema.
+ :param schema: an optional :class:`pyspark.sql.types.StructType` for the input schema.
:param primitivesAsString: infers all primitive values as a string type. If None is set,
it uses the default value, ``false``.
:param prefersDecimal: infers all floating-point values as a decimal type. If the values
@@ -294,7 +294,7 @@ class DataFrameReader(OptionUtils):
``inferSchema`` option or specify the schema explicitly using ``schema``.
:param path: string, or list of strings, for input path(s).
- :param schema: an optional :class:`StructType` for the input schema.
+ :param schema: an optional :class:`pyspark.sql.types.StructType` for the input schema.
:param sep: sets the single character as a separator for each field and value.
If None is set, it uses the default value, ``,``.
:param encoding: decodes the CSV files by the given encoding type. If None is set,