path: root/python/pyspark/rdd.py
author     Davies Liu <davies.liu@gmail.com>    2014-10-07 18:09:27 -0700
committer  Josh Rosen <joshrosen@apache.org>    2014-10-07 18:09:27 -0700
commit     798ed22c289cf65f2249bf2f4250285685ca69e7 (patch)
tree       137d93c32454aaf39e6416823a8604f816f73926 /python/pyspark/rdd.py
parent     b69c9fb6fb048509bbd8430fb697dc3a5ca4fe59 (diff)
[SPARK-3412] [PySpark] Replace Epydoc with Sphinx to generate Python API docs
Retire Epydoc, use Sphinx to generate API docs. Refine Sphinx docs, also convert some docstrings into Sphinx style.

It looks like: ![api doc](https://cloud.githubusercontent.com/assets/40902/4538272/9e2d4f10-4dec-11e4-8d96-6e45a8fe51f9.png)

Author: Davies Liu <davies.liu@gmail.com>

Closes #2689 from davies/docs and squashes the following commits:

bf4a0a5 [Davies Liu] fix links
3fb1572 [Davies Liu] fix _static in jekyll
65a287e [Davies Liu] fix scripts and logo
8524042 [Davies Liu] Merge branch 'master' of github.com:apache/spark into docs
d5b874a [Davies Liu] Merge branch 'master' of github.com:apache/spark into docs
4bc1c3c [Davies Liu] refactor
746d0b6 [Davies Liu] @param -> :param
240b393 [Davies Liu] replace epydoc with sphinx doc
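The substance of the change is the docstring field syntax: Epydoc's @param tags become Sphinx (reStructuredText) :param fields, which Sphinx can parse into the rendered API documentation. A minimal before/after sketch using the max() method touched in the diff below (method body elided):

    # Before: Epydoc markup
    def max(self, key=None):
        """
        Find the maximum item in this RDD.

        @param key: A function used to generate key for comparing
        """

    # After: Sphinx markup
    def max(self, key=None):
        """
        Find the maximum item in this RDD.

        :param key: A function used to generate key for comparing
        """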
Diffstat (limited to 'python/pyspark/rdd.py')
-rw-r--r--  python/pyspark/rdd.py  |  52
1 file changed, 26 insertions(+), 26 deletions(-)
diff --git a/python/pyspark/rdd.py b/python/pyspark/rdd.py
index e77669aad7..6797d50659 100644
--- a/python/pyspark/rdd.py
+++ b/python/pyspark/rdd.py
@@ -752,7 +752,7 @@ class RDD(object):
"""
Find the maximum item in this RDD.
- @param key: A function used to generate key for comparing
+ :param key: A function used to generate key for comparing
>>> rdd = sc.parallelize([1.0, 5.0, 43.0, 10.0])
>>> rdd.max()
@@ -768,7 +768,7 @@ class RDD(object):
"""
Find the minimum item in this RDD.
- @param key: A function used to generate key for comparing
+ :param key: A function used to generate key for comparing
>>> rdd = sc.parallelize([2.0, 5.0, 43.0, 10.0])
>>> rdd.min()
@@ -1115,9 +1115,9 @@ class RDD(object):
converted for output using either user specified converters or, by default,
L{org.apache.spark.api.python.JavaToWritableConverter}.
- @param conf: Hadoop job configuration, passed in as a dict
- @param keyConverter: (None by default)
- @param valueConverter: (None by default)
+ :param conf: Hadoop job configuration, passed in as a dict
+ :param keyConverter: (None by default)
+ :param valueConverter: (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._toPickleSerialization()
@@ -1135,16 +1135,16 @@ class RDD(object):
C{conf} is applied on top of the base Hadoop conf associated with the SparkContext
of this RDD to create a merged Hadoop MapReduce job configuration for saving the data.
- @param path: path to Hadoop file
- @param outputFormatClass: fully qualified classname of Hadoop OutputFormat
+ :param path: path to Hadoop file
+ :param outputFormatClass: fully qualified classname of Hadoop OutputFormat
(e.g. "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat")
- @param keyClass: fully qualified classname of key Writable class
+ :param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.IntWritable", None by default)
- @param valueClass: fully qualified classname of value Writable class
+ :param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.Text", None by default)
- @param keyConverter: (None by default)
- @param valueConverter: (None by default)
- @param conf: Hadoop job configuration, passed in as a dict (None by default)
+ :param keyConverter: (None by default)
+ :param valueConverter: (None by default)
+ :param conf: Hadoop job configuration, passed in as a dict (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._toPickleSerialization()
@@ -1161,9 +1161,9 @@ class RDD(object):
converted for output using either user specified converters or, by default,
L{org.apache.spark.api.python.JavaToWritableConverter}.
- @param conf: Hadoop job configuration, passed in as a dict
- @param keyConverter: (None by default)
- @param valueConverter: (None by default)
+ :param conf: Hadoop job configuration, passed in as a dict
+ :param keyConverter: (None by default)
+ :param valueConverter: (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._toPickleSerialization()
@@ -1182,17 +1182,17 @@ class RDD(object):
C{conf} is applied on top of the base Hadoop conf associated with the SparkContext
of this RDD to create a merged Hadoop MapReduce job configuration for saving the data.
- @param path: path to Hadoop file
- @param outputFormatClass: fully qualified classname of Hadoop OutputFormat
+ :param path: path to Hadoop file
+ :param outputFormatClass: fully qualified classname of Hadoop OutputFormat
(e.g. "org.apache.hadoop.mapred.SequenceFileOutputFormat")
- @param keyClass: fully qualified classname of key Writable class
+ :param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.IntWritable", None by default)
- @param valueClass: fully qualified classname of value Writable class
+ :param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.Text", None by default)
- @param keyConverter: (None by default)
- @param valueConverter: (None by default)
- @param conf: (None by default)
- @param compressionCodecClass: (None by default)
+ :param keyConverter: (None by default)
+ :param valueConverter: (None by default)
+ :param conf: (None by default)
+ :param compressionCodecClass: (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._toPickleSerialization()
@@ -1212,8 +1212,8 @@ class RDD(object):
1. Pyrolite is used to convert pickled Python RDD into RDD of Java objects.
2. Keys and values of this Java RDD are converted to Writables and written out.
- @param path: path to sequence file
- @param compressionCodecClass: (None by default)
+ :param path: path to sequence file
+ :param compressionCodecClass: (None by default)
"""
pickledRDD = self._toPickleSerialization()
batched = isinstance(pickledRDD._jrdd_deserializer, BatchedSerializer)
@@ -2009,7 +2009,7 @@ class RDD(object):
of The Art Cardinality Estimation Algorithm", available
<a href="http://dx.doi.org/10.1145/2452376.2452456">here</a>.
- @param relativeSD Relative accuracy. Smaller values create
+ :param relativeSD Relative accuracy. Smaller values create
counters that require more space.
It must be greater than 0.000017.
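For context, a minimal usage sketch of the parameters documented in the hunks above, assuming a live SparkContext `sc` as in the doctests. The commented results follow the doctest values shown in the diff; the countApproxDistinct() result is approximate by design:

    rdd = sc.parallelize([1.0, 5.0, 43.0, 10.0])
    rdd.max()         # 43.0
    rdd.max(key=str)  # 5.0 -- items compared by their derived string keys
    rdd.min()         # 1.0

    # Smaller relativeSD gives a more accurate estimate but needs a larger
    # counter; it must be greater than 0.000017 (see docstring above).
    sc.parallelize(range(1000)).countApproxDistinct(relativeSD=0.05)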