author     lewuathe <lewuathe@me.com>               2014-12-17 17:31:24 -0800
committer  Xiangrui Meng <meng@databricks.com>      2014-12-17 17:31:24 -0800
commit     3cd516191baadf8496ccdae499771020e89acd7e (patch)
tree       879897b87f1874368f317b6586ff5651ec8e89a0 /python
parent     ca1260891adb87f4985d3cfc515b4756644630d0 (diff)
[SPARK-4822] Use sphinx tags for Python doc annotations
Modify Python annotations for Sphinx. There is no change to the build process
described in https://github.com/apache/spark/blob/master/docs/README.md.

Author: lewuathe <lewuathe@me.com>

Closes #3685 from Lewuathe/sphinx-tag-for-pydoc and squashes the following commits:

88a0fd9 [lewuathe] [SPARK-4822] Fix DevelopApi and WARN tags
3d7a398 [lewuathe] [SPARK-4822] Use sphinx tags for Python doc annotations
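The change is mechanical throughout the patch: the old plain-text markers such as
`:: Experimental ::`, which Sphinx renders as ordinary docstring text, are replaced
with `.. note::` directives, which Sphinx renders as highlighted admonition boxes
in the generated API docs. A minimal sketch of the resulting docstring convention,
using a hypothetical function that is not part of this patch:

    def count_approx_example(timeout, confidence=0.95):
        """
        .. note:: Experimental

        Approximate count that returns a possibly incomplete result
        within `timeout` milliseconds, at the given `confidence`.
        """

The blank line after the directive matters: it terminates the note, so the rest of
the docstring is parsed as ordinary body text rather than as part of the admonition.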
Diffstat (limited to 'python')
-rw-r--r--  python/pyspark/context.py                4
-rw-r--r--  python/pyspark/mllib/classification.py   4
-rw-r--r--  python/pyspark/mllib/feature.py         12
-rw-r--r--  python/pyspark/mllib/stat.py             4
-rw-r--r--  python/pyspark/rdd.py                    8
-rw-r--r--  python/pyspark/sql.py                    2
6 files changed, 17 insertions, 17 deletions
diff --git a/python/pyspark/context.py b/python/pyspark/context.py
index ed7351d60c..593d74bca5 100644
--- a/python/pyspark/context.py
+++ b/python/pyspark/context.py
@@ -407,7 +407,7 @@ class SparkContext(object):
 
     def binaryFiles(self, path, minPartitions=None):
         """
-        :: Experimental ::
+        .. note:: Experimental
 
         Read a directory of binary files from HDFS, a local file system
         (available on all nodes), or any Hadoop-supported file system URI
@@ -424,7 +424,7 @@ class SparkContext(object):
 
     def binaryRecords(self, path, recordLength):
         """
-        :: Experimental ::
+        .. note:: Experimental
 
         Load data from a flat binary file, assuming each record is a set of numbers
         with the specified numerical format (see ByteBuffer), and the number of
diff --git a/python/pyspark/mllib/classification.py b/python/pyspark/mllib/classification.py
index f14d0ed11c..00e2e76711 100644
--- a/python/pyspark/mllib/classification.py
+++ b/python/pyspark/mllib/classification.py
@@ -41,7 +41,7 @@ class LinearBinaryClassificationModel(LinearModel):
 
     def setThreshold(self, value):
         """
-        :: Experimental ::
+        .. note:: Experimental
 
         Sets the threshold that separates positive predictions from negative
         predictions. An example with prediction score greater than or equal
@@ -51,7 +51,7 @@ class LinearBinaryClassificationModel(LinearModel):
 
     def clearThreshold(self):
         """
-        :: Experimental ::
+        .. note:: Experimental
 
         Clears the threshold so that `predict` will output raw prediction scores.
         """
diff --git a/python/pyspark/mllib/feature.py b/python/pyspark/mllib/feature.py
index e46af20886..10df628806 100644
--- a/python/pyspark/mllib/feature.py
+++ b/python/pyspark/mllib/feature.py
@@ -36,7 +36,7 @@ __all__ = ['Normalizer', 'StandardScalerModel', 'StandardScaler',
 
 class VectorTransformer(object):
     """
-    :: DeveloperApi ::
+    .. note:: DeveloperApi
 
     Base class for transformation of a vector or RDD of vector
     """
@@ -51,7 +51,7 @@ class VectorTransformer(object):
 
 class Normalizer(VectorTransformer):
     """
-    :: Experimental ::
+    .. note:: Experimental
 
     Normalizes samples individually to unit L\ :sup:`p`\ norm
@@ -112,7 +112,7 @@ class JavaVectorTransformer(JavaModelWrapper, VectorTransformer):
 
 class StandardScalerModel(JavaVectorTransformer):
     """
-    :: Experimental ::
+    .. note:: Experimental
 
     Represents a StandardScaler model that can transform vectors.
     """
@@ -129,7 +129,7 @@ class StandardScalerModel(JavaVectorTransformer):
 
 class StandardScaler(object):
     """
-    :: Experimental ::
+    .. note:: Experimental
 
     Standardizes features by removing the mean and scaling to unit
     variance using column summary statistics on the samples in the
@@ -172,7 +172,7 @@ class StandardScaler(object):
 
 class HashingTF(object):
     """
-    :: Experimental ::
+    .. note:: Experimental
 
     Maps a sequence of terms to their term frequencies using the hashing trick.
@@ -232,7 +232,7 @@ class IDFModel(JavaVectorTransformer):
 
 class IDF(object):
     """
-    :: Experimental ::
+    .. note:: Experimental
 
     Inverse document frequency (IDF).
diff --git a/python/pyspark/mllib/stat.py b/python/pyspark/mllib/stat.py
index 1980f5b03f..c8af777a8b 100644
--- a/python/pyspark/mllib/stat.py
+++ b/python/pyspark/mllib/stat.py
@@ -55,7 +55,7 @@ class MultivariateStatisticalSummary(JavaModelWrapper):
 
 class ChiSqTestResult(JavaModelWrapper):
     """
-    :: Experimental ::
+    .. note:: Experimental
 
     Object containing the test results for the chi-squared hypothesis test.
     """
@@ -200,7 +200,7 @@ class Statistics(object):
     @staticmethod
     def chiSqTest(observed, expected=None):
         """
-        :: Experimental ::
+        .. note:: Experimental
 
         If `observed` is Vector, conduct Pearson's chi-squared goodness
         of fit test of the observed data against the expected distribution,
diff --git a/python/pyspark/rdd.py b/python/pyspark/rdd.py
index bd2ff00c0f..c1120cf781 100644
--- a/python/pyspark/rdd.py
+++ b/python/pyspark/rdd.py
@@ -1964,7 +1964,7 @@ class RDD(object):
 
     def countApprox(self, timeout, confidence=0.95):
         """
-        :: Experimental ::
+        .. note:: Experimental
 
         Approximate version of count() that returns a potentially incomplete
         result within a timeout, even if not all tasks have finished.
@@ -1977,7 +1977,7 @@ class RDD(object):
 
     def sumApprox(self, timeout, confidence=0.95):
         """
-        :: Experimental ::
+        .. note:: Experimental
 
         Approximate operation to return the sum within a timeout
         or meet the confidence.
@@ -1993,7 +1993,7 @@ class RDD(object):
 
     def meanApprox(self, timeout, confidence=0.95):
         """
-        :: Experimental ::
+        .. note:: Experimental
 
         Approximate operation to return the mean within a timeout
         or meet the confidence.
@@ -2009,7 +2009,7 @@ class RDD(object):
 
     def countApproxDistinct(self, relativeSD=0.05):
         """
-        :: Experimental ::
+        .. note:: Experimental
 
         Return approximate number of distinct elements in the RDD.
         The algorithm used is based on streamlib's implementation of
diff --git a/python/pyspark/sql.py b/python/pyspark/sql.py
index 1ee0b28a32..469f82473a 100644
--- a/python/pyspark/sql.py
+++ b/python/pyspark/sql.py
@@ -420,7 +420,7 @@ class StructType(DataType):
 
 class UserDefinedType(DataType):
     """
-    :: WARN: Spark Internal Use Only ::
+    .. note:: WARN: Spark Internal Use Only
 
     SQL User-Defined Type (UDT).
     """