author     Davies Liu <davies.liu@gmail.com>    2014-09-03 11:49:45 -0700
committer  Josh Rosen <joshrosen@apache.org>    2014-09-03 11:49:45 -0700
commit  6481d27425f6d42ead36663c9a4ef7ee13b3a8c9 (patch)
tree    051c394c0735be33d4bb7f9fd90f403e9b5f2dcd /python/pyspark/__init__.py
parent  6a72a36940311fcb3429bd34c8818bc7d513115c (diff)
[SPARK-3309] [PySpark] Put all public API in __all__
Put all public API in __all__, and import it all in pyspark/__init__.py, so that `pydoc pyspark` shows the documentation for the whole public API. The __all__ lists can also be used by other tools (such as Sphinx or Epydoc) to generate documentation for public APIs only.

Author: Davies Liu <davies.liu@gmail.com>

Closes #2205 from davies/public and squashes the following commits:

c6c5567 [Davies Liu] fix message
f7b35be [Davies Liu] put SchemaRDD, Row in pyspark.sql module
7e3016a [Davies Liu] add __all__ in mllib
6281b48 [Davies Liu] fix doc for SchemaRDD
6caab21 [Davies Liu] add public interfaces into pyspark.__init__.py
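For context on why this works: `__all__` is the module-level list Python consults for `from module import *`, and documentation tools such as pydoc and Sphinx use it to decide which names count as public. A minimal sketch of the effect (the module and function names below are illustrative, not part of PySpark):

    # demo_all.py -- illustrative module, not part of PySpark
    """Example of how __all__ defines a module's public API."""

    def public_fn():
        """Shown by `pydoc demo_all` and exported by `from demo_all import *`."""
        return "public"

    def internal_fn():
        """Hidden from star-imports and pydoc because it is omitted from __all__."""
        return "internal"

    # Only the names listed here are treated as the public API.
    __all__ = ["public_fn"]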
Diffstat (limited to 'python/pyspark/__init__.py')
-rw-r--r--  python/pyspark/__init__.py | 14 +++++++++-----
1 file changed, 9 insertions(+), 5 deletions(-)
diff --git a/python/pyspark/__init__.py b/python/pyspark/__init__.py
index c58555fc9d..1a2e774738 100644
--- a/python/pyspark/__init__.py
+++ b/python/pyspark/__init__.py
@@ -61,13 +61,17 @@ sys.path.insert(0, s)
 from pyspark.conf import SparkConf
 from pyspark.context import SparkContext
-from pyspark.sql import SQLContext
 from pyspark.rdd import RDD
-from pyspark.sql import SchemaRDD
-from pyspark.sql import Row
 from pyspark.files import SparkFiles
 from pyspark.storagelevel import StorageLevel
+from pyspark.accumulators import Accumulator, AccumulatorParam
+from pyspark.broadcast import Broadcast
+from pyspark.serializers import MarshalSerializer, PickleSerializer
+# for back compatibility
+from pyspark.sql import SQLContext, HiveContext, SchemaRDD, Row
 
-__all__ = ["SparkConf", "SparkContext", "SQLContext", "RDD", "SchemaRDD",
-           "SparkFiles", "StorageLevel", "Row"]
+__all__ = [
+    "SparkConf", "SparkContext", "SparkFiles", "RDD", "StorageLevel", "Broadcast",
+    "Accumulator", "AccumulatorParam", "MarshalSerializer", "PickleSerializer",
+]
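After this change, everything in __all__ is importable straight from the top-level package. A minimal usage sketch, assuming a local PySpark installation of this era (the max-accumulator logic is illustrative, not from the patch):

    from pyspark import SparkConf, SparkContext, AccumulatorParam

    # SparkConf/SparkContext were already top-level; Accumulator and
    # AccumulatorParam are now part of the documented public API too.
    conf = SparkConf().setMaster("local[2]").setAppName("public-api-demo")
    sc = SparkContext(conf=conf)

    class MaxParam(AccumulatorParam):
        """Accumulator that keeps the maximum value seen across tasks."""
        def zero(self, value):
            return value
        def addInPlace(self, v1, v2):
            return max(v1, v2)

    acc = sc.accumulator(0, MaxParam())
    sc.parallelize([1, 5, 3]).foreach(lambda x: acc.add(x))
    print(acc.value)  # 5
    sc.stop()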