author     Joseph K. Bradley <joseph@databricks.com>    2014-12-17 14:12:46 -0800
committer  Xiangrui Meng <meng@databricks.com>          2014-12-17 14:12:56 -0800
commit     f305e7db22d4a30366d838b1e77b5dcaaf7eef68 (patch)
tree       b50660c5b69c99495d5d98a68dd10577c4293c2e /python
parent     0429ec3089afc03064f8ad4608b951ef324f34d8 (diff)
[SPARK-4821] [mllib] [python] [docs] Fix for pyspark.mllib.rand doc
+ small doc edit + include edit to make IntelliJ happy

CC: davies mengxr

Note to davies -- this does not fix the "WARNING: Literal block expected; none found."
warnings since that seems to involve spacing which IntelliJ does not like. (Those warnings
occur when generating the Python docs.)

Author: Joseph K. Bradley <joseph@databricks.com>

Closes #3669 from jkbradley/python-warnings and squashes the following commits:

4587868 [Joseph K. Bradley] fixed warning
8cb073c [Joseph K. Bradley] Updated based on davies recommendation
c51eca4 [Joseph K. Bradley] Updated rst file for pyspark.mllib.rand doc. Small doc edit. Small include edit to make IntelliJ happy.

(cherry picked from commit affc3f460fc6172b6cea88a8779d6d40166c1c6b)
Signed-off-by: Xiangrui Meng <meng@databricks.com>
Diffstat (limited to 'python')
-rw-r--r--    python/docs/pyspark.streaming.rst     2
-rw-r--r--    python/pyspark/mllib/__init__.py     27
-rw-r--r--    python/pyspark/mllib/feature.py       6
3 files changed, 5 insertions, 30 deletions
diff --git a/python/docs/pyspark.streaming.rst b/python/docs/pyspark.streaming.rst
index 5024d694b6..f08185627d 100644
--- a/python/docs/pyspark.streaming.rst
+++ b/python/docs/pyspark.streaming.rst
@@ -1,5 +1,5 @@
pyspark.streaming module
-==================
+========================
Module contents
---------------
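
For context on the one-line change above: docutils requires a section underline to be at
least as long as its title (Sphinx otherwise warns that the title underline is too short),
so the fix simply pads the `=` rule to match "pyspark.streaming module". A minimal
reStructuredText illustration (the module name here is made up):

    Too short -- docutils warns:

        Example module
        ==========

    Underline at least as long as the title -- no warning:

        Example module
        ==============
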
diff --git a/python/pyspark/mllib/__init__.py b/python/pyspark/mllib/__init__.py
index 5030a655fc..c3217620e3 100644
--- a/python/pyspark/mllib/__init__.py
+++ b/python/pyspark/mllib/__init__.py
@@ -32,29 +32,4 @@ import sys
import rand as random
random.__name__ = 'random'
random.RandomRDDs.__module__ = __name__ + '.random'
-
-
-class RandomModuleHook(object):
- """
- Hook to import pyspark.mllib.random
- """
- fullname = __name__ + '.random'
-
- def find_module(self, name, path=None):
- # skip all other modules
- if not name.startswith(self.fullname):
- return
- return self
-
- def load_module(self, name):
- if name == self.fullname:
- return random
-
- cname = name.rsplit('.', 1)[-1]
- try:
- return getattr(random, cname)
- except AttributeError:
- raise ImportError
-
-
-sys.meta_path.append(RandomModuleHook())
+sys.modules[__name__ + '.random'] = random
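
The hunk above replaces a custom `sys.meta_path` import hook with a direct `sys.modules`
registration: once a module object is stored under the alias key, later imports resolve the
alias straight from the module cache, with no finder/loader machinery. A minimal,
self-contained Python 3 sketch of that pattern (the `mypkg` / `mypkg.rand` names and the
`uniformRDD` attribute are illustrative stand-ins, not part of pyspark):

    import sys
    import types

    # Build a throwaway package and submodule purely for illustration.
    pkg = types.ModuleType('mypkg')
    pkg.__path__ = []                         # mark it as a package
    rand = types.ModuleType('mypkg.rand')
    rand.uniformRDD = lambda: [0.1, 0.5]      # stand-in attribute

    # Register the real name and the alias; both keys point at the same
    # module object, so no import hook is needed to serve the alias.
    sys.modules['mypkg'] = pkg
    sys.modules['mypkg.rand'] = rand
    sys.modules['mypkg.random'] = rand
    pkg.random = rand                         # so mypkg.random also works as an attribute

    import mypkg.random                       # resolved straight from sys.modules
    print(mypkg.random.uniformRDD())          # -> [0.1, 0.5]

Compared with the removed RandomModuleHook, this keeps the aliasing in one line and avoids
running a finder on every unrelated import.
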
diff --git a/python/pyspark/mllib/feature.py b/python/pyspark/mllib/feature.py
index 8cb992df2d..7f53213927 100644
--- a/python/pyspark/mllib/feature.py
+++ b/python/pyspark/mllib/feature.py
@@ -53,10 +53,10 @@ class Normalizer(VectorTransformer):
"""
:: Experimental ::
- Normalizes samples individually to unit L\ :sup:`p`\ norm
+ Normalizes samples individually to unit L\ :sup:`p`\ norm
- For any 1 <= `p` <= float('inf'), normalizes samples using
- sum(abs(vector). :sup:`p`) :sup:`(1/p)` as norm.
+ For any 1 <= `p` < float('inf'), normalizes samples using
+ sum(abs(vector) :sup:`p`) :sup:`(1/p)` as norm.
For `p` = float('inf'), max(abs(vector)) will be used as norm for normalization.
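
The corrected docstring describes standard L^p normalization: for any finite p >= 1 the norm
is sum(abs(vector)**p)**(1/p), and for p = float('inf') it is max(abs(vector)). A plain-Python
sketch of that formula (the `normalize` helper is hypothetical and only illustrates the math,
not the MLlib Normalizer implementation; here a zero norm leaves the vector unchanged):

    def normalize(vector, p=2.0):
        """Scale `vector` to unit L^p norm; a zero norm leaves it unchanged."""
        if p == float('inf'):
            norm = max(abs(x) for x in vector)                 # L-infinity norm
        else:                                                  # any 1 <= p < inf
            norm = sum(abs(x) ** p for x in vector) ** (1.0 / p)
        return [x / norm for x in vector] if norm > 0 else list(vector)

    print(normalize([3.0, -4.0]))                    # L2 norm is 5  -> [0.6, -0.8]
    print(normalize([3.0, -4.0], p=float('inf')))    # max abs is 4  -> [0.75, -1.0]
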