aboutsummaryrefslogtreecommitdiff
path: root/python/pyspark/ml/feature.py
diff options
context:
space:
mode:
authorBurak Yavuz <brkyvz@gmail.com>2015-05-13 13:21:36 -0700
committerXiangrui Meng <meng@databricks.com>2015-05-13 13:21:36 -0700
commit5db18ba6e1bd8c6307c41549176c53590cf344a0 (patch)
treea1fe0d90d324f009c563a1d4d4122c0f246ef895 /python/pyspark/ml/feature.py
parent51030b8a9d4f3feb7a5d2249cc867fd6a06f0336 (diff)
downloadspark-5db18ba6e1bd8c6307c41549176c53590cf344a0.tar.gz
spark-5db18ba6e1bd8c6307c41549176c53590cf344a0.tar.bz2
spark-5db18ba6e1bd8c6307c41549176c53590cf344a0.zip
[SPARK-7593] [ML] Python Api for ml.feature.Bucketizer
Added `ml.feature.Bucketizer` to PySpark. cc mengxr Author: Burak Yavuz <brkyvz@gmail.com> Closes #6124 from brkyvz/ml-bucket and squashes the following commits: 05285be [Burak Yavuz] added sphinx doc 6abb6ed [Burak Yavuz] added support for Bucketizer
Diffstat (limited to 'python/pyspark/ml/feature.py')
-rw-r--r--python/pyspark/ml/feature.py77
1 file changed, 77 insertions, 0 deletions
diff --git a/python/pyspark/ml/feature.py b/python/pyspark/ml/feature.py
index f35bc1463d..30e1fd4922 100644
--- a/python/pyspark/ml/feature.py
+++ b/python/pyspark/ml/feature.py
@@ -84,6 +84,83 @@ class Binarizer(JavaTransformer, HasInputCol, HasOutputCol):
@inherit_doc
+class Bucketizer(JavaTransformer, HasInputCol, HasOutputCol):
+ """
+ Maps a column of continuous features to a column of feature buckets.
+
+ >>> df = sqlContext.createDataFrame([(0.1,), (0.4,), (1.2,), (1.5,)], ["values"])
+ >>> bucketizer = Bucketizer(splits=[-float("inf"), 0.5, 1.4, float("inf")],
+ ... inputCol="values", outputCol="buckets")
+ >>> bucketed = bucketizer.transform(df).collect()
+ >>> bucketed[0].buckets
+ 0.0
+ >>> bucketed[1].buckets
+ 0.0
+ >>> bucketed[2].buckets
+ 1.0
+ >>> bucketed[3].buckets
+ 2.0
+ >>> bucketizer.setParams(outputCol="b").transform(df).head().b
+ 0.0
+ """
+
+ _java_class = "org.apache.spark.ml.feature.Bucketizer"
+ # a placeholder to make it appear in the generated doc
+ splits = \
+ Param(Params._dummy(), "splits",
+ "Split points for mapping continuous features into buckets. With n+1 splits, " +
+ "there are n buckets. A bucket defined by splits x,y holds values in the " +
+ "range [x,y) except the last bucket, which also includes y. The splits " +
+ "should be strictly increasing. Values at -inf, inf must be explicitly " +
+ "provided to cover all Double values; otherwise, values outside the splits " +
+ "specified will be treated as errors.")
+
+ @keyword_only
+ def __init__(self, splits=None, inputCol=None, outputCol=None):
+ """
+ __init__(self, splits=None, inputCol=None, outputCol=None)
+ """
+ super(Bucketizer, self).__init__()
+ #: param for Splitting points for mapping continuous features into buckets. With n+1 splits,
+ # there are n buckets. A bucket defined by splits x,y holds values in the range [x,y)
+ # except the last bucket, which also includes y. The splits should be strictly increasing.
+ # Values at -inf, inf must be explicitly provided to cover all Double values; otherwise,
+ # values outside the splits specified will be treated as errors.
+ self.splits = \
+ Param(self, "splits",
+ "Split points for mapping continuous features into buckets. With n+1 splits, " +
+ "there are n buckets. A bucket defined by splits x,y holds values in the " +
+ "range [x,y) except the last bucket, which also includes y. The splits " +
+ "should be strictly increasing. Values at -inf, inf must be explicitly " +
+ "provided to cover all Double values; otherwise, values outside the splits " +
+ "specified will be treated as errors.")
+ kwargs = self.__init__._input_kwargs
+ self.setParams(**kwargs)
+
+ @keyword_only
+ def setParams(self, splits=None, inputCol=None, outputCol=None):
+ """
+ setParams(self, splits=None, inputCol=None, outputCol=None)
+ Sets params for this Bucketizer.
+ """
+ kwargs = self.setParams._input_kwargs
+ return self._set(**kwargs)
+
+ def setSplits(self, value):
+ """
+ Sets the value of :py:attr:`splits`.
+ """
+ self.paramMap[self.splits] = value
+ return self
+
+ def getSplits(self):
+ """
+ Gets the value of :py:attr:`splits` or its default value.
+ """
+ return self.getOrDefault(self.splits)
+
+
+@inherit_doc
class HashingTF(JavaTransformer, HasInputCol, HasOutputCol, HasNumFeatures):
"""
Maps a sequence of terms to their term frequencies using the