path: root/python/pyspark/ml
author     Yanbo Liang <ybliang8@gmail.com>     2015-09-10 20:43:38 -0700
committer  Xiangrui Meng <meng@databricks.com>  2015-09-10 20:43:38 -0700
commit     a140dd77c62255d6f7f6817a2517d47feb8540d4 (patch)
tree       2fb9533b8e36ab564baeb1ec0aa3e368906e9fa0 /python/pyspark/ml
parent     339a527141984bfb182862b0987d3c4690c9ede1 (diff)
[SPARK-10027] [ML] [PySpark] Add Python API missing methods for ml.feature
The missing methods of ml.feature are listed here: ```StringIndexer``` lacks the parameter ```handleInvalid```. ```StringIndexerModel``` lacks the method ```labels```. ```VectorIndexerModel``` lacks the methods ```numFeatures``` and ```categoryMaps```.

Author: Yanbo Liang <ybliang8@gmail.com>

Closes #8313 from yanboliang/spark-10027.
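For illustration, a minimal usage sketch of the new ```handleInvalid``` parameter; the ```sqlContext``` variable and the toy DataFrames are assumptions, not part of this patch:

```python
from pyspark.ml.feature import StringIndexer

# Toy data; assumes an active SQLContext bound to `sqlContext`.
train = sqlContext.createDataFrame([("a",), ("b",), ("a",)], ["category"])
test = sqlContext.createDataFrame([("a",), ("c",)], ["category"])  # "c" is unseen in training

indexer = StringIndexer(inputCol="category", outputCol="categoryIndex",
                        handleInvalid="skip")  # the default "error" would raise on "c"
model = indexer.fit(train)
model.transform(test).show()  # the row with the unseen label "c" is filtered out
```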
Diffstat (limited to 'python/pyspark/ml')
-rw-r--r--  python/pyspark/ml/feature.py                        31
-rw-r--r--  python/pyspark/ml/param/_shared_params_code_gen.py   5
-rw-r--r--  python/pyspark/ml/param/shared.py                   31
3 files changed, 59 insertions, 8 deletions
diff --git a/python/pyspark/ml/feature.py b/python/pyspark/ml/feature.py
index 1c423486be..71dc636b83 100644
--- a/python/pyspark/ml/feature.py
+++ b/python/pyspark/ml/feature.py
@@ -920,7 +920,7 @@ class StandardScalerModel(JavaModel):
@inherit_doc
-class StringIndexer(JavaEstimator, HasInputCol, HasOutputCol):
+class StringIndexer(JavaEstimator, HasInputCol, HasOutputCol, HasHandleInvalid):
"""
.. note:: Experimental
@@ -943,19 +943,20 @@ class StringIndexer(JavaEstimator, HasInputCol, HasOutputCol):
"""
@keyword_only
- def __init__(self, inputCol=None, outputCol=None):
+ def __init__(self, inputCol=None, outputCol=None, handleInvalid="error"):
"""
- __init__(self, inputCol=None, outputCol=None)
+ __init__(self, inputCol=None, outputCol=None, handleInvalid="error")
"""
super(StringIndexer, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.StringIndexer", self.uid)
+ self._setDefault(handleInvalid="error")
kwargs = self.__init__._input_kwargs
self.setParams(**kwargs)
@keyword_only
- def setParams(self, inputCol=None, outputCol=None):
+ def setParams(self, inputCol=None, outputCol=None, handleInvalid="error"):
"""
- setParams(self, inputCol=None, outputCol=None)
+ setParams(self, inputCol=None, outputCol=None, handleInvalid="error")
Sets params for this StringIndexer.
"""
kwargs = self.setParams._input_kwargs
@@ -1235,6 +1236,10 @@ class VectorIndexer(JavaEstimator, HasInputCol, HasOutputCol):
>>> model = indexer.fit(df)
>>> model.transform(df).head().indexed
DenseVector([1.0, 0.0])
+ >>> model.numFeatures
+ 2
+ >>> model.categoryMaps
+ {0: {0.0: 0, -1.0: 1}}
>>> indexer.setParams(outputCol="test").fit(df).transform(df).collect()[1].test
DenseVector([0.0, 1.0])
>>> params = {indexer.maxCategories: 3, indexer.outputCol: "vector"}
@@ -1297,6 +1302,22 @@ class VectorIndexerModel(JavaModel):
Model fitted by VectorIndexer.
"""
+ @property
+ def numFeatures(self):
+ """
+ Number of features, i.e., length of Vectors which this transforms.
+ """
+ return self._call_java("numFeatures")
+
+ @property
+ def categoryMaps(self):
+ """
+ Feature value index. Keys are categorical feature indices (column indices).
+ Values are maps from original feature values to 0-based category indices.
+ If a feature is not in this map, it is treated as continuous.
+ """
+ return self._call_java("javaCategoryMaps")
+
@inherit_doc
class VectorSlicer(JavaTransformer, HasInputCol, HasOutputCol):
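As a side note, a small sketch of how a caller might read the ```categoryMaps``` structure added above; the fitted ```model``` from the VectorIndexer doctest is assumed:

```python
# From the doctest, model.categoryMaps == {0: {0.0: 0, -1.0: 1}}: feature 0 is
# categorical (-1.0 -> index 1, 0.0 -> index 0); features absent from the map,
# such as feature 1, are treated as continuous.
for feature, mapping in model.categoryMaps.items():
    print("feature %d has %d categories" % (feature, len(mapping)))
print(model.numFeatures)  # 2, the length of the input vectors
```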
diff --git a/python/pyspark/ml/param/_shared_params_code_gen.py b/python/pyspark/ml/param/_shared_params_code_gen.py
index 69efc424ec..926375e448 100644
--- a/python/pyspark/ml/param/_shared_params_code_gen.py
+++ b/python/pyspark/ml/param/_shared_params_code_gen.py
@@ -121,7 +121,10 @@ if __name__ == "__main__":
("checkpointInterval", "checkpoint interval (>= 1)", None),
("seed", "random seed", "hash(type(self).__name__)"),
("tol", "the convergence tolerance for iterative algorithms", None),
- ("stepSize", "Step size to be used for each iteration of optimization.", None)]
+ ("stepSize", "Step size to be used for each iteration of optimization.", None),
+ ("handleInvalid", "how to handle invalid entries. Options are skip (which will filter " +
+ "out rows with bad values), or error (which will throw an errror). More options may be " +
+ "added later.", None)]
code = []
for name, doc, defaultValueStr in shared:
param_code = _gen_param_header(name, doc, defaultValueStr)
diff --git a/python/pyspark/ml/param/shared.py b/python/pyspark/ml/param/shared.py
index 5951247263..682170aee8 100644
--- a/python/pyspark/ml/param/shared.py
+++ b/python/pyspark/ml/param/shared.py
@@ -432,6 +432,33 @@ class HasStepSize(Params):
return self.getOrDefault(self.stepSize)
+class HasHandleInvalid(Params):
+ """
+ Mixin for param handleInvalid: how to handle invalid entries. Options are skip (which will filter out rows with bad values), or error (which will throw an error). More options may be added later.
+ """
+
+ # a placeholder to make it appear in the generated doc
+ handleInvalid = Param(Params._dummy(), "handleInvalid", "how to handle invalid entries. Options are skip (which will filter out rows with bad values), or error (which will throw an error). More options may be added later.")
+
+ def __init__(self):
+ super(HasHandleInvalid, self).__init__()
+ #: param for how to handle invalid entries. Options are skip (which will filter out rows with bad values), or error (which will throw an error). More options may be added later.
+ self.handleInvalid = Param(self, "handleInvalid", "how to handle invalid entries. Options are skip (which will filter out rows with bad values), or error (which will throw an error). More options may be added later.")
+
+ def setHandleInvalid(self, value):
+ """
+ Sets the value of :py:attr:`handleInvalid`.
+ """
+ self._paramMap[self.handleInvalid] = value
+ return self
+
+ def getHandleInvalid(self):
+ """
+ Gets the value of handleInvalid or its default value.
+ """
+ return self.getOrDefault(self.handleInvalid)
+
+
class DecisionTreeParams(Params):
"""
Mixin for Decision Tree parameters.
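For reference, a minimal sketch of how the generated ```HasHandleInvalid``` mixin is exercised through ```StringIndexer```; an active SparkContext is assumed, since constructing the estimator creates a Java object:

```python
from pyspark.ml.feature import StringIndexer

indexer = StringIndexer(inputCol="category", outputCol="categoryIndex")
print(indexer.getHandleInvalid())  # "error", the default set in StringIndexer.__init__
indexer.setHandleInvalid("skip")   # setter provided by the HasHandleInvalid mixin
print(indexer.getHandleInvalid())  # "skip"
```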
@@ -444,7 +471,7 @@ class DecisionTreeParams(Params):
minInfoGain = Param(Params._dummy(), "minInfoGain", "Minimum information gain for a split to be considered at a tree node.")
maxMemoryInMB = Param(Params._dummy(), "maxMemoryInMB", "Maximum memory in MB allocated to histogram aggregation.")
cacheNodeIds = Param(Params._dummy(), "cacheNodeIds", "If false, the algorithm will pass trees to executors to match instances with nodes. If true, the algorithm will cache node IDs for each instance. Caching can speed up training of deeper trees.")
-
+
def __init__(self):
super(DecisionTreeParams, self).__init__()
@@ -460,7 +487,7 @@ class DecisionTreeParams(Params):
self.maxMemoryInMB = Param(self, "maxMemoryInMB", "Maximum memory in MB allocated to histogram aggregation.")
#: param for If false, the algorithm will pass trees to executors to match instances with nodes. If true, the algorithm will cache node IDs for each instance. Caching can speed up training of deeper trees.
self.cacheNodeIds = Param(self, "cacheNodeIds", "If false, the algorithm will pass trees to executors to match instances with nodes. If true, the algorithm will cache node IDs for each instance. Caching can speed up training of deeper trees.")
-
+
def setMaxDepth(self, value):
"""
Sets the value of :py:attr:`maxDepth`.