author     Peng <peng.meng@intel.com>          2016-12-28 00:49:36 -0800
committer  Yanbo Liang <ybliang8@gmail.com>    2016-12-28 00:49:36 -0800
commit     79ff8536315aef97ee940c52d71cd8de777c7ce6 (patch)
tree       97008066cdca759546e876c5379aa150f91ef27b /python
parent     2af8b5cffa97cd2ca11afe504f6756fe5721dfb6 (diff)
[SPARK-17645][MLLIB][ML] Add feature selector methods based on False Discovery Rate (FDR) and Family-Wise Error Rate (FWE)
## What changes were proposed in this pull request?

Univariate feature selection works by selecting the best features based on univariate statistical tests. FDR and FWE are popular univariate statistical tests for feature selection. In 2005, the Benjamini and Hochberg paper on FDR was identified as one of the 25 most-cited statistical papers. In this PR, the FDR selector uses the Benjamini-Hochberg procedure: https://en.wikipedia.org/wiki/False_discovery_rate. In statistics, FWE is the probability of making one or more false discoveries, or type I errors, among all the hypotheses when performing multiple hypothesis tests: https://en.wikipedia.org/wiki/Family-wise_error_rate

This PR adds FDR and FWE methods to ChiSqSelector, following the way they are implemented in scikit-learn: http://scikit-learn.org/stable/modules/feature_selection.html#univariate-feature-selection

## How was this patch tested?

Unit tests will be added soon.

Author: Peng <peng.meng@intel.com>
Author: Peng, Meng <peng.meng@intel.com>

Closes #15212 from mpjlu/fdr_fwe.
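Since the description leans on the Benjamini-Hochberg step-up procedure for the `fdr` option, here is a minimal standalone sketch of that procedure in plain Python. It is purely illustrative: the function and variable names are not taken from the Spark code base.

```python
# Minimal sketch of the Benjamini-Hochberg step-up procedure behind the "fdr"
# option. Illustrative only; names do not come from the Spark implementation.
def benjamini_hochberg(p_values, alpha=0.05):
    """Return indices of features kept at false discovery rate `alpha`."""
    m = len(p_values)
    # Rank features by ascending p-value, remembering original indices.
    order = sorted(range(m), key=lambda i: p_values[i])
    # Find the largest rank k (1-based) with p_(k) <= k / m * alpha.
    max_rank = 0
    for rank, idx in enumerate(order, start=1):
        if p_values[idx] <= rank * alpha / m:
            max_rank = rank
    # Keep every feature ranked at or below that cutoff.
    return sorted(order[:max_rank])

# Example: only the two smallest p-values survive at alpha = 0.05.
print(benjamini_hochberg([0.001, 0.008, 0.039, 0.041, 0.042, 0.060, 0.074, 0.205]))
```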
Diffstat (limited to 'python')
-rwxr-xr-x  python/pyspark/ml/feature.py     74
-rw-r--r--  python/pyspark/mllib/feature.py  50
2 files changed, 109 insertions, 15 deletions
diff --git a/python/pyspark/ml/feature.py b/python/pyspark/ml/feature.py
index 62c31431b5..dbd17e01d2 100755
--- a/python/pyspark/ml/feature.py
+++ b/python/pyspark/ml/feature.py
@@ -2629,8 +2629,28 @@ class ChiSqSelector(JavaEstimator, HasFeaturesCol, HasOutputCol, HasLabelCol, Ja
"""
.. note:: Experimental
- Chi-Squared feature selection, which selects categorical features to use for predicting a
- categorical label.
+ Creates a ChiSquared feature selector.
+ The selector supports different selection methods: `numTopFeatures`, `percentile`, `fpr`,
+ `fdr`, `fwe`.
+
+ * `numTopFeatures` chooses a fixed number of top features according to a chi-squared test.
+
+ * `percentile` is similar but chooses a fraction of all features
+ instead of a fixed number.
+
+ * `fpr` chooses all features whose p-value is below a threshold,
+ thus controlling the false positive rate of selection.
+
+ * `fdr` uses the `Benjamini-Hochberg procedure <https://en.wikipedia.org/wiki/
+ False_discovery_rate#Benjamini.E2.80.93Hochberg_procedure>`_
+ to choose all features whose false discovery rate is below a threshold.
+
+ * `fwe` chooses all features whose p-values are below a threshold,
+ thus controlling the family-wise error rate of selection.
+
+ By default, the selection method is `numTopFeatures`, with the default number of top features
+ set to 50.
+
>>> from pyspark.ml.linalg import Vectors
>>> df = spark.createDataFrame(
@@ -2676,27 +2696,37 @@ class ChiSqSelector(JavaEstimator, HasFeaturesCol, HasOutputCol, HasLabelCol, Ja
fpr = Param(Params._dummy(), "fpr", "The highest p-value for features to be kept.",
typeConverter=TypeConverters.toFloat)
+ fdr = Param(Params._dummy(), "fdr", "The upper bound of the expected false discovery rate.",
+ typeConverter=TypeConverters.toFloat)
+
+ fwe = Param(Params._dummy(), "fwe", "The upper bound of the expected family-wise error rate.",
+ typeConverter=TypeConverters.toFloat)
+
@keyword_only
def __init__(self, numTopFeatures=50, featuresCol="features", outputCol=None,
- labelCol="label", selectorType="numTopFeatures", percentile=0.1, fpr=0.05):
+ labelCol="label", selectorType="numTopFeatures", percentile=0.1, fpr=0.05,
+ fdr=0.05, fwe=0.05):
"""
__init__(self, numTopFeatures=50, featuresCol="features", outputCol=None, \
- labelCol="label", selectorType="numTopFeatures", percentile=0.1, fpr=0.05)
+ labelCol="label", selectorType="numTopFeatures", percentile=0.1, fpr=0.05, \
+ fdr=0.05, fwe=0.05)
"""
super(ChiSqSelector, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.ChiSqSelector", self.uid)
self._setDefault(numTopFeatures=50, selectorType="numTopFeatures", percentile=0.1,
- fpr=0.05)
+ fpr=0.05, fdr=0.05, fwe=0.05)
kwargs = self.__init__._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("2.0.0")
def setParams(self, numTopFeatures=50, featuresCol="features", outputCol=None,
- labelCol="labels", selectorType="numTopFeatures", percentile=0.1, fpr=0.05):
+ labelCol="labels", selectorType="numTopFeatures", percentile=0.1, fpr=0.05,
+ fdr=0.05, fwe=0.05):
"""
setParams(self, numTopFeatures=50, featuresCol="features", outputCol=None, \
- labelCol="labels", selectorType="numTopFeatures", percentile=0.1, fpr=0.05)
+ labelCol="labels", selectorType="numTopFeatures", percentile=0.1, fpr=0.05, \
+ fdr=0.05, fwe=0.05)
Sets params for this ChiSqSelector.
"""
kwargs = self.setParams._input_kwargs
@@ -2761,6 +2791,36 @@ class ChiSqSelector(JavaEstimator, HasFeaturesCol, HasOutputCol, HasLabelCol, Ja
"""
return self.getOrDefault(self.fpr)
+ @since("2.2.0")
+ def setFdr(self, value):
+ """
+ Sets the value of :py:attr:`fdr`.
+ Only applicable when selectorType = "fdr".
+ """
+ return self._set(fdr=value)
+
+ @since("2.2.0")
+ def getFdr(self):
+ """
+ Gets the value of fdr or its default value.
+ """
+ return self.getOrDefault(self.fdr)
+
+ @since("2.2.0")
+ def setFwe(self, value):
+ """
+ Sets the value of :py:attr:`fwe`.
+ Only applicable when selectorType = "fwe".
+ """
+ return self._set(fwe=value)
+
+ @since("2.2.0")
+ def getFwe(self):
+ """
+ Gets the value of fwe or its default value.
+ """
+ return self.getOrDefault(self.fwe)
+
def _create_model(self, java_model):
return ChiSqSelectorModel(java_model)
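As a hedged usage sketch of the DataFrame-based API touched above: the snippet below assumes an active `SparkSession` named `spark`; the column names and toy data are made up for illustration and are not part of this patch.

```python
# Hedged usage sketch of the new "fdr" selector type in pyspark.ml.
# Assumes an active SparkSession named `spark`; the toy data is illustrative.
from pyspark.ml.feature import ChiSqSelector
from pyspark.ml.linalg import Vectors

df = spark.createDataFrame(
    [(Vectors.dense([0.0, 0.0, 18.0, 1.0]), 1.0),
     (Vectors.dense([0.0, 1.0, 12.0, 0.0]), 0.0),
     (Vectors.dense([1.0, 0.0, 15.0, 0.1]), 0.0)],
    ["features", "label"])

# Keep features whose estimated false discovery rate stays below 0.05.
selector = ChiSqSelector(featuresCol="features", outputCol="selectedFeatures",
                         labelCol="label", selectorType="fdr", fdr=0.05)
model = selector.fit(df)
model.transform(df).show()
```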
diff --git a/python/pyspark/mllib/feature.py b/python/pyspark/mllib/feature.py
index bde0f67be7..61f2bc7492 100644
--- a/python/pyspark/mllib/feature.py
+++ b/python/pyspark/mllib/feature.py
@@ -274,11 +274,24 @@ class ChiSqSelectorModel(JavaVectorTransformer):
class ChiSqSelector(object):
"""
Creates a ChiSquared feature selector.
- The selector supports different selection methods: `numTopFeatures`, `percentile`, `fpr`.
- `numTopFeatures` chooses a fixed number of top features according to a chi-squared test.
- `percentile` is similar but chooses a fraction of all features instead of a fixed number.
- `fpr` chooses all features whose p-value is below a threshold, thus controlling the false
- positive rate of selection.
+ The selector supports different selection methods: `numTopFeatures`, `percentile`, `fpr`,
+ `fdr`, `fwe`.
+
+ * `numTopFeatures` chooses a fixed number of top features according to a chi-squared test.
+
+ * `percentile` is similar but chooses a fraction of all features
+ instead of a fixed number.
+
+ * `fpr` chooses all features whose p-value is below a threshold,
+ thus controlling the false positive rate of selection.
+
+ * `fdr` uses the `Benjamini-Hochberg procedure <https://en.wikipedia.org/wiki/
+ False_discovery_rate#Benjamini.E2.80.93Hochberg_procedure>`_
+ to choose all features whose false discovery rate is below a threshold.
+
+ * `fwe` chooses all features whose p-values are below a threshold,
+ thus controlling the family-wise error rate of selection.
+
By default, the selection method is `numTopFeatures`, with the default number of top features
set to 50.
@@ -305,11 +318,14 @@ class ChiSqSelector(object):
.. versionadded:: 1.4.0
"""
- def __init__(self, numTopFeatures=50, selectorType="numTopFeatures", percentile=0.1, fpr=0.05):
+ def __init__(self, numTopFeatures=50, selectorType="numTopFeatures", percentile=0.1, fpr=0.05,
+ fdr=0.05, fwe=0.05):
self.numTopFeatures = numTopFeatures
self.selectorType = selectorType
self.percentile = percentile
self.fpr = fpr
+ self.fdr = fdr
+ self.fwe = fwe
@since('2.1.0')
def setNumTopFeatures(self, numTopFeatures):
@@ -338,11 +354,29 @@ class ChiSqSelector(object):
self.fpr = float(fpr)
return self
+ @since('2.2.0')
+ def setFdr(self, fdr):
+ """
+ set FDR [0.0, 1.0] for feature selection by FDR.
+ Only applicable when selectorType = "fdr".
+ """
+ self.fdr = float(fdr)
+ return self
+
+ @since('2.2.0')
+ def setFwe(self, fwe):
+ """
+ set FWE [0.0, 1.0] for feature selection by FWE.
+ Only applicable when selectorType = "fwe".
+ """
+ self.fwe = float(fwe)
+ return self
+
@since('2.1.0')
def setSelectorType(self, selectorType):
"""
set the selector type of the ChisqSelector.
- Supported options: "numTopFeatures" (default), "percentile", "fpr".
+ Supported options: "numTopFeatures" (default), "percentile", "fpr", "fdr", "fwe".
"""
self.selectorType = str(selectorType)
return self
@@ -358,7 +392,7 @@ class ChiSqSelector(object):
Apply feature discretizer before using this function.
"""
jmodel = callMLlibFunc("fitChiSqSelector", self.selectorType, self.numTopFeatures,
- self.percentile, self.fpr, data)
+ self.percentile, self.fpr, self.fdr, self.fwe, data)
return ChiSqSelectorModel(jmodel)
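And a hedged sketch of the RDD-based mllib API changed above, assuming an active `SparkContext` named `sc`; the toy data is illustrative, and with FWE control on such a tiny dataset it is possible that no feature is selected.

```python
# Hedged usage sketch of the new "fwe" selector type in pyspark.mllib.
# Assumes an active SparkContext named `sc`; the toy data is illustrative.
from pyspark.mllib.feature import ChiSqSelector
from pyspark.mllib.linalg import Vectors
from pyspark.mllib.regression import LabeledPoint

data = sc.parallelize([
    LabeledPoint(0.0, Vectors.dense([0.0, 2.0, 7.0])),
    LabeledPoint(1.0, Vectors.dense([8.0, 2.0, 0.0])),
    LabeledPoint(1.0, Vectors.dense([9.0, 1.0, 0.0])),
])

# Control the family-wise error rate at 0.05.
model = ChiSqSelector(selectorType="fwe", fwe=0.05).fit(data)
print(model.transform(data.map(lambda lp: lp.features)).collect())
```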