diff options
author | Holden Karau <holden@us.ibm.com> | 2016-01-26 15:53:48 -0800 |
---|---|---|
committer | Joseph K. Bradley <joseph@databricks.com> | 2016-01-26 15:53:48 -0800 |
commit | eb917291ca1a2d68ca0639cb4b1464a546603eba (patch) | |
tree | 380dcaa33273baa68beaf089387bd498d5ee88e8 /python/pyspark/ml/evaluation.py | |
parent | 19fdb21afbf0eae4483cf6d4ef32daffd1994b89 (diff) | |
download | spark-eb917291ca1a2d68ca0639cb4b1464a546603eba.tar.gz spark-eb917291ca1a2d68ca0639cb4b1464a546603eba.tar.bz2 spark-eb917291ca1a2d68ca0639cb4b1464a546603eba.zip |
[SPARK-10509][PYSPARK] Reduce excessive param boilerplate code
The current python ml params require cut-and-pasting the param setup and description between the class & ```__init__``` methods. Remove this possible source of errors & simplify the use of custom params by adding a ```_copy_new_parent``` method to param so as to avoid cut-and-pasting (and cut-and-pasting at different indentation levels, urgh).
Author: Holden Karau <holden@us.ibm.com>
Closes #10216 from holdenk/SPARK-10509-excessive-param-boiler-plate-code.
Diffstat (limited to 'python/pyspark/ml/evaluation.py')
-rw-r--r-- | python/pyspark/ml/evaluation.py | 12 |
1 file changed, 0 insertions, 12 deletions
diff --git a/python/pyspark/ml/evaluation.py b/python/pyspark/ml/evaluation.py index 6ff68abd8f..c9b95b3bf4 100644 --- a/python/pyspark/ml/evaluation.py +++ b/python/pyspark/ml/evaluation.py @@ -124,7 +124,6 @@ class BinaryClassificationEvaluator(JavaEvaluator, HasLabelCol, HasRawPrediction .. versionadded:: 1.4.0 """ - # a placeholder to make it appear in the generated doc metricName = Param(Params._dummy(), "metricName", "metric name in evaluation (areaUnderROC|areaUnderPR)") @@ -138,9 +137,6 @@ class BinaryClassificationEvaluator(JavaEvaluator, HasLabelCol, HasRawPrediction super(BinaryClassificationEvaluator, self).__init__() self._java_obj = self._new_java_obj( "org.apache.spark.ml.evaluation.BinaryClassificationEvaluator", self.uid) - #: param for metric name in evaluation (areaUnderROC|areaUnderPR) - self.metricName = Param(self, "metricName", - "metric name in evaluation (areaUnderROC|areaUnderPR)") self._setDefault(rawPredictionCol="rawPrediction", labelCol="label", metricName="areaUnderROC") kwargs = self.__init__._input_kwargs @@ -210,9 +206,6 @@ class RegressionEvaluator(JavaEvaluator, HasLabelCol, HasPredictionCol): super(RegressionEvaluator, self).__init__() self._java_obj = self._new_java_obj( "org.apache.spark.ml.evaluation.RegressionEvaluator", self.uid) - #: param for metric name in evaluation (mse|rmse|r2|mae) - self.metricName = Param(self, "metricName", - "metric name in evaluation (mse|rmse|r2|mae)") self._setDefault(predictionCol="prediction", labelCol="label", metricName="rmse") kwargs = self.__init__._input_kwargs @@ -265,7 +258,6 @@ class MulticlassClassificationEvaluator(JavaEvaluator, HasLabelCol, HasPredictio .. 
versionadded:: 1.5.0 """ - # a placeholder to make it appear in the generated doc metricName = Param(Params._dummy(), "metricName", "metric name in evaluation " "(f1|precision|recall|weightedPrecision|weightedRecall)") @@ -280,10 +272,6 @@ class MulticlassClassificationEvaluator(JavaEvaluator, HasLabelCol, HasPredictio super(MulticlassClassificationEvaluator, self).__init__() self._java_obj = self._new_java_obj( "org.apache.spark.ml.evaluation.MulticlassClassificationEvaluator", self.uid) - # param for metric name in evaluation (f1|precision|recall|weightedPrecision|weightedRecall) - self.metricName = Param(self, "metricName", - "metric name in evaluation" - " (f1|precision|recall|weightedPrecision|weightedRecall)") self._setDefault(predictionCol="prediction", labelCol="label", metricName="f1") kwargs = self.__init__._input_kwargs |