author     Holden Karau <holden@us.ibm.com>           2016-01-26 15:53:48 -0800
committer  Joseph K. Bradley <joseph@databricks.com>  2016-01-26 15:53:48 -0800
commit     eb917291ca1a2d68ca0639cb4b1464a546603eba (patch)
tree       380dcaa33273baa68beaf089387bd498d5ee88e8 /python/pyspark/ml/param
parent     19fdb21afbf0eae4483cf6d4ef32daffd1994b89 (diff)
[SPARK-10509][PYSPARK] Reduce excessive param boiler plate code
The current Python ML params require cut-and-pasting the param setup and description between the class body and the ```__init__``` method. Remove this potential source of errors, and simplify the use of custom params, by adding a ```_copy_new_parent``` method to ```Param``` so the definitions no longer need to be cut and pasted (and pasted at different indentation levels, at that).

Author: Holden Karau <holden@us.ibm.com>

Closes #10216 from holdenk/SPARK-10509-excessive-param-boiler-plate-code.
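To make the boilerplate reduction concrete, here is the before/after shape of a shared-param mixin, taken from the `HasMaxIter` hunk in `shared.py` below (imports elided):

```python
# Before this patch: each Param is declared twice -- once at class level
# as a doc placeholder, and again in __init__ with the real parent.
class HasMaxIter(Params):
    # a placeholder to make it appear in the generated doc
    maxIter = Param(Params._dummy(), "maxIter", "max number of iterations (>= 0).", int)

    def __init__(self):
        super(HasMaxIter, self).__init__()
        #: param for max number of iterations (>= 0).
        self.maxIter = Param(self, "maxIter", "max number of iterations (>= 0).", int)


# After this patch: the class-level declaration is the only one.
# Params.__init__ now calls _copy_params(), which re-parents a copy of
# each dummy-parented Param onto the new instance.
class HasMaxIter(Params):
    maxIter = Param(Params._dummy(), "maxIter", "max number of iterations (>= 0).", int)

    def __init__(self):
        super(HasMaxIter, self).__init__()
```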
Diffstat (limited to 'python/pyspark/ml/param')
-rw-r--r--  python/pyspark/ml/param/__init__.py                | 22
-rw-r--r--  python/pyspark/ml/param/_shared_params_code_gen.py | 17
-rw-r--r--  python/pyspark/ml/param/shared.py                  | 81
3 files changed, 28 insertions(+), 92 deletions(-)
diff --git a/python/pyspark/ml/param/__init__.py b/python/pyspark/ml/param/__init__.py
index 92ce96aa3c..3da36d32c5 100644
--- a/python/pyspark/ml/param/__init__.py
+++ b/python/pyspark/ml/param/__init__.py
@@ -40,6 +40,15 @@ class Param(object):
self.doc = str(doc)
self.expectedType = expectedType
+ def _copy_new_parent(self, parent):
+ """Copy the current param to a new parent, must be a dummy param."""
+ if self.parent == "undefined":
+ param = copy.copy(self)
+ param.parent = parent.uid
+ return param
+ else:
+ raise ValueError("Cannot copy from non-dummy parent %s." % parent)
+
def __str__(self):
return str(self.parent) + "__" + self.name
@@ -77,6 +86,19 @@ class Params(Identifiable):
#: value returned by :py:func:`params`
self._params = None
+ # Copy the params from the class to the object
+ self._copy_params()
+
+ def _copy_params(self):
+ """
+ Copy all params defined on the class to current object.
+ """
+ cls = type(self)
+ src_name_attrs = [(x, getattr(cls, x)) for x in dir(cls)]
+ src_params = list(filter(lambda nameAttr: isinstance(nameAttr[1], Param), src_name_attrs))
+ for name, param in src_params:
+ setattr(self, name, param._copy_new_parent(self))
+
@property
@since("1.3.0")
def params(self):
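For illustration, a minimal self-contained sketch of the copy-on-construction flow the two hunks above introduce. This is simplified: the real ```Param``` and ```Params``` in ```pyspark.ml.param``` also carry docs, expected types, defaults, and a uid inherited from ```Identifiable```, and the dummy parent is created by ```Params._dummy()``` rather than passed as a string.

```python
import copy


class Param(object):
    """Simplified stand-in for pyspark.ml.param.Param."""

    def __init__(self, parent, name):
        self.parent = parent  # "undefined" marks a class-level dummy param
        self.name = name

    def _copy_new_parent(self, parent):
        """Copy this param to a new parent; only dummy params may be copied."""
        if self.parent == "undefined":
            param = copy.copy(self)
            param.parent = parent.uid
            return param
        raise ValueError("Cannot copy from non-dummy parent %s." % parent)


class Params(object):
    """Simplified stand-in for pyspark.ml.param.Params."""

    def __init__(self):
        self.uid = type(self).__name__ + "_0"  # stand-in for Identifiable's uid
        self._copy_params()

    def _copy_params(self):
        # Find every Param defined on the class and bind a re-parented
        # copy to this instance, exactly as the hunk above does.
        cls = type(self)
        for name in dir(cls):
            attr = getattr(cls, name)
            if isinstance(attr, Param):
                setattr(self, name, attr._copy_new_parent(self))


class HasThreshold(Params):
    # Declared once, at class level, with the dummy parent.
    threshold = Param("undefined", "threshold")


h = HasThreshold()
print(h.threshold.parent)             # HasThreshold_0 -- bound to the instance
print(HasThreshold.threshold.parent)  # undefined -- class placeholder untouched
```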
diff --git a/python/pyspark/ml/param/_shared_params_code_gen.py b/python/pyspark/ml/param/_shared_params_code_gen.py
index 82855bc4c7..5e297b8214 100644
--- a/python/pyspark/ml/param/_shared_params_code_gen.py
+++ b/python/pyspark/ml/param/_shared_params_code_gen.py
@@ -50,13 +50,11 @@ def _gen_param_header(name, doc, defaultValueStr, expectedType):
Mixin for param $name: $doc
"""
- # a placeholder to make it appear in the generated doc
$name = Param(Params._dummy(), "$name", "$doc", $expectedType)
def __init__(self):
- super(Has$Name, self).__init__()
- #: param for $doc
- self.$name = Param(self, "$name", "$doc", $expectedType)'''
+ super(Has$Name, self).__init__()'''
+
if defaultValueStr is not None:
template += '''
self._setDefault($name=$defaultValueStr)'''
@@ -171,22 +169,17 @@ if __name__ == "__main__":
Mixin for Decision Tree parameters.
"""
- # a placeholder to make it appear in the generated doc
$dummyPlaceHolders
def __init__(self):
- super(DecisionTreeParams, self).__init__()
- $realParams'''
+ super(DecisionTreeParams, self).__init__()'''
dtParamMethods = ""
dummyPlaceholders = ""
- realParams = ""
paramTemplate = """$name = Param($owner, "$name", "$doc")"""
for name, doc in decisionTreeParams:
variable = paramTemplate.replace("$name", name).replace("$doc", doc)
dummyPlaceholders += variable.replace("$owner", "Params._dummy()") + "\n "
- realParams += "#: param for " + doc + "\n "
- realParams += "self." + variable.replace("$owner", "self") + "\n "
dtParamMethods += _gen_param_code(name, doc, None) + "\n"
- code.append(decisionTreeCode.replace("$dummyPlaceHolders", dummyPlaceholders)
- .replace("$realParams", realParams) + dtParamMethods)
+ code.append(decisionTreeCode.replace("$dummyPlaceHolders", dummyPlaceholders) + "\n" +
+ dtParamMethods)
print("\n\n\n".join(code))
diff --git a/python/pyspark/ml/param/shared.py b/python/pyspark/ml/param/shared.py
index 23f9431484..db4a8a54d4 100644
--- a/python/pyspark/ml/param/shared.py
+++ b/python/pyspark/ml/param/shared.py
@@ -25,13 +25,10 @@ class HasMaxIter(Params):
Mixin for param maxIter: max number of iterations (>= 0).
"""
- # a placeholder to make it appear in the generated doc
maxIter = Param(Params._dummy(), "maxIter", "max number of iterations (>= 0).", int)
def __init__(self):
super(HasMaxIter, self).__init__()
- #: param for max number of iterations (>= 0).
- self.maxIter = Param(self, "maxIter", "max number of iterations (>= 0).", int)
def setMaxIter(self, value):
"""
@@ -52,13 +49,10 @@ class HasRegParam(Params):
Mixin for param regParam: regularization parameter (>= 0).
"""
- # a placeholder to make it appear in the generated doc
regParam = Param(Params._dummy(), "regParam", "regularization parameter (>= 0).", float)
def __init__(self):
super(HasRegParam, self).__init__()
- #: param for regularization parameter (>= 0).
- self.regParam = Param(self, "regParam", "regularization parameter (>= 0).", float)
def setRegParam(self, value):
"""
@@ -79,13 +73,10 @@ class HasFeaturesCol(Params):
Mixin for param featuresCol: features column name.
"""
- # a placeholder to make it appear in the generated doc
featuresCol = Param(Params._dummy(), "featuresCol", "features column name.", str)
def __init__(self):
super(HasFeaturesCol, self).__init__()
- #: param for features column name.
- self.featuresCol = Param(self, "featuresCol", "features column name.", str)
self._setDefault(featuresCol='features')
def setFeaturesCol(self, value):
@@ -107,13 +98,10 @@ class HasLabelCol(Params):
Mixin for param labelCol: label column name.
"""
- # a placeholder to make it appear in the generated doc
labelCol = Param(Params._dummy(), "labelCol", "label column name.", str)
def __init__(self):
super(HasLabelCol, self).__init__()
- #: param for label column name.
- self.labelCol = Param(self, "labelCol", "label column name.", str)
self._setDefault(labelCol='label')
def setLabelCol(self, value):
@@ -135,13 +123,10 @@ class HasPredictionCol(Params):
Mixin for param predictionCol: prediction column name.
"""
- # a placeholder to make it appear in the generated doc
predictionCol = Param(Params._dummy(), "predictionCol", "prediction column name.", str)
def __init__(self):
super(HasPredictionCol, self).__init__()
- #: param for prediction column name.
- self.predictionCol = Param(self, "predictionCol", "prediction column name.", str)
self._setDefault(predictionCol='prediction')
def setPredictionCol(self, value):
@@ -163,13 +148,10 @@ class HasProbabilityCol(Params):
Mixin for param probabilityCol: Column name for predicted class conditional probabilities. Note: Not all models output well-calibrated probability estimates! These probabilities should be treated as confidences, not precise probabilities.
"""
- # a placeholder to make it appear in the generated doc
probabilityCol = Param(Params._dummy(), "probabilityCol", "Column name for predicted class conditional probabilities. Note: Not all models output well-calibrated probability estimates! These probabilities should be treated as confidences, not precise probabilities.", str)
def __init__(self):
super(HasProbabilityCol, self).__init__()
- #: param for Column name for predicted class conditional probabilities. Note: Not all models output well-calibrated probability estimates! These probabilities should be treated as confidences, not precise probabilities.
- self.probabilityCol = Param(self, "probabilityCol", "Column name for predicted class conditional probabilities. Note: Not all models output well-calibrated probability estimates! These probabilities should be treated as confidences, not precise probabilities.", str)
self._setDefault(probabilityCol='probability')
def setProbabilityCol(self, value):
@@ -191,13 +173,10 @@ class HasRawPredictionCol(Params):
Mixin for param rawPredictionCol: raw prediction (a.k.a. confidence) column name.
"""
- # a placeholder to make it appear in the generated doc
rawPredictionCol = Param(Params._dummy(), "rawPredictionCol", "raw prediction (a.k.a. confidence) column name.", str)
def __init__(self):
super(HasRawPredictionCol, self).__init__()
- #: param for raw prediction (a.k.a. confidence) column name.
- self.rawPredictionCol = Param(self, "rawPredictionCol", "raw prediction (a.k.a. confidence) column name.", str)
self._setDefault(rawPredictionCol='rawPrediction')
def setRawPredictionCol(self, value):
@@ -219,13 +198,10 @@ class HasInputCol(Params):
Mixin for param inputCol: input column name.
"""
- # a placeholder to make it appear in the generated doc
inputCol = Param(Params._dummy(), "inputCol", "input column name.", str)
def __init__(self):
super(HasInputCol, self).__init__()
- #: param for input column name.
- self.inputCol = Param(self, "inputCol", "input column name.", str)
def setInputCol(self, value):
"""
@@ -246,13 +222,10 @@ class HasInputCols(Params):
Mixin for param inputCols: input column names.
"""
- # a placeholder to make it appear in the generated doc
inputCols = Param(Params._dummy(), "inputCols", "input column names.", None)
def __init__(self):
super(HasInputCols, self).__init__()
- #: param for input column names.
- self.inputCols = Param(self, "inputCols", "input column names.", None)
def setInputCols(self, value):
"""
@@ -273,13 +246,10 @@ class HasOutputCol(Params):
Mixin for param outputCol: output column name.
"""
- # a placeholder to make it appear in the generated doc
outputCol = Param(Params._dummy(), "outputCol", "output column name.", str)
def __init__(self):
super(HasOutputCol, self).__init__()
- #: param for output column name.
- self.outputCol = Param(self, "outputCol", "output column name.", str)
self._setDefault(outputCol=self.uid + '__output')
def setOutputCol(self, value):
@@ -301,13 +271,10 @@ class HasNumFeatures(Params):
Mixin for param numFeatures: number of features.
"""
- # a placeholder to make it appear in the generated doc
numFeatures = Param(Params._dummy(), "numFeatures", "number of features.", int)
def __init__(self):
super(HasNumFeatures, self).__init__()
- #: param for number of features.
- self.numFeatures = Param(self, "numFeatures", "number of features.", int)
def setNumFeatures(self, value):
"""
@@ -328,13 +295,10 @@ class HasCheckpointInterval(Params):
Mixin for param checkpointInterval: set checkpoint interval (>= 1) or disable checkpoint (-1). E.g. 10 means that the cache will get checkpointed every 10 iterations.
"""
- # a placeholder to make it appear in the generated doc
checkpointInterval = Param(Params._dummy(), "checkpointInterval", "set checkpoint interval (>= 1) or disable checkpoint (-1). E.g. 10 means that the cache will get checkpointed every 10 iterations.", int)
def __init__(self):
super(HasCheckpointInterval, self).__init__()
- #: param for set checkpoint interval (>= 1) or disable checkpoint (-1). E.g. 10 means that the cache will get checkpointed every 10 iterations.
- self.checkpointInterval = Param(self, "checkpointInterval", "set checkpoint interval (>= 1) or disable checkpoint (-1). E.g. 10 means that the cache will get checkpointed every 10 iterations.", int)
def setCheckpointInterval(self, value):
"""
@@ -355,13 +319,10 @@ class HasSeed(Params):
Mixin for param seed: random seed.
"""
- # a placeholder to make it appear in the generated doc
seed = Param(Params._dummy(), "seed", "random seed.", int)
def __init__(self):
super(HasSeed, self).__init__()
- #: param for random seed.
- self.seed = Param(self, "seed", "random seed.", int)
self._setDefault(seed=hash(type(self).__name__))
def setSeed(self, value):
@@ -383,13 +344,10 @@ class HasTol(Params):
Mixin for param tol: the convergence tolerance for iterative algorithms.
"""
- # a placeholder to make it appear in the generated doc
tol = Param(Params._dummy(), "tol", "the convergence tolerance for iterative algorithms.", float)
def __init__(self):
super(HasTol, self).__init__()
- #: param for the convergence tolerance for iterative algorithms.
- self.tol = Param(self, "tol", "the convergence tolerance for iterative algorithms.", float)
def setTol(self, value):
"""
@@ -410,13 +368,10 @@ class HasStepSize(Params):
Mixin for param stepSize: Step size to be used for each iteration of optimization.
"""
- # a placeholder to make it appear in the generated doc
stepSize = Param(Params._dummy(), "stepSize", "Step size to be used for each iteration of optimization.", float)
def __init__(self):
super(HasStepSize, self).__init__()
- #: param for Step size to be used for each iteration of optimization.
- self.stepSize = Param(self, "stepSize", "Step size to be used for each iteration of optimization.", float)
def setStepSize(self, value):
"""
@@ -437,13 +392,10 @@ class HasHandleInvalid(Params):
Mixin for param handleInvalid: how to handle invalid entries. Options are skip (which will filter out rows with bad values), or error (which will throw an errror). More options may be added later.
"""
- # a placeholder to make it appear in the generated doc
handleInvalid = Param(Params._dummy(), "handleInvalid", "how to handle invalid entries. Options are skip (which will filter out rows with bad values), or error (which will throw an errror). More options may be added later.", str)
def __init__(self):
super(HasHandleInvalid, self).__init__()
- #: param for how to handle invalid entries. Options are skip (which will filter out rows with bad values), or error (which will throw an errror). More options may be added later.
- self.handleInvalid = Param(self, "handleInvalid", "how to handle invalid entries. Options are skip (which will filter out rows with bad values), or error (which will throw an errror). More options may be added later.", str)
def setHandleInvalid(self, value):
"""
@@ -464,13 +416,10 @@ class HasElasticNetParam(Params):
Mixin for param elasticNetParam: the ElasticNet mixing parameter, in range [0, 1]. For alpha = 0, the penalty is an L2 penalty. For alpha = 1, it is an L1 penalty.
"""
- # a placeholder to make it appear in the generated doc
elasticNetParam = Param(Params._dummy(), "elasticNetParam", "the ElasticNet mixing parameter, in range [0, 1]. For alpha = 0, the penalty is an L2 penalty. For alpha = 1, it is an L1 penalty.", float)
def __init__(self):
super(HasElasticNetParam, self).__init__()
- #: param for the ElasticNet mixing parameter, in range [0, 1]. For alpha = 0, the penalty is an L2 penalty. For alpha = 1, it is an L1 penalty.
- self.elasticNetParam = Param(self, "elasticNetParam", "the ElasticNet mixing parameter, in range [0, 1]. For alpha = 0, the penalty is an L2 penalty. For alpha = 1, it is an L1 penalty.", float)
self._setDefault(elasticNetParam=0.0)
def setElasticNetParam(self, value):
@@ -492,13 +441,10 @@ class HasFitIntercept(Params):
Mixin for param fitIntercept: whether to fit an intercept term.
"""
- # a placeholder to make it appear in the generated doc
fitIntercept = Param(Params._dummy(), "fitIntercept", "whether to fit an intercept term.", bool)
def __init__(self):
super(HasFitIntercept, self).__init__()
- #: param for whether to fit an intercept term.
- self.fitIntercept = Param(self, "fitIntercept", "whether to fit an intercept term.", bool)
self._setDefault(fitIntercept=True)
def setFitIntercept(self, value):
@@ -520,13 +466,10 @@ class HasStandardization(Params):
Mixin for param standardization: whether to standardize the training features before fitting the model.
"""
- # a placeholder to make it appear in the generated doc
standardization = Param(Params._dummy(), "standardization", "whether to standardize the training features before fitting the model.", bool)
def __init__(self):
super(HasStandardization, self).__init__()
- #: param for whether to standardize the training features before fitting the model.
- self.standardization = Param(self, "standardization", "whether to standardize the training features before fitting the model.", bool)
self._setDefault(standardization=True)
def setStandardization(self, value):
@@ -548,13 +491,10 @@ class HasThresholds(Params):
Mixin for param thresholds: Thresholds in multi-class classification to adjust the probability of predicting each class. Array must have length equal to the number of classes, with values >= 0. The class with largest value p/t is predicted, where p is the original probability of that class and t is the class' threshold.
"""
- # a placeholder to make it appear in the generated doc
thresholds = Param(Params._dummy(), "thresholds", "Thresholds in multi-class classification to adjust the probability of predicting each class. Array must have length equal to the number of classes, with values >= 0. The class with largest value p/t is predicted, where p is the original probability of that class and t is the class' threshold.", None)
def __init__(self):
super(HasThresholds, self).__init__()
- #: param for Thresholds in multi-class classification to adjust the probability of predicting each class. Array must have length equal to the number of classes, with values >= 0. The class with largest value p/t is predicted, where p is the original probability of that class and t is the class' threshold.
- self.thresholds = Param(self, "thresholds", "Thresholds in multi-class classification to adjust the probability of predicting each class. Array must have length equal to the number of classes, with values >= 0. The class with largest value p/t is predicted, where p is the original probability of that class and t is the class' threshold.", None)
def setThresholds(self, value):
"""
@@ -575,13 +515,10 @@ class HasWeightCol(Params):
Mixin for param weightCol: weight column name. If this is not set or empty, we treat all instance weights as 1.0.
"""
- # a placeholder to make it appear in the generated doc
weightCol = Param(Params._dummy(), "weightCol", "weight column name. If this is not set or empty, we treat all instance weights as 1.0.", str)
def __init__(self):
super(HasWeightCol, self).__init__()
- #: param for weight column name. If this is not set or empty, we treat all instance weights as 1.0.
- self.weightCol = Param(self, "weightCol", "weight column name. If this is not set or empty, we treat all instance weights as 1.0.", str)
def setWeightCol(self, value):
"""
@@ -602,13 +539,10 @@ class HasSolver(Params):
Mixin for param solver: the solver algorithm for optimization. If this is not set or empty, default value is 'auto'.
"""
- # a placeholder to make it appear in the generated doc
solver = Param(Params._dummy(), "solver", "the solver algorithm for optimization. If this is not set or empty, default value is 'auto'.", str)
def __init__(self):
super(HasSolver, self).__init__()
- #: param for the solver algorithm for optimization. If this is not set or empty, default value is 'auto'.
- self.solver = Param(self, "solver", "the solver algorithm for optimization. If this is not set or empty, default value is 'auto'.", str)
self._setDefault(solver='auto')
def setSolver(self, value):
@@ -630,7 +564,6 @@ class DecisionTreeParams(Params):
Mixin for Decision Tree parameters.
"""
- # a placeholder to make it appear in the generated doc
maxDepth = Param(Params._dummy(), "maxDepth", "Maximum depth of the tree. (>= 0) E.g., depth 0 means 1 leaf node; depth 1 means 1 internal node + 2 leaf nodes.")
maxBins = Param(Params._dummy(), "maxBins", "Max number of bins for discretizing continuous features. Must be >=2 and >= number of categories for any categorical feature.")
minInstancesPerNode = Param(Params._dummy(), "minInstancesPerNode", "Minimum number of instances each child must have after split. If a split causes the left or right child to have fewer than minInstancesPerNode, the split will be discarded as invalid. Should be >= 1.")
@@ -641,19 +574,7 @@ class DecisionTreeParams(Params):
def __init__(self):
super(DecisionTreeParams, self).__init__()
- #: param for Maximum depth of the tree. (>= 0) E.g., depth 0 means 1 leaf node; depth 1 means 1 internal node + 2 leaf nodes.
- self.maxDepth = Param(self, "maxDepth", "Maximum depth of the tree. (>= 0) E.g., depth 0 means 1 leaf node; depth 1 means 1 internal node + 2 leaf nodes.")
- #: param for Max number of bins for discretizing continuous features. Must be >=2 and >= number of categories for any categorical feature.
- self.maxBins = Param(self, "maxBins", "Max number of bins for discretizing continuous features. Must be >=2 and >= number of categories for any categorical feature.")
- #: param for Minimum number of instances each child must have after split. If a split causes the left or right child to have fewer than minInstancesPerNode, the split will be discarded as invalid. Should be >= 1.
- self.minInstancesPerNode = Param(self, "minInstancesPerNode", "Minimum number of instances each child must have after split. If a split causes the left or right child to have fewer than minInstancesPerNode, the split will be discarded as invalid. Should be >= 1.")
- #: param for Minimum information gain for a split to be considered at a tree node.
- self.minInfoGain = Param(self, "minInfoGain", "Minimum information gain for a split to be considered at a tree node.")
- #: param for Maximum memory in MB allocated to histogram aggregation.
- self.maxMemoryInMB = Param(self, "maxMemoryInMB", "Maximum memory in MB allocated to histogram aggregation.")
- #: param for If false, the algorithm will pass trees to executors to match instances with nodes. If true, the algorithm will cache node IDs for each instance. Caching can speed up training of deeper trees. Users can set how often should the cache be checkpointed or disable it by setting checkpointInterval.
- self.cacheNodeIds = Param(self, "cacheNodeIds", "If false, the algorithm will pass trees to executors to match instances with nodes. If true, the algorithm will cache node IDs for each instance. Caching can speed up training of deeper trees. Users can set how often should the cache be checkpointed or disable it by setting checkpointInterval.")
-
+
def setMaxDepth(self, value):
"""
Sets the value of :py:attr:`maxDepth`.