author:    Glenn Weidner <gweidner@us.ibm.com>    2015-05-10 19:18:32 -0700
committer: Joseph K. Bradley <joseph@databricks.com>    2015-05-10 19:18:32 -0700
commit:    c5aca0c27be31e94ffdb01ef2eb29d3b373d7f4c (patch)
tree:      c33ecea0860e976ca6f78de6b7d91e5dee33dafd /python
parent:    8c07c75c9831d6c34f69fe840edb6470d4dfdfef (diff)
[SPARK-7427] [PYSPARK] Make sharedParams match in Scala, Python
Modified 2 files:
  python/pyspark/ml/param/_shared_params_code_gen.py
  python/pyspark/ml/param/shared.py

Generated shared.py on Linux using Python 2.6.6 on Red Hat Enterprise Linux Server 6.6:

    python _shared_params_code_gen.py > shared.py

Only changed maxIter, regParam, and rawPredictionCol, based on the strings from SharedParamsCodeGen.scala.

Note: a warning was displayed when committing shared.py: "warning: LF will be replaced by CRLF in python/pyspark/ml/param/shared.py".

Author: Glenn Weidner <gweidner@us.ibm.com>

Closes #6023 from gweidner/br-7427 and squashes the following commits:

db72e32 [Glenn Weidner] [SPARK-7427] [PySpark] Make sharedParams match in Scala, Python
825e4a9 [Glenn Weidner] [SPARK-7427] [PySpark] Make sharedParams match in Scala, Python
e6a865e [Glenn Weidner] [SPARK-7427] [PySpark] Make sharedParams match in Scala, Python
1eee702 [Glenn Weidner] Merge remote-tracking branch 'upstream/master'
1ac10e5 [Glenn Weidner] Merge remote-tracking branch 'upstream/master'
cafd104 [Glenn Weidner] Merge remote-tracking branch 'upstream/master'
9bea1eb [Glenn Weidner] Merge remote-tracking branch 'upstream/master'
4a35c20 [Glenn Weidner] Merge remote-tracking branch 'upstream/master'
9790cbe [Glenn Weidner] Merge remote-tracking branch 'upstream/master'
d9c30f4 [Glenn Weidner] [SPARK-7275] [SQL] [WIP] Make LogicalRelation public
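For reference, the regeneration step above can also be scripted; this is a minimal sketch mirroring the one-line command from the commit message (it assumes the script is run from python/pyspark/ml/param/ and that the generator writes the module text to stdout):

    import subprocess
    import sys

    # Equivalent of: python _shared_params_code_gen.py > shared.py
    with open("shared.py", "w") as out:
        subprocess.check_call([sys.executable, "_shared_params_code_gen.py"], stdout=out)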
Diffstat (limited to 'python')

 -rw-r--r--  python/pyspark/ml/param/_shared_params_code_gen.py |  6
 -rw-r--r--  python/pyspark/ml/param/shared.py                  | 30
 -rw-r--r--  python/pyspark/ml/tests.py                         |  4

 3 files changed, 19 insertions(+), 21 deletions(-)
diff --git a/python/pyspark/ml/param/_shared_params_code_gen.py b/python/pyspark/ml/param/_shared_params_code_gen.py
index ed3171b697..3be0979b92 100644
--- a/python/pyspark/ml/param/_shared_params_code_gen.py
+++ b/python/pyspark/ml/param/_shared_params_code_gen.py
@@ -88,12 +88,12 @@ if __name__ == "__main__":
     print("\n# DO NOT MODIFY THIS FILE! It was generated by _shared_params_code_gen.py.\n")
     print("from pyspark.ml.param import Param, Params\n\n")
     shared = [
-        ("maxIter", "max number of iterations", None),
-        ("regParam", "regularization constant", None),
+        ("maxIter", "max number of iterations (>= 0)", None),
+        ("regParam", "regularization parameter (>= 0)", None),
         ("featuresCol", "features column name", "'features'"),
         ("labelCol", "label column name", "'label'"),
         ("predictionCol", "prediction column name", "'prediction'"),
-        ("rawPredictionCol", "raw prediction column name", "'rawPrediction'"),
+        ("rawPredictionCol", "raw prediction (a.k.a. confidence) column name", "'rawPrediction'"),
         ("inputCol", "input column name", None),
         ("inputCols", "input column names", None),
         ("outputCol", "output column name", None),
diff --git a/python/pyspark/ml/param/shared.py b/python/pyspark/ml/param/shared.py
index d0bcadee22..4b22322b89 100644
--- a/python/pyspark/ml/param/shared.py
+++ b/python/pyspark/ml/param/shared.py
@@ -22,16 +22,16 @@ from pyspark.ml.param import Param, Params
 class HasMaxIter(Params):
     """
-    Mixin for param maxIter: max number of iterations.
+    Mixin for param maxIter: max number of iterations (>= 0).
     """
 
     # a placeholder to make it appear in the generated doc
-    maxIter = Param(Params._dummy(), "maxIter", "max number of iterations")
+    maxIter = Param(Params._dummy(), "maxIter", "max number of iterations (>= 0)")
 
     def __init__(self):
         super(HasMaxIter, self).__init__()
-        #: param for max number of iterations
-        self.maxIter = Param(self, "maxIter", "max number of iterations")
+        #: param for max number of iterations (>= 0)
+        self.maxIter = Param(self, "maxIter", "max number of iterations (>= 0)")
         if None is not None:
             self._setDefault(maxIter=None)
@@ -51,16 +51,16 @@ class HasMaxIter(Params):
 class HasRegParam(Params):
     """
-    Mixin for param regParam: regularization constant.
+    Mixin for param regParam: regularization parameter (>= 0).
     """
 
     # a placeholder to make it appear in the generated doc
-    regParam = Param(Params._dummy(), "regParam", "regularization constant")
+    regParam = Param(Params._dummy(), "regParam", "regularization parameter (>= 0)")
 
     def __init__(self):
         super(HasRegParam, self).__init__()
-        #: param for regularization constant
-        self.regParam = Param(self, "regParam", "regularization constant")
+        #: param for regularization parameter (>= 0)
+        self.regParam = Param(self, "regParam", "regularization parameter (>= 0)")
         if None is not None:
             self._setDefault(regParam=None)
@@ -167,16 +167,16 @@ class HasPredictionCol(Params):
 class HasRawPredictionCol(Params):
     """
-    Mixin for param rawPredictionCol: raw prediction column name.
+    Mixin for param rawPredictionCol: raw prediction (a.k.a. confidence) column name.
     """
 
     # a placeholder to make it appear in the generated doc
-    rawPredictionCol = Param(Params._dummy(), "rawPredictionCol", "raw prediction column name")
+    rawPredictionCol = Param(Params._dummy(), "rawPredictionCol", "raw prediction (a.k.a. confidence) column name")
 
     def __init__(self):
         super(HasRawPredictionCol, self).__init__()
-        #: param for raw prediction column name
-        self.rawPredictionCol = Param(self, "rawPredictionCol", "raw prediction column name")
+        #: param for raw prediction (a.k.a. confidence) column name
+        self.rawPredictionCol = Param(self, "rawPredictionCol", "raw prediction (a.k.a. confidence) column name")
         if 'rawPrediction' is not None:
             self._setDefault(rawPredictionCol='rawPrediction')
@@ -403,14 +403,12 @@ class HasStepSize(Params):
     """
 
     # a placeholder to make it appear in the generated doc
-    stepSize = Param(Params._dummy(), "stepSize",
-                     "Step size to be used for each iteration of optimization.")
+    stepSize = Param(Params._dummy(), "stepSize", "Step size to be used for each iteration of optimization.")
 
     def __init__(self):
         super(HasStepSize, self).__init__()
         #: param for Step size to be used for each iteration of optimization.
-        self.stepSize = Param(self, "stepSize",
-                              "Step size to be used for each iteration of optimization.")
+        self.stepSize = Param(self, "stepSize", "Step size to be used for each iteration of optimization.")
         if None is not None:
             self._setDefault(stepSize=None)
diff --git a/python/pyspark/ml/tests.py b/python/pyspark/ml/tests.py
index 75bb5d749c..ba6478dcd5 100644
--- a/python/pyspark/ml/tests.py
+++ b/python/pyspark/ml/tests.py
@@ -128,7 +128,7 @@ class ParamTests(PySparkTestCase):
         testParams = TestParams()
         maxIter = testParams.maxIter
         self.assertEqual(maxIter.name, "maxIter")
-        self.assertEqual(maxIter.doc, "max number of iterations")
+        self.assertEqual(maxIter.doc, "max number of iterations (>= 0)")
         self.assertTrue(maxIter.parent is testParams)
 
     def test_params(self):
@@ -156,7 +156,7 @@ class ParamTests(PySparkTestCase):
         self.assertEquals(
             testParams.explainParams(),
             "\n".join(["inputCol: input column name (undefined)",
-                       "maxIter: max number of iterations (default: 10, current: 100)"]))
+                       "maxIter: max number of iterations (>= 0) (default: 10, current: 100)"]))
 
 
 if __name__ == "__main__":
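As a usage sketch of what this assertion exercises: the TestParams class below is a hypothetical stand-in for the fixture defined earlier in pyspark/ml/tests.py, assumed to mix in HasMaxIter and HasInputCol with a default maxIter of 10.

    from pyspark.ml.param.shared import HasInputCol, HasMaxIter

    class TestParams(HasMaxIter, HasInputCol):
        # Hypothetical stand-in for the fixture in pyspark/ml/tests.py.
        def __init__(self):
            super(TestParams, self).__init__()
            self._setDefault(maxIter=10)

    tp = TestParams().setMaxIter(100)
    print(tp.explainParams())
    # Expected output, per the updated assertion above:
    # inputCol: input column name (undefined)
    # maxIter: max number of iterations (>= 0) (default: 10, current: 100)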