diff options
author | Matei Zaharia <matei@databricks.com> | 2014-01-10 00:12:43 -0800 |
---|---|---|
committer | Matei Zaharia <matei@databricks.com> | 2014-01-11 22:30:48 -0800 |
commit | 4c28a2bad8a6d64ee69213eede440837636fe58b (patch) | |
tree | ec33a07ead7ec3bd120c94594a42e2d19b556c79 /python/pyspark | |
parent | 9a0dfdf868187fb9a2e1656e4cf5f29d952ce5db (diff) | |
download | spark-4c28a2bad8a6d64ee69213eede440837636fe58b.tar.gz spark-4c28a2bad8a6d64ee69213eede440837636fe58b.tar.bz2 spark-4c28a2bad8a6d64ee69213eede440837636fe58b.zip |
Update some Python MLlib parameters to use camelCase, and tweak docs
We've used camel case in other Spark methods so it felt reasonable to
keep using it here and make the code match Scala/Java as much as
possible. Note that parameter names matter in Python because the language
allows passing optional parameters by name.
Diffstat (limited to 'python/pyspark')
-rw-r--r-- | python/pyspark/mllib/classification.py | 14 | ||||
-rw-r--r-- | python/pyspark/mllib/regression.py | 28 |
2 files changed, 21 insertions, 21 deletions
diff --git a/python/pyspark/mllib/classification.py b/python/pyspark/mllib/classification.py index 03ff5a572e..19b90dfd6e 100644 --- a/python/pyspark/mllib/classification.py +++ b/python/pyspark/mllib/classification.py @@ -44,13 +44,13 @@ class LogisticRegressionModel(LinearModel): class LogisticRegressionWithSGD(object): @classmethod def train(cls, data, iterations=100, step=1.0, - mini_batch_fraction=1.0, initial_weights=None): + miniBatchFraction=1.0, initialWeights=None): """Train a logistic regression model on the given data.""" sc = data.context return _regression_train_wrapper(sc, lambda d, i: sc._jvm.PythonMLLibAPI().trainLogisticRegressionModelWithSGD(d._jrdd, - iterations, step, mini_batch_fraction, i), - LogisticRegressionModel, data, initial_weights) + iterations, step, miniBatchFraction, i), + LogisticRegressionModel, data, initialWeights) class SVMModel(LinearModel): """A support vector machine. @@ -67,14 +67,14 @@ class SVMModel(LinearModel): class SVMWithSGD(object): @classmethod - def train(cls, data, iterations=100, step=1.0, reg_param=1.0, - mini_batch_fraction=1.0, initial_weights=None): + def train(cls, data, iterations=100, step=1.0, regParam=1.0, + miniBatchFraction=1.0, initialWeights=None): """Train a support vector machine on the given data.""" sc = data.context return _regression_train_wrapper(sc, lambda d, i: sc._jvm.PythonMLLibAPI().trainSVMModelWithSGD(d._jrdd, - iterations, step, reg_param, mini_batch_fraction, i), - SVMModel, data, initial_weights) + iterations, step, regParam, miniBatchFraction, i), + SVMModel, data, initialWeights) class NaiveBayesModel(object): """ diff --git a/python/pyspark/mllib/regression.py b/python/pyspark/mllib/regression.py index e90b72893f..7656db07f6 100644 --- a/python/pyspark/mllib/regression.py +++ b/python/pyspark/mllib/regression.py @@ -47,57 +47,57 @@ class LinearRegressionModel(LinearRegressionModelBase): """A linear regression model derived from a least-squares fit. 
>>> data = array([0.0, 0.0, 1.0, 1.0, 3.0, 2.0, 2.0, 3.0]).reshape(4,2) - >>> lrm = LinearRegressionWithSGD.train(sc.parallelize(data), initial_weights=array([1.0])) + >>> lrm = LinearRegressionWithSGD.train(sc.parallelize(data), initialWeights=array([1.0])) """ class LinearRegressionWithSGD(object): @classmethod def train(cls, data, iterations=100, step=1.0, - mini_batch_fraction=1.0, initial_weights=None): + miniBatchFraction=1.0, initialWeights=None): """Train a linear regression model on the given data.""" sc = data.context return _regression_train_wrapper(sc, lambda d, i: sc._jvm.PythonMLLibAPI().trainLinearRegressionModelWithSGD( - d._jrdd, iterations, step, mini_batch_fraction, i), - LinearRegressionModel, data, initial_weights) + d._jrdd, iterations, step, miniBatchFraction, i), + LinearRegressionModel, data, initialWeights) class LassoModel(LinearRegressionModelBase): """A linear regression model derived from a least-squares fit with an l_1 penalty term. >>> data = array([0.0, 0.0, 1.0, 1.0, 3.0, 2.0, 2.0, 3.0]).reshape(4,2) - >>> lrm = LassoWithSGD.train(sc.parallelize(data), initial_weights=array([1.0])) + >>> lrm = LassoWithSGD.train(sc.parallelize(data), initialWeights=array([1.0])) """ class LassoWithSGD(object): @classmethod - def train(cls, data, iterations=100, step=1.0, reg_param=1.0, - mini_batch_fraction=1.0, initial_weights=None): + def train(cls, data, iterations=100, step=1.0, regParam=1.0, + miniBatchFraction=1.0, initialWeights=None): """Train a Lasso regression model on the given data.""" sc = data.context return _regression_train_wrapper(sc, lambda d, i: sc._jvm.PythonMLLibAPI().trainLassoModelWithSGD(d._jrdd, - iterations, step, reg_param, mini_batch_fraction, i), - LassoModel, data, initial_weights) + iterations, step, regParam, miniBatchFraction, i), + LassoModel, data, initialWeights) class RidgeRegressionModel(LinearRegressionModelBase): """A linear regression model derived from a least-squares fit with an l_2 penalty term. 
>>> data = array([0.0, 0.0, 1.0, 1.0, 3.0, 2.0, 2.0, 3.0]).reshape(4,2) - >>> lrm = RidgeRegressionWithSGD.train(sc.parallelize(data), initial_weights=array([1.0])) + >>> lrm = RidgeRegressionWithSGD.train(sc.parallelize(data), initialWeights=array([1.0])) """ class RidgeRegressionWithSGD(object): @classmethod - def train(cls, data, iterations=100, step=1.0, reg_param=1.0, - mini_batch_fraction=1.0, initial_weights=None): + def train(cls, data, iterations=100, step=1.0, regParam=1.0, + miniBatchFraction=1.0, initialWeights=None): """Train a ridge regression model on the given data.""" sc = data.context return _regression_train_wrapper(sc, lambda d, i: sc._jvm.PythonMLLibAPI().trainRidgeModelWithSGD(d._jrdd, - iterations, step, reg_param, mini_batch_fraction, i), - RidgeRegressionModel, data, initial_weights) + iterations, step, regParam, miniBatchFraction, i), + RidgeRegressionModel, data, initialWeights) def _test(): import doctest |