author     Matei Zaharia <matei@databricks.com>  2014-01-10 00:12:43 -0800
committer  Matei Zaharia <matei@databricks.com>  2014-01-11 22:30:48 -0800
commit     4c28a2bad8a6d64ee69213eede440837636fe58b (patch)
tree       ec33a07ead7ec3bd120c94594a42e2d19b556c79 /python/pyspark/mllib/classification.py
parent     9a0dfdf868187fb9a2e1656e4cf5f29d952ce5db (diff)
download   spark-4c28a2bad8a6d64ee69213eede440837636fe58b.tar.gz
           spark-4c28a2bad8a6d64ee69213eede440837636fe58b.tar.bz2
           spark-4c28a2bad8a6d64ee69213eede440837636fe58b.zip
Update some Python MLlib parameters to use camelCase, and tweak docs
We've used camelCase in other Spark methods, so it felt reasonable to keep using it here and match the Scala/Java APIs as closely as possible. Note that parameter names matter in Python because optional parameters can be passed by name (as keyword arguments).
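To make the user-facing effect concrete, here is a minimal sketch (not part of the commit) of calling the renamed LogisticRegressionWithSGD.train with keyword arguments. The local SparkContext and the tiny [label, feature] rows are assumptions chosen only for illustration, following the docstring conventions of this era of PySpark.

from numpy import array
from pyspark import SparkContext
from pyspark.mllib.classification import LogisticRegressionWithSGD

# Hypothetical local context, used only for this sketch.
sc = SparkContext("local", "camelCaseSketch")

# Toy training set: rows of [label, feature] (assumed layout for this sketch).
data = sc.parallelize([array([0.0, 0.0]),
                       array([1.0, 1.0])])

# Optional parameters are passed by name, so callers see the rename directly:
# the keywords are now miniBatchFraction/initialWeights rather than
# mini_batch_fraction/initial_weights.
model = LogisticRegressionWithSGD.train(data, iterations=10, step=1.0,
                                        miniBatchFraction=1.0,
                                        initialWeights=None)
print(model.predict(array([1.0])))
sc.stop()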
Diffstat (limited to 'python/pyspark/mllib/classification.py')
-rw-r--r--  python/pyspark/mllib/classification.py  14
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/python/pyspark/mllib/classification.py b/python/pyspark/mllib/classification.py
index 03ff5a572e..19b90dfd6e 100644
--- a/python/pyspark/mllib/classification.py
+++ b/python/pyspark/mllib/classification.py
@@ -44,13 +44,13 @@ class LogisticRegressionModel(LinearModel):
class LogisticRegressionWithSGD(object):
    @classmethod
    def train(cls, data, iterations=100, step=1.0,
-              mini_batch_fraction=1.0, initial_weights=None):
+              miniBatchFraction=1.0, initialWeights=None):
        """Train a logistic regression model on the given data."""
        sc = data.context
        return _regression_train_wrapper(sc, lambda d, i:
                sc._jvm.PythonMLLibAPI().trainLogisticRegressionModelWithSGD(d._jrdd,
-                        iterations, step, mini_batch_fraction, i),
-                LogisticRegressionModel, data, initial_weights)
+                        iterations, step, miniBatchFraction, i),
+                LogisticRegressionModel, data, initialWeights)
class SVMModel(LinearModel):
    """A support vector machine.
@@ -67,14 +67,14 @@ class SVMModel(LinearModel):
class SVMWithSGD(object):
    @classmethod
-    def train(cls, data, iterations=100, step=1.0, reg_param=1.0,
-              mini_batch_fraction=1.0, initial_weights=None):
+    def train(cls, data, iterations=100, step=1.0, regParam=1.0,
+              miniBatchFraction=1.0, initialWeights=None):
        """Train a support vector machine on the given data."""
        sc = data.context
        return _regression_train_wrapper(sc, lambda d, i:
                sc._jvm.PythonMLLibAPI().trainSVMModelWithSGD(d._jrdd,
-                        iterations, step, reg_param, mini_batch_fraction, i),
-                SVMModel, data, initial_weights)
+                        iterations, step, regParam, miniBatchFraction, i),
+                SVMModel, data, initialWeights)
class NaiveBayesModel(object):
    """