path: root/python/pyspark/ml/param
author    Burak Yavuz <brkyvz@gmail.com>    2015-05-07 10:25:41 -0700
committer Xiangrui Meng <meng@databricks.com>    2015-05-07 10:25:41 -0700
commit    9e2ffb13287e6efe256b8d23a4654e4cc305e20b (patch)
tree      79a13615578199c2907d371c965ef031307c47b9 /python/pyspark/ml/param
parent    ed9be06a4797bbb678355b361054c8872ac20b75 (diff)
download  spark-9e2ffb13287e6efe256b8d23a4654e4cc305e20b.tar.gz
spark-9e2ffb13287e6efe256b8d23a4654e4cc305e20b.tar.bz2
spark-9e2ffb13287e6efe256b8d23a4654e4cc305e20b.zip
[SPARK-7388] [SPARK-7383] wrapper for VectorAssembler in Python
The wrapper required the implementation of `ArrayParam`, because `Array[T]` is hard to obtain from Python. `ArrayParam` has an extra function called `wCast`, which is an internal function to obtain `Array[T]` from `Seq[T]`.

Author: Burak Yavuz <brkyvz@gmail.com>
Author: Xiangrui Meng <meng@databricks.com>

Closes #5930 from brkyvz/ml-feat and squashes the following commits:

73e745f [Burak Yavuz] Merge pull request #3 from mengxr/SPARK-7388
c221db9 [Xiangrui Meng] overload StringArrayParam.w
c81072d [Burak Yavuz] addressed comments
99c2ebf [Burak Yavuz] add to python_shared_params
39ecb07 [Burak Yavuz] fix scalastyle
7f7ea2a [Burak Yavuz] [SPARK-7388][SPARK-7383] wrapper for VectorAssembler in Python
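For reference, a minimal usage sketch of the Python VectorAssembler enabled by this wrapper. The SparkSession/DataFrame setup and the column names "hour", "mobile", and "userFeatures" are assumptions for illustration, not part of this commit:

from pyspark.ml.feature import VectorAssembler

# Assemble several input columns into a single vector column "features".
# `df` is assumed to be an existing DataFrame that contains the listed columns.
assembler = VectorAssembler(inputCols=["hour", "mobile", "userFeatures"],
                            outputCol="features")
output = assembler.transform(df)
output.select("features").show()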
Diffstat (limited to 'python/pyspark/ml/param')
-rw-r--r--    python/pyspark/ml/param/_shared_params_code_gen.py    1
-rw-r--r--    python/pyspark/ml/param/shared.py    29
2 files changed, 30 insertions(+), 0 deletions(-)
diff --git a/python/pyspark/ml/param/_shared_params_code_gen.py b/python/pyspark/ml/param/_shared_params_code_gen.py
index c71c823db2..c1c8e921dd 100644
--- a/python/pyspark/ml/param/_shared_params_code_gen.py
+++ b/python/pyspark/ml/param/_shared_params_code_gen.py
@@ -95,6 +95,7 @@ if __name__ == "__main__":
("predictionCol", "prediction column name", "'prediction'"),
("rawPredictionCol", "raw prediction column name", "'rawPrediction'"),
("inputCol", "input column name", None),
+ ("inputCols", "input column names", None),
("outputCol", "output column name", None),
("numFeatures", "number of features", None)]
code = []
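The new ("inputCols", "input column names", None) entry above is expanded by this generator into the HasInputCols mixin shown in the next hunk. Below is a rough sketch of how such a (name, doc, default) triple could be turned into a mixin with a string template; it is an illustrative approximation, not the actual template in _shared_params_code_gen.py:

PARAM_TEMPLATE = '''class Has{Name}(Params):
    """
    Mixin for param {name}: {doc}.
    """

    # a placeholder to make it appear in the generated doc
    {name} = Param(Params._dummy(), "{name}", "{doc}")
'''

def gen_shared_param(name, doc):
    # Capitalize only the first letter, e.g. "inputCols" -> "InputCols".
    return PARAM_TEMPLATE.format(Name=name[0].upper() + name[1:], name=name, doc=doc)

print(gen_shared_param("inputCols", "input column names"))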
diff --git a/python/pyspark/ml/param/shared.py b/python/pyspark/ml/param/shared.py
index 4f243844f8..aaf80f0008 100644
--- a/python/pyspark/ml/param/shared.py
+++ b/python/pyspark/ml/param/shared.py
@@ -223,6 +223,35 @@ class HasInputCol(Params):
return self.getOrDefault(self.inputCol)
+class HasInputCols(Params):
+ """
+ Mixin for param inputCols: input column names.
+ """
+
+ # a placeholder to make it appear in the generated doc
+ inputCols = Param(Params._dummy(), "inputCols", "input column names")
+
+ def __init__(self):
+ super(HasInputCols, self).__init__()
+ #: param for input column names
+ self.inputCols = Param(self, "inputCols", "input column names")
+ if None is not None:
+ self._setDefault(inputCols=None)
+
+ def setInputCols(self, value):
+ """
+ Sets the value of :py:attr:`inputCols`.
+ """
+ self.paramMap[self.inputCols] = value
+ return self
+
+ def getInputCols(self):
+ """
+ Gets the value of inputCols or its default value.
+ """
+ return self.getOrDefault(self.inputCols)
+
+
class HasOutputCol(Params):
"""
Mixin for param outputCol: output column name.
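A short usage sketch exercising the generated HasInputCols accessors through VectorAssembler, which mixes in HasInputCols after this change; the assembler and column names carry over from the assumed example above:

# setInputCols/getInputCols are provided by the generated HasInputCols mixin.
assembler = VectorAssembler(outputCol="features")
assembler.setInputCols(["hour", "mobile", "userFeatures"])
print(assembler.getInputCols())  # ['hour', 'mobile', 'userFeatures']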