author     Xiangrui Meng <meng@databricks.com>  2015-05-18 12:02:18 -0700
committer  Xiangrui Meng <meng@databricks.com>  2015-05-18 12:02:18 -0700
commit     9c7e802a5a2b8cd3eb77642f84c54a8e976fc996 (patch)
tree       2e3b7e367f57b64ef46733ee8b64aa258e58cca8 /python/pyspark/ml/tests.py
parent     56ede88485cfca90974425fcb603b257be47229b (diff)
[SPARK-7380] [MLLIB] pipeline stages should be copyable in Python
This PR makes pipeline stages in Python copyable and hence simplifies some implementations. It also includes the following changes:

1. Rename `paramMap` and `defaultParamMap` to `_paramMap` and `_defaultParamMap`, respectively.
2. Accept a list of param maps in `fit`.
3. Use parent uid and name to identify param.

jkbradley

Author: Xiangrui Meng <meng@databricks.com>
Author: Joseph K. Bradley <joseph@databricks.com>

Closes #6088 from mengxr/SPARK-7380 and squashes the following commits:

413c463 [Xiangrui Meng] remove unnecessary doc
4159f35 [Xiangrui Meng] Merge remote-tracking branch 'apache/master' into SPARK-7380
611c719 [Xiangrui Meng] fix python style
68862b8 [Xiangrui Meng] update _java_obj initialization
927ad19 [Xiangrui Meng] fix ml/tests.py
0138fc3 [Xiangrui Meng] update feature transformers and fix a bug in RegexTokenizer
9ca44fb [Xiangrui Meng] simplify Java wrappers and add tests
c7d84ef [Xiangrui Meng] update ml/tests.py to test copy params
7e0d27f [Xiangrui Meng] merge master
46840fb [Xiangrui Meng] update wrappers
b6db1ed [Xiangrui Meng] update all self.paramMap to self._paramMap
46cb6ed [Xiangrui Meng] merge master
a163413 [Xiangrui Meng] fix style
1042e80 [Xiangrui Meng] Merge remote-tracking branch 'apache/master' into SPARK-7380
9630eae [Xiangrui Meng] fix Identifiable._randomUID
13bd70a [Xiangrui Meng] update ml/tests.py
64a536c [Xiangrui Meng] use _fit/_transform/_evaluate to simplify the impl
02abf13 [Xiangrui Meng] Merge remote-tracking branch 'apache/master' into copyable-python
66ce18c [Joseph K. Bradley] some cleanups before sending to Xiangrui
7431272 [Joseph K. Bradley] Rebased with master
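A minimal sketch of the new semantics (the sketch itself is not part of the commit; it assumes a PySpark build that includes this patch and a running SparkContext, as the tests below provide):

    from pyspark.ml.feature import Binarizer

    b = Binarizer(threshold=1.0, inputCol="input", outputCol="output")

    # Change 3: a Param is now identified by its parent stage's uid plus
    # its name, rather than by object identity.
    assert b.threshold.parent == b.uid

    # copy() applies an extra param map to a shallow copy of the stage;
    # the copy keeps the same uid (exercised by test_binarizer below).
    b2 = b.copy({b.threshold: 2.0})
    assert b2.uid == b.uid
    assert b2.getThreshold() == 2.0
    assert b.getThreshold() == 1.0  # the original stage is unchanged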
Diffstat (limited to 'python/pyspark/ml/tests.py')
-rw-r--r--  python/pyspark/ml/tests.py  105
1 file changed, 73 insertions, 32 deletions
diff --git a/python/pyspark/ml/tests.py b/python/pyspark/ml/tests.py
index ba6478dcd5..10fe0ef8db 100644
--- a/python/pyspark/ml/tests.py
+++ b/python/pyspark/ml/tests.py
@@ -31,10 +31,12 @@ else:
import unittest
from pyspark.tests import ReusedPySparkTestCase as PySparkTestCase
-from pyspark.sql import DataFrame
-from pyspark.ml.param import Param
+from pyspark.sql import DataFrame, SQLContext
+from pyspark.ml.param import Param, Params
from pyspark.ml.param.shared import HasMaxIter, HasInputCol
-from pyspark.ml.pipeline import Estimator, Model, Pipeline, Transformer
+from pyspark.ml import Estimator, Model, Pipeline, Transformer
+from pyspark.ml.feature import *
+from pyspark.mllib.linalg import DenseVector
class MockDataset(DataFrame):
@@ -43,44 +45,43 @@ class MockDataset(DataFrame):
self.index = 0
-class MockTransformer(Transformer):
+class HasFake(Params):
+
+ def __init__(self):
+ super(HasFake, self).__init__()
+ self.fake = Param(self, "fake", "fake param")
+
+ def getFake(self):
+ return self.getOrDefault(self.fake)
+
+
+class MockTransformer(Transformer, HasFake):
def __init__(self):
super(MockTransformer, self).__init__()
- self.fake = Param(self, "fake", "fake")
self.dataset_index = None
- self.fake_param_value = None
- def transform(self, dataset, params={}):
+ def _transform(self, dataset):
self.dataset_index = dataset.index
- if self.fake in params:
- self.fake_param_value = params[self.fake]
dataset.index += 1
return dataset
-class MockEstimator(Estimator):
+class MockEstimator(Estimator, HasFake):
def __init__(self):
super(MockEstimator, self).__init__()
- self.fake = Param(self, "fake", "fake")
self.dataset_index = None
- self.fake_param_value = None
- self.model = None
- def fit(self, dataset, params={}):
+ def _fit(self, dataset):
self.dataset_index = dataset.index
- if self.fake in params:
- self.fake_param_value = params[self.fake]
model = MockModel()
- self.model = model
+ self._copyValues(model)
return model
-class MockModel(MockTransformer, Model):
-
- def __init__(self):
- super(MockModel, self).__init__()
+class MockModel(MockTransformer, Model, HasFake):
+ pass
class PipelineTests(PySparkTestCase):
@@ -91,19 +92,17 @@ class PipelineTests(PySparkTestCase):
transformer1 = MockTransformer()
estimator2 = MockEstimator()
transformer3 = MockTransformer()
- pipeline = Pipeline() \
- .setStages([estimator0, transformer1, estimator2, transformer3])
+ pipeline = Pipeline(stages=[estimator0, transformer1, estimator2, transformer3])
pipeline_model = pipeline.fit(dataset, {estimator0.fake: 0, transformer1.fake: 1})
- self.assertEqual(0, estimator0.dataset_index)
- self.assertEqual(0, estimator0.fake_param_value)
- model0 = estimator0.model
+ model0, transformer1, model2, transformer3 = pipeline_model.stages
self.assertEqual(0, model0.dataset_index)
+ self.assertEqual(0, model0.getFake())
self.assertEqual(1, transformer1.dataset_index)
- self.assertEqual(1, transformer1.fake_param_value)
- self.assertEqual(2, estimator2.dataset_index)
- model2 = estimator2.model
- self.assertIsNone(model2.dataset_index, "The model produced by the last estimator should "
- "not be called during fit.")
+ self.assertEqual(1, transformer1.getFake())
+ self.assertEqual(2, dataset.index)
+ self.assertIsNone(model2.dataset_index, "The last model shouldn't be called in fit.")
+ self.assertIsNone(transformer3.dataset_index,
+ "The last transformer shouldn't be called in fit.")
dataset = pipeline_model.transform(dataset)
self.assertEqual(2, model0.dataset_index)
self.assertEqual(3, transformer1.dataset_index)
@@ -129,7 +128,7 @@ class ParamTests(PySparkTestCase):
maxIter = testParams.maxIter
self.assertEqual(maxIter.name, "maxIter")
self.assertEqual(maxIter.doc, "max number of iterations (>= 0)")
- self.assertTrue(maxIter.parent is testParams)
+ self.assertTrue(maxIter.parent == testParams.uid)
def test_params(self):
testParams = TestParams()
@@ -139,6 +138,7 @@ class ParamTests(PySparkTestCase):
params = testParams.params
self.assertEqual(params, [inputCol, maxIter])
+ self.assertTrue(testParams.hasParam(maxIter))
self.assertTrue(testParams.hasDefault(maxIter))
self.assertFalse(testParams.isSet(maxIter))
self.assertTrue(testParams.isDefined(maxIter))
@@ -147,6 +147,7 @@ class ParamTests(PySparkTestCase):
self.assertTrue(testParams.isSet(maxIter))
self.assertEquals(testParams.getMaxIter(), 100)
+ self.assertTrue(testParams.hasParam(inputCol))
self.assertFalse(testParams.hasDefault(inputCol))
self.assertFalse(testParams.isSet(inputCol))
self.assertFalse(testParams.isDefined(inputCol))
@@ -159,5 +160,45 @@ class ParamTests(PySparkTestCase):
"maxIter: max number of iterations (>= 0) (default: 10, current: 100)"]))
+class FeatureTests(PySparkTestCase):
+
+ def test_binarizer(self):
+ b0 = Binarizer()
+ self.assertListEqual(b0.params, [b0.inputCol, b0.outputCol, b0.threshold])
+ self.assertTrue(all([not b0.isSet(p) for p in b0.params]))
+ self.assertTrue(b0.hasDefault(b0.threshold))
+ self.assertEqual(b0.getThreshold(), 0.0)
+ b0.setParams(inputCol="input", outputCol="output").setThreshold(1.0)
+ self.assertTrue(all([b0.isSet(p) for p in b0.params]))
+ self.assertEqual(b0.getThreshold(), 1.0)
+ self.assertEqual(b0.getInputCol(), "input")
+ self.assertEqual(b0.getOutputCol(), "output")
+
+ b0c = b0.copy({b0.threshold: 2.0})
+ self.assertEqual(b0c.uid, b0.uid)
+ self.assertListEqual(b0c.params, b0.params)
+ self.assertEqual(b0c.getThreshold(), 2.0)
+
+ b1 = Binarizer(threshold=2.0, inputCol="input", outputCol="output")
+ self.assertNotEqual(b1.uid, b0.uid)
+ self.assertEqual(b1.getThreshold(), 2.0)
+ self.assertEqual(b1.getInputCol(), "input")
+ self.assertEqual(b1.getOutputCol(), "output")
+
+ def test_idf(self):
+ sqlContext = SQLContext(self.sc)
+ dataset = sqlContext.createDataFrame([
+ (DenseVector([1.0, 2.0]),),
+ (DenseVector([0.0, 1.0]),),
+ (DenseVector([3.0, 0.2]),)], ["tf"])
+ idf0 = IDF(inputCol="tf")
+ self.assertListEqual(idf0.params, [idf0.inputCol, idf0.minDocFreq, idf0.outputCol])
+ idf0m = idf0.fit(dataset, {idf0.outputCol: "idf"})
+ self.assertEqual(idf0m.uid, idf0.uid,
+ "Model should inherit the UID from its parent estimator.")
+ output = idf0m.transform(dataset)
+ self.assertIsNotNone(output.head().idf)
+
+
if __name__ == "__main__":
unittest.main()
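The mock classes above exercise the new base-class contract: user-facing parameter handling lives in Transformer.transform and Estimator.fit, and concrete stages override only _fit/_transform. A minimal sketch of that pattern (the names MyTransformer, MyModel, and MyEstimator are illustrative, not from the commit):

    from pyspark.ml import Estimator, Model, Transformer

    class MyTransformer(Transformer):
        def _transform(self, dataset):
            # Override only _transform; the inherited transform() applies
            # any caller-supplied param map to a copy of the stage before
            # delegating here.
            return dataset

    class MyModel(MyTransformer, Model):
        pass

    class MyEstimator(Estimator):
        def _fit(self, dataset):
            # fit() likewise handles param maps and delegates to _fit;
            # _copyValues pushes this estimator's param values onto the
            # new model, mirroring MockEstimator._fit above.
            return self._copyValues(MyModel())

Per change 2 in the commit message, fit() also accepts a list of param maps, in which case it fits one model per map and returns them as a list.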