aboutsummaryrefslogtreecommitdiff
path: root/R/pkg/inst/tests/testthat/test_mllib.R
diff options
context:
space:
mode:
authorWeichenXu <WeichenXu123@outlook.com>2016-09-23 11:14:22 -0700
committerFelix Cheung <felixcheung@apache.org>2016-09-23 11:14:22 -0700
commitf89808b0fdbc04e1bdff1489a6ec4c84ddb2adc4 (patch)
treed7f2cb9d4e595f02e675b71ff19038fe203e2b1a /R/pkg/inst/tests/testthat/test_mllib.R
parent90d5754212425d55f992c939a2bc7d9ac6ef92b8 (diff)
downloadspark-f89808b0fdbc04e1bdff1489a6ec4c84ddb2adc4.tar.gz
spark-f89808b0fdbc04e1bdff1489a6ec4c84ddb2adc4.tar.bz2
spark-f89808b0fdbc04e1bdff1489a6ec4c84ddb2adc4.zip
[SPARK-17499][SPARKR][ML][MLLIB] make the default params in sparkR spark.mlp consistent with MultilayerPerceptronClassifier
## What changes were proposed in this pull request? update `MultilayerPerceptronClassifierWrapper.fit` parameter type: `layers: Array[Int]` `seed: String` update several default params in sparkR `spark.mlp`: `tol` --> 1e-6 `stepSize` --> 0.03 `seed` --> NULL ( when seed == NULL, the scala-side wrapper regards it as a `null` value and the seed will use the default one ) r-side `seed` only supports 32bit integer. remove `layers` default value, and move it in front of those parameters with default value. add `layers` parameter validation check. ## How was this patch tested? tests added. Author: WeichenXu <WeichenXu123@outlook.com> Closes #15051 from WeichenXu123/update_py_mlp_default.
Diffstat (limited to 'R/pkg/inst/tests/testthat/test_mllib.R')
-rw-r--r-- R/pkg/inst/tests/testthat/test_mllib.R | 19
1 file changed, 19 insertions, 0 deletions
diff --git a/R/pkg/inst/tests/testthat/test_mllib.R b/R/pkg/inst/tests/testthat/test_mllib.R
index 24c40a8823..a1eaaf2091 100644
--- a/R/pkg/inst/tests/testthat/test_mllib.R
+++ b/R/pkg/inst/tests/testthat/test_mllib.R
@@ -391,6 +391,25 @@ test_that("spark.mlp", {
unlink(modelPath)
+ # Test default parameter
+ model <- spark.mlp(df, layers = c(4, 5, 4, 3))
+ mlpPredictions <- collect(select(predict(model, mlpTestDF), "prediction"))
+ expect_equal(head(mlpPredictions$prediction, 10), c(1, 1, 1, 1, 0, 1, 2, 2, 1, 0))
+
+ # Test illegal parameter
+ expect_error(spark.mlp(df, layers = NULL), "layers must be a integer vector with length > 1.")
+ expect_error(spark.mlp(df, layers = c()), "layers must be a integer vector with length > 1.")
+ expect_error(spark.mlp(df, layers = c(3)), "layers must be a integer vector with length > 1.")
+
+ # Test random seed
+ # default seed
+ model <- spark.mlp(df, layers = c(4, 5, 4, 3), maxIter = 10)
+ mlpPredictions <- collect(select(predict(model, mlpTestDF), "prediction"))
+ expect_equal(head(mlpPredictions$prediction, 12), c(1, 1, 1, 1, 0, 1, 2, 2, 1, 2, 0, 1))
+ # seed equals 10
+ model <- spark.mlp(df, layers = c(4, 5, 4, 3), maxIter = 10, seed = 10)
+ mlpPredictions <- collect(select(predict(model, mlpTestDF), "prediction"))
+ expect_equal(head(mlpPredictions$prediction, 12), c(1, 1, 1, 1, 2, 1, 2, 2, 1, 0, 0, 1))
})
test_that("spark.naiveBayes", {