aboutsummaryrefslogtreecommitdiff
path: root/R/pkg/inst/tests
diff options
context:
space:
mode:
authorXiangrui Meng <meng@databricks.com>2016-04-30 00:45:44 -0700
committerXiangrui Meng <meng@databricks.com>2016-04-30 00:45:44 -0700
commitb3ea579314945dc1fcb4a260fbc7af8479d139f2 (patch)
tree273ca1fb218ede87bb6a09d3c8c32598e3e937d7 /R/pkg/inst/tests
parent7fbe1bb24d6c5657da133135419fb29a609e32c7 (diff)
downloadspark-b3ea579314945dc1fcb4a260fbc7af8479d139f2.tar.gz
spark-b3ea579314945dc1fcb4a260fbc7af8479d139f2.tar.bz2
spark-b3ea579314945dc1fcb4a260fbc7af8479d139f2.zip
[SPARK-14831][.2][ML][R] rename ml.save/ml.load to write.ml/read.ml
## What changes were proposed in this pull request? Continue the work of #12789 to rename ml.save/ml.load to write.ml/read.ml, which are more consistent with read.df/write.df and other methods in SparkR. I didn't rename `data` to `df` because we still use `predict` for prediction, which uses `newData` to match the signature in R. ## How was this patch tested? Existing unit tests. cc: yanboliang thunterdb Author: Xiangrui Meng <meng@databricks.com> Closes #12807 from mengxr/SPARK-14831.
Diffstat (limited to 'R/pkg/inst/tests')
-rw-r--r-- R/pkg/inst/tests/testthat/test_mllib.R | 40
1 files changed, 20 insertions, 20 deletions
diff --git a/R/pkg/inst/tests/testthat/test_mllib.R b/R/pkg/inst/tests/testthat/test_mllib.R
index 18a4e78c99..dcd0296a3c 100644
--- a/R/pkg/inst/tests/testthat/test_mllib.R
+++ b/R/pkg/inst/tests/testthat/test_mllib.R
@@ -133,10 +133,10 @@ test_that("spark.glm save/load", {
s <- summary(m)
modelPath <- tempfile(pattern = "glm", fileext = ".tmp")
- ml.save(m, modelPath)
- expect_error(ml.save(m, modelPath))
- ml.save(m, modelPath, overwrite = TRUE)
- m2 <- ml.load(modelPath)
+ write.ml(m, modelPath)
+ expect_error(write.ml(m, modelPath))
+ write.ml(m, modelPath, overwrite = TRUE)
+ m2 <- read.ml(modelPath)
s2 <- summary(m2)
expect_equal(s$coefficients, s2$coefficients)
@@ -263,10 +263,10 @@ test_that("glm save/load", {
s <- summary(m)
modelPath <- tempfile(pattern = "glm", fileext = ".tmp")
- ml.save(m, modelPath)
- expect_error(ml.save(m, modelPath))
- ml.save(m, modelPath, overwrite = TRUE)
- m2 <- ml.load(modelPath)
+ write.ml(m, modelPath)
+ expect_error(write.ml(m, modelPath))
+ write.ml(m, modelPath, overwrite = TRUE)
+ m2 <- read.ml(modelPath)
s2 <- summary(m2)
expect_equal(s$coefficients, s2$coefficients)
@@ -311,10 +311,10 @@ test_that("spark.kmeans", {
# Test model save/load
modelPath <- tempfile(pattern = "kmeans", fileext = ".tmp")
- ml.save(model, modelPath)
- expect_error(ml.save(model, modelPath))
- ml.save(model, modelPath, overwrite = TRUE)
- model2 <- ml.load(modelPath)
+ write.ml(model, modelPath)
+ expect_error(write.ml(model, modelPath))
+ write.ml(model, modelPath, overwrite = TRUE)
+ model2 <- read.ml(modelPath)
summary2 <- summary(model2)
expect_equal(sort(unlist(summary.model$size)), sort(unlist(summary2$size)))
expect_equal(summary.model$coefficients, summary2$coefficients)
@@ -378,10 +378,10 @@ test_that("naiveBayes", {
# Test model save/load
modelPath <- tempfile(pattern = "naiveBayes", fileext = ".tmp")
- ml.save(m, modelPath)
- expect_error(ml.save(m, modelPath))
- ml.save(m, modelPath, overwrite = TRUE)
- m2 <- ml.load(modelPath)
+ write.ml(m, modelPath)
+ expect_error(write.ml(m, modelPath))
+ write.ml(m, modelPath, overwrite = TRUE)
+ m2 <- read.ml(modelPath)
s2 <- summary(m2)
expect_equal(s$apriori, s2$apriori)
expect_equal(s$tables, s2$tables)
@@ -435,10 +435,10 @@ test_that("spark.survreg", {
# Test model save/load
modelPath <- tempfile(pattern = "survreg", fileext = ".tmp")
- ml.save(model, modelPath)
- expect_error(ml.save(model, modelPath))
- ml.save(model, modelPath, overwrite = TRUE)
- model2 <- ml.load(modelPath)
+ write.ml(model, modelPath)
+ expect_error(write.ml(model, modelPath))
+ write.ml(model, modelPath, overwrite = TRUE)
+ model2 <- read.ml(modelPath)
stats2 <- summary(model2)
coefs2 <- as.vector(stats2$coefficients[, 1])
expect_equal(coefs, coefs2)