diff options
author | Brendan Dwyer <brendan.dwyer@ibm.com> | 2017-04-12 09:24:41 +0100 |
---|---|---|
committer | Sean Owen <sowen@cloudera.com> | 2017-04-12 09:24:41 +0100 |
commit | 044f7ecbfd75ac5a13bfc8cd01990e195c9bd178 (patch) | |
tree | 6df1bea6fc65d2031e94f09eeb6c9796814a8ea4 /R/pkg/inst/tests/testthat/test_sparkSQL.R | |
parent | bca4259f12b32eeb156b6755d0ec5e16d8e566b3 (diff) | |
download | spark-044f7ecbfd75ac5a13bfc8cd01990e195c9bd178.tar.gz spark-044f7ecbfd75ac5a13bfc8cd01990e195c9bd178.tar.bz2 spark-044f7ecbfd75ac5a13bfc8cd01990e195c9bd178.zip |
[SPARK-20298][SPARKR][MINOR] fixed spelling mistake "charactor"
## What changes were proposed in this pull request?
Fixed spelling of "charactor"
## How was this patch tested?
Spelling change only
Author: Brendan Dwyer <brendan.dwyer@ibm.com>
Closes #17611 from bdwyer2/SPARK-20298.
Diffstat (limited to 'R/pkg/inst/tests/testthat/test_sparkSQL.R')
-rw-r--r-- | R/pkg/inst/tests/testthat/test_sparkSQL.R | 6 |
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/R/pkg/inst/tests/testthat/test_sparkSQL.R b/R/pkg/inst/tests/testthat/test_sparkSQL.R
index 58cf24256a..3fbb618ddf 100644
--- a/R/pkg/inst/tests/testthat/test_sparkSQL.R
+++ b/R/pkg/inst/tests/testthat/test_sparkSQL.R
@@ -2926,9 +2926,9 @@ test_that("Call DataFrameWriter.save() API in Java without path and check argument types", {
             paste("source should be character, NULL or omitted. It is the datasource specified",
                   "in 'spark.sql.sources.default' configuration by default."))
   expect_error(write.df(df, path = c(3)),
-               "path should be charactor, NULL or omitted.")
+               "path should be character, NULL or omitted.")
   expect_error(write.df(df, mode = TRUE),
-               "mode should be charactor or omitted. It is 'error' by default.")
+               "mode should be character or omitted. It is 'error' by default.")
 })

 test_that("Call DataFrameWriter.load() API in Java without path and check argument types", {
@@ -2947,7 +2947,7 @@ test_that("Call DataFrameWriter.load() API in Java without path and check argument types", {
   # Arguments checking in R side.
   expect_error(read.df(path = c(3)),
-               "path should be charactor, NULL or omitted.")
+               "path should be character, NULL or omitted.")
   expect_error(read.df(jsonPath, source = c(1, 2)),
             paste("source should be character, NULL or omitted. It is the datasource specified",
                   "in 'spark.sql.sources.default' configuration by default."))