Diffstat (limited to 'R/pkg/inst/tests/testthat/test_sparkSQL.R')
-rw-r--r--  R/pkg/inst/tests/testthat/test_sparkSQL.R | 35
1 file changed, 35 insertions(+), 0 deletions(-)
diff --git a/R/pkg/inst/tests/testthat/test_sparkSQL.R b/R/pkg/inst/tests/testthat/test_sparkSQL.R
index 9d874a0988..f5ab601f27 100644
--- a/R/pkg/inst/tests/testthat/test_sparkSQL.R
+++ b/R/pkg/inst/tests/testthat/test_sparkSQL.R
@@ -2544,6 +2544,41 @@ test_that("Spark version from SparkSession", {
 expect_equal(ver, version)
 })
+test_that("Call DataFrameWriter.save() API in Java without path and check argument types", {
+ df <- read.df(jsonPath, "json")
+ # This tests if the exception is thrown from JVM not from SparkR side.
+ # It makes sure that we can omit path argument in write.df API and then it calls
+ # DataFrameWriter.save() without path.
+ expect_error(write.df(df, source = "csv"),
+ "Error in save : illegal argument - 'path' is not specified")
+
+ # Arguments checking in R side.
+ expect_error(write.df(df, "data.tmp", source = c(1, 2)),
+ paste("source should be character, NULL or omitted. It is the datasource specified",
+ "in 'spark.sql.sources.default' configuration by default."))
+ expect_error(write.df(df, path = c(3)),
+ "path should be charactor, NULL or omitted.")
+ expect_error(write.df(df, mode = TRUE),
+ "mode should be charactor or omitted. It is 'error' by default.")
+})
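
For context, a minimal sketch of the behavior this test pins down, assuming a running SparkR session and the jsonPath fixture used above; the tempfile() output location is illustrative, not part of the change:

# Writing with an explicit path calls DataFrameWriter.save(path) on the JVM side.
df <- read.df(jsonPath, "json")
outPath <- tempfile(fileext = ".csv")  # illustrative output location
write.df(df, path = outPath, source = "csv", mode = "overwrite")

# Omitting path makes SparkR call DataFrameWriter.save() with no argument, so a
# file-based source such as "csv" fails in the JVM with
# "illegal argument - 'path' is not specified".
write.df(df, source = "csv")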
+
+test_that("Call DataFrameWriter.load() API in Java without path and check argument types", {
+ # This tests if the exception is thrown from JVM not from SparkR side.
+ # It makes sure that we can omit path argument in read.df API and then it calls
+ # DataFrameWriter.load() without path.
+ expect_error(read.df(source = "json"),
+ paste("Error in loadDF : analysis error - Unable to infer schema for JSON at .",
+ "It must be specified manually"))
+ expect_error(read.df("arbitrary_path"), "Error in loadDF : analysis error - Path does not exist")
+
+ # Arguments checking in R side.
+ expect_error(read.df(path = c(3)),
+ "path should be charactor, NULL or omitted.")
+ expect_error(read.df(jsonPath, source = c(1, 2)),
+ paste("source should be character, NULL or omitted. It is the datasource specified",
+ "in 'spark.sql.sources.default' configuration by default."))
+})
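
Similarly, a minimal sketch of the read path, under the same assumptions of an active SparkR session and the jsonPath fixture:

# Reading with an explicit path calls DataFrameReader.load(path) on the JVM side.
df <- read.df(jsonPath, source = "json")

# Omitting path makes SparkR call DataFrameReader.load() with no argument;
# file-based sources then fail schema inference in the JVM with
# "Unable to infer schema for JSON at . It must be specified manually".
read.df(source = "json")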
+
 unlink(parquetPath)
 unlink(orcPath)
 unlink(jsonPath)