Diffstat (limited to 'R/pkg/inst/tests/testthat/test_sparkSQL.R')
 R/pkg/inst/tests/testthat/test_sparkSQL.R | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/R/pkg/inst/tests/testthat/test_sparkSQL.R b/R/pkg/inst/tests/testthat/test_sparkSQL.R
index aaa8fb498c..417a03ff61 100644
--- a/R/pkg/inst/tests/testthat/test_sparkSQL.R
+++ b/R/pkg/inst/tests/testthat/test_sparkSQL.R
@@ -196,18 +196,18 @@ test_that("create DataFrame from RDD", {
   expect_equal(dtypes(df), list(c("name", "string"), c("age", "int"), c("height", "float")))
   expect_equal(as.list(collect(where(df, df$name == "John"))),
                list(name = "John", age = 19L, height = 176.5))
-  expect_equal(getNumPartitions(toRDD(df)), 1)
+  expect_equal(getNumPartitions(df), 1)

   df <- as.DataFrame(cars, numPartitions = 2)
-  expect_equal(getNumPartitions(toRDD(df)), 2)
+  expect_equal(getNumPartitions(df), 2)
   df <- createDataFrame(cars, numPartitions = 3)
-  expect_equal(getNumPartitions(toRDD(df)), 3)
+  expect_equal(getNumPartitions(df), 3)
   # validate limit by num of rows
   df <- createDataFrame(cars, numPartitions = 60)
-  expect_equal(getNumPartitions(toRDD(df)), 50)
+  expect_equal(getNumPartitions(df), 50)
   # validate when 1 < (length(coll) / numSlices) << length(coll)
   df <- createDataFrame(cars, numPartitions = 20)
-  expect_equal(getNumPartitions(toRDD(df)), 20)
+  expect_equal(getNumPartitions(df), 20)

   df <- as.DataFrame(data.frame(0))
   expect_is(df, "SparkDataFrame")
@@ -215,7 +215,7 @@ test_that("create DataFrame from RDD", {
   expect_is(df, "SparkDataFrame")
   df <- as.DataFrame(data.frame(0), numPartitions = 2)
   # no data to partition, goes to 1
-  expect_equal(getNumPartitions(toRDD(df)), 1)
+  expect_equal(getNumPartitions(df), 1)

   setHiveContext(sc)
   sql("CREATE TABLE people (name string, age double, height float)")
@@ -234,7 +234,7 @@ test_that("createDataFrame uses files for large objects", {
   conf <- callJMethod(sparkSession, "conf")
   callJMethod(conf, "set", "spark.r.maxAllocationLimit", "100")
   df <- suppressWarnings(createDataFrame(iris, numPartitions = 3))
-  expect_equal(getNumPartitions(toRDD(df)), 3)
+  expect_equal(getNumPartitions(df), 3)

   # Resetting the conf back to default value
   callJMethod(conf, "set", "spark.r.maxAllocationLimit", toString(.Machine$integer.max / 10))
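
For context, the change replaces calls through the internal toRDD() helper with the getNumPartitions() method called directly on a SparkDataFrame, as the updated assertions show. Below is a minimal standalone sketch of that usage pattern; the session setup via sparkR.session() and teardown via sparkR.session.stop() are standard SparkR boilerplate not shown in the diff, and the expected values assume base R's cars dataset (50 rows):

library(SparkR)
sparkR.session(master = "local[2]", appName = "partition-demo")

# createDataFrame() accepts a numPartitions hint, and getNumPartitions()
# reads the resulting partition count straight off the SparkDataFrame,
# with no detour through an RDD.
df <- createDataFrame(cars, numPartitions = 3)
getNumPartitions(df)  # 3

# Requesting more partitions than there are rows (cars has 50 rows)
# caps the count at one row per partition, matching the "validate
# limit by num of rows" assertion above.
df <- createDataFrame(cars, numPartitions = 60)
getNumPartitions(df)  # 50

sparkR.session.stop()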