Diffstat (limited to 'R/pkg/inst')
 R/pkg/inst/tests/testthat/test_Serde.R                |  4
 R/pkg/inst/tests/testthat/test_binaryFile.R           |  4
 R/pkg/inst/tests/testthat/test_binary_function.R      |  4
 R/pkg/inst/tests/testthat/test_broadcast.R            |  4
 R/pkg/inst/tests/testthat/test_context.R              | 23
 R/pkg/inst/tests/testthat/test_includePackage.R       |  4
 R/pkg/inst/tests/testthat/test_mllib.R                |  4
 R/pkg/inst/tests/testthat/test_parallelize_collect.R  |  4
 R/pkg/inst/tests/testthat/test_rdd.R                  |  4
 R/pkg/inst/tests/testthat/test_shuffle.R              |  4
 R/pkg/inst/tests/testthat/test_sparkSQL.R             |  2
 R/pkg/inst/tests/testthat/test_take.R                 |  4
 R/pkg/inst/tests/testthat/test_textFile.R             |  4
 R/pkg/inst/tests/testthat/test_utils.R                |  4
 14 files changed, 48 insertions(+), 25 deletions(-)
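
Across these test files the change follows one pattern: the SparkSession used by the tests is created with Hive support disabled, and the session is explicitly stopped at the end of the file. A minimal sketch of the shape each test file ends up with (test bodies elided):

    # Top of the test file: start a session without Hive support and get the JavaSparkContext handle
    sparkSession <- sparkR.session(enableHiveSupport = FALSE)
    sc <- callJStatic("org.apache.spark.sql.api.r.SQLUtils", "getJavaSparkContext", sparkSession)

    # ... test_that(...) blocks ...

    # Bottom of the test file: stop the session so the next test file starts from a clean state
    sparkR.session.stop()
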
diff --git a/R/pkg/inst/tests/testthat/test_Serde.R b/R/pkg/inst/tests/testthat/test_Serde.R
index 96fb6dda26..b5f6f1b54f 100644
--- a/R/pkg/inst/tests/testthat/test_Serde.R
+++ b/R/pkg/inst/tests/testthat/test_Serde.R
@@ -17,7 +17,7 @@
context("SerDe functionality")
-sparkSession <- sparkR.session()
+sparkSession <- sparkR.session(enableHiveSupport = FALSE)
test_that("SerDe of primitive types", {
x <- callJStatic("SparkRHandler", "echo", 1L)
@@ -75,3 +75,5 @@ test_that("SerDe of list of lists", {
y <- callJStatic("SparkRHandler", "echo", x)
expect_equal(x, y)
})
+
+sparkR.session.stop()
diff --git a/R/pkg/inst/tests/testthat/test_binaryFile.R b/R/pkg/inst/tests/testthat/test_binaryFile.R
index b69f017de8..56ac8eb728 100644
--- a/R/pkg/inst/tests/testthat/test_binaryFile.R
+++ b/R/pkg/inst/tests/testthat/test_binaryFile.R
@@ -18,7 +18,7 @@
context("functions on binary files")
# JavaSparkContext handle
-sparkSession <- sparkR.session()
+sparkSession <- sparkR.session(enableHiveSupport = FALSE)
sc <- callJStatic("org.apache.spark.sql.api.r.SQLUtils", "getJavaSparkContext", sparkSession)
mockFile <- c("Spark is pretty.", "Spark is awesome.")
@@ -88,3 +88,5 @@ test_that("saveAsObjectFile()/objectFile() works with multiple paths", {
unlink(fileName1, recursive = TRUE)
unlink(fileName2, recursive = TRUE)
})
+
+sparkR.session.stop()
diff --git a/R/pkg/inst/tests/testthat/test_binary_function.R b/R/pkg/inst/tests/testthat/test_binary_function.R
index 6f51d20687..ae7abe20cc 100644
--- a/R/pkg/inst/tests/testthat/test_binary_function.R
+++ b/R/pkg/inst/tests/testthat/test_binary_function.R
@@ -18,7 +18,7 @@
context("binary functions")
# JavaSparkContext handle
-sparkSession <- sparkR.session()
+sparkSession <- sparkR.session(enableHiveSupport = FALSE)
sc <- callJStatic("org.apache.spark.sql.api.r.SQLUtils", "getJavaSparkContext", sparkSession)
# Data
@@ -100,3 +100,5 @@ test_that("zipPartitions() on RDDs", {
unlink(fileName)
})
+
+sparkR.session.stop()
diff --git a/R/pkg/inst/tests/testthat/test_broadcast.R b/R/pkg/inst/tests/testthat/test_broadcast.R
index cf1d432771..c7fefb5cf9 100644
--- a/R/pkg/inst/tests/testthat/test_broadcast.R
+++ b/R/pkg/inst/tests/testthat/test_broadcast.R
@@ -18,7 +18,7 @@
context("broadcast variables")
# JavaSparkContext handle
-sparkSession <- sparkR.session()
+sparkSession <- sparkR.session(enableHiveSupport = FALSE)
sc <- callJStatic("org.apache.spark.sql.api.r.SQLUtils", "getJavaSparkContext", sparkSession)
# Partitioned data
@@ -47,3 +47,5 @@ test_that("without using broadcast variable", {
expected <- list(sum(randomMat) * 1, sum(randomMat) * 2)
expect_equal(actual, expected)
})
+
+sparkR.session.stop()
diff --git a/R/pkg/inst/tests/testthat/test_context.R b/R/pkg/inst/tests/testthat/test_context.R
index 2a1bd61b11..8bd134a58d 100644
--- a/R/pkg/inst/tests/testthat/test_context.R
+++ b/R/pkg/inst/tests/testthat/test_context.R
@@ -63,18 +63,14 @@ test_that("repeatedly starting and stopping SparkR", {
}
})
-# Does not work consistently even with Hive off
-# nolint start
-# test_that("repeatedly starting and stopping SparkR", {
-# for (i in 1:4) {
-# sparkR.session(enableHiveSupport = FALSE)
-# df <- createDataFrame(data.frame(dummy=1:i))
-# expect_equal(count(df), i)
-# sparkR.session.stop()
-# Sys.sleep(5) # Need more time to shutdown Hive metastore
-# }
-# })
-# nolint end
+test_that("repeatedly starting and stopping SparkSession", {
+ for (i in 1:4) {
+ sparkR.session(enableHiveSupport = FALSE)
+ df <- createDataFrame(data.frame(dummy = 1:i))
+ expect_equal(count(df), i)
+ sparkR.session.stop()
+ }
+})
test_that("rdd GC across sparkR.stop", {
sc <- sparkR.sparkContext() # sc should get id 0
@@ -96,6 +92,7 @@ test_that("rdd GC across sparkR.stop", {
count(rdd3)
count(rdd4)
+ sparkR.session.stop()
})
test_that("job group functions can be called", {
@@ -164,7 +161,7 @@ test_that("sparkJars sparkPackages as comma-separated strings", {
})
test_that("spark.lapply should perform simple transforms", {
- sc <- sparkR.sparkContext()
+ sparkR.sparkContext()
doubled <- spark.lapply(1:10, function(x) { 2 * x })
expect_equal(doubled, as.list(2 * 1:10))
sparkR.session.stop()
diff --git a/R/pkg/inst/tests/testthat/test_includePackage.R b/R/pkg/inst/tests/testthat/test_includePackage.R
index d6a3766539..ca2b900572 100644
--- a/R/pkg/inst/tests/testthat/test_includePackage.R
+++ b/R/pkg/inst/tests/testthat/test_includePackage.R
@@ -18,7 +18,7 @@
context("include R packages")
# JavaSparkContext handle
-sparkSession <- sparkR.session()
+sparkSession <- sparkR.session(enableHiveSupport = FALSE)
sc <- callJStatic("org.apache.spark.sql.api.r.SQLUtils", "getJavaSparkContext", sparkSession)
# Partitioned data
@@ -56,3 +56,5 @@ test_that("use include package", {
actual <- collect(data)
}
})
+
+sparkR.session.stop()
diff --git a/R/pkg/inst/tests/testthat/test_mllib.R b/R/pkg/inst/tests/testthat/test_mllib.R
index 753da81760..ab390a86d1 100644
--- a/R/pkg/inst/tests/testthat/test_mllib.R
+++ b/R/pkg/inst/tests/testthat/test_mllib.R
@@ -20,7 +20,7 @@ library(testthat)
context("MLlib functions")
# Tests for MLlib functions in SparkR
-sparkSession <- sparkR.session()
+sparkSession <- sparkR.session(enableHiveSupport = FALSE)
test_that("formula of spark.glm", {
training <- suppressWarnings(createDataFrame(iris))
@@ -453,3 +453,5 @@ test_that("spark.survreg", {
expect_equal(predict(model, rData)[[1]], 3.724591, tolerance = 1e-4)
}
})
+
+sparkR.session.stop()
diff --git a/R/pkg/inst/tests/testthat/test_parallelize_collect.R b/R/pkg/inst/tests/testthat/test_parallelize_collect.R
index f79a8a70aa..959d7ab9e6 100644
--- a/R/pkg/inst/tests/testthat/test_parallelize_collect.R
+++ b/R/pkg/inst/tests/testthat/test_parallelize_collect.R
@@ -33,7 +33,7 @@ numPairs <- list(list(1, 1), list(1, 2), list(2, 2), list(2, 3))
strPairs <- list(list(strList, strList), list(strList, strList))
# JavaSparkContext handle
-sparkSession <- sparkR.session()
+sparkSession <- sparkR.session(enableHiveSupport = FALSE)
jsc <- callJStatic("org.apache.spark.sql.api.r.SQLUtils", "getJavaSparkContext", sparkSession)
# Tests
@@ -108,3 +108,5 @@ test_that("parallelize() and collect() work for lists of pairs (pairwise data)",
expect_equal(collect(strPairsRDDD1), strPairs)
expect_equal(collect(strPairsRDDD2), strPairs)
})
+
+sparkR.session.stop()
diff --git a/R/pkg/inst/tests/testthat/test_rdd.R b/R/pkg/inst/tests/testthat/test_rdd.R
index 429311d292..508a3a7dfd 100644
--- a/R/pkg/inst/tests/testthat/test_rdd.R
+++ b/R/pkg/inst/tests/testthat/test_rdd.R
@@ -18,7 +18,7 @@
context("basic RDD functions")
# JavaSparkContext handle
-sparkSession <- sparkR.session()
+sparkSession <- sparkR.session(enableHiveSupport = FALSE)
sc <- callJStatic("org.apache.spark.sql.api.r.SQLUtils", "getJavaSparkContext", sparkSession)
# Data
@@ -800,3 +800,5 @@ test_that("Test correct concurrency of RRDD.compute()", {
count <- callJMethod(zrdd, "count")
expect_equal(count, 1000)
})
+
+sparkR.session.stop()
diff --git a/R/pkg/inst/tests/testthat/test_shuffle.R b/R/pkg/inst/tests/testthat/test_shuffle.R
index 7d4f342016..2586056773 100644
--- a/R/pkg/inst/tests/testthat/test_shuffle.R
+++ b/R/pkg/inst/tests/testthat/test_shuffle.R
@@ -18,7 +18,7 @@
context("partitionBy, groupByKey, reduceByKey etc.")
# JavaSparkContext handle
-sparkSession <- sparkR.session()
+sparkSession <- sparkR.session(enableHiveSupport = FALSE)
sc <- callJStatic("org.apache.spark.sql.api.r.SQLUtils", "getJavaSparkContext", sparkSession)
# Data
@@ -220,3 +220,5 @@ test_that("test partitionBy with string keys", {
expect_equal(sortKeyValueList(actual_first), sortKeyValueList(expected_first))
expect_equal(sortKeyValueList(actual_second), sortKeyValueList(expected_second))
})
+
+sparkR.session.stop()
diff --git a/R/pkg/inst/tests/testthat/test_sparkSQL.R b/R/pkg/inst/tests/testthat/test_sparkSQL.R
index f275284b09..3f3cb766b3 100644
--- a/R/pkg/inst/tests/testthat/test_sparkSQL.R
+++ b/R/pkg/inst/tests/testthat/test_sparkSQL.R
@@ -2489,3 +2489,5 @@ unlink(parquetPath)
unlink(orcPath)
unlink(jsonPath)
unlink(jsonPathNa)
+
+sparkR.session.stop()
diff --git a/R/pkg/inst/tests/testthat/test_take.R b/R/pkg/inst/tests/testthat/test_take.R
index daf5e41abe..07f00c9915 100644
--- a/R/pkg/inst/tests/testthat/test_take.R
+++ b/R/pkg/inst/tests/testthat/test_take.R
@@ -30,7 +30,7 @@ strList <- list("Dexter Morgan: Blood. Sometimes it sets my teeth on edge, ",
"raising me. But they're both dead now. I didn't kill them. Honest.")
# JavaSparkContext handle
-sparkSession <- sparkR.session()
+sparkSession <- sparkR.session(enableHiveSupport = FALSE)
sc <- callJStatic("org.apache.spark.sql.api.r.SQLUtils", "getJavaSparkContext", sparkSession)
test_that("take() gives back the original elements in correct count and order", {
@@ -65,3 +65,5 @@ test_that("take() gives back the original elements in correct count and order",
expect_equal(length(take(numListRDD, 0)), 0)
expect_equal(length(take(numVectorRDD, 0)), 0)
})
+
+sparkR.session.stop()
diff --git a/R/pkg/inst/tests/testthat/test_textFile.R b/R/pkg/inst/tests/testthat/test_textFile.R
index 7b2cc74753..b7dcbe472a 100644
--- a/R/pkg/inst/tests/testthat/test_textFile.R
+++ b/R/pkg/inst/tests/testthat/test_textFile.R
@@ -18,7 +18,7 @@
context("the textFile() function")
# JavaSparkContext handle
-sparkSession <- sparkR.session()
+sparkSession <- sparkR.session(enableHiveSupport = FALSE)
sc <- callJStatic("org.apache.spark.sql.api.r.SQLUtils", "getJavaSparkContext", sparkSession)
mockFile <- c("Spark is pretty.", "Spark is awesome.")
@@ -160,3 +160,5 @@ test_that("Pipelined operations on RDDs created using textFile", {
unlink(fileName)
})
+
+sparkR.session.stop()
diff --git a/R/pkg/inst/tests/testthat/test_utils.R b/R/pkg/inst/tests/testthat/test_utils.R
index 21a119a06b..58ff3debfa 100644
--- a/R/pkg/inst/tests/testthat/test_utils.R
+++ b/R/pkg/inst/tests/testthat/test_utils.R
@@ -18,7 +18,7 @@
context("functions in utils.R")
# JavaSparkContext handle
-sparkSession <- sparkR.session()
+sparkSession <- sparkR.session(enableHiveSupport = FALSE)
sc <- callJStatic("org.apache.spark.sql.api.r.SQLUtils", "getJavaSparkContext", sparkSession)
test_that("convertJListToRList() gives back (deserializes) the original JLists
@@ -182,3 +182,5 @@ test_that("overrideEnvs", {
expect_equal(config[["param_only"]], "blah")
expect_equal(config[["config_only"]], "ok")
})
+
+sparkR.session.stop()