author     Felix Cheung <felixcheung_m@hotmail.com>          2016-12-09 19:06:05 -0800
committer  Shivaram Venkataraman <shivaram@cs.berkeley.edu>  2016-12-09 19:06:05 -0800
commit     3e11d5bfef2f05bd6d42c4d6188eae6d63c963ef (patch)
tree       65805afddad591d2cbb837a7308d988c9694392a /R
parent     d2493a203e852adf63dde4e1fc993e8d11efec3d (diff)
[SPARK-18807][SPARKR] Should suppress output print for calls to JVM methods with void return values
## What changes were proposed in this pull request?

Several SparkR APIs that call into JVM methods with void return values get their NULL results printed out, especially when running in a REPL or IDE. Example:

```
> setLogLevel("WARN")
NULL
```

We should fix this to make the output clearer. Also found a small change to the return value of dropTempView in 2.1 - adding doc and a test for it.

## How was this patch tested?

Manually - I didn't find an expect_*() method in testthat for this.

Author: Felix Cheung <felixcheung_m@hotmail.com>

Closes #16237 from felixcheung/rinvis.
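For context, here is a minimal self-contained sketch (not part of this commit, no SparkR required) of the R auto-printing behavior the fix addresses; `jvm_void_call` is a hypothetical stand-in for `callJMethod(...)` on a void JVM method, which comes back to R as NULL:

```r
# Hypothetical stand-in for callJMethod(...) on a JVM method that returns void:
# the R-side result of such a call is NULL.
jvm_void_call <- function() NULL

# Before the fix: the function's last expression is a visible call, so calling
# it at the top level of a REPL auto-prints "NULL".
setLogLevelNoisy <- function(level) {
  jvm_void_call()
}

# After the fix: invisible() returns the same value but marks it as invisible,
# so nothing is auto-printed unless the caller prints it explicitly.
setLogLevelQuiet <- function(level) {
  invisible(jvm_void_call())
}

setLogLevelNoisy("WARN")   # prints: NULL
setLogLevelQuiet("WARN")   # prints nothing
```

The diff below applies exactly that pattern: each void-returning callJMethod(...) call is wrapped in invisible().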
Diffstat (limited to 'R')
-rw-r--r--  R/pkg/R/SQLContext.R                        7
-rw-r--r--  R/pkg/R/context.R                           6
-rw-r--r--  R/pkg/R/sparkR.R                            6
-rw-r--r--  R/pkg/inst/tests/testthat/test_sparkSQL.R  14
4 files changed, 17 insertions, 16 deletions
diff --git a/R/pkg/R/SQLContext.R b/R/pkg/R/SQLContext.R
index 38d83c6e5c..6f48cd6639 100644
--- a/R/pkg/R/SQLContext.R
+++ b/R/pkg/R/SQLContext.R
@@ -634,7 +634,7 @@ tableNames <- function(x, ...) {
cacheTable.default <- function(tableName) {
sparkSession <- getSparkSession()
catalog <- callJMethod(sparkSession, "catalog")
- callJMethod(catalog, "cacheTable", tableName)
+ invisible(callJMethod(catalog, "cacheTable", tableName))
}
cacheTable <- function(x, ...) {
@@ -663,7 +663,7 @@ cacheTable <- function(x, ...) {
uncacheTable.default <- function(tableName) {
sparkSession <- getSparkSession()
catalog <- callJMethod(sparkSession, "catalog")
- callJMethod(catalog, "uncacheTable", tableName)
+ invisible(callJMethod(catalog, "uncacheTable", tableName))
}
uncacheTable <- function(x, ...) {
@@ -686,7 +686,7 @@ uncacheTable <- function(x, ...) {
clearCache.default <- function() {
sparkSession <- getSparkSession()
catalog <- callJMethod(sparkSession, "catalog")
- callJMethod(catalog, "clearCache")
+ invisible(callJMethod(catalog, "clearCache"))
}
clearCache <- function() {
@@ -730,6 +730,7 @@ dropTempTable <- function(x, ...) {
#' If the view has been cached before, then it will also be uncached.
#'
#' @param viewName the name of the view to be dropped.
+#' @return TRUE if the view is dropped successfully, FALSE otherwise.
#' @rdname dropTempView
#' @name dropTempView
#' @export
diff --git a/R/pkg/R/context.R b/R/pkg/R/context.R
index 438d77a388..1138caf98e 100644
--- a/R/pkg/R/context.R
+++ b/R/pkg/R/context.R
@@ -87,8 +87,8 @@ objectFile <- function(sc, path, minPartitions = NULL) {
#' in the list are split into \code{numSlices} slices and distributed to nodes
#' in the cluster.
#'
-#' If size of serialized slices is larger than spark.r.maxAllocationLimit or (200MB), the function
-#' will write it to disk and send the file name to JVM. Also to make sure each slice is not
+#' If size of serialized slices is larger than spark.r.maxAllocationLimit or (200MB), the function
+#' will write it to disk and send the file name to JVM. Also to make sure each slice is not
#' larger than that limit, number of slices may be increased.
#'
#' @param sc SparkContext to use
@@ -379,5 +379,5 @@ spark.lapply <- function(list, func) {
#' @note setLogLevel since 2.0.0
setLogLevel <- function(level) {
sc <- getSparkContext()
- callJMethod(sc, "setLogLevel", level)
+ invisible(callJMethod(sc, "setLogLevel", level))
}
diff --git a/R/pkg/R/sparkR.R b/R/pkg/R/sparkR.R
index 43bff97553..c57cc8f285 100644
--- a/R/pkg/R/sparkR.R
+++ b/R/pkg/R/sparkR.R
@@ -427,7 +427,7 @@ sparkR.session <- function(
#' @method setJobGroup default
setJobGroup.default <- function(groupId, description, interruptOnCancel) {
sc <- getSparkContext()
- callJMethod(sc, "setJobGroup", groupId, description, interruptOnCancel)
+ invisible(callJMethod(sc, "setJobGroup", groupId, description, interruptOnCancel))
}
setJobGroup <- function(sc, groupId, description, interruptOnCancel) {
@@ -457,7 +457,7 @@ setJobGroup <- function(sc, groupId, description, interruptOnCancel) {
#' @method clearJobGroup default
clearJobGroup.default <- function() {
sc <- getSparkContext()
- callJMethod(sc, "clearJobGroup")
+ invisible(callJMethod(sc, "clearJobGroup"))
}
clearJobGroup <- function(sc) {
@@ -484,7 +484,7 @@ clearJobGroup <- function(sc) {
#' @method cancelJobGroup default
cancelJobGroup.default <- function(groupId) {
sc <- getSparkContext()
- callJMethod(sc, "cancelJobGroup", groupId)
+ invisible(callJMethod(sc, "cancelJobGroup", groupId))
}
cancelJobGroup <- function(sc, groupId) {
diff --git a/R/pkg/inst/tests/testthat/test_sparkSQL.R b/R/pkg/inst/tests/testthat/test_sparkSQL.R
index c669c2e2e2..e8ccff8122 100644
--- a/R/pkg/inst/tests/testthat/test_sparkSQL.R
+++ b/R/pkg/inst/tests/testthat/test_sparkSQL.R
@@ -576,7 +576,7 @@ test_that("test tableNames and tables", {
tables <- tables()
expect_equal(count(tables), 2)
suppressWarnings(dropTempTable("table1"))
- dropTempView("table2")
+ expect_true(dropTempView("table2"))
tables <- tables()
expect_equal(count(tables), 0)
@@ -589,7 +589,7 @@ test_that(
newdf <- sql("SELECT * FROM table1 where name = 'Michael'")
expect_is(newdf, "SparkDataFrame")
expect_equal(count(newdf), 1)
- dropTempView("table1")
+ expect_true(dropTempView("table1"))
createOrReplaceTempView(df, "dfView")
sqlCast <- collect(sql("select cast('2' as decimal) as x from dfView limit 1"))
@@ -600,7 +600,7 @@ test_that(
expect_equal(ncol(sqlCast), 1)
expect_equal(out[1], " x")
expect_equal(out[2], "1 2")
- dropTempView("dfView")
+ expect_true(dropTempView("dfView"))
})
test_that("test cache, uncache and clearCache", {
@@ -609,7 +609,7 @@ test_that("test cache, uncache and clearCache", {
cacheTable("table1")
uncacheTable("table1")
clearCache()
- dropTempView("table1")
+ expect_true(dropTempView("table1"))
})
test_that("insertInto() on a registered table", {
@@ -630,13 +630,13 @@ test_that("insertInto() on a registered table", {
insertInto(dfParquet2, "table1")
expect_equal(count(sql("select * from table1")), 5)
expect_equal(first(sql("select * from table1 order by age"))$name, "Michael")
- dropTempView("table1")
+ expect_true(dropTempView("table1"))
createOrReplaceTempView(dfParquet, "table1")
insertInto(dfParquet2, "table1", overwrite = TRUE)
expect_equal(count(sql("select * from table1")), 2)
expect_equal(first(sql("select * from table1 order by age"))$name, "Bob")
- dropTempView("table1")
+ expect_true(dropTempView("table1"))
unlink(jsonPath2)
unlink(parquetPath2)
@@ -650,7 +650,7 @@ test_that("tableToDF() returns a new DataFrame", {
expect_equal(count(tabledf), 3)
tabledf2 <- tableToDF("table1")
expect_equal(count(tabledf2), 3)
- dropTempView("table1")
+ expect_true(dropTempView("table1"))
})
test_that("toRDD() returns an RRDD", {