author     Yu ISHIKAWA <yuu.ishikawa@gmail.com>             2015-07-31 09:33:38 -0700
committer  Shivaram Venkataraman <shivaram@cs.berkeley.edu>  2015-07-31 09:33:38 -0700
commit     fc0e57e5aba82a3f227fef05a843283e2ec893fc (patch)
tree       fa1b874a7600afc35065e1117cb4d7638ac56a5a /R/pkg
parent     6bba7509a932aa4d39266df2d15b1370b7aabbec (diff)
[SPARK-9053] [SPARKR] Fix spaces around parens, infix operators etc.
### JIRA

[[SPARK-9053] Fix spaces around parens, infix operators etc. - ASF JIRA](https://issues.apache.org/jira/browse/SPARK-9053)

### The Result of `lint-r`

[The result of lint-r at revision a4c83cb1e4b066cd60264b6572fd3e51d160d26a](https://gist.github.com/yu-iskw/d253d7f8ef351f86443d)

Author: Yu ISHIKAWA <yuu.ishikawa@gmail.com>

Closes #7584 from yu-iskw/SPARK-9053 and squashes the following commits:

613170f [Yu ISHIKAWA] Ignore a warning about a space before a left parenthesis
ede61e1 [Yu ISHIKAWA] Ignore two warnings about a space before a left parenthesis. TODO: after updating `lintr`, remove these ignores
de3e0db [Yu ISHIKAWA] Add '## nolint start' & '## nolint end' statements to ignore infix space warnings
e233ea8 [Yu ISHIKAWA] [SPARK-9053][SparkR] Fix spaces around parens, infix operators etc.
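The `nolint` markers referenced above are `lintr` suppression directives: everything between `# nolint start` and `# nolint end` is skipped by the linter. A minimal sketch of the two styles involved (the file and variable names are hypothetical, not part of this patch):

```r
# example.R -- illustrates the lint rules this patch works around.
n <- 10

# nolint start
bad <- 1:(n + 1)     # "(" not preceded by a space outside a function call:
                     # flagged by lintr's space-before-left-parenthesis check
# nolint end

good <- 1 : (n + 1)  # the spaced style adopted throughout this patch

# Linting the file reports nothing for the lines between the markers:
# lintr::lint("example.R")
```

In the Spark tree these checks are driven by the `dev/lint-r` script; its output for this change is linked in the gist above.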
Diffstat (limited to 'R/pkg')
-rw-r--r--  R/pkg/R/DataFrame.R                      | 4
-rw-r--r--  R/pkg/R/RDD.R                            | 7
-rw-r--r--  R/pkg/R/column.R                         | 2
-rw-r--r--  R/pkg/R/context.R                        | 2
-rw-r--r--  R/pkg/R/pairRDD.R                        | 2
-rw-r--r--  R/pkg/R/utils.R                          | 4
-rw-r--r--  R/pkg/inst/tests/test_binary_function.R | 2
-rw-r--r--  R/pkg/inst/tests/test_rdd.R             | 6
-rw-r--r--  R/pkg/inst/tests/test_sparkSQL.R        | 4
9 files changed, 21 insertions, 12 deletions
diff --git a/R/pkg/R/DataFrame.R b/R/pkg/R/DataFrame.R
index f4c93d3c7d..b31ad3729e 100644
--- a/R/pkg/R/DataFrame.R
+++ b/R/pkg/R/DataFrame.R
@@ -1322,9 +1322,11 @@ setMethod("write.df",
"org.apache.spark.sql.parquet")
}
allModes <- c("append", "overwrite", "error", "ignore")
+ # nolint start
if (!(mode %in% allModes)) {
stop('mode should be one of "append", "overwrite", "error", "ignore"')
}
+ # nolint end
jmode <- callJStatic("org.apache.spark.sql.api.r.SQLUtils", "saveMode", mode)
options <- varargsToEnv(...)
if (!is.null(path)) {
@@ -1384,9 +1386,11 @@ setMethod("saveAsTable",
"org.apache.spark.sql.parquet")
}
allModes <- c("append", "overwrite", "error", "ignore")
+ # nolint start
if (!(mode %in% allModes)) {
stop('mode should be one of "append", "overwrite", "error", "ignore"')
}
+ # nolint end
jmode <- callJStatic("org.apache.spark.sql.api.r.SQLUtils", "saveMode", mode)
options <- varargsToEnv(...)
callJMethod(df@sdf, "saveAsTable", tableName, source, jmode, options)
diff --git a/R/pkg/R/RDD.R b/R/pkg/R/RDD.R
index d2d0967092..2a013b3dbb 100644
--- a/R/pkg/R/RDD.R
+++ b/R/pkg/R/RDD.R
@@ -85,7 +85,9 @@ setMethod("initialize", "PipelinedRDD", function(.Object, prev, func, jrdd_val)
   isPipelinable <- function(rdd) {
     e <- rdd@env
+    # nolint start
     !(e$isCached || e$isCheckpointed)
+    # nolint end
   }

   if (!inherits(prev, "PipelinedRDD") || !isPipelinable(prev)) {
@@ -97,7 +99,8 @@ setMethod("initialize", "PipelinedRDD", function(.Object, prev, func, jrdd_val)
     # prev_serializedMode is used during the delayed computation of JRDD in getJRDD
   } else {
     pipelinedFunc <- function(partIndex, part) {
-      func(partIndex, prev@func(partIndex, part))
+      f <- prev@func
+      func(partIndex, f(partIndex, part))
     }
     .Object@func <- cleanClosure(pipelinedFunc)
     .Object@prev_jrdd <- prev@prev_jrdd # maintain the pipeline
@@ -841,7 +844,7 @@ setMethod("sampleRDD",
             if (withReplacement) {
               count <- rpois(1, fraction)
               if (count > 0) {
-                res[(len + 1):(len + count)] <- rep(list(elem), count)
+                res[ (len + 1) : (len + count) ] <- rep(list(elem), count)
                 len <- len + count
               }
             } else {
diff --git a/R/pkg/R/column.R b/R/pkg/R/column.R
index 2892e1416c..eeaf9f193b 100644
--- a/R/pkg/R/column.R
+++ b/R/pkg/R/column.R
@@ -65,7 +65,7 @@ functions <- c("min", "max", "sum", "avg", "mean", "count", "abs", "sqrt",
"acos", "asin", "atan", "cbrt", "ceiling", "cos", "cosh", "exp",
"expm1", "floor", "log", "log10", "log1p", "rint", "sign",
"sin", "sinh", "tan", "tanh", "toDegrees", "toRadians")
-binary_mathfunctions<- c("atan2", "hypot")
+binary_mathfunctions <- c("atan2", "hypot")
createOperator <- function(op) {
setMethod(op,
diff --git a/R/pkg/R/context.R b/R/pkg/R/context.R
index 43be9c904f..720990e1c6 100644
--- a/R/pkg/R/context.R
+++ b/R/pkg/R/context.R
@@ -121,7 +121,7 @@ parallelize <- function(sc, coll, numSlices = 1) {
     numSlices <- length(coll)

   sliceLen <- ceiling(length(coll) / numSlices)
-  slices <- split(coll, rep(1:(numSlices + 1), each = sliceLen)[1:length(coll)])
+  slices <- split(coll, rep(1: (numSlices + 1), each = sliceLen)[1:length(coll)])

   # Serialize each slice: obtain a list of raws, or a list of lists (slices) of
   # 2-tuples of raws
diff --git a/R/pkg/R/pairRDD.R b/R/pkg/R/pairRDD.R
index 83801d3209..199c3fd6ab 100644
--- a/R/pkg/R/pairRDD.R
+++ b/R/pkg/R/pairRDD.R
@@ -879,7 +879,7 @@ setMethod("sampleByKey",
               if (withReplacement) {
                 count <- rpois(1, frac)
                 if (count > 0) {
-                  res[(len + 1):(len + count)] <- rep(list(elem), count)
+                  res[ (len + 1) : (len + count) ] <- rep(list(elem), count)
                   len <- len + count
                 }
               } else {
diff --git a/R/pkg/R/utils.R b/R/pkg/R/utils.R
index 3f45589a50..4f9f4d9cad 100644
--- a/R/pkg/R/utils.R
+++ b/R/pkg/R/utils.R
@@ -32,7 +32,7 @@ convertJListToRList <- function(jList, flatten, logicalUpperBound = NULL,
   }

   results <- if (arrSize > 0) {
-    lapply(0:(arrSize - 1),
+    lapply(0 : (arrSize - 1),
            function(index) {
              obj <- callJMethod(jList, "get", as.integer(index))
@@ -572,7 +572,7 @@ mergePartitions <- function(rdd, zip) {
         keys <- list()
       }
       if (lengthOfValues > 1) {
-        values <- part[(lengthOfKeys + 1) : (len - 1)]
+        values <- part[ (lengthOfKeys + 1) : (len - 1) ]
       } else {
         values <- list()
       }
diff --git a/R/pkg/inst/tests/test_binary_function.R b/R/pkg/inst/tests/test_binary_function.R
index dca0657c57..f054ac9a87 100644
--- a/R/pkg/inst/tests/test_binary_function.R
+++ b/R/pkg/inst/tests/test_binary_function.R
@@ -40,7 +40,7 @@ test_that("union on two RDDs", {
   expect_equal(actual, c(as.list(nums), mockFile))
   expect_equal(getSerializedMode(union.rdd), "byte")

-  rdd<- map(text.rdd, function(x) {x})
+  rdd <- map(text.rdd, function(x) {x})
   union.rdd <- unionRDD(rdd, text.rdd)
   actual <- collect(union.rdd)
   expect_equal(actual, as.list(c(mockFile, mockFile)))
diff --git a/R/pkg/inst/tests/test_rdd.R b/R/pkg/inst/tests/test_rdd.R
index 6c3aaab8c7..71aed2bb9d 100644
--- a/R/pkg/inst/tests/test_rdd.R
+++ b/R/pkg/inst/tests/test_rdd.R
@@ -250,7 +250,7 @@ test_that("flatMapValues() on pairwise RDDs", {
   expect_equal(actual, list(list(1,1), list(1,2), list(2,3), list(2,4)))

   # Generate x to x+1 for every value
-  actual <- collect(flatMapValues(intRdd, function(x) { x:(x + 1) }))
+  actual <- collect(flatMapValues(intRdd, function(x) { x: (x + 1) }))
   expect_equal(actual,
                list(list(1L, -1), list(1L, 0), list(2L, 100), list(2L, 101),
                     list(2L, 1), list(2L, 2), list(1L, 200), list(1L, 201)))
@@ -293,7 +293,7 @@ test_that("sumRDD() on RDDs", {
 })

 test_that("keyBy on RDDs", {
-  func <- function(x) { x*x }
+  func <- function(x) { x * x }
   keys <- keyBy(rdd, func)
   actual <- collect(keys)
   expect_equal(actual, lapply(nums, function(x) { list(func(x), x) }))
@@ -311,7 +311,7 @@ test_that("repartition/coalesce on RDDs", {
   r2 <- repartition(rdd, 6)
   expect_equal(numPartitions(r2), 6L)
   count <- length(collectPartition(r2, 0L))
-  expect_true(count >=0 && count <= 4)
+  expect_true(count >= 0 && count <= 4)

   # coalesce
   r3 <- coalesce(rdd, 1)
diff --git a/R/pkg/inst/tests/test_sparkSQL.R b/R/pkg/inst/tests/test_sparkSQL.R
index 61c8a7ec7d..aca41aa6dc 100644
--- a/R/pkg/inst/tests/test_sparkSQL.R
+++ b/R/pkg/inst/tests/test_sparkSQL.R
@@ -666,10 +666,12 @@ test_that("column binary mathfunctions", {
   expect_equal(collect(select(df, atan2(df$a, df$b)))[2, "ATAN2(a, b)"], atan2(2, 6))
   expect_equal(collect(select(df, atan2(df$a, df$b)))[3, "ATAN2(a, b)"], atan2(3, 7))
   expect_equal(collect(select(df, atan2(df$a, df$b)))[4, "ATAN2(a, b)"], atan2(4, 8))
+  ## nolint start
   expect_equal(collect(select(df, hypot(df$a, df$b)))[1, "HYPOT(a, b)"], sqrt(1^2 + 5^2))
   expect_equal(collect(select(df, hypot(df$a, df$b)))[2, "HYPOT(a, b)"], sqrt(2^2 + 6^2))
   expect_equal(collect(select(df, hypot(df$a, df$b)))[3, "HYPOT(a, b)"], sqrt(3^2 + 7^2))
   expect_equal(collect(select(df, hypot(df$a, df$b)))[4, "HYPOT(a, b)"], sqrt(4^2 + 8^2))
+  ## nolint end
 })

 test_that("string operators", {
@@ -876,7 +878,7 @@ test_that("parquetFile works with multiple input paths", {
   write.df(df, parquetPath2, "parquet", mode="overwrite")
   parquetDF <- parquetFile(sqlContext, parquetPath, parquetPath2)
   expect_is(parquetDF, "DataFrame")
-  expect_equal(count(parquetDF), count(df)*2)
+  expect_equal(count(parquetDF), count(df) * 2)
 })

 test_that("describe() on a DataFrame", {