author    Yuu ISHIKAWA <yuu.ishikawa@gmail.com>             2015-07-30 10:00:27 -0700
committer Shivaram Venkataraman <shivaram@cs.berkeley.edu>  2015-07-30 10:00:27 -0700
commit    7492a33fdd074446c30c657d771a69932a00246d (patch)
tree      1525ee76df94c6204a0c13807646c2553816d1c1
parent    81464f2a8243c6ae2a39bac7ebdc50d4f60af451 (diff)
[SPARK-9248] [SPARKR] Closing curly-braces should always be on their own line
### JIRA

[[SPARK-9248] Closing curly-braces should always be on their own line - ASF JIRA](https://issues.apache.org/jira/browse/SPARK-9248)

## The result of `dev/lint-r`

[The result of `dev/lint-r` for SPARK-9248 at revision 6175d6cfe795fbd88e3ee713fac375038a3993a8](https://gist.github.com/yu-iskw/96cadcea4ce664c41f81)

Author: Yuu ISHIKAWA <yuu.ishikawa@gmail.com>

Closes #7795 from yu-iskw/SPARK-9248 and squashes the following commits:

c8eccd3 [Yuu ISHIKAWA] [SPARK-9248][SparkR] Closing curly-braces should always be on their own line
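For context, the rule exercised by `dev/lint-r` permits two brace styles, and both appear in this patch. A minimal sketch using a hypothetical generic `foo` (not part of SparkR):

```r
# Flagged by the linter: the closing brace shares a line with the body.
setGeneric("foo", function(x) {
  standardGeneric("foo") })

# Clean: the closing curly-brace sits on its own line ...
setGeneric("foo", function(x) {
  standardGeneric("foo")
})

# ... or the entire function fits on a single line.
setGeneric("foo", function(x) { standardGeneric("foo") })
```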
-rw-r--r--  R/pkg/R/generics.R                14
-rw-r--r--  R/pkg/R/pairRDD.R                  4
-rw-r--r--  R/pkg/R/sparkR.R                   9
-rw-r--r--  R/pkg/inst/tests/test_sparkSQL.R   6
4 files changed, 19 insertions, 14 deletions
diff --git a/R/pkg/R/generics.R b/R/pkg/R/generics.R
index 836e0175c3..a3a121058e 100644
--- a/R/pkg/R/generics.R
+++ b/R/pkg/R/generics.R
@@ -254,8 +254,10 @@ setGeneric("flatMapValues", function(X, FUN) { standardGeneric("flatMapValues")
# @rdname intersection
# @export
-setGeneric("intersection", function(x, other, numPartitions = 1) {
- standardGeneric("intersection") })
+setGeneric("intersection",
+ function(x, other, numPartitions = 1) {
+ standardGeneric("intersection")
+ })
# @rdname keys
# @export
@@ -489,9 +491,7 @@ setGeneric("sample",
#' @rdname sample
#' @export
setGeneric("sample_frac",
- function(x, withReplacement, fraction, seed) {
- standardGeneric("sample_frac")
- })
+ function(x, withReplacement, fraction, seed) { standardGeneric("sample_frac") })
#' @rdname saveAsParquetFile
#' @export
@@ -553,8 +553,8 @@ setGeneric("withColumn", function(x, colName, col) { standardGeneric("withColumn
#' @rdname withColumnRenamed
#' @export
-setGeneric("withColumnRenamed", function(x, existingCol, newCol) {
- standardGeneric("withColumnRenamed") })
+setGeneric("withColumnRenamed",
+ function(x, existingCol, newCol) { standardGeneric("withColumnRenamed") })
###################### Column Methods ##########################
diff --git a/R/pkg/R/pairRDD.R b/R/pkg/R/pairRDD.R
index ebc6ff65e9..83801d3209 100644
--- a/R/pkg/R/pairRDD.R
+++ b/R/pkg/R/pairRDD.R
@@ -202,8 +202,8 @@ setMethod("partitionBy",
packageNamesArr <- serialize(.sparkREnv$.packages,
connection = NULL)
- broadcastArr <- lapply(ls(.broadcastNames), function(name) {
- get(name, .broadcastNames) })
+ broadcastArr <- lapply(ls(.broadcastNames),
+ function(name) { get(name, .broadcastNames) })
jrdd <- getJRDD(x)
# We create a PairwiseRRDD that extends RDD[(Int, Array[Byte])],
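The pairRDD.R change applies the single-line form to an anonymous function passed to `lapply`. A self-contained sketch of the pattern, using a throwaway environment in place of SparkR's internal `.broadcastNames`:

```r
# A plain environment standing in for .broadcastNames, for illustration only.
env <- new.env()
assign("a", 1, envir = env)
assign("b", 2, envir = env)

# One-line function body: lint-clean because the whole expression,
# including the closing brace, sits on a single line.
values <- lapply(ls(env), function(name) { get(name, envir = env) })
str(values)  # list of 1 and 2
```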
diff --git a/R/pkg/R/sparkR.R b/R/pkg/R/sparkR.R
index 76c15875b5..e83104f116 100644
--- a/R/pkg/R/sparkR.R
+++ b/R/pkg/R/sparkR.R
@@ -22,7 +22,8 @@
connExists <- function(env) {
tryCatch({
exists(".sparkRCon", envir = env) && isOpen(env[[".sparkRCon"]])
- }, error = function(err) {
+ },
+ error = function(err) {
return(FALSE)
})
}
@@ -153,7 +154,8 @@ sparkR.init <- function(
.sparkREnv$backendPort <- backendPort
tryCatch({
connectBackend("localhost", backendPort)
- }, error = function(err) {
+ },
+ error = function(err) {
stop("Failed to connect JVM\n")
})
@@ -264,7 +266,8 @@ sparkRHive.init <- function(jsc = NULL) {
ssc <- callJMethod(sc, "sc")
hiveCtx <- tryCatch({
newJObject("org.apache.spark.sql.hive.HiveContext", ssc)
- }, error = function(err) {
+ },
+ error = function(err) {
stop("Spark SQL is not built with Hive support")
})
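Every sparkR.R hunk applies the same fix: `error = function(err) {` moves onto its own line so the closing brace of the `tryCatch` expression block stands alone. A runnable sketch of the resulting shape, where `stop()` merely simulates a failed backend call:

```r
result <- tryCatch({
  stop("simulated failure")  # stands in for connectBackend() and friends
},
error = function(err) {
  FALSE  # the real handlers return FALSE, stop(), or skip()
})
print(result)  # FALSE
```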
diff --git a/R/pkg/inst/tests/test_sparkSQL.R b/R/pkg/inst/tests/test_sparkSQL.R
index 62fe48a5d6..d5db97248c 100644
--- a/R/pkg/inst/tests/test_sparkSQL.R
+++ b/R/pkg/inst/tests/test_sparkSQL.R
@@ -112,7 +112,8 @@ test_that("create DataFrame from RDD", {
df <- jsonFile(sqlContext, jsonPathNa)
hiveCtx <- tryCatch({
newJObject("org.apache.spark.sql.hive.test.TestHiveContext", ssc)
- }, error = function(err) {
+ },
+ error = function(err) {
skip("Hive is not build with SparkSQL, skipped")
})
sql(hiveCtx, "CREATE TABLE people (name string, age double, height float)")
@@ -602,7 +603,8 @@ test_that("write.df() as parquet file", {
test_that("test HiveContext", {
hiveCtx <- tryCatch({
newJObject("org.apache.spark.sql.hive.test.TestHiveContext", ssc)
- }, error = function(err) {
+ },
+ error = function(err) {
skip("Hive is not build with SparkSQL, skipped")
})
df <- createExternalTable(hiveCtx, "json", jsonPath, "json")