#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# context.R: SparkContext driven functions

getMinPartitions <- function(sc, minPartitions) {
  if (is.null(minPartitions)) {
    defaultParallelism <- callJMethod(sc, "defaultParallelism")
    minPartitions <- min(defaultParallelism, 2)
  }
  as.integer(minPartitions)
}
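
# Illustrative behaviour of getMinPartitions() above (the parallelism value is an
# assumption, not taken from a real context):
#   getMinPartitions(sc, NULL)  # with defaultParallelism == 8, returns 2L (min(8, 2))
#   getMinPartitions(sc, 10)    # returns 10L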

#' Create an RDD from a text file.
#'
#' This function reads a text file from HDFS, a local file system (available on all
#' nodes), or any Hadoop-supported file system URI, and creates an
#' RDD of strings from it.
#'
#' @param sc SparkContext to use
#' @param path Path of file to read. A vector of multiple paths is allowed.
#' @param minPartitions Minimum number of partitions to be created. If NULL, the default
#'  value is chosen based on available parallelism.
#' @return RDD where each item is of type \code{character}
#' @noRd
#' @examples
#'\dontrun{
#'  sc <- sparkR.init()
#'  lines <- textFile(sc, "myfile.txt")
#'}
textFile <- function(sc, path, minPartitions = NULL) {
  # Allow the user to have a more flexible definition of the text file path
  path <- suppressWarnings(normalizePath(path))
  # Convert a string vector of paths to a string containing comma separated paths
  path <- paste(path, collapse = ",")

  jrdd <- callJMethod(sc, "textFile", path, getMinPartitions(sc, minPartitions))
  # jrdd is of type JavaRDD[String]
  RDD(jrdd, "string")
}
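
# Illustrative use of textFile() above with multiple paths (the paths are hypothetical);
# the character vector is collapsed into one comma-separated string before the JVM call:
#   lines <- textFile(sc, c("hdfs://nn/data/part-0.txt", "hdfs://nn/data/part-1.txt"), 4L)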

#' Load an RDD saved as a SequenceFile containing serialized objects.
#'
#' The file to be loaded should be one that was previously generated by calling
#' saveAsObjectFile() of the RDD class.
#'
#' @param sc SparkContext to use
#' @param path Path of file to read. A vector of multiple paths is allowed.
#' @param minPartitions Minimum number of partitions to be created. If NULL, the default
#'  value is chosen based on available parallelism.
#' @return RDD containing serialized R objects.
#' @seealso saveAsObjectFile
#' @noRd
#' @examples
#'\dontrun{
#'  sc <- sparkR.init()
#'  rdd <- objectFile(sc, "myfile")
#'}
objectFile <- function(sc, path, minPartitions = NULL) {
  # Allow the user to have a more flexible definition of the text file path
  path <- suppressWarnings(normalizePath(path))
  # Convert a string vector of paths to a string containing comma separated paths
  path <- paste(path, collapse = ",")

  jrdd <- callJMethod(sc, "objectFile", path, getMinPartitions(sc, minPartitions))
  # Assume the RDD contains serialized R objects.
  RDD(jrdd, "byte")
}
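
# Illustrative round trip for objectFile() above (the path is hypothetical), assuming the
# data was previously written with saveAsObjectFile():
#   rdd <- parallelize(sc, list(1, 2, 3))
#   saveAsObjectFile(rdd, "/tmp/objrdd")
#   restored <- objectFile(sc, "/tmp/objrdd")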

#' Create an RDD from a homogeneous list or vector.
#'
#' This function creates an RDD from a local homogeneous list in R. The elements
#' in the list are split into \code{numSlices} slices and distributed to nodes
#' in the cluster.
#'
#' If the size of the serialized slices exceeds spark.r.maxAllocationLimit (200MB by default), the
#' function writes the data to disk and sends the file name to the JVM. To keep each slice below
#' that limit, the number of slices may also be increased.
#'
#' In 2.2.0 we are changing how numSlices is used/computed to better handle
#' 1 < (length(coll) / numSlices) << length(coll), and to get the exact number of slices.
#' This change affects both createDataFrame and spark.lapply.
#' In the specific case where parallelize is used to convert an R native object into a
#' SparkDataFrame, numSlices has always been kept at the default of 1. When the object is large,
#' we explicitly set the parallelism to numSlices (which is still 1).
#'
#' Specifically, we are changing the split positions to match the calculation in positions() of
#' ParallelCollectionRDD in Spark.
#'
#' @param sc SparkContext to use
#' @param coll collection to parallelize
#' @param numSlices number of partitions to create in the RDD
#' @return an RDD created from this collection
#' @noRd
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' rdd <- parallelize(sc, 1:10, 2)
#' # The RDD should contain 10 elements
#' length(rdd)
#'}
parallelize <- function(sc, coll, numSlices = 1) {
  # TODO: bound/safeguard numSlices
  # TODO: unit tests for if the split works for all primitives
  # TODO: support matrix, data frame, etc

  # Note, for data.frame, createDataFrame turns it into a list before it calls here.
  # nolint start
  # suppress lintr warning: Place a space before left parenthesis, except in a function call.
  if ((!is.list(coll) && !is.vector(coll)) || is.data.frame(coll)) {
  # nolint end
    if (is.data.frame(coll)) {
      message(paste("context.R: A data frame is parallelized by columns."))
    } else {
      if (is.matrix(coll)) {
        message(paste("context.R: A matrix is parallelized by elements."))
      } else {
        message(paste("context.R: parallelize() currently only supports lists and vectors.",
                      "Calling as.list() to coerce coll into a list."))
      }
    }
    coll <- as.list(coll)
  }

  sizeLimit <- getMaxAllocationLimit(sc)
  objectSize <- object.size(coll)

  # For large objects we make sure the size of each slice is also smaller than sizeLimit
  numSerializedSlices <- max(numSlices, ceiling(objectSize / sizeLimit))
  if (numSerializedSlices > length(coll))
    numSerializedSlices <- length(coll)

  # Generate the slice id to which each element is assigned.
  # For instance, for a numSerializedSlices of 22 and a length of 50:
  #  [1]  0  0  2  2  4  4  6  6  6  9  9 11 11 13 13 15 15 15 18 18 20 20 22 22 22
  # [26] 25 25 27 27 29 29 31 31 31 34 34 36 36 38 38 40 40 40 43 43 45 45 47 47 47
  # Notice that the slice groups with 3 elements (i.e. 6, 15, 22) are roughly evenly spaced.
  # We are reimplementing the calculation in the positions method of ParallelCollectionRDD.
  splits <- if (numSerializedSlices > 0) {
    unlist(lapply(0: (numSerializedSlices - 1), function(x) {
      # nolint start
      start <- trunc((x * length(coll)) / numSerializedSlices)
      end <- trunc(((x + 1) * length(coll)) / numSerializedSlices)
      # nolint end
      rep(start, end - start)
    }))
  } else {
    1
  }
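  # Small worked example of the calculation above (values chosen for illustration):
  # for length(coll) == 5 and numSerializedSlices == 2, the loop yields
  # splits == c(0, 0, 2, 2, 2), so split() below produces slices of sizes 2 and 3.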

  slices <- split(coll, splits)

  # Serialize each slice: obtain a list of raws, or a list of lists (slices) of
  # 2-tuples of raws
  serializedSlices <- lapply(slices, serialize, connection = NULL)

  # The RPC backend cannot handle arguments larger than 2GB (INT_MAX).
  # If the serialized data is safely below that threshold, we send it over the RPC channel.
  # Otherwise, we write it to a file and send the file name
  if (objectSize < sizeLimit) {
    jrdd <- callJStatic("org.apache.spark.api.r.RRDD", "createRDDFromArray", sc, serializedSlices)
  } else {
    fileName <- writeToTempFile(serializedSlices)
    jrdd <- tryCatch(callJStatic(
        "org.apache.spark.api.r.RRDD", "createRDDFromFile", sc, fileName, as.integer(numSlices)),
      finally = {
        file.remove(fileName)
    })
  }

  RDD(jrdd, "byte")
}

getMaxAllocationLimit <- function(sc) {
  conf <- callJMethod(sc, "getConf")
  as.numeric(
    callJMethod(conf,
      "get",
      "spark.r.maxAllocationLimit",
      toString(.Machine$integer.max / 10) # Default to a safe value: 200MB
  ))
}
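
# The limit can be overridden through the Spark configuration; a sketch (the value is an
# assumption, expressed in bytes):
#   sparkR.session(sparkConfig = list(spark.r.maxAllocationLimit = "1073741824"))  # ~1GB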

writeToTempFile <- function(serializedSlices) {
  fileName <- tempfile()
  conn <- file(fileName, "wb")
  for (slice in serializedSlices) {
    writeBin(as.integer(length(slice)), conn, endian = "big")
    writeBin(slice, conn, endian = "big")
  }
  close(conn)
  fileName
}
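
# On-disk layout produced by writeToTempFile() above: for every slice, a 4-byte big-endian
# integer length followed by that many serialized bytes. A hypothetical R-side reader (the
# real consumer is createRDDFromFile on the JVM side) for a single slice:
#   conn <- file(fileName, "rb")
#   len <- readBin(conn, "integer", n = 1, endian = "big")
#   slice <- unserialize(readBin(conn, "raw", n = len))
#   close(conn)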

#' Include the specified package on all workers
#'
#' This function can be used to include a package on all workers before the
#' user's code is executed. This is useful in scenarios where other R package
#' functions are used in a function passed to functions like \code{lapply}.
#' NOTE: The package is assumed to be installed on every node in the Spark
#' cluster.
#'
#' @param sc SparkContext to use
#' @param pkg Package name
#' @noRd
#' @examples
#'\dontrun{
#'  library(Matrix)
#'
#'  sc <- sparkR.init()
#'  # Include the matrix library we will be using
#'  includePackage(sc, Matrix)
#'
#'  generateSparse <- function(x) {
#'    sparseMatrix(i=c(1, 2, 3), j=c(1, 2, 3), x=c(1, 2, 3))
#'  }
#'
#'  rdd <- lapplyPartition(parallelize(sc, 1:2, 2L), generateSparse)
#'  collect(rdd)
#'}
includePackage <- function(sc, pkg) {
  pkg <- as.character(substitute(pkg))
  if (exists(".packages", .sparkREnv)) {
    packages <- .sparkREnv$.packages
  } else {
    packages <- list()
  }
  packages <- c(packages, pkg)
  .sparkREnv$.packages <- packages
}

#' Broadcast a variable to all workers
#'
#' Broadcast a read-only variable to the cluster, returning a \code{Broadcast}
#' object for reading it in distributed functions.
#'
#' @param sc Spark Context to use
#' @param object Object to be broadcast
#' @noRd
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' rdd <- parallelize(sc, 1:2, 2L)
#'
#' # Large Matrix object that we want to broadcast
#' randomMat <- matrix(nrow=100, ncol=10, data=rnorm(1000))
#' randomMatBr <- broadcast(sc, randomMat)
#'
#' # Use the broadcast variable inside the function
#' useBroadcast <- function(x) {
#'   sum(value(randomMatBr) * x)
#' }
#' sumRDD <- lapply(rdd, useBroadcast)
#'}
broadcast <- function(sc, object) {
  objName <- as.character(substitute(object))
  serializedObj <- serialize(object, connection = NULL)

  jBroadcast <- callJMethod(sc, "broadcast", serializedObj)
  id <- as.character(callJMethod(jBroadcast, "id"))

  Broadcast(id, object, jBroadcast, objName)
}

#' Set the checkpoint directory
#'
#' Set the directory under which RDDs are going to be checkpointed. The
#' directory must be an HDFS path if running on a cluster.
#'
#' @param sc Spark Context to use
#' @param dirName Directory path
#' @noRd
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' setCheckpointDir(sc, "~/checkpoint")
#' rdd <- parallelize(sc, 1:2, 2L)
#' checkpoint(rdd)
#'}
setCheckpointDirSC <- function(sc, dirName) {
  invisible(callJMethod(sc, "setCheckpointDir", suppressWarnings(normalizePath(dirName))))
}

#' Add a file or directory to be downloaded with this Spark job on every node.
#'
#' The path passed can be either a local file, a file in HDFS (or other Hadoop-supported
#' filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs,
#' use spark.getSparkFiles(fileName) to find its download location.
#'
#' A directory can be given if the recursive option is set to true.
#' Currently directories are only supported for Hadoop-supported filesystems.
#' Refer to Hadoop-supported filesystems at \url{https://wiki.apache.org/hadoop/HCFS}.
#'
#' @rdname spark.addFile
#' @param path The path of the file to be added
#' @param recursive Whether to add files recursively from the path. Default is FALSE.
#' @export
#' @examples
#'\dontrun{
#' spark.addFile("~/myfile")
#'}
#' @note spark.addFile since 2.1.0
spark.addFile <- function(path, recursive = FALSE) {
  sc <- getSparkContext()
  invisible(callJMethod(sc, "addFile", suppressWarnings(normalizePath(path)), recursive))
}

#' Get the root directory that contains files added through spark.addFile.
#'
#' @rdname spark.getSparkFilesRootDirectory
#' @return the root directory that contains files added through spark.addFile
#' @export
#' @examples
#'\dontrun{
#' spark.getSparkFilesRootDirectory()
#'}
#' @note spark.getSparkFilesRootDirectory since 2.1.0
spark.getSparkFilesRootDirectory <- function() {
  callJStatic("org.apache.spark.SparkFiles", "getRootDirectory")
}

#' Get the absolute path of a file added through spark.addFile.
#'
#' @rdname spark.getSparkFiles
#' @param fileName The name of the file added through spark.addFile
#' @return the absolute path of a file added through spark.addFile.
#' @export
#' @examples
#'\dontrun{
#' spark.getSparkFiles("myfile")
#'}
#' @note spark.getSparkFiles since 2.1.0
spark.getSparkFiles <- function(fileName) {
  callJStatic("org.apache.spark.SparkFiles", "get", as.character(fileName))
}
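
# Illustrative combination of spark.addFile() and spark.getSparkFiles() (the file name and
# path are hypothetical):
#   spark.addFile("/tmp/lookup.csv")
#   counts <- spark.lapply(1:3, function(x) {
#     lookup <- read.csv(spark.getSparkFiles("lookup.csv"))
#     nrow(lookup) * x
#   })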

#' Run a function over a list of elements, distributing the computations with Spark
#'
#' Run a function over a list of elements, distributing the computations with Spark. The function
#' is applied to the elements of the list in a manner similar to doParallel or lapply, with the
#' computations distributed using Spark. It is conceptually the same as the following code:
#'   lapply(list, func)
#'
#' Known limitations:
#' \itemize{
#'    \item variable scoping and capture: compared to R's rich support for variable resolution,
#'    the distributed nature of SparkR limits how variables are resolved at runtime. All the
#'    variables available through lexical scoping are embedded in the closure of the function and
#'    available as read-only variables within it. Environment variables should be stored in
#'    temporary variables outside the function, and not accessed directly within the function.
#'
#'   \item loading external packages: In order to use a package, you need to load it inside the
#'   closure. For example, if you rely on the MASS package, here is how you would use it:
#'   \preformatted{
#'     train <- function(hyperparam) {
#'       library(MASS)
#'       model <- lm.ridge(y ~ x + z, data, lambda = hyperparam)
#'       model
#'     }
#'   }
#' }
#'
#' @rdname spark.lapply
#' @param list the list of elements
#' @param func a function that takes one argument.
#' @return a list of results (the exact type being determined by the function)
#' @export
#' @examples
#'\dontrun{
#' sparkR.session()
#' doubled <- spark.lapply(1:10, function(x){2 * x})
#'}
#' @note spark.lapply since 2.0.0
spark.lapply <- function(list, func) {
  sc <- getSparkContext()
  rdd <- parallelize(sc, list, length(list))
  results <- map(rdd, func)
  local <- collectRDD(results)
  local
}
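
# Illustrative closure capture for spark.lapply(), following the scoping note above: bind
# values a worker needs to local variables before the call (the variable names are
# hypothetical):
#   baseUrl <- Sys.getenv("MY_BASE_URL")
#   urls <- spark.lapply(1:3, function(i) paste0(baseUrl, "/", i))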

#' Set new log level
#'
#' Set new log level: "ALL", "DEBUG", "ERROR", "FATAL", "INFO", "OFF", "TRACE", "WARN"
#'
#' @rdname setLogLevel
#' @param level New log level
#' @export
#' @examples
#'\dontrun{
#' setLogLevel("ERROR")
#'}
#' @note setLogLevel since 2.0.0
setLogLevel <- function(level) {
  sc <- getSparkContext()
  invisible(callJMethod(sc, "setLogLevel", level))
}

#' Set checkpoint directory
#'
#' Set the directory under which SparkDataFrames are going to be checkpointed. The directory must
#' be an HDFS path if running on a cluster.
#'
#' @rdname setCheckpointDir
#' @param directory Directory path to checkpoint to
#' @seealso \link{checkpoint}
#' @export
#' @examples
#'\dontrun{
#' setCheckpointDir("/checkpoint")
#'}
#' @note setCheckpointDir since 2.2.0
setCheckpointDir <- function(directory) {
  sc <- getSparkContext()
  invisible(callJMethod(sc, "setCheckpointDir", suppressWarnings(normalizePath(directory))))
}