about summary refs log tree commit diff
path: root/core
diff options
context:
space:
mode:
authorJosh Rosen <joshrosen@eecs.berkeley.edu>2012-12-28 16:20:38 -0800
committerJosh Rosen <joshrosen@eecs.berkeley.edu>2012-12-28 17:00:57 -0800
commitd64fa72d2e4a8290d15e65459337f544e55b3b48 (patch)
treef1db3f30eddf72c61d64405fc5e103a0a43b0575 /core
parentbd237d4a9d7f08eb143b2a2b8636a6a8453225ea (diff)
downloadspark-d64fa72d2e4a8290d15e65459337f544e55b3b48.tar.gz
spark-d64fa72d2e4a8290d15e65459337f544e55b3b48.tar.bz2
spark-d64fa72d2e4a8290d15e65459337f544e55b3b48.zip
Add addFile() and addJar() to JavaSparkContext.
Diffstat (limited to 'core')
-rw-r--r--core/src/main/scala/spark/api/java/JavaSparkContext.scala34
1 file changed, 34 insertions, 0 deletions
diff --git a/core/src/main/scala/spark/api/java/JavaSparkContext.scala b/core/src/main/scala/spark/api/java/JavaSparkContext.scala
index edbb187b1b..b7725313c4 100644
--- a/core/src/main/scala/spark/api/java/JavaSparkContext.scala
+++ b/core/src/main/scala/spark/api/java/JavaSparkContext.scala
@@ -301,6 +301,40 @@ class JavaSparkContext(val sc: SparkContext) extends JavaSparkContextVarargsWork
* (in that order of preference). If neither of these is set, return None.
*/
def getSparkHome(): Option[String] = sc.getSparkHome()
+
+ /**
+ * Add a file to be downloaded into the working directory of this Spark job on every node.
+ * The `path` passed can be either a local file, a file in HDFS (or other Hadoop-supported
+ * filesystems), or an HTTP, HTTPS or FTP URI.
+ */
+ def addFile(path: String) {
+ sc.addFile(path)
+ }
+
+ /**
+ * Adds a JAR dependency for all tasks to be executed on this SparkContext in the future.
+ * The `path` passed can be either a local file, a file in HDFS (or other Hadoop-supported
+ * filesystems), or an HTTP, HTTPS or FTP URI.
+ */
+ def addJar(path: String) {
+ sc.addJar(path)
+ }
+
+ /**
+ * Clear the job's list of JARs added by `addJar` so that they do not get downloaded to
+ * any new nodes.
+ */
+ def clearJars() {
+ sc.clearJars()
+ }
+
+ /**
+ * Clear the job's list of files added by `addFile` so that they do not get downloaded to
+ * any new nodes.
+ */
+ def clearFiles() {
+ sc.clearFiles()
+ }
}
object JavaSparkContext {