author    Matei Zaharia <matei@eecs.berkeley.edu>  2013-02-25 19:34:32 -0800
committer Matei Zaharia <matei@eecs.berkeley.edu>  2013-02-25 19:34:32 -0800
commit    5d7b591cfe14177f083814fe3e81745c5d279810 (patch)
tree      0fd84b8a60d4ae79d3165f34159ded0e1b97c135 /streaming
parent    7b8853493248f4b2855a548facc407a3db939ba0 (diff)
download  spark-5d7b591cfe14177f083814fe3e81745c5d279810.tar.gz
          spark-5d7b591cfe14177f083814fe3e81745c5d279810.tar.bz2
          spark-5d7b591cfe14177f083814fe3e81745c5d279810.zip
Pass a code JAR to SparkContext in our examples. Fixes SPARK-594.
Diffstat (limited to 'streaming')
-rw-r--r--  streaming/src/main/scala/spark/streaming/api/java/JavaStreamingContext.scala | 17
1 file changed, 17 insertions, 0 deletions
diff --git a/streaming/src/main/scala/spark/streaming/api/java/JavaStreamingContext.scala b/streaming/src/main/scala/spark/streaming/api/java/JavaStreamingContext.scala
index 755407aecc..3d149a742c 100644
--- a/streaming/src/main/scala/spark/streaming/api/java/JavaStreamingContext.scala
+++ b/streaming/src/main/scala/spark/streaming/api/java/JavaStreamingContext.scala
@@ -49,6 +49,23 @@ class JavaStreamingContext(val ssc: StreamingContext) {
* @param appName Name to be used when registering with the scheduler
* @param batchDuration The time interval at which streaming data will be divided into batches
* @param sparkHome The SPARK_HOME directory on the slave nodes
+ * @param jarFile JAR file containing job code, to ship to cluster. This can be a path on the local
+ * file system or an HDFS, HTTP, HTTPS, or FTP URL.
+ */
+ def this(
+ master: String,
+ appName: String,
+ batchDuration: Duration,
+ sparkHome: String,
+ jarFile: String) =
+ this(new StreamingContext(master, appName, batchDuration, sparkHome, Seq(jarFile), Map()))
+
+ /**
+ * Creates a StreamingContext.
+ * @param master Name of the Spark Master
+ * @param appName Name to be used when registering with the scheduler
+ * @param batchDuration The time interval at which streaming data will be divided into batches
+ * @param sparkHome The SPARK_HOME directory on the slave nodes
* @param jars Collection of JARs to send to the cluster. These can be paths on the local file
* system or HDFS, HTTP, HTTPS, or FTP URLs.
*/
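
For reference, a minimal sketch (not part of the patch) of how an example program might call the new five-argument constructor added above. The master URL, JAR path, SPARK_HOME lookup, and object name are hypothetical placeholders.

import spark.streaming.Seconds
import spark.streaming.api.java.JavaStreamingContext

object JarShippingExample {
  def main(args: Array[String]) {
    // Hypothetical path to the JAR containing the job's compiled classes.
    val exampleJar = "/path/to/example-code.jar"

    // The new constructor wraps it in Seq(jarFile) and forwards it to the
    // underlying StreamingContext, which ships the JAR to the cluster so
    // worker nodes can load the job's classes.
    val jssc = new JavaStreamingContext(
      "spark://localhost:7077",      // master (placeholder)
      "JarShippingExample",          // appName registered with the scheduler
      Seconds(1),                    // batchDuration
      System.getenv("SPARK_HOME"),   // SPARK_HOME on the slave nodes
      exampleJar)                    // jarFile shipped to the cluster

    // ... define input DStreams and transformations here ...

    // The wrapped StreamingContext is exposed as the public val `ssc`,
    // so the job can be started through it.
    jssc.ssc.start()
  }
}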