about summary refs log tree commit diff
path: root/streaming
diff options
context:
space:
mode:
authorTathagata Das <tathagata.das1565@gmail.com>2013-02-24 13:01:54 -0800
committerTathagata Das <tathagata.das1565@gmail.com>2013-02-24 13:01:54 -0800
commit28f8b721f65fc8e699f208c5dc64d90822a85d91 (patch)
treea8b14c0c32a294595f31bc679cc45337fc0b7e02 /streaming
parentf282bc496062377ba17b6cf4fefc653ebe70b360 (diff)
downloadspark-28f8b721f65fc8e699f208c5dc64d90822a85d91.tar.gz
spark-28f8b721f65fc8e699f208c5dc64d90822a85d91.tar.bz2
spark-28f8b721f65fc8e699f208c5dc64d90822a85d91.zip
Added back the initial spark job before starting streaming receivers
Diffstat (limited to 'streaming')
-rw-r--r--streaming/src/main/scala/spark/streaming/NetworkInputTracker.scala | 2
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/streaming/src/main/scala/spark/streaming/NetworkInputTracker.scala b/streaming/src/main/scala/spark/streaming/NetworkInputTracker.scala
index 64972fd5cd..b159d26c02 100644
--- a/streaming/src/main/scala/spark/streaming/NetworkInputTracker.scala
+++ b/streaming/src/main/scala/spark/streaming/NetworkInputTracker.scala
@@ -141,7 +141,7 @@ class NetworkInputTracker(
}
// Run the dummy Spark job to ensure that all slaves have registered.
// This avoids all the receivers to be scheduled on the same node.
- //ssc.sparkContext.makeRDD(1 to 100, 100).map(x => (x, 1)).reduceByKey(_ + _, 20).collect()
+ ssc.sparkContext.makeRDD(1 to 50, 50).map(x => (x, 1)).reduceByKey(_ + _, 20).collect()
// Distribute the receivers and start them
ssc.sparkContext.runJob(tempRDD, startReceiver)