aboutsummaryrefslogtreecommitdiff
path: root/examples
diff options
context:
space:
mode:
authorMatei Zaharia <matei@eecs.berkeley.edu>2013-02-25 13:27:11 -0800
committerMatei Zaharia <matei@eecs.berkeley.edu>2013-02-25 13:27:11 -0800
commit01bd136ba5c5f47a82b67b8d34df5a6aeaf79080 (patch)
treea136071514c308a17a7c679bf79addaebef350a8 /examples
parent848321f9101987a25dfae000fb2ebf72ec203f69 (diff)
downloadspark-01bd136ba5c5f47a82b67b8d34df5a6aeaf79080.tar.gz
spark-01bd136ba5c5f47a82b67b8d34df5a6aeaf79080.tar.bz2
spark-01bd136ba5c5f47a82b67b8d34df5a6aeaf79080.zip
Use public method sparkContext instead of protected sc in streaming examples
Diffstat (limited to 'examples')
-rw-r--r--examples/src/main/scala/spark/streaming/examples/QueueStream.scala4
-rw-r--r--examples/src/main/scala/spark/streaming/examples/RawNetworkGrep.scala2
-rw-r--r--examples/src/main/scala/spark/streaming/examples/clickstream/PageViewStream.scala2
3 files changed, 4 insertions, 4 deletions
diff --git a/examples/src/main/scala/spark/streaming/examples/QueueStream.scala b/examples/src/main/scala/spark/streaming/examples/QueueStream.scala
index 2a265d021d..d83db7784d 100644
--- a/examples/src/main/scala/spark/streaming/examples/QueueStream.scala
+++ b/examples/src/main/scala/spark/streaming/examples/QueueStream.scala
@@ -30,10 +30,10 @@ object QueueStream {
// Create and push some RDDs into
for (i <- 1 to 30) {
- rddQueue += ssc.sc.makeRDD(1 to 1000, 10)
+ rddQueue += ssc.sparkContext.makeRDD(1 to 1000, 10)
Thread.sleep(1000)
}
ssc.stop()
System.exit(0)
}
-} \ No newline at end of file
+}
diff --git a/examples/src/main/scala/spark/streaming/examples/RawNetworkGrep.scala b/examples/src/main/scala/spark/streaming/examples/RawNetworkGrep.scala
index 66e709b7a3..cce0001426 100644
--- a/examples/src/main/scala/spark/streaming/examples/RawNetworkGrep.scala
+++ b/examples/src/main/scala/spark/streaming/examples/RawNetworkGrep.scala
@@ -34,7 +34,7 @@ object RawNetworkGrep {
val ssc = new StreamingContext(master, "RawNetworkGrep", Milliseconds(batchMillis))
// Warm up the JVMs on master and slave for JIT compilation to kick in
- RawTextHelper.warmUp(ssc.sc)
+ RawTextHelper.warmUp(ssc.sparkContext)
val rawStreams = (1 to numStreams).map(_ =>
ssc.rawSocketStream[String](host, port, StorageLevel.MEMORY_ONLY_SER_2)).toArray
diff --git a/examples/src/main/scala/spark/streaming/examples/clickstream/PageViewStream.scala b/examples/src/main/scala/spark/streaming/examples/clickstream/PageViewStream.scala
index fba72519a9..9a2ba30ee4 100644
--- a/examples/src/main/scala/spark/streaming/examples/clickstream/PageViewStream.scala
+++ b/examples/src/main/scala/spark/streaming/examples/clickstream/PageViewStream.scala
@@ -60,7 +60,7 @@ object PageViewStream {
.map("Unique active users: " + _)
// An external dataset we want to join to this stream
- val userList = ssc.sc.parallelize(
+ val userList = ssc.sparkContext.parallelize(
Map(1 -> "Patrick Wendell", 2->"Reynold Xin", 3->"Matei Zaharia").toSeq)
metric match {