From c23cd72b4bbcbf5f615636095c69e9a2e39bfbdd Mon Sep 17 00:00:00 2001 From: jerryshao Date: Sat, 12 Oct 2013 15:02:57 +0800 Subject: Upgrade Kafka 0.7.2 to Kafka 0.8.0-beta1 for Spark Streaming --- examples/pom.xml | 28 +++++-- .../streaming/examples/JavaKafkaWordCount.java | 98 ++++++++++++++++++++++ .../spark/streaming/examples/KafkaWordCount.scala | 28 ++++--- 3 files changed, 135 insertions(+), 19 deletions(-) create mode 100644 examples/src/main/java/org/apache/spark/streaming/examples/JavaKafkaWordCount.java (limited to 'examples') diff --git a/examples/pom.xml b/examples/pom.xml index b8c020a321..b97e6af288 100644 --- a/examples/pom.xml +++ b/examples/pom.xml @@ -32,13 +32,20 @@ http://spark.incubator.apache.org/ - - lib - file://${project.basedir}/lib + apache-repo + Apache Repository + https://repository.apache.org/content/repositories/releases + + true + + + false + + org.apache.spark @@ -81,9 +88,18 @@ org.apache.kafka - kafka - 0.7.2-spark - provided + kafka_2.9.2 + 0.8.0-beta1 + + + com.sun.jmx + jmxri + + + com.sun.jdmk + jmxtools + + org.eclipse.jetty diff --git a/examples/src/main/java/org/apache/spark/streaming/examples/JavaKafkaWordCount.java b/examples/src/main/java/org/apache/spark/streaming/examples/JavaKafkaWordCount.java new file mode 100644 index 0000000000..9a8e4209ed --- /dev/null +++ b/examples/src/main/java/org/apache/spark/streaming/examples/JavaKafkaWordCount.java @@ -0,0 +1,98 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.streaming.examples; + +import java.util.Map; +import java.util.HashMap; + +import com.google.common.collect.Lists; +import org.apache.spark.api.java.function.FlatMapFunction; +import org.apache.spark.api.java.function.Function; +import org.apache.spark.api.java.function.Function2; +import org.apache.spark.api.java.function.PairFunction; +import org.apache.spark.streaming.Duration; +import org.apache.spark.streaming.api.java.JavaDStream; +import org.apache.spark.streaming.api.java.JavaPairDStream; +import org.apache.spark.streaming.api.java.JavaStreamingContext; +import scala.Tuple2; + +/** + * Consumes messages from one or more topics in Kafka and does wordcount. + * Usage: JavaKafkaWordCount + * is the Spark master URL. In local mode, should be 'local[n]' with n > 1. 
+ * is a list of one or more zookeeper servers that make quorum + * is the name of kafka consumer group + * is a list of one or more kafka topics to consume from + * is the number of threads the kafka consumer should use + * + * Example: + * `./run-example org.apache.spark.streaming.examples.JavaKafkaWordCount local[2] zoo01,zoo02, + * zoo03 my-consumer-group topic1,topic2 1` + */ + +public class JavaKafkaWordCount { + public static void main(String[] args) { + if (args.length < 5) { + System.err.println("Usage: KafkaWordCount "); + System.exit(1); + } + + // Create the context with a 1 second batch size + JavaStreamingContext ssc = new JavaStreamingContext(args[0], "NetworkWordCount", + new Duration(2000), System.getenv("SPARK_HOME"), System.getenv("SPARK_EXAMPLES_JAR")); + + int numThreads = Integer.parseInt(args[4]); + Map topicMap = new HashMap(); + String[] topics = args[3].split(","); + for (String topic: topics) { + topicMap.put(topic, numThreads); + } + + JavaPairDStream messages = ssc.kafkaStream(args[1], args[2], topicMap); + + JavaDStream lines = messages.map(new Function, String>() { + @Override + public String call(Tuple2 tuple2) throws Exception { + return tuple2._2(); + } + }); + + JavaDStream words = lines.flatMap(new FlatMapFunction() { + @Override + public Iterable call(String x) { + return Lists.newArrayList(x.split(" ")); + } + }); + + JavaPairDStream wordCounts = words.map( + new PairFunction() { + @Override + public Tuple2 call(String s) throws Exception { + return new Tuple2(s, 1); + } + }).reduceByKey(new Function2() { + @Override + public Integer call(Integer i1, Integer i2) throws Exception { + return i1 + i2; + } + }); + + wordCounts.print(); + ssc.start(); + } +} diff --git a/examples/src/main/scala/org/apache/spark/streaming/examples/KafkaWordCount.scala b/examples/src/main/scala/org/apache/spark/streaming/examples/KafkaWordCount.scala index 12f939d5a7..570ba4c81a 100644 --- a/examples/src/main/scala/org/apache/spark/streaming/examples/KafkaWordCount.scala +++ b/examples/src/main/scala/org/apache/spark/streaming/examples/KafkaWordCount.scala @@ -18,13 +18,11 @@ package org.apache.spark.streaming.examples import java.util.Properties -import kafka.message.Message -import kafka.producer.SyncProducerConfig + import kafka.producer._ -import org.apache.spark.SparkContext + import org.apache.spark.streaming._ import org.apache.spark.streaming.StreamingContext._ -import org.apache.spark.storage.StorageLevel import org.apache.spark.streaming.util.RawTextHelper._ /** @@ -54,9 +52,10 @@ object KafkaWordCount { ssc.checkpoint("checkpoint") val topicpMap = topics.split(",").map((_,numThreads.toInt)).toMap - val lines = ssc.kafkaStream(zkQuorum, group, topicpMap) + val lines = ssc.kafkaStream(zkQuorum, group, topicpMap).map(_._2) val words = lines.flatMap(_.split(" ")) - val wordCounts = words.map(x => (x, 1l)).reduceByKeyAndWindow(add _, subtract _, Minutes(10), Seconds(2), 2) + val wordCounts = words.map(x => (x, 1l)) + .reduceByKeyAndWindow(add _, subtract _, Minutes(10), Seconds(2), 2) wordCounts.print() ssc.start() @@ -68,15 +67,16 @@ object KafkaWordCountProducer { def main(args: Array[String]) { if (args.length < 2) { - System.err.println("Usage: KafkaWordCountProducer ") + System.err.println("Usage: KafkaWordCountProducer " + + " ") System.exit(1) } - val Array(zkQuorum, topic, messagesPerSec, wordsPerMessage) = args + val Array(brokers, topic, messagesPerSec, wordsPerMessage) = args // Zookeper connection properties val props = new Properties() - 
props.put("zk.connect", zkQuorum) + props.put("metadata.broker.list", brokers) props.put("serializer.class", "kafka.serializer.StringEncoder") val config = new ProducerConfig(props) @@ -85,11 +85,13 @@ object KafkaWordCountProducer { // Send some messages while(true) { val messages = (1 to messagesPerSec.toInt).map { messageNum => - (1 to wordsPerMessage.toInt).map(x => scala.util.Random.nextInt(10).toString).mkString(" ") + val str = (1 to wordsPerMessage.toInt).map(x => scala.util.Random.nextInt(10).toString) + .mkString(" ") + + new KeyedMessage[String, String](topic, str) }.toArray - println(messages.mkString(",")) - val data = new ProducerData[String, String](topic, messages) - producer.send(data) + + producer.send(messages: _*) Thread.sleep(100) } } -- cgit v1.2.3 From 9eaf68fd4032eaa8e8e8930c14fae2fad3d17d14 Mon Sep 17 00:00:00 2001 From: prabeesh Date: Wed, 16 Oct 2013 13:40:38 +0530 Subject: added mqtt adapter wordcount example --- .../spark/streaming/examples/MQTTWordCount.scala | 112 +++++++++++++++++++++ 1 file changed, 112 insertions(+) create mode 100644 examples/src/main/scala/org/apache/spark/streaming/examples/MQTTWordCount.scala (limited to 'examples') diff --git a/examples/src/main/scala/org/apache/spark/streaming/examples/MQTTWordCount.scala b/examples/src/main/scala/org/apache/spark/streaming/examples/MQTTWordCount.scala new file mode 100644 index 0000000000..04e21bef5e --- /dev/null +++ b/examples/src/main/scala/org/apache/spark/streaming/examples/MQTTWordCount.scala @@ -0,0 +1,112 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.spark.streaming.examples + +import org.apache.spark.streaming.{ Seconds, StreamingContext } +import org.apache.spark.streaming.StreamingContext._ +import org.apache.spark.streaming.dstream.MQTTReceiver +import org.apache.spark.storage.StorageLevel + +import org.eclipse.paho.client.mqttv3.MqttClient; +import org.eclipse.paho.client.mqttv3.MqttClientPersistence; +import org.eclipse.paho.client.mqttv3.persist.MqttDefaultFilePersistence; +import org.eclipse.paho.client.mqttv3.MqttException; +import org.eclipse.paho.client.mqttv3.MqttMessage; +import org.eclipse.paho.client.mqttv3.MqttTopic; + +/** + * A simple Mqtt publisher for demonstration purposes, repeatedly publishes Space separated String Message "hello mqtt demo for spark streaming" + */ + +object MQTTPublisher { + + var client: MqttClient = _ + + def main(args: Array[String]) { + if (args.length < 2) { + System.err.println( + "Usage: MQTTPublisher ") + System.exit(1) + } + + val Seq(brokerUrl, topic) = args.toSeq + + try { + var peristance:MqttClientPersistence =new MqttDefaultFilePersistence("/tmp"); + client = new MqttClient(brokerUrl, MqttClient.generateClientId(), peristance); + } catch { + case e: MqttException => println("Exception Caught: " + e); + + } + + client.connect(); + + val msgtopic: MqttTopic = client.getTopic(topic); + val msg: String = "hello mqtt demo for spark streaming"; + + while (true) { + val message: MqttMessage = new MqttMessage(String.valueOf(msg).getBytes()); + msgtopic.publish(message); + println("Published data. topic: " + msgtopic.getName() + " Message: " + message); + } + client.disconnect(); + + } +} + +/** + * A sample wordcount with MqttStream stream + * + * To work with Mqtt, Mqtt Message broker/server required. + * Mosquitto (http://mosquitto.org/) is an open source Mqtt Broker + * In ubuntu mosquitto can be installed using the command `$ sudo apt-get install mosquitto` + * Eclipse paho project provides Java library for Mqtt Client http://www.eclipse.org/paho/ + * Example Java code for Mqtt Publisher and Subscriber can be found here https://bitbucket.org/mkjinesh/mqttclient + * Usage: MQTTWordCount + * In local mode, should be 'local[n]' with n > 1 + * and describe where Mqtt publisher is running. 
+ * + * To run this example locally, you may run publisher as + * `$ ./run-example org.apache.spark.streaming.examples.MQTTPublisher tcp://localhost:1883 foo` + * and run the example as + * `$ ./run-example org.apache.spark.streaming.examples.MQTTWordCount local[2] tcp://localhost:1883 foo` + */ + +object MQTTWordCount { + + def main(args: Array[String]) { + if (args.length < 3) { + System.err.println( + "Usage: MQTTWordCount " + + " In local mode, should be 'local[n]' with n > 1") + System.exit(1) + } + + val Seq(master, brokerUrl, topic) = args.toSeq + + val ssc = new StreamingContext(master, "MqttWordCount", Seconds(2), System.getenv("SPARK_HOME"), Seq(System.getenv("SPARK_EXAMPLES_JAR"))) + val lines = ssc.mqttStream(brokerUrl, topic, StorageLevel.MEMORY_ONLY) + + val words = lines.flatMap(x => x.toString.split(" ")) + val wordCounts = words.map(x => (x, 1)).reduceByKey(_ + _) + wordCounts.print() + + ssc.start() + } +} + -- cgit v1.2.3 From 7d36a117c1d3a37d73f474d0074e57ee0b781205 Mon Sep 17 00:00:00 2001 From: prabeesh Date: Wed, 16 Oct 2013 13:41:26 +0530 Subject: add maven dependencies for mqtt --- examples/pom.xml | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'examples') diff --git a/examples/pom.xml b/examples/pom.xml index 224cf6c96c..afce8493cd 100644 --- a/examples/pom.xml +++ b/examples/pom.xml @@ -125,6 +125,11 @@ + + org.eclipse.paho + mqtt-client + 0.4.0 + -- cgit v1.2.3 From ee4178f144d7752092da53ceea686fbb6c37d5db Mon Sep 17 00:00:00 2001 From: prabeesh Date: Thu, 17 Oct 2013 09:57:48 +0530 Subject: remove unused dependency --- examples/pom.xml | 5 ----- 1 file changed, 5 deletions(-) (limited to 'examples') diff --git a/examples/pom.xml b/examples/pom.xml index afce8493cd..224cf6c96c 100644 --- a/examples/pom.xml +++ b/examples/pom.xml @@ -125,11 +125,6 @@ - - org.eclipse.paho - mqtt-client - 0.4.0 - -- cgit v1.2.3 From feb45d391f8d09c120d7d43e72e96e9bf9784fa0 Mon Sep 17 00:00:00 2001 From: Mosharaf Chowdhury Date: Mon, 14 Oct 2013 14:50:49 -0700 Subject: Default blockSize is 4MB. BroadcastTest2 example added for testing broadcasts. --- .../apache/spark/broadcast/TorrentBroadcast.scala | 2 +- .../org/apache/spark/examples/BroadcastTest2.scala | 59 ++++++++++++++++++++++ 2 files changed, 60 insertions(+), 1 deletion(-) create mode 100644 examples/src/main/scala/org/apache/spark/examples/BroadcastTest2.scala (limited to 'examples') diff --git a/core/src/main/scala/org/apache/spark/broadcast/TorrentBroadcast.scala b/core/src/main/scala/org/apache/spark/broadcast/TorrentBroadcast.scala index 29e0dd26d4..c174804e9a 100644 --- a/core/src/main/scala/org/apache/spark/broadcast/TorrentBroadcast.scala +++ b/core/src/main/scala/org/apache/spark/broadcast/TorrentBroadcast.scala @@ -179,7 +179,7 @@ extends Logging { initialized = false } - val BlockSize = System.getProperty("spark.broadcast.blockSize", "2048").toInt * 1024 + val BlockSize = System.getProperty("spark.broadcast.blockSize", "4096").toInt * 1024 def blockifyObject[IN](obj: IN): TorrentInfo = { val byteArray = Utils.serialize[IN](obj) diff --git a/examples/src/main/scala/org/apache/spark/examples/BroadcastTest2.scala b/examples/src/main/scala/org/apache/spark/examples/BroadcastTest2.scala new file mode 100644 index 0000000000..4b96d0c9dd --- /dev/null +++ b/examples/src/main/scala/org/apache/spark/examples/BroadcastTest2.scala @@ -0,0 +1,59 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.examples + +import org.apache.spark.SparkContext + +object BroadcastTest2 { + def main(args: Array[String]) { + if (args.length == 0) { + System.err.println("Usage: BroadcastTest2 [slices] [numElem] [broadcastAlgo] [blockSize]") + System.exit(1) + } + + val bcName = if (args.length > 3) args(3) else "Http" + val blockSize = if (args.length > 4) args(4) else "4096" + + System.setProperty("spark.broadcast.factory", "org.apache.spark.broadcast." + bcName + "BroadcastFactory") + System.setProperty("spark.broadcast.blockSize", blockSize) + + val sc = new SparkContext(args(0), "Broadcast Test 2", + System.getenv("SPARK_HOME"), Seq(System.getenv("SPARK_EXAMPLES_JAR"))) + + val slices = if (args.length > 1) args(1).toInt else 2 + val num = if (args.length > 2) args(2).toInt else 1000000 + + var arr1 = new Array[Int](num) + for (i <- 0 until arr1.length) { + arr1(i) = i + } + + for (i <- 0 until 3) { + println("Iteration " + i) + println("===========") + val startTime = System.nanoTime + val barr1 = sc.broadcast(arr1) + sc.parallelize(1 to 10, slices).foreach { + i => println(barr1.value.size) + } + println("Iteration %d took %.0f milliseconds".format(i, (System.nanoTime - startTime) / 1E6)) + } + + System.exit(0) + } +} -- cgit v1.2.3 From e96bd0068f13907a45507030e9ca0b178c193823 Mon Sep 17 00:00:00 2001 From: Mosharaf Chowdhury Date: Wed, 16 Oct 2013 21:28:03 -0700 Subject: BroadcastTest2 --> BroadcastTest --- .../org/apache/spark/examples/BroadcastTest.scala | 15 ++++-- .../org/apache/spark/examples/BroadcastTest2.scala | 59 ---------------------- 2 files changed, 12 insertions(+), 62 deletions(-) delete mode 100644 examples/src/main/scala/org/apache/spark/examples/BroadcastTest2.scala (limited to 'examples') diff --git a/examples/src/main/scala/org/apache/spark/examples/BroadcastTest.scala b/examples/src/main/scala/org/apache/spark/examples/BroadcastTest.scala index 868ff81f67..529709c2f9 100644 --- a/examples/src/main/scala/org/apache/spark/examples/BroadcastTest.scala +++ b/examples/src/main/scala/org/apache/spark/examples/BroadcastTest.scala @@ -22,12 +22,19 @@ import org.apache.spark.SparkContext object BroadcastTest { def main(args: Array[String]) { if (args.length == 0) { - System.err.println("Usage: BroadcastTest [] [numElem]") + System.err.println("Usage: BroadcastTest [slices] [numElem] [broadcastAlgo] [blockSize]") System.exit(1) } - val sc = new SparkContext(args(0), "Broadcast Test", + val bcName = if (args.length > 3) args(3) else "Http" + val blockSize = if (args.length > 4) args(4) else "4096" + + System.setProperty("spark.broadcast.factory", "org.apache.spark.broadcast." 
+ bcName + "BroadcastFactory") + System.setProperty("spark.broadcast.blockSize", blockSize) + + val sc = new SparkContext(args(0), "Broadcast Test 2", System.getenv("SPARK_HOME"), Seq(System.getenv("SPARK_EXAMPLES_JAR"))) + val slices = if (args.length > 1) args(1).toInt else 2 val num = if (args.length > 2) args(2).toInt else 1000000 @@ -36,13 +43,15 @@ object BroadcastTest { arr1(i) = i } - for (i <- 0 until 2) { + for (i <- 0 until 3) { println("Iteration " + i) println("===========") + val startTime = System.nanoTime val barr1 = sc.broadcast(arr1) sc.parallelize(1 to 10, slices).foreach { i => println(barr1.value.size) } + println("Iteration %d took %.0f milliseconds".format(i, (System.nanoTime - startTime) / 1E6)) } System.exit(0) diff --git a/examples/src/main/scala/org/apache/spark/examples/BroadcastTest2.scala b/examples/src/main/scala/org/apache/spark/examples/BroadcastTest2.scala deleted file mode 100644 index 4b96d0c9dd..0000000000 --- a/examples/src/main/scala/org/apache/spark/examples/BroadcastTest2.scala +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.spark.examples - -import org.apache.spark.SparkContext - -object BroadcastTest2 { - def main(args: Array[String]) { - if (args.length == 0) { - System.err.println("Usage: BroadcastTest2 [slices] [numElem] [broadcastAlgo] [blockSize]") - System.exit(1) - } - - val bcName = if (args.length > 3) args(3) else "Http" - val blockSize = if (args.length > 4) args(4) else "4096" - - System.setProperty("spark.broadcast.factory", "org.apache.spark.broadcast." 
+ bcName + "BroadcastFactory") - System.setProperty("spark.broadcast.blockSize", blockSize) - - val sc = new SparkContext(args(0), "Broadcast Test 2", - System.getenv("SPARK_HOME"), Seq(System.getenv("SPARK_EXAMPLES_JAR"))) - - val slices = if (args.length > 1) args(1).toInt else 2 - val num = if (args.length > 2) args(2).toInt else 1000000 - - var arr1 = new Array[Int](num) - for (i <- 0 until arr1.length) { - arr1(i) = i - } - - for (i <- 0 until 3) { - println("Iteration " + i) - println("===========") - val startTime = System.nanoTime - val barr1 = sc.broadcast(arr1) - sc.parallelize(1 to 10, slices).foreach { - i => println(barr1.value.size) - } - println("Iteration %d took %.0f milliseconds".format(i, (System.nanoTime - startTime) / 1E6)) - } - - System.exit(0) - } -} -- cgit v1.2.3 From 6ec39829e9204c742e364d48c23e106625bba17d Mon Sep 17 00:00:00 2001 From: Prabeesh K Date: Fri, 18 Oct 2013 17:00:28 +0530 Subject: Update MQTTWordCount.scala --- .../spark/streaming/examples/MQTTWordCount.scala | 29 +++++++++++----------- 1 file changed, 14 insertions(+), 15 deletions(-) (limited to 'examples') diff --git a/examples/src/main/scala/org/apache/spark/streaming/examples/MQTTWordCount.scala b/examples/src/main/scala/org/apache/spark/streaming/examples/MQTTWordCount.scala index 04e21bef5e..be6587b316 100644 --- a/examples/src/main/scala/org/apache/spark/streaming/examples/MQTTWordCount.scala +++ b/examples/src/main/scala/org/apache/spark/streaming/examples/MQTTWordCount.scala @@ -22,12 +22,12 @@ import org.apache.spark.streaming.StreamingContext._ import org.apache.spark.streaming.dstream.MQTTReceiver import org.apache.spark.storage.StorageLevel -import org.eclipse.paho.client.mqttv3.MqttClient; -import org.eclipse.paho.client.mqttv3.MqttClientPersistence; -import org.eclipse.paho.client.mqttv3.persist.MqttDefaultFilePersistence; -import org.eclipse.paho.client.mqttv3.MqttException; -import org.eclipse.paho.client.mqttv3.MqttMessage; -import org.eclipse.paho.client.mqttv3.MqttTopic; +import org.eclipse.paho.client.mqttv3.MqttClient +import org.eclipse.paho.client.mqttv3.MqttClientPersistence +import org.eclipse.paho.client.mqttv3.persist.MqttDefaultFilePersistence +import org.eclipse.paho.client.mqttv3.MqttException +import org.eclipse.paho.client.mqttv3.MqttMessage +import org.eclipse.paho.client.mqttv3.MqttTopic /** * A simple Mqtt publisher for demonstration purposes, repeatedly publishes Space separated String Message "hello mqtt demo for spark streaming" @@ -47,24 +47,24 @@ object MQTTPublisher { val Seq(brokerUrl, topic) = args.toSeq try { - var peristance:MqttClientPersistence =new MqttDefaultFilePersistence("/tmp"); - client = new MqttClient(brokerUrl, MqttClient.generateClientId(), peristance); + var peristance:MqttClientPersistence =new MqttDefaultFilePersistence("/tmp") + client = new MqttClient(brokerUrl, MqttClient.generateClientId(), peristance) } catch { - case e: MqttException => println("Exception Caught: " + e); + case e: MqttException => println("Exception Caught: " + e) } - client.connect(); + client.connect() val msgtopic: MqttTopic = client.getTopic(topic); - val msg: String = "hello mqtt demo for spark streaming"; + val msg: String = "hello mqtt demo for spark streaming" while (true) { - val message: MqttMessage = new MqttMessage(String.valueOf(msg).getBytes()); + val message: MqttMessage = new MqttMessage(String.valueOf(msg).getBytes()) msgtopic.publish(message); - println("Published data. 
topic: " + msgtopic.getName() + " Message: " + message); + println("Published data. topic: " + msgtopic.getName() + " Message: " + message) } - client.disconnect(); + client.disconnect() } } @@ -109,4 +109,3 @@ object MQTTWordCount { ssc.start() } } - -- cgit v1.2.3 From 4e44d65b5ef7fb7c8d24186dd9e98ec10d9877b7 Mon Sep 17 00:00:00 2001 From: Reynold Xin Date: Sat, 19 Oct 2013 12:35:55 -0700 Subject: Exclusion rules for Maven build files. --- examples/pom.xml | 8 ++++++ pom.xml | 74 ++++++++++++++++++++++--------------------------------- streaming/pom.xml | 22 +++++++++++++++++ 3 files changed, 60 insertions(+), 44 deletions(-) (limited to 'examples') diff --git a/examples/pom.xml b/examples/pom.xml index b8c020a321..15399a8a33 100644 --- a/examples/pom.xml +++ b/examples/pom.xml @@ -137,6 +137,14 @@ org.apache.cassandra.deps avro + + org.sonatype.sisu.inject + * + + + org.xerial.snappy + * + diff --git a/pom.xml b/pom.xml index 5ad7b1befb..54f100c37f 100644 --- a/pom.xml +++ b/pom.xml @@ -250,16 +250,34 @@ com.typesafe.akka akka-actor ${akka.version} + + + org.jboss.netty + netty + + com.typesafe.akka akka-remote ${akka.version} + + + org.jboss.netty + netty + + com.typesafe.akka akka-slf4j ${akka.version} + + + org.jboss.netty + netty + + it.unimi.dsi @@ -394,19 +412,11 @@ org.codehaus.jackson - jackson-core-asl - - - org.codehaus.jackson - jackson-mapper-asl - - - org.codehaus.jackson - jackson-jaxrs + * - org.codehaus.jackson - jackson-xc + org.sonatype.sisu.inject + * @@ -430,19 +440,11 @@ org.codehaus.jackson - jackson-core-asl - - - org.codehaus.jackson - jackson-mapper-asl - - - org.codehaus.jackson - jackson-jaxrs + * - org.codehaus.jackson - jackson-xc + org.sonatype.sisu.inject + * @@ -461,19 +463,11 @@ org.codehaus.jackson - jackson-core-asl + * - org.codehaus.jackson - jackson-mapper-asl - - - org.codehaus.jackson - jackson-jaxrs - - - org.codehaus.jackson - jackson-xc + org.sonatype.sisu.inject + * @@ -492,19 +486,11 @@ org.codehaus.jackson - jackson-core-asl + * - org.codehaus.jackson - jackson-mapper-asl - - - org.codehaus.jackson - jackson-jaxrs - - - org.codehaus.jackson - jackson-xc + org.sonatype.sisu.inject + * diff --git a/streaming/pom.xml b/streaming/pom.xml index 3b25fb49fb..bcbed1644a 100644 --- a/streaming/pom.xml +++ b/streaming/pom.xml @@ -69,17 +69,33 @@ org.jboss.netty netty + + org.xerial.snappy + * + com.github.sgroschupf zkclient 0.1 + + + org.jboss.netty + netty + + org.twitter4j twitter4j-stream 3.0.3 + + + org.jboss.netty + netty + + org.scala-lang @@ -89,6 +105,12 @@ com.typesafe.akka akka-zeromq 2.0.3 + + + org.jboss.netty + netty + + org.scalatest -- cgit v1.2.3 From dbafa11396d7c1619f5523fba5ae6abed07e90d9 Mon Sep 17 00:00:00 2001 From: Prabeesh K Date: Tue, 22 Oct 2013 08:50:34 +0530 Subject: Update MQTTWordCount.scala --- .../scala/org/apache/spark/streaming/examples/MQTTWordCount.scala | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'examples') diff --git a/examples/src/main/scala/org/apache/spark/streaming/examples/MQTTWordCount.scala b/examples/src/main/scala/org/apache/spark/streaming/examples/MQTTWordCount.scala index be6587b316..7d06505df7 100644 --- a/examples/src/main/scala/org/apache/spark/streaming/examples/MQTTWordCount.scala +++ b/examples/src/main/scala/org/apache/spark/streaming/examples/MQTTWordCount.scala @@ -30,7 +30,8 @@ import org.eclipse.paho.client.mqttv3.MqttMessage import org.eclipse.paho.client.mqttv3.MqttTopic /** - * A simple Mqtt publisher for demonstration purposes, repeatedly publishes Space 
separated String Message "hello mqtt demo for spark streaming" + * A simple Mqtt publisher for demonstration purposes, repeatedly publishes + * Space separated String Message "hello mqtt demo for spark streaming" */ object MQTTPublisher { @@ -99,13 +100,13 @@ object MQTTWordCount { val Seq(master, brokerUrl, topic) = args.toSeq - val ssc = new StreamingContext(master, "MqttWordCount", Seconds(2), System.getenv("SPARK_HOME"), Seq(System.getenv("SPARK_EXAMPLES_JAR"))) + val ssc = new StreamingContext(master, "MqttWordCount", Seconds(2), System.getenv("SPARK_HOME"), + Seq(System.getenv("SPARK_EXAMPLES_JAR"))) val lines = ssc.mqttStream(brokerUrl, topic, StorageLevel.MEMORY_ONLY) val words = lines.flatMap(x => x.toString.split(" ")) val wordCounts = words.map(x => (x, 1)).reduceByKey(_ + _) wordCounts.print() - ssc.start() } } -- cgit v1.2.3 From 9ca1bd95305a904637075e4f5747b28571114fb1 Mon Sep 17 00:00:00 2001 From: Prabeesh K Date: Tue, 22 Oct 2013 09:05:57 +0530 Subject: Update MQTTWordCount.scala --- .../scala/org/apache/spark/streaming/examples/MQTTWordCount.scala | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) (limited to 'examples') diff --git a/examples/src/main/scala/org/apache/spark/streaming/examples/MQTTWordCount.scala b/examples/src/main/scala/org/apache/spark/streaming/examples/MQTTWordCount.scala index 7d06505df7..af698a01d5 100644 --- a/examples/src/main/scala/org/apache/spark/streaming/examples/MQTTWordCount.scala +++ b/examples/src/main/scala/org/apache/spark/streaming/examples/MQTTWordCount.scala @@ -33,15 +33,13 @@ import org.eclipse.paho.client.mqttv3.MqttTopic * A simple Mqtt publisher for demonstration purposes, repeatedly publishes * Space separated String Message "hello mqtt demo for spark streaming" */ - object MQTTPublisher { var client: MqttClient = _ def main(args: Array[String]) { if (args.length < 2) { - System.err.println( - "Usage: MQTTPublisher ") + System.err.println("Usage: MQTTPublisher ") System.exit(1) } @@ -52,7 +50,6 @@ object MQTTPublisher { client = new MqttClient(brokerUrl, MqttClient.generateClientId(), peristance) } catch { case e: MqttException => println("Exception Caught: " + e) - } client.connect() @@ -66,7 +63,6 @@ object MQTTPublisher { println("Published data. topic: " + msgtopic.getName() + " Message: " + message) } client.disconnect() - } } @@ -87,7 +83,6 @@ object MQTTPublisher { * and run the example as * `$ ./run-example org.apache.spark.streaming.examples.MQTTWordCount local[2] tcp://localhost:1883 foo` */ - object MQTTWordCount { def main(args: Array[String]) { -- cgit v1.2.3 From 05a0df2b9e0e4c3d032404187c0adf6d6d881860 Mon Sep 17 00:00:00 2001 From: Ali Ghodsi Date: Fri, 6 Sep 2013 21:30:42 -0700 Subject: Makes Spark SIMR ready. 
--- .../main/scala/org/apache/spark/SparkContext.scala | 22 +++++-- .../executor/CoarseGrainedExecutorBackend.scala | 5 ++ .../cluster/CoarseGrainedClusterMessage.scala | 4 ++ .../cluster/CoarseGrainedSchedulerBackend.scala | 20 +++++++ .../scheduler/cluster/SimrSchedulerBackend.scala | 69 ++++++++++++++++++++++ .../scala/org/apache/spark/examples/SparkPi.scala | 2 +- .../scala/org/apache/spark/repl/SparkILoop.scala | 14 +++++ 7 files changed, 131 insertions(+), 5 deletions(-) create mode 100644 core/src/main/scala/org/apache/spark/scheduler/cluster/SimrSchedulerBackend.scala (limited to 'examples') diff --git a/core/src/main/scala/org/apache/spark/SparkContext.scala b/core/src/main/scala/org/apache/spark/SparkContext.scala index 564466cfd5..3b39c97260 100644 --- a/core/src/main/scala/org/apache/spark/SparkContext.scala +++ b/core/src/main/scala/org/apache/spark/SparkContext.scala @@ -56,15 +56,21 @@ import org.apache.spark.partial.{ApproximateEvaluator, PartialResult} import org.apache.spark.rdd._ import org.apache.spark.scheduler._ import org.apache.spark.scheduler.cluster.{CoarseGrainedSchedulerBackend, - SparkDeploySchedulerBackend, ClusterScheduler} + SparkDeploySchedulerBackend, ClusterScheduler, SimrSchedulerBackend} +import org.apache.spark.scheduler.local.LocalScheduler import org.apache.spark.scheduler.cluster.mesos.{CoarseMesosSchedulerBackend, MesosSchedulerBackend} import org.apache.spark.scheduler.local.LocalScheduler import org.apache.spark.storage.{BlockManagerSource, RDDInfo, StorageStatus, StorageUtils} import org.apache.spark.ui.SparkUI import org.apache.spark.util.{ClosureCleaner, MetadataCleaner, MetadataCleanerType, TimeStampedHashMap, Utils} - - +import org.apache.spark.scheduler.StageInfo +import org.apache.spark.storage.RDDInfo +import org.apache.spark.storage.StorageStatus +import scala.Some +import org.apache.spark.scheduler.StageInfo +import org.apache.spark.storage.RDDInfo +import org.apache.spark.storage.StorageStatus /** * Main entry point for Spark functionality. A SparkContext represents the connection to a Spark @@ -127,7 +133,7 @@ class SparkContext( val startTime = System.currentTimeMillis() // Add each JAR given through the constructor - if (jars != null) { + if (jars != null && jars != Seq(null)) { jars.foreach { addJar(_) } } @@ -158,6 +164,8 @@ class SparkContext( val SPARK_REGEX = """spark://(.*)""".r // Regular expression for connection to Mesos cluster val MESOS_REGEX = """mesos://(.*)""".r + //Regular expression for connection to Simr cluster + val SIMR_REGEX = """simr://(.*)""".r master match { case "local" => @@ -176,6 +184,12 @@ class SparkContext( scheduler.initialize(backend) scheduler + case SIMR_REGEX(simrUrl) => + val scheduler = new ClusterScheduler(this) + val backend = new SimrSchedulerBackend(scheduler, this, simrUrl) + scheduler.initialize(backend) + scheduler + case LOCAL_CLUSTER_REGEX(numSlaves, coresPerSlave, memoryPerSlave) => // Check to make sure memory requested <= memoryPerSlave. Otherwise Spark will just hang. 
val memoryPerSlaveInt = memoryPerSlave.toInt diff --git a/core/src/main/scala/org/apache/spark/executor/CoarseGrainedExecutorBackend.scala b/core/src/main/scala/org/apache/spark/executor/CoarseGrainedExecutorBackend.scala index 52b1c492b2..80ff4c59cb 100644 --- a/core/src/main/scala/org/apache/spark/executor/CoarseGrainedExecutorBackend.scala +++ b/core/src/main/scala/org/apache/spark/executor/CoarseGrainedExecutorBackend.scala @@ -80,6 +80,11 @@ private[spark] class CoarseGrainedExecutorBackend( case Terminated(_) | RemoteClientDisconnected(_, _) | RemoteClientShutdown(_, _) => logError("Driver terminated or disconnected! Shutting down.") System.exit(1) + + case StopExecutor => + logInfo("Driver commanded a shutdown") + context.stop(self) + context.system.shutdown() } override def statusUpdate(taskId: Long, state: TaskState, data: ByteBuffer) { diff --git a/core/src/main/scala/org/apache/spark/scheduler/cluster/CoarseGrainedClusterMessage.scala b/core/src/main/scala/org/apache/spark/scheduler/cluster/CoarseGrainedClusterMessage.scala index a8230ec6bc..53316dae2a 100644 --- a/core/src/main/scala/org/apache/spark/scheduler/cluster/CoarseGrainedClusterMessage.scala +++ b/core/src/main/scala/org/apache/spark/scheduler/cluster/CoarseGrainedClusterMessage.scala @@ -60,6 +60,10 @@ private[spark] object CoarseGrainedClusterMessages { case object StopDriver extends CoarseGrainedClusterMessage + case object StopExecutor extends CoarseGrainedClusterMessage + + case object StopExecutors extends CoarseGrainedClusterMessage + case class RemoveExecutor(executorId: String, reason: String) extends CoarseGrainedClusterMessage } diff --git a/core/src/main/scala/org/apache/spark/scheduler/cluster/CoarseGrainedSchedulerBackend.scala b/core/src/main/scala/org/apache/spark/scheduler/cluster/CoarseGrainedSchedulerBackend.scala index c0f1c6dbad..80a9b4667d 100644 --- a/core/src/main/scala/org/apache/spark/scheduler/cluster/CoarseGrainedSchedulerBackend.scala +++ b/core/src/main/scala/org/apache/spark/scheduler/cluster/CoarseGrainedSchedulerBackend.scala @@ -101,6 +101,13 @@ class CoarseGrainedSchedulerBackend(scheduler: ClusterScheduler, actorSystem: Ac sender ! true context.stop(self) + case StopExecutors => + logInfo("Asking each executor to shut down") + for (executor <- executorActor.values) { + executor ! StopExecutor + } + sender ! true + case RemoveExecutor(executorId, reason) => removeExecutor(executorId, reason) sender ! true @@ -170,6 +177,19 @@ class CoarseGrainedSchedulerBackend(scheduler: ClusterScheduler, actorSystem: Ac private val timeout = Duration.create(System.getProperty("spark.akka.askTimeout", "10").toLong, "seconds") + def stopExecutors() { + try { + if (driverActor != null) { + logInfo("Shutting down all executors") + val future = driverActor.ask(StopExecutors)(timeout) + Await.result(future, timeout) + } + } catch { + case e: Exception => + throw new SparkException("Error asking standalone scheduler to shut down executors", e) + } + } + override def stop() { try { if (driverActor != null) { diff --git a/core/src/main/scala/org/apache/spark/scheduler/cluster/SimrSchedulerBackend.scala b/core/src/main/scala/org/apache/spark/scheduler/cluster/SimrSchedulerBackend.scala new file mode 100644 index 0000000000..ae56244979 --- /dev/null +++ b/core/src/main/scala/org/apache/spark/scheduler/cluster/SimrSchedulerBackend.scala @@ -0,0 +1,69 @@ +package org.apache.spark.scheduler.cluster + +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +import org.apache.spark.{Logging, SparkContext} +import org.apache.hadoop.conf.Configuration +import org.apache.hadoop.fs.{Path, FileSystem} + +private[spark] class SimrSchedulerBackend( + scheduler: ClusterScheduler, + sc: SparkContext, + driverFilePath: String) + extends CoarseGrainedSchedulerBackend(scheduler, sc.env.actorSystem) + with Logging { + + val tmpPath = new Path(driverFilePath + "_tmp"); + val filePath = new Path(driverFilePath); + + val maxCores = System.getProperty("spark.simr.executor.cores", "1").toInt + + override def start() { + super.start() + + val driverUrl = "akka://spark@%s:%s/user/%s".format( + System.getProperty("spark.driver.host"), System.getProperty("spark.driver.port"), + CoarseGrainedSchedulerBackend.ACTOR_NAME) + + val conf = new Configuration() + val fs = FileSystem.get(conf) + + logInfo("Writing to HDFS file: " + driverFilePath); + logInfo("Writing AKKA address: " + driverUrl); + + // Create temporary file to prevent race condition where executors get empty driverUrl file + val temp = fs.create(tmpPath, true) + temp.writeUTF(driverUrl) + temp.writeInt(maxCores) + temp.close() + + // "Atomic" rename + fs.rename(tmpPath, filePath); + } + + override def stop() { + val conf = new Configuration() + val fs = FileSystem.get(conf) + fs.delete(new Path(driverFilePath), false); + super.stopExecutors() + super.stop() + } +} + + diff --git a/examples/src/main/scala/org/apache/spark/examples/SparkPi.scala b/examples/src/main/scala/org/apache/spark/examples/SparkPi.scala index 5a2bc9b0d0..a689e5a360 100644 --- a/examples/src/main/scala/org/apache/spark/examples/SparkPi.scala +++ b/examples/src/main/scala/org/apache/spark/examples/SparkPi.scala @@ -38,6 +38,6 @@ object SparkPi { if (x*x + y*y < 1) 1 else 0 }.reduce(_ + _) println("Pi is roughly " + 4.0 * count / n) - System.exit(0) + spark.stop() } } diff --git a/repl/src/main/scala/org/apache/spark/repl/SparkILoop.scala b/repl/src/main/scala/org/apache/spark/repl/SparkILoop.scala index 48a8fa9328..0ced284da6 100644 --- a/repl/src/main/scala/org/apache/spark/repl/SparkILoop.scala +++ b/repl/src/main/scala/org/apache/spark/repl/SparkILoop.scala @@ -633,6 +633,20 @@ class SparkILoop(in0: Option[BufferedReader], val out: PrintWriter, val master: Result(true, shouldReplay) } + def addAllClasspath(args: Seq[String]): Unit = { + var added = false + var totalClasspath = "" + for (arg <- args) { + val f = File(arg).normalize + if (f.exists) { + added = true + addedClasspath = ClassPath.join(addedClasspath, f.path) + totalClasspath = ClassPath.join(settings.classpath.value, addedClasspath) + } + } + if (added) replay() + } + def addClasspath(arg: String): Unit = { val f = File(arg).normalize if (f.exists) { -- cgit v1.2.3
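
Editor's sketch (not part of the patch series above): the SIMR commit wires simr:// master URLs to a SimrSchedulerBackend that publishes the driver's Akka URL and per-executor core count into an HDFS file, writing a temporary file first and then renaming it into place so executors never observe a partially written file. The reading side lives in SIMR itself and is not shown in these patches; the snippet below only illustrates, under that assumption, how a consumer could read the two fields back in the order the backend writes them (writeUTF for the driver URL, then writeInt for the core count). The object name and the hard-coded path are hypothetical placeholders, not code from the commits.

    import org.apache.hadoop.conf.Configuration
    import org.apache.hadoop.fs.{FileSystem, Path}

    object SimrDriverFileReader {
      // Hypothetical location; SIMR passes the real path, and the driver is
      // started with a matching master URL such as "simr://" + driverFilePath.
      val driverFilePath = "hdfs:///tmp/simr/driverurl"

      def main(args: Array[String]) {
        val fs = FileSystem.get(new Configuration())
        // The backend renames its temp file into place "atomically", so the file
        // is either absent or contains both fields written by the driver.
        val in = fs.open(new Path(driverFilePath))
        try {
          val driverUrl = in.readUTF()   // matches temp.writeUTF(driverUrl)
          val maxCores  = in.readInt()   // matches temp.writeInt(maxCores)
          println("Driver actor URL: " + driverUrl + ", cores per executor: " + maxCores)
        } finally {
          in.close()
        }
      }
    }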