Diffstat (limited to 'python/pyspark/streaming/kafka.py')
-rw-r--r--  python/pyspark/streaming/kafka.py  167
1 files changed, 160 insertions, 7 deletions
diff --git a/python/pyspark/streaming/kafka.py b/python/pyspark/streaming/kafka.py
index 8d610d6569..e278b29003 100644
--- a/python/pyspark/streaming/kafka.py
+++ b/python/pyspark/streaming/kafka.py
@@ -17,11 +17,12 @@
from py4j.java_gateway import Py4JJavaError
+from pyspark.rdd import RDD
from pyspark.storagelevel import StorageLevel
from pyspark.serializers import PairDeserializer, NoOpSerializer
from pyspark.streaming import DStream
-__all__ = ['KafkaUtils', 'utf8_decoder']
+__all__ = ['Broker', 'KafkaUtils', 'OffsetRange', 'TopicAndPartition', 'utf8_decoder']
def utf8_decoder(s):
@@ -67,7 +68,104 @@ class KafkaUtils(object):
except Py4JJavaError as e:
# TODO: use --jar once it also work on driver
if 'ClassNotFoundException' in str(e.java_exception):
- print("""
+ KafkaUtils._printErrorMsg(ssc.sparkContext)
+ raise e
+ ser = PairDeserializer(NoOpSerializer(), NoOpSerializer())
+ stream = DStream(jstream, ssc, ser)
+ return stream.map(lambda k_v: (keyDecoder(k_v[0]), valueDecoder(k_v[1])))
+
+ @staticmethod
+ def createDirectStream(ssc, topics, kafkaParams, fromOffsets={},
+ keyDecoder=utf8_decoder, valueDecoder=utf8_decoder):
+ """
+ .. note:: Experimental
+
+ Create an input stream that directly pulls messages from Kafka brokers at specific offsets.
+
+ This is not a receiver-based Kafka input stream; it directly pulls messages from Kafka
+ in each batch duration and processes them without storing them.
+
+ This does not use Zookeeper to store offsets. The consumed offsets are tracked
+ by the stream itself. For interoperability with Kafka monitoring tools that depend on
+ Zookeeper, you have to update Kafka/Zookeeper yourself from the streaming application.
+ You can access the offsets used in each batch from the generated RDDs.
+
+ To recover from driver failures, you have to enable checkpointing in the StreamingContext.
+ The information on consumed offsets can be recovered from the checkpoint.
+ See the programming guide for details (constraints, etc.).
+
+ :param ssc: StreamingContext object.
+ :param topics: list of topic names to consume.
+ :param kafkaParams: Additional params for Kafka.
+ :param fromOffsets: Per-topic/partition Kafka offsets defining the (inclusive) starting
+ point of the stream.
+ :param keyDecoder: A function used to decode key (default is utf8_decoder).
+ :param valueDecoder: A function used to decode value (default is utf8_decoder).
+ :return: A DStream object
+ """
+ if not isinstance(topics, list):
+ raise TypeError("topics should be list")
+ if not isinstance(kafkaParams, dict):
+ raise TypeError("kafkaParams should be dict")
+
+ try:
+ helperClass = ssc._jvm.java.lang.Thread.currentThread().getContextClassLoader() \
+ .loadClass("org.apache.spark.streaming.kafka.KafkaUtilsPythonHelper")
+ helper = helperClass.newInstance()
+
+ jfromOffsets = dict([(k._jTopicAndPartition(helper),
+ v) for (k, v) in fromOffsets.items()])
+ jstream = helper.createDirectStream(ssc._jssc, kafkaParams, set(topics), jfromOffsets)
+ except Py4JJavaError as e:
+ if 'ClassNotFoundException' in str(e.java_exception):
+ KafkaUtils._printErrorMsg(ssc.sparkContext)
+ raise e
+
+ ser = PairDeserializer(NoOpSerializer(), NoOpSerializer())
+ stream = DStream(jstream, ssc, ser)
+ return stream.map(lambda k_v: (keyDecoder(k_v[0]), valueDecoder(k_v[1])))
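A minimal usage sketch of createDirectStream, assuming a broker reachable at localhost:9092 and a topic named "test" with a partition 0; the broker address, topic name, and checkpoint directory below are illustrative, not part of the patch itself:

    from pyspark import SparkContext
    from pyspark.streaming import StreamingContext
    from pyspark.streaming.kafka import KafkaUtils, TopicAndPartition

    sc = SparkContext(appName="DirectKafkaExample")
    ssc = StreamingContext(sc, 2)  # 2 second batch duration
    ssc.checkpoint("/tmp/kafka-direct-checkpoint")  # illustrative path; allows consumed offsets to be recovered

    kafkaParams = {"metadata.broker.list": "localhost:9092"}
    # Start partition 0 of "test" at offset 0; omit fromOffsets to let
    # Kafka's "auto.offset.reset" setting decide the starting point.
    fromOffsets = {TopicAndPartition("test", 0): 0}

    stream = KafkaUtils.createDirectStream(ssc, ["test"], kafkaParams, fromOffsets)
    stream.map(lambda kv: kv[1]).pprint()  # keys and values are utf8-decoded by default

    ssc.start()
    ssc.awaitTermination()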
+
+ @staticmethod
+ def createRDD(sc, kafkaParams, offsetRanges, leaders={},
+ keyDecoder=utf8_decoder, valueDecoder=utf8_decoder):
+ """
+ .. note:: Experimental
+
+ Create an RDD from Kafka using offset ranges for each topic and partition.
+ :param sc: SparkContext object
+ :param kafkaParams: Additional params for Kafka
+ :param offsetRanges: list of OffsetRange objects specifying topic:partition:[start, end) to consume
+ :param leaders: Kafka brokers for each TopicAndPartition in offsetRanges. May be an empty
+ map, in which case leaders will be looked up on the driver.
+ :param keyDecoder: A function used to decode key (default is utf8_decoder)
+ :param valueDecoder: A function used to decode value (default is utf8_decoder)
+ :return: An RDD object
+ """
+ if not isinstance(kafkaParams, dict):
+ raise TypeError("kafkaParams should be dict")
+ if not isinstance(offsetRanges, list):
+ raise TypeError("offsetRanges should be list")
+
+ try:
+ helperClass = sc._jvm.java.lang.Thread.currentThread().getContextClassLoader() \
+ .loadClass("org.apache.spark.streaming.kafka.KafkaUtilsPythonHelper")
+ helper = helperClass.newInstance()
+ joffsetRanges = [o._jOffsetRange(helper) for o in offsetRanges]
+ jleaders = dict([(k._jTopicAndPartition(helper),
+ v._jBroker(helper)) for (k, v) in leaders.items()])
+ jrdd = helper.createRDD(sc._jsc, kafkaParams, joffsetRanges, jleaders)
+ except Py4JJavaError as e:
+ if 'ClassNotFoundException' in str(e.java_exception):
+ KafkaUtils._printErrorMsg(sc)
+ raise e
+
+ ser = PairDeserializer(NoOpSerializer(), NoOpSerializer())
+ rdd = RDD(jrdd, sc, ser)
+ return rdd.map(lambda k_v: (keyDecoder(k_v[0]), valueDecoder(k_v[1])))
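A minimal usage sketch of createRDD, again with an illustrative broker address, topic, partition, and offset range:

    from pyspark import SparkContext
    from pyspark.streaming.kafka import KafkaUtils, OffsetRange, TopicAndPartition, Broker

    sc = SparkContext(appName="KafkaRDDExample")
    kafkaParams = {"metadata.broker.list": "localhost:9092"}

    # Consume offsets [0, 100) of partition 0 of topic "test":
    # fromOffset is inclusive, untilOffset is exclusive.
    offsetRanges = [OffsetRange("test", 0, 0, 100)]

    # Optionally supply the leader broker per partition; pass an empty dict
    # (or omit the argument) to have leaders looked up on the driver instead.
    leaders = {TopicAndPartition("test", 0): Broker("localhost", 9092)}

    rdd = KafkaUtils.createRDD(sc, kafkaParams, offsetRanges, leaders)
    print(rdd.map(lambda kv: kv[1]).collect())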
+
+ @staticmethod
+ def _printErrorMsg(sc):
+ print("""
________________________________________________________________________________________________
Spark Streaming's Kafka libraries not found in class path. Try one of the following.
@@ -85,8 +183,63 @@ ________________________________________________________________________________
________________________________________________________________________________________________
-""" % (ssc.sparkContext.version, ssc.sparkContext.version))
- raise e
- ser = PairDeserializer(NoOpSerializer(), NoOpSerializer())
- stream = DStream(jstream, ssc, ser)
- return stream.map(lambda k_v: (keyDecoder(k_v[0]), valueDecoder(k_v[1])))
+""" % (sc.version, sc.version))
+
+
+class OffsetRange(object):
+ """
+ Represents a range of offsets from a single Kafka TopicAndPartition.
+ """
+
+ def __init__(self, topic, partition, fromOffset, untilOffset):
+ """
+ Create an OffsetRange to represent a range of offsets.
+ :param topic: Kafka topic name.
+ :param partition: Kafka partition id.
+ :param fromOffset: Inclusive starting offset.
+ :param untilOffset: Exclusive ending offset.
+ """
+ self._topic = topic
+ self._partition = partition
+ self._fromOffset = fromOffset
+ self._untilOffset = untilOffset
+
+ def _jOffsetRange(self, helper):
+ return helper.createOffsetRange(self._topic, self._partition, self._fromOffset,
+ self._untilOffset)
+
+
+class TopicAndPartition(object):
+ """
+ Represents a specific topic and partition for Kafka.
+ """
+
+ def __init__(self, topic, partition):
+ """
+ Create a Python TopicAndPartition to map to the corresponding Java object.
+ :param topic: Kafka topic name.
+ :param partition: Kafka partition id.
+ """
+ self._topic = topic
+ self._partition = partition
+
+ def _jTopicAndPartition(self, helper):
+ return helper.createTopicAndPartition(self._topic, self._partition)
+
+
+class Broker(object):
+ """
+ Represents the host and port info for a Kafka broker.
+ """
+
+ def __init__(self, host, port):
+ """
+ Create a Python Broker to map to the corresponding Java object.
+ :param host: Broker's hostname.
+ :param port: Broker's port.
+ """
+ self._host = host
+ self._port = port
+
+ def _jBroker(self, helper):
+ return helper.createBroker(self._host, self._port)
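For reference, a small sketch of how these three value objects are typically constructed (topic, partition, offsets, and broker values are illustrative); each is converted to its Java counterpart through the _jOffsetRange / _jTopicAndPartition / _jBroker helpers when handed to KafkaUtils:

    from pyspark.streaming.kafka import OffsetRange, TopicAndPartition, Broker

    # Offsets 5..9 of partition 3 of topic "logs":
    # fromOffset=5 is included, untilOffset=10 is excluded.
    r = OffsetRange("logs", 3, 5, 10)

    # Dictionary key type used by createDirectStream's fromOffsets and createRDD's leaders.
    tp = TopicAndPartition("logs", 3)

    # Host and port of the partition's leader broker.
    b = Broker("kafka-1.example.com", 9092)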