Add 'codec' parameter to Producer
Adds a codec parameter to Producer.__init__ that lets the user choose
a compression codec to use for all messages sent by it.
Patrick Lucas committed May 3, 2014
1 parent 2415609 commit 671b74a
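
In practice the new parameter is passed straight through the producer constructors. A minimal usage sketch (the broker address and topic name are placeholders, and the client constructor shown matches the 0.8-era API of this library):

    from kafka.client import KafkaClient
    from kafka.producer import SimpleProducer
    from kafka.protocol import CODEC_GZIP

    # Placeholder broker address; adjust for your deployment.
    kafka = KafkaClient("localhost", 9092)

    # Every batch sent by this producer is wrapped in one
    # gzip-compressed message before hitting the wire.
    producer = SimpleProducer(kafka, codec=CODEC_GZIP)
    producer.send_messages("my-topic", "message 1", "message 2")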
Showing 3 changed files with 49 additions and 24 deletions.
42 changes: 33 additions & 9 deletions kafka/producer.py
@@ -10,7 +10,10 @@
 
 from kafka.common import ProduceRequest, TopicAndPartition
 from kafka.partitioner import HashedPartitioner
-from kafka.protocol import create_message
+from kafka.protocol import (
+    CODEC_NONE, CODEC_GZIP, CODEC_SNAPPY, ALL_CODECS,
+    create_message, create_gzip_message, create_snappy_message,
+)
 
 log = logging.getLogger("kafka")

@@ -20,7 +23,7 @@
 STOP_ASYNC_PRODUCER = -1
 
 
-def _send_upstream(queue, client, batch_time, batch_size,
+def _send_upstream(queue, client, codec, batch_time, batch_size,
                    req_acks, ack_timeout):
     """
     Listen on the queue for a specified number of messages or till
@@ -61,7 +64,14 @@ def _send_upstream(queue, client, batch_time, batch_size,
 
         # Send collected requests upstream
         reqs = []
-        for topic_partition, messages in msgset.items():
+        for topic_partition, msg in msgset.items():
+            if codec == CODEC_GZIP:
+                messages = [create_gzip_message(msg)]
+            elif codec == CODEC_SNAPPY:
+                messages = [create_snappy_message(msg)]
+            else:
+                messages = [create_message(m) for m in msg]
+
             req = ProduceRequest(topic_partition.topic,
                                  topic_partition.partition,
                                  messages)
@@ -101,6 +111,7 @@ class Producer(object):
     def __init__(self, client, async=False,
                  req_acks=ACK_AFTER_LOCAL_WRITE,
                  ack_timeout=DEFAULT_ACK_TIMEOUT,
+                 codec=None,
                  batch_send=False,
                  batch_send_every_n=BATCH_SEND_MSG_COUNT,
                  batch_send_every_t=BATCH_SEND_DEFAULT_INTERVAL):
@@ -118,11 +129,17 @@ def __init__(self, client, async=False,
         self.req_acks = req_acks
         self.ack_timeout = ack_timeout
 
+        if codec is None:
+            codec = CODEC_NONE
+        assert codec in ALL_CODECS
+        self.codec = codec
+
         if self.async:
             self.queue = Queue()  # Messages are sent through this queue
             self.proc = Process(target=_send_upstream,
                                 args=(self.queue,
                                       self.client.copy(),
+                                      self.codec,
                                       batch_send_every_t,
                                       batch_send_every_n,
                                       self.req_acks,
@@ -138,11 +155,16 @@ def send_messages(self, topic, partition, *msg):
         """
         if self.async:
             for m in msg:
-                self.queue.put((TopicAndPartition(topic, partition),
-                                create_message(m)))
+                self.queue.put((TopicAndPartition(topic, partition), m))
             resp = []
         else:
-            messages = [create_message(m) for m in msg]
+            if self.codec == CODEC_GZIP:
+                messages = [create_gzip_message(msg)]
+            elif self.codec == CODEC_SNAPPY:
+                messages = [create_snappy_message(msg)]
+            else:
+                messages = [create_message(m) for m in msg]
+
             req = ProduceRequest(topic, partition, messages)
             try:
                 resp = self.client.send_produce_request([req], acks=self.req_acks,
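
The synchronous path above and the async worker in _send_upstream now make the same three-way choice. Factored out as a standalone sketch (the helper name _wrap_messages is hypothetical, not something this commit introduces):

    from kafka.protocol import (
        CODEC_GZIP, CODEC_SNAPPY,
        create_message, create_gzip_message, create_snappy_message,
    )

    def _wrap_messages(codec, payloads):
        # With a compression codec, the whole payload batch becomes a
        # single wrapper Message; otherwise one Message per payload.
        if codec == CODEC_GZIP:
            return [create_gzip_message(payloads)]
        elif codec == CODEC_SNAPPY:
            return [create_snappy_message(payloads)]
        else:
            return [create_message(m) for m in payloads]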
@@ -167,7 +189,7 @@ def stop(self, timeout=1):
 
 class SimpleProducer(Producer):
     """
-    A simple, round-robbin producer. Each message goes to exactly one partition
+    A simple, round-robin producer. Each message goes to exactly one partition
 
     Params:
     client - The Kafka client instance to use
@@ -184,12 +206,13 @@ class SimpleProducer(Producer):
     def __init__(self, client, async=False,
                  req_acks=Producer.ACK_AFTER_LOCAL_WRITE,
                  ack_timeout=Producer.DEFAULT_ACK_TIMEOUT,
+                 codec=None,
                  batch_send=False,
                  batch_send_every_n=BATCH_SEND_MSG_COUNT,
                  batch_send_every_t=BATCH_SEND_DEFAULT_INTERVAL):
         self.partition_cycles = {}
         super(SimpleProducer, self).__init__(client, async, req_acks,
-                                             ack_timeout, batch_send,
+                                             ack_timeout, codec, batch_send,
                                              batch_send_every_n,
                                              batch_send_every_t)
@@ -227,6 +250,7 @@ class KeyedProducer(Producer):
     def __init__(self, client, partitioner=None, async=False,
                  req_acks=Producer.ACK_AFTER_LOCAL_WRITE,
                  ack_timeout=Producer.DEFAULT_ACK_TIMEOUT,
+                 codec=None,
                  batch_send=False,
                  batch_send_every_n=BATCH_SEND_MSG_COUNT,
                  batch_send_every_t=BATCH_SEND_DEFAULT_INTERVAL):
@@ -236,7 +260,7 @@ def __init__(self, client, partitioner=None, async=False,
         self.partitioners = {}
 
         super(KeyedProducer, self).__init__(client, async, req_acks,
-                                            ack_timeout, batch_send,
+                                            ack_timeout, codec, batch_send,
                                             batch_send_every_n,
                                             batch_send_every_t)
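
With async=True the raw payloads are queued and the background process applies the codec as it drains each batch. A sketch reusing the client from the earlier example (Snappy support additionally requires the python-snappy package; the batch settings shown are illustrative):

    from kafka.producer import SimpleProducer
    from kafka.protocol import CODEC_SNAPPY

    # The codec is handed to the worker process along with the
    # batching parameters.
    producer = SimpleProducer(kafka, async=True, codec=CODEC_SNAPPY,
                              batch_send=True,
                              batch_send_every_n=20,
                              batch_send_every_t=5)
    producer.send_messages("my-topic", "m1", "m2")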
23 changes: 12 additions & 11 deletions kafka/protocol.py
@@ -18,6 +18,12 @@
 
 log = logging.getLogger("kafka")
 
+ATTRIBUTE_CODEC_MASK = 0x03
+CODEC_NONE = 0x00
+CODEC_GZIP = 0x01
+CODEC_SNAPPY = 0x02
+ALL_CODECS = (CODEC_NONE, CODEC_GZIP, CODEC_SNAPPY)
+
 
 class KafkaProtocol(object):
     """
@@ -32,11 +38,6 @@ class KafkaProtocol(object):
     OFFSET_COMMIT_KEY = 8
     OFFSET_FETCH_KEY = 9
 
-    ATTRIBUTE_CODEC_MASK = 0x03
-    CODEC_NONE = 0x00
-    CODEC_GZIP = 0x01
-    CODEC_SNAPPY = 0x02
-
     ###################
     #   Private API   #
     ###################
@@ -151,17 +152,17 @@ def _decode_message(cls, data, offset):
         (key, cur) = read_int_string(data, cur)
         (value, cur) = read_int_string(data, cur)
 
-        codec = att & KafkaProtocol.ATTRIBUTE_CODEC_MASK
+        codec = att & ATTRIBUTE_CODEC_MASK
 
-        if codec == KafkaProtocol.CODEC_NONE:
+        if codec == CODEC_NONE:
             yield (offset, Message(magic, att, key, value))
 
-        elif codec == KafkaProtocol.CODEC_GZIP:
+        elif codec == CODEC_GZIP:
             gz = gzip_decode(value)
             for (offset, msg) in KafkaProtocol._decode_message_set_iter(gz):
                 yield (offset, msg)
 
-        elif codec == KafkaProtocol.CODEC_SNAPPY:
+        elif codec == CODEC_SNAPPY:
             snp = snappy_decode(value)
             for (offset, msg) in KafkaProtocol._decode_message_set_iter(snp):
                 yield (offset, msg)
@@ -544,7 +545,7 @@ def create_gzip_message(payloads, key=None):
         [create_message(payload) for payload in payloads])
 
     gzipped = gzip_encode(message_set)
-    codec = KafkaProtocol.ATTRIBUTE_CODEC_MASK & KafkaProtocol.CODEC_GZIP
+    codec = ATTRIBUTE_CODEC_MASK & CODEC_GZIP
 
     return Message(0, 0x00 | codec, key, gzipped)
@@ -565,6 +566,6 @@ def create_snappy_message(payloads, key=None):
         [create_message(payload) for payload in payloads])
 
     snapped = snappy_encode(message_set)
-    codec = KafkaProtocol.ATTRIBUTE_CODEC_MASK & KafkaProtocol.CODEC_SNAPPY
+    codec = ATTRIBUTE_CODEC_MASK & CODEC_SNAPPY
 
     return Message(0, 0x00 | codec, key, snapped)
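
The attribute arithmetic above is plain bit masking: the codec id lives in the low two bits of the message's attributes byte. A self-contained sketch:

    ATTRIBUTE_CODEC_MASK = 0x03   # low two bits select the codec
    CODEC_GZIP = 0x01

    attributes = 0x00 | (ATTRIBUTE_CODEC_MASK & CODEC_GZIP)   # encode -> 0x01
    codec = attributes & ATTRIBUTE_CODEC_MASK                 # decode -> 0x01
    assert codec == CODEC_GZIP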
8 changes: 4 additions & 4 deletions test/test_unit.py
@@ -135,8 +135,8 @@ def test_create_gzip(self):
         payloads = ["v1", "v2"]
         msg = create_gzip_message(payloads)
         self.assertEqual(msg.magic, 0)
-        self.assertEqual(msg.attributes, KafkaProtocol.ATTRIBUTE_CODEC_MASK &
-                         KafkaProtocol.CODEC_GZIP)
+        self.assertEqual(msg.attributes, ATTRIBUTE_CODEC_MASK &
+                         CODEC_GZIP)
         self.assertEqual(msg.key, None)
         # Need to decode to check since gzipped payload is non-deterministic
         decoded = gzip_decode(msg.value)
@@ -151,8 +151,8 @@ def test_create_snappy(self):
         payloads = ["v1", "v2"]
         msg = create_snappy_message(payloads)
         self.assertEqual(msg.magic, 0)
-        self.assertEqual(msg.attributes, KafkaProtocol.ATTRIBUTE_CODEC_MASK &
-                         KafkaProtocol.CODEC_SNAPPY)
+        self.assertEqual(msg.attributes, ATTRIBUTE_CODEC_MASK &
+                         CODEC_SNAPPY)
         self.assertEqual(msg.key, None)
         expect = ("8\x00\x00\x19\x01@\x10L\x9f[\xc2\x00\x00\xff\xff\xff\xff"
                   "\x00\x00\x00\x02v1\x19\x1bD\x00\x10\xd5\x96\nx\x00\x00\xff"
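
As the comment in test_create_gzip notes, gzip output is not byte-for-byte deterministic, so the test decompresses before comparing. The same round-trip check works interactively (a sketch; gzip_decode lives in kafka.codec in this codebase):

    from kafka.codec import gzip_decode
    from kafka.protocol import create_gzip_message

    msg = create_gzip_message(["v1", "v2"])
    inner = gzip_decode(msg.value)   # the uncompressed inner message set
    assert "v1" in inner and "v2" in inner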
