
Commit

Add pylint to tox.ini; test both kafka and test; default to error-checking only; fixup errors; skip kafka/queue.py
Dana Powers committed Aug 25, 2014
1 parent d73d169 commit e151529
Showing 10 changed files with 54 additions and 58 deletions.
4 changes: 2 additions & 2 deletions .travis.yml
@@ -6,7 +6,7 @@ python:
- pypy

env:
-
- UNIT_AND_LINT_ONLY=true
- KAFKA_VERSION=0.8.0
- KAFKA_VERSION=0.8.1
- KAFKA_VERSION=0.8.1.1
@@ -35,4 +35,4 @@ deploy:
# branch: master

script:
- tox -e `./travis_selector.sh $TRAVIS_PYTHON_VERSION`
- if [ -n "$UNIT_AND_LINT_ONLY" ]; then tox -e lint,`./travis_selector.sh $TRAVIS_PYTHON_VERSION`; else tox -e `./travis_selector.sh $TRAVIS_PYTHON_VERSION`; fi
2 changes: 1 addition & 1 deletion kafka/client.py
@@ -187,7 +187,7 @@ def __repr__(self):
def _raise_on_response_error(self, resp):
try:
kafka.common.check_error(resp)
except (UnknownTopicOrPartitionError, NotLeaderForPartitionError) as e:
except (UnknownTopicOrPartitionError, NotLeaderForPartitionError):
self.reset_topic_metadata(resp.topic)
raise

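The kafka/client.py hunk drops the unused "as e" binding because the handler only performs cleanup before re-raising. A minimal standalone sketch of that catch/clean-up/re-raise pattern (all names below are hypothetical, not from kafka-python):

    class StaleMetadataError(Exception):
        pass

    def check(resp):
        # hypothetical stand-in for kafka.common.check_error()
        if resp.get('error'):
            raise StaleMetadataError(resp['error'])

    def handle_response(resp, reset_metadata):
        try:
            check(resp)
        except StaleMetadataError:
            reset_metadata(resp['topic'])  # cleanup only, so no "as e" binding is needed
            raise                          # bare raise preserves the original traceback
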
6 changes: 3 additions & 3 deletions kafka/conn.py
@@ -93,8 +93,8 @@ def _read_bytes(self, num_bytes):
# that the socket is in error. we will never get
# more data from this socket
if data == '':
raise socket.error('Not enough data to read message -- did server kill socket?')
raise socket.error("Not enough data to read message -- did server kill socket?")

except socket.error:
log.exception('Unable to receive data from Kafka')
self._raise_connection_error()
@@ -170,7 +170,7 @@ def close(self):
except socket.error:
pass

# Closing the socket should always succeed
# Closing the socket should always succeed
self._sock.close()
self._sock = None
else:
49 changes: 22 additions & 27 deletions test/fixtures.py
@@ -1,5 +1,4 @@
import logging
import glob
import os
import os.path
import shutil
@@ -9,8 +8,8 @@
import uuid

from urlparse import urlparse
from service import ExternalService, SpawnedService
from testutil import get_open_port
from test.service import ExternalService, SpawnedService
from test.testutil import get_open_port

class Fixture(object):
kafka_version = os.environ.get('KAFKA_VERSION', '0.8.0')
@@ -36,23 +35,23 @@ def download_official_distribution(cls,
output_file = os.path.join(output_dir, distfile + '.tgz')

if os.path.isfile(output_file):
logging.info("Found file already on disk: %s" % output_file)
logging.info("Found file already on disk: %s", output_file)
return output_file

# New tarballs are .tgz, older ones are sometimes .tar.gz
try:
url = url_base + distfile + '.tgz'
logging.info("Attempting to download %s" % (url,))
logging.info("Attempting to download %s", url)
response = urllib2.urlopen(url)
except urllib2.HTTPError:
logging.exception("HTTP Error")
url = url_base + distfile + '.tar.gz'
logging.info("Attempting to download %s" % (url,))
logging.info("Attempting to download %s", url)
response = urllib2.urlopen(url)

logging.info("Saving distribution file to %s" % (output_file,))
with open(os.path.join(output_dir, distfile + '.tgz'), 'w') as f:
f.write(response.read())
logging.info("Saving distribution file to %s", output_file)
with open(output_file, 'w') as output_file_fd:
output_file_fd.write(response.read())

return output_file

@@ -117,11 +116,9 @@ def open(self):
self.render_template(template, properties, vars(self))

# Configure Zookeeper child process
self.child = SpawnedService(args=self.kafka_run_class_args(
"org.apache.zookeeper.server.quorum.QuorumPeerMain",
properties),
env=self.kafka_run_class_env()
)
args = self.kafka_run_class_args("org.apache.zookeeper.server.quorum.QuorumPeerMain", properties)
env = self.kafka_run_class_env()
self.child = SpawnedService(args, env)

# Party!
self.out("Starting...")
@@ -162,7 +159,7 @@ def __init__(self, host, port, broker_id, zk_host, zk_port, zk_chroot, replicas=
self.zk_port = zk_port
self.zk_chroot = zk_chroot

self.replicas = replicas
self.replicas = replicas
self.partitions = partitions

self.tmp_dir = None
@@ -199,21 +196,19 @@ def open(self):
self.render_template(template, properties, vars(self))

# Configure Kafka child process
self.child = SpawnedService(args=self.kafka_run_class_args(
"kafka.Kafka", properties),
env=self.kafka_run_class_env()
)
args = self.kafka_run_class_args("kafka.Kafka", properties)
env = self.kafka_run_class_env()
self.child = SpawnedService(args, env)

# Party!
self.out("Creating Zookeeper chroot node...")
proc = subprocess.Popen(self.kafka_run_class_args(
"org.apache.zookeeper.ZooKeeperMain",
"-server", "%s:%d" % (self.zk_host, self.zk_port),
"create", "/%s" % self.zk_chroot, "kafka-python"
),
env=self.kafka_run_class_env(),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
args = self.kafka_run_class_args("org.apache.zookeeper.ZooKeeperMain",
"-server", "%s:%d" % (self.zk_host, self.zk_port),
"create",
"/%s" % self.zk_chroot,
"kafka-python")
env = self.kafka_run_class_env()
proc = subprocess.Popen(args, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE)

if proc.wait() != 0:
self.out("Failed to create Zookeeper chroot node")
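
The test/fixtures.py hunks above also switch logging calls from eager %-formatting to passing arguments separately, the lazy style pylint prefers. A small sketch of the difference (the path value is made up for illustration):

    import logging

    logging.basicConfig(level=logging.INFO)
    path = "/tmp/kafka-dist.tgz"  # hypothetical value, for illustration only

    # Eager: the message string is built by % before logging.info() runs,
    # even when INFO-level records are being discarded.
    logging.info("Found file already on disk: %s" % path)

    # Lazy (the style adopted in this commit): arguments are passed through and
    # only interpolated if the record is actually emitted.
    logging.info("Found file already on disk: %s", path)
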
9 changes: 5 additions & 4 deletions test/service.py
@@ -2,7 +2,6 @@
import re
import select
import subprocess
import sys
import threading
import time

@@ -14,7 +13,7 @@

class ExternalService(object):
def __init__(self, host, port):
print("Using already running service at %s:%d" % (host, port))
logging.info("Using already running service at %s:%d", host, port)
self.host = host
self.port = port

@@ -26,9 +25,11 @@ def close(self):


class SpawnedService(threading.Thread):
def __init__(self, args=[], env=None):
def __init__(self, args=None, env=None):
threading.Thread.__init__(self)

if args is None:
raise TypeError("args parameter is required")
self.args = args
self.env = env
self.captured_stdout = []
@@ -49,7 +50,7 @@ def run_with_handles(self):
alive = True

while True:
(rds, wds, xds) = select.select([self.child.stdout, self.child.stderr], [], [], 1)
(rds, _, _) = select.select([self.child.stdout, self.child.stderr], [], [], 1)

if self.child.stdout in rds:
line = self.child.stdout.readline()
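
The SpawnedService change above replaces the mutable default argument args=[] with args=None plus an explicit check. A short sketch of why a mutable default is a trap (function names here are hypothetical):

    def spawn(args=[]):  # anti-pattern: the list is created once, at definition time
        args.append("run")
        return args

    print(spawn())  # ['run']
    print(spawn())  # ['run', 'run'] -- the same list object is shared across calls

    # The pattern adopted in SpawnedService.__init__: default to None and validate.
    def spawn_fixed(args=None):
        if args is None:
            raise TypeError("args parameter is required")
        return list(args)
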
7 changes: 1 addition & 6 deletions test/test_client.py
@@ -1,6 +1,3 @@
import os
import random
import struct
import unittest2

from mock import MagicMock, patch
@@ -11,9 +8,7 @@
TopicAndPartition, KafkaUnavailableError,
LeaderUnavailableError, PartitionUnavailableError
)
from kafka.protocol import (
create_message, KafkaProtocol
)
from kafka.protocol import create_message

class TestKafkaClient(unittest2.TestCase):
def test_init_with_list(self):
8 changes: 3 additions & 5 deletions test/test_client_integration.py
@@ -1,13 +1,11 @@
import os
import random
import socket
import time
import unittest2

import kafka
from kafka.common import *
from fixtures import ZookeeperFixture, KafkaFixture
from testutil import *
from test.fixtures import ZookeeperFixture, KafkaFixture
from test.testutil import *

class TestKafkaClientIntegration(KafkaIntegrationTestCase):
@classmethod
@@ -34,7 +32,7 @@ def test_timeout(self):

with Timer() as t:
with self.assertRaises((socket.timeout, socket.error)):
conn = kafka.conn.KafkaConnection("localhost", server_port, 1.0)
kafka.conn.KafkaConnection("localhost", server_port, 1.0)
self.assertGreaterEqual(t.interval, 1.0)

@kafka_versions("all")
18 changes: 9 additions & 9 deletions test/test_conn.py
@@ -24,13 +24,13 @@ def setUp(self):
self.addCleanup(patcher.stop)

# Also mock socket.sendall() to appear successful
socket.create_connection().sendall.return_value = None
self.MockCreateConn().sendall.return_value = None

# And mock socket.recv() to return two payloads, then '', then raise
# Note that this currently ignores the num_bytes parameter to sock.recv()
payload_size = len(self.config['payload'])
payload2_size = len(self.config['payload2'])
socket.create_connection().recv.side_effect = [
self.MockCreateConn().recv.side_effect = [
struct.pack('>i', payload_size),
struct.pack('>%ds' % payload_size, self.config['payload']),
struct.pack('>i', payload2_size),
@@ -42,7 +42,7 @@ def setUp(self):
self.conn = KafkaConnection(self.config['host'], self.config['port'])

# Reset any mock counts caused by __init__
socket.create_connection.reset_mock()
self.MockCreateConn.reset_mock()

def test_collect_hosts__happy_path(self):
hosts = "localhost:1234,localhost"
@@ -81,7 +81,7 @@ def test_send(self):

def test_init_creates_socket_connection(self):
KafkaConnection(self.config['host'], self.config['port'])
socket.create_connection.assert_called_with((self.config['host'], self.config['port']), DEFAULT_SOCKET_TIMEOUT_SECONDS)
self.MockCreateConn.assert_called_with((self.config['host'], self.config['port']), DEFAULT_SOCKET_TIMEOUT_SECONDS)

def test_init_failure_raises_connection_error(self):

@@ -102,9 +102,9 @@ def test_send__reconnects_on_dirty_conn(self):
pass

# Now test that sending attempts to reconnect
self.assertEqual(socket.create_connection.call_count, 0)
self.assertEqual(self.MockCreateConn.call_count, 0)
self.conn.send(self.config['request_id'], self.config['payload'])
self.assertEqual(socket.create_connection.call_count, 1)
self.assertEqual(self.MockCreateConn.call_count, 1)

def test_send__failure_sets_dirty_connection(self):

@@ -131,9 +131,9 @@ def test_recv__reconnects_on_dirty_conn(self):
pass

# Now test that recv'ing attempts to reconnect
self.assertEqual(socket.create_connection.call_count, 0)
self.assertEqual(self.MockCreateConn.call_count, 0)
self.conn.recv(self.config['request_id'])
self.assertEqual(socket.create_connection.call_count, 1)
self.assertEqual(self.MockCreateConn.call_count, 1)

def test_recv__failure_sets_dirty_connection(self):

@@ -160,5 +160,5 @@ def test_close__object_is_reusable(self):
# will re-connect and send data to the socket
self.conn.close()
self.conn.send(self.config['request_id'], self.config['payload'])
self.assertEqual(socket.create_connection.call_count, 1)
self.assertEqual(self.MockCreateConn.call_count, 1)
self.conn._sock.sendall.assert_called_with(self.config['payload'])
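
The test_conn.py hunks replace direct socket.create_connection mock lookups with self.MockCreateConn, which presumably holds the MagicMock returned when setUp() patches socket.create_connection. A simplified, self-contained sketch of that pattern, using unittest.mock rather than the mock/unittest2 packages the repo itself uses:

    import socket
    import unittest
    from unittest import mock

    class ConnSetupSketch(unittest.TestCase):
        def setUp(self):
            patcher = mock.patch('socket.create_connection')
            self.MockCreateConn = patcher.start()  # keep the mock around for assertions
            self.addCleanup(patcher.stop)          # undo the patch after every test

        def test_connect_is_recorded(self):
            socket.create_connection(('localhost', 9092), 120)
            self.MockCreateConn.assert_called_with(('localhost', 9092), 120)

    if __name__ == '__main__':
        unittest.main()
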
1 change: 1 addition & 0 deletions test/testutil.py
@@ -48,6 +48,7 @@ def get_open_port():
class KafkaIntegrationTestCase(unittest2.TestCase):
create_client = True
topic = None
server = None

def setUp(self):
super(KafkaIntegrationTestCase, self).setUp()
8 changes: 7 additions & 1 deletion tox.ini
@@ -1,5 +1,5 @@
[tox]
envlist = py26, py27, pypy
envlist = lint, py26, py27, pypy
[testenv]
deps =
unittest2
@@ -11,3 +11,9 @@ commands =
nosetests {posargs:-v --with-id --with-coverage --cover-erase --cover-package kafka}
setenv =
PROJECT_ROOT = {toxinidir}
[testenv:lint]
deps =
unittest2
mock
pylint
commands = pylint {posargs: -E --ignore=queue.py kafka test}
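
The new lint environment runs pylint with -E, which limits output to error-class messages (undefined names, bad call signatures, and similar) and suppresses warning and convention checks; kafka/queue.py is excluded via --ignore. An illustrative module, not part of the repo, showing what -E does and does not report:

    # pylint -E reports the undefined name below (E0602, undefined-variable)
    # but stays silent about the naming-convention issue (C0103), which is
    # only a convention-class message.
    badConstantName = 1

    def broken():
        return undefined_name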
