Skip to content

Commit

Permalink
tests: ceph-disk workunit increase verbosity
Browse files Browse the repository at this point in the history
So that reading the teuthology log is enough in most cases to figure out
the cause of the error.

Signed-off-by: Loic Dachary <[email protected]>
  • Loading branch information
ldachary committed Dec 21, 2015
1 parent b271a06 commit fd7fe8c
Show file tree
Hide file tree
Showing 3 changed files with 57 additions and 38 deletions.
91 changes: 54 additions & 37 deletions qa/workunits/ceph-disk/ceph-disk-test.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@
# When debugging these tests (must be root), here are a few useful commands:
#
# export PATH=..:$PATH
# ln -sf /home/ubuntu/ceph/src/ceph-disk /usr/sbin/ceph-disk
# ln -sf /home/ubuntu/ceph/src/ceph-disk $(which ceph-disk)
# ln -sf /home/ubuntu/ceph/udev/95-ceph-osd.rules /lib/udev/rules.d/95-ceph-osd.rules
# ln -sf /home/ubuntu/ceph/systemd/[email protected] /usr/lib/systemd/system/[email protected]
# ceph-disk.conf will be silently ignored if it is a symbolic link or a hard link /var/log/upstart for logs
Expand Down Expand Up @@ -60,9 +60,25 @@ def helper(command):

@staticmethod
def sh(command):
    """Run *command* through the shell, streaming every output line to
    the debug log as it is produced, and return the complete output.

    stderr is folded into stdout so the teuthology log shows all
    output interleaved in order.

    :param command: shell command line to execute
    :returns: the command's combined stdout+stderr as a single string
    :raises subprocess.CalledProcessError: if the command exits non-zero
    """
    LOG.debug(":sh: " + command)
    proc = subprocess.Popen(
        args=command,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,  # merge stderr into stdout
        shell=True,
        bufsize=1)  # line-buffered so lines reach the log promptly
    lines = []
    with proc.stdout:
        # Stream line by line instead of waiting for completion, so
        # long-running commands show progress in the log.
        for line in iter(proc.stdout.readline, b''):
            line = line.decode('utf-8')
            lines.append(line)
            LOG.debug(str(line.strip()))
    if proc.wait() != 0:
        raise subprocess.CalledProcessError(
            returncode=proc.returncode,
            cmd=command
        )
    return "".join(lines)

def unused_disks(self, pattern='[vs]d.'):
names = filter(
Expand Down Expand Up @@ -116,10 +132,11 @@ def get_journal_partition(self, uuid):
"journal for uuid = " + uuid + " not found in " + str(disks))

def destroy_osd(self, uuid):
    """Deactivate and destroy the OSD whose fsid is *uuid*, zapping
    its disk.

    The id printed by ``ceph osd create`` for the existing fsid is
    fed to --deactivate-by-id / --destroy-by-id; the output must be
    stripped because sh() preserves the trailing newline.

    :param uuid: the fsid of the OSD to tear down
    """
    # NOTE: 'id' renamed to avoid shadowing the builtin.
    osd_id = self.sh("ceph osd create " + uuid).strip()
    self.sh("""
    set -xe
    ceph-disk --verbose deactivate --deactivate-by-id {id}
    ceph-disk --verbose destroy --destroy-by-id {id} --zap
    """.format(id=osd_id))

@staticmethod
Expand Down Expand Up @@ -171,7 +188,7 @@ class TestCephDisk(object):
def setup_class(self):
    """One-time test-class setup.

    Enables DEBUG logging, installs multipath tools on CentOS
    (needed by the multipath test), and configures a small
    (100 MB) osd journal before saving ceph.conf.
    """
    logging.basicConfig(level=logging.DEBUG)
    c = CephDisk()
    # sh() keeps the trailing newline, so strip before comparing.
    if c.sh("lsb_release -si").strip() == 'CentOS':
        c.helper("install multipath-tools device-mapper-multipath")
    c.conf['global']['osd journal size'] = 100
    c.save_conf()
Expand All @@ -188,17 +205,17 @@ def test_deactivate_reactivate_osd(self):
have_journal = True
disk = c.unused_disks()[0]
osd_uuid = str(uuid.uuid1())
c.sh("ceph-disk zap " + disk)
c.sh("ceph-disk prepare --osd-uuid " + osd_uuid +
c.sh("ceph-disk --verbose zap " + disk)
c.sh("ceph-disk --verbose prepare --osd-uuid " + osd_uuid +
" " + disk)
c.wait_for_osd_up(osd_uuid)
device = json.loads(c.sh("ceph-disk list --format json " + disk))[0]
assert len(device['partitions']) == 2
c.check_osd_status(osd_uuid, have_journal)
data_partition = c.get_osd_partition(osd_uuid)
c.sh("ceph-disk deactivate " + data_partition['path'])
c.sh("ceph-disk --verbose deactivate " + data_partition['path'])
c.wait_for_osd_down(osd_uuid)
c.sh("ceph-disk activate " + data_partition['path'] + " --reactivate")
c.sh("ceph-disk --verbose activate " + data_partition['path'] + " --reactivate")
# check again
c.wait_for_osd_up(osd_uuid)
device = json.loads(c.sh("ceph-disk list --format json " + disk))[0]
Expand All @@ -211,7 +228,7 @@ def test_destroy_osd_by_id(self):
c = CephDisk()
disk = c.unused_disks()[0]
osd_uuid = str(uuid.uuid1())
c.sh("ceph-disk prepare --osd-uuid " + osd_uuid + " " + disk)
c.sh("ceph-disk --verbose prepare --osd-uuid " + osd_uuid + " " + disk)
c.wait_for_osd_up(osd_uuid)
c.check_osd_status(osd_uuid)
c.destroy_osd(osd_uuid)
Expand All @@ -220,14 +237,14 @@ def test_destroy_osd_by_dev_path(self):
c = CephDisk()
disk = c.unused_disks()[0]
osd_uuid = str(uuid.uuid1())
c.sh("ceph-disk prepare --osd-uuid " + osd_uuid + " " + disk)
c.sh("ceph-disk --verbose prepare --osd-uuid " + osd_uuid + " " + disk)
c.wait_for_osd_up(osd_uuid)
partition = c.get_osd_partition(osd_uuid)
assert partition['type'] == 'data'
assert partition['state'] == 'active'
c.sh("ceph-disk deactivate " + partition['path'])
c.sh("ceph-disk --verbose deactivate " + partition['path'])
c.wait_for_osd_down(osd_uuid)
c.sh("ceph-disk destroy " + partition['path'] + " --zap")
c.sh("ceph-disk --verbose destroy " + partition['path'] + " --zap")

def test_deactivate_reactivate_dmcrypt_plain(self):
c = CephDisk()
Expand All @@ -246,7 +263,7 @@ def activate_reactivate_dmcrypt(self, type):
disk = c.unused_disks()[0]
osd_uuid = str(uuid.uuid1())
journal_uuid = str(uuid.uuid1())
c.sh("ceph-disk zap " + disk)
c.sh("ceph-disk --verbose zap " + disk)
c.sh("ceph-disk --verbose prepare " +
" --osd-uuid " + osd_uuid +
" --journal-uuid " + journal_uuid +
Expand All @@ -255,9 +272,9 @@ def activate_reactivate_dmcrypt(self, type):
c.wait_for_osd_up(osd_uuid)
c.check_osd_status(osd_uuid, have_journal)
data_partition = c.get_osd_partition(osd_uuid)
c.sh("ceph-disk deactivate " + data_partition['path'])
c.sh("ceph-disk --verbose deactivate " + data_partition['path'])
c.wait_for_osd_down(osd_uuid)
c.sh("ceph-disk activate " + data_partition['path'] +
c.sh("ceph-disk --verbose activate " + data_partition['path'] +
" --reactivate" + " --dmcrypt")
# check again
c.wait_for_osd_up(osd_uuid)
Expand All @@ -281,7 +298,7 @@ def activate_dmcrypt(self, type):
disk = c.unused_disks()[0]
osd_uuid = str(uuid.uuid1())
journal_uuid = str(uuid.uuid1())
c.sh("ceph-disk zap " + disk)
c.sh("ceph-disk --verbose zap " + disk)
c.sh("ceph-disk --verbose prepare " +
" --osd-uuid " + osd_uuid +
" --journal-uuid " + journal_uuid +
Expand All @@ -295,10 +312,10 @@ def test_activate_no_journal(self):
c = CephDisk()
disk = c.unused_disks()[0]
osd_uuid = str(uuid.uuid1())
c.sh("ceph-disk zap " + disk)
c.sh("ceph-disk --verbose zap " + disk)
c.conf['global']['osd objectstore'] = 'memstore'
c.save_conf()
c.sh("ceph-disk prepare --osd-uuid " + osd_uuid +
c.sh("ceph-disk --verbose prepare --osd-uuid " + osd_uuid +
" " + disk)
c.wait_for_osd_up(osd_uuid)
device = json.loads(c.sh("ceph-disk list --format json " + disk))[0]
Expand All @@ -316,8 +333,8 @@ def test_activate_with_journal(self):
have_journal = True
disk = c.unused_disks()[0]
osd_uuid = str(uuid.uuid1())
c.sh("ceph-disk zap " + disk)
c.sh("ceph-disk prepare --osd-uuid " + osd_uuid +
c.sh("ceph-disk --verbose zap " + disk)
c.sh("ceph-disk --verbose prepare --osd-uuid " + osd_uuid +
" " + disk)
c.wait_for_osd_up(osd_uuid)
device = json.loads(c.sh("ceph-disk list --format json " + disk))[0]
Expand All @@ -333,8 +350,8 @@ def test_activate_with_journal_dev_is_symlink(self):
tempdir = tempfile.mkdtemp()
symlink = os.path.join(tempdir, 'osd')
os.symlink(disk, symlink)
c.sh("ceph-disk zap " + symlink)
c.sh("ceph-disk prepare --osd-uuid " + osd_uuid +
c.sh("ceph-disk --verbose zap " + symlink)
c.sh("ceph-disk --verbose prepare --osd-uuid " + osd_uuid +
" " + symlink)
c.wait_for_osd_up(osd_uuid)
device = json.loads(c.sh("ceph-disk list --format json " + symlink))[0]
Expand All @@ -346,7 +363,7 @@ def test_activate_with_journal_dev_is_symlink(self):
assert journal_partition
c.helper("pool_read_write")
c.destroy_osd(osd_uuid)
c.sh("ceph-disk zap " + symlink)
c.sh("ceph-disk --verbose zap " + symlink)
os.unlink(symlink)
os.rmdir(tempdir)

Expand All @@ -358,7 +375,7 @@ def test_activate_separated_journal(self):
osd_uuid = self.activate_separated_journal(data_disk, journal_disk)
c.helper("pool_read_write 1") # 1 == pool size
c.destroy_osd(osd_uuid)
c.sh("ceph-disk zap " + data_disk + " " + journal_disk)
c.sh("ceph-disk --verbose zap " + data_disk + " " + journal_disk)

def test_activate_separated_journal_dev_is_symlink(self):
c = CephDisk()
Expand All @@ -374,7 +391,7 @@ def test_activate_separated_journal_dev_is_symlink(self):
data_symlink, journal_symlink)
c.helper("pool_read_write 1") # 1 == pool size
c.destroy_osd(osd_uuid)
c.sh("ceph-disk zap " + data_symlink + " " + journal_symlink)
c.sh("ceph-disk --verbose zap " + data_symlink + " " + journal_symlink)
os.unlink(data_symlink)
os.unlink(journal_symlink)
os.rmdir(tempdir)
Expand All @@ -383,7 +400,7 @@ def activate_separated_journal(self, data_disk, journal_disk):
c = CephDisk()
have_journal = True
osd_uuid = str(uuid.uuid1())
c.sh("ceph-disk prepare --osd-uuid " + osd_uuid +
c.sh("ceph-disk --verbose prepare --osd-uuid " + osd_uuid +
" " + data_disk + " " + journal_disk)
c.wait_for_osd_up(osd_uuid)
device = json.loads(
Expand Down Expand Up @@ -415,7 +432,7 @@ def test_activate_two_separated_journal(self):
c.helper("pool_read_write 2") # 2 == pool size
c.destroy_osd(osd_uuid)
c.destroy_osd(other_osd_uuid)
c.sh("ceph-disk zap " + data_disk + " " +
c.sh("ceph-disk --verbose zap " + data_disk + " " +
journal_disk + " " + other_data_disk)

#
Expand All @@ -433,12 +450,12 @@ def test_activate_reuse_journal(self):
journal_partition = c.get_journal_partition(osd_uuid)
journal_path = journal_partition['path']
c.destroy_osd(osd_uuid)
c.sh("ceph-disk zap " + data_disk)
c.sh("ceph-disk --verbose zap " + data_disk)
osd_uuid = str(uuid.uuid1())
#
# Create another OSD with the journal partition of the previous OSD
#
c.sh("ceph-disk prepare --osd-uuid " + osd_uuid +
c.sh("ceph-disk --verbose prepare --osd-uuid " + osd_uuid +
" " + data_disk + " " + journal_path)
c.helper("pool_read_write 1") # 1 == pool size
c.wait_for_osd_up(osd_uuid)
Expand All @@ -452,11 +469,11 @@ def test_activate_reuse_journal(self):
#
assert journal_partition['path'] == journal_path
c.destroy_osd(osd_uuid)
c.sh("ceph-disk zap " + data_disk + " " + journal_disk)
c.sh("ceph-disk --verbose zap " + data_disk + " " + journal_disk)

def test_activate_multipath(self):
c = CephDisk()
if c.sh("lsb_release -si") != 'CentOS':
if c.sh("lsb_release -si").strip() != 'CentOS':
pytest.skip(
"see issue https://bugs.launchpad.net/ubuntu/+source/multipath-tools/+bug/1488688")
c.ensure_sd()
Expand All @@ -475,8 +492,8 @@ def test_activate_multipath(self):
# Prepare the multipath device
#
osd_uuid = str(uuid.uuid1())
c.sh("ceph-disk zap " + multipath)
c.sh("ceph-disk prepare --osd-uuid " + osd_uuid +
c.sh("ceph-disk --verbose zap " + multipath)
c.sh("ceph-disk --verbose prepare --osd-uuid " + osd_uuid +
" " + multipath)
c.wait_for_osd_up(osd_uuid)
device = json.loads(
Expand Down
2 changes: 1 addition & 1 deletion qa/workunits/ceph-disk/ceph-disk.sh
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@ if ! which py.test > /dev/null; then
exit 1
fi

sudo env PATH=$(dirname $0)/..:$PATH py.test -v $(dirname $0)/ceph-disk-test.py
sudo env PATH=$(dirname $0)/..:$PATH py.test -s -v $(dirname $0)/ceph-disk-test.py
result=$?

# own whatever was created as a side effect of the py.test run
Expand Down
2 changes: 2 additions & 0 deletions qa/workunits/ceph-helpers-root.sh
Original file line number Diff line number Diff line change
Expand Up @@ -86,4 +86,6 @@ function pool_read_write() {

#######################################################################

set -x

"$@"

0 comments on commit fd7fe8c

Please sign in to comment.