Merge pull request ceph#33246 from jan--f/c-v-batch-fix-has_common_vg
ceph-volume: use get_device_vgs in has_common_vg
jan--f authored Feb 13, 2020
2 parents 5b5002d + 2c5a8c3 commit fec03c5
Showing 3 changed files with 27 additions and 41 deletions.
11 changes: 4 additions & 7 deletions src/ceph-volume/ceph_volume/devices/lvm/strategies/strategies.py
@@ -1,5 +1,6 @@
 import json
 from ceph_volume.util.prepare import osd_id_available
+from ceph_volume.api.lvm import get_device_vgs
 
 class Strategy(object):
 
@@ -64,10 +65,6 @@ class MixedStrategy(Strategy):
     def get_common_vg(self, devs):
         # find all the vgs associated with the current device
         for dev in devs:
-            for pv in dev.pvs_api:
-                vg = self.system_vgs.get(vg_name=pv.vg_name)
-                if not vg:
-                    continue
-                # this should give us just one VG, it would've been caught by
-                # the validator otherwise
-                return vg
+            vgs = get_device_vgs(dev.abspath)
+            if vgs:
+                return vgs[0]
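
Reviewer note: the old branch matched each device's cached pvs_api entries against a pre-fetched system-wide VG listing; the new branch simply asks LVM which VGs sit on the device path. A minimal, self-contained sketch of the new shape — VolumeGroup and get_device_vgs below are stand-ins for the ceph_volume.api.lvm objects, not the real implementation:

```python
from collections import namedtuple

# Stand-in for ceph_volume's VolumeGroup; the real class carries more fields.
VolumeGroup = namedtuple('VolumeGroup', ['vg_name'])

def get_device_vgs(abspath):
    # Stand-in for ceph_volume.api.lvm.get_device_vgs, which queries LVM
    # for the volume groups whose physical volumes live on this device.
    fake_lvm = {'/dev/sdb': [VolumeGroup(vg_name='fast')]}
    return fake_lvm.get(abspath, [])

def get_common_vg(dev_paths):
    # Mirrors the refactored MixedStrategy.get_common_vg: return the first
    # VG found on any device; the validator has already guaranteed that all
    # fast devices share a single VG, so one hit is enough.
    for path in dev_paths:
        vgs = get_device_vgs(path)
        if vgs:
            return vgs[0]

print(get_common_vg(['/dev/sda', '/dev/sdb']))  # VolumeGroup(vg_name='fast')
```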
@@ -43,14 +43,13 @@ def has_common_vg(ssd_devices):
     Ensure that devices have a common VG between them
     """
     msg = 'Could not find a common VG between devices: %s'
-    system_vgs = lvm.VolumeGroups()
     ssd_vgs = {}
 
     for ssd_device in ssd_devices:
-        for pv in ssd_device.pvs_api:
-            vg = system_vgs.get(vg_name=pv.vg_name)
-            if not vg:
-                continue
+        vgs = lvm.get_device_vgs(ssd_device.abspath)
+        if not vgs:
+            continue
+        for vg in vgs:
             try:
                 ssd_vgs[vg.name].append(ssd_device.abspath)
             except KeyError:
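
The tail of has_common_vg is collapsed in this view. Assuming it raises RuntimeError when the collected devices span more than one VG — which is what the updated test below asserts via the 'Could not find a common VG between devices' message — the whole validator reduces to this sketch, with get_device_vgs injected as a stand-in for ceph_volume.api.lvm.get_device_vgs:

```python
from collections import namedtuple

VG = namedtuple('VG', ['name'])  # stand-in for ceph_volume's VolumeGroup

def has_common_vg(ssd_devices, get_device_vgs):
    # Group device paths by the VG name(s) LVM reports for them; more than
    # one key means the fast devices do not share a common VG.
    msg = 'Could not find a common VG between devices: %s'
    ssd_vgs = {}
    for dev in ssd_devices:
        for vg in get_device_vgs(dev):
            ssd_vgs.setdefault(vg.name, []).append(dev)
    if len(ssd_vgs) > 1:
        raise RuntimeError(msg % ', '.join(sorted(ssd_vgs)))

# A shared VG passes silently; split VGs raise.
lookup = {'/dev/sda': [VG('fast1')], '/dev/sdb': [VG('fast2')]}
has_common_vg(['/dev/sda'], lookup.get)
try:
    has_common_vg(['/dev/sda', '/dev/sdb'], lookup.get)
except RuntimeError as e:
    print(e)  # Could not find a common VG between devices: fast1, fast2
```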
@@ -1,4 +1,5 @@
 import pytest
+from mock.mock import patch
 from ceph_volume.devices.lvm.strategies import filestore
 from ceph_volume.api import lvm
 
@@ -155,21 +156,23 @@ def test_hdd_device_is_lvm_member_fails(self, stub_vgs, fakedevice, factory, conf_ceph):
             filestore.MixedType.with_auto_devices(args, devices)
         assert 'Unable to use device, already a member of LVM' in str(error.value)
 
-    def test_ssd_is_lvm_member_doesnt_fail(self, volumes, stub_vgs, fakedevice, factory, conf_ceph):
-        # fast PV, because ssd is an LVM member
-        CephPV = lvm.PVolume(vg_name='fast', pv_name='/dev/sda', pv_tags='')
+    @patch('ceph_volume.devices.lvm.strategies.strategies.MixedStrategy.get_common_vg')
+    def test_ssd_is_lvm_member_doesnt_fail(self,
+                                           patched_get_common_vg,
+                                           volumes,
+                                           fakedevice,
+                                           factory,
+                                           conf_ceph):
         ssd = fakedevice(
-            used_by_ceph=False, is_lvm_member=True, rotational=False, sys_api=dict(size=6073740000), pvs_api=[CephPV]
+            used_by_ceph=False, is_lvm_member=True, rotational=False, sys_api=dict(size=6073740000)
         )
         hdd = fakedevice(used_by_ceph=False, is_lvm_member=False, rotational=True, sys_api=dict(size=6073740000))
-        # when get_api_vgs() gets called, it will return this one VG
-        stub_vgs([
-            dict(
+        vg = lvm.VolumeGroup(
                 vg_name='fast', lv_name='foo',
                 lv_path='/dev/vg/foo', lv_tags="ceph.type=data",
-                vg_extent_size=1024*1024*1024, vg_free_count=7
-            )
-        ])
+                vg_extent_size=1024*1024*1024, vg_free_count=7)
+        patched_get_common_vg.return_value = vg
+
 
         conf_ceph(get_safe=lambda *a: '5120')
         args = factory(filtered_devices=[], osds_per_device=1,
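
The rewritten test drops the PVolume and stub_vgs fixtures entirely: patching MixedStrategy.get_common_vg with a return_value means the strategy never touches LVM and just receives the canned VolumeGroup. A compact, generic illustration of the return_value pattern — the class and method here are invented for the example, not the ceph test fixtures, and the ceph suite imports patch from the standalone mock package (mock.mock) rather than unittest.mock:

```python
from unittest.mock import patch

class Strategy:
    def get_common_vg(self, devs):
        # In the real code this would end up querying LVM.
        raise RuntimeError('no LVM available in unit tests')

    def plan(self, devs):
        return self.get_common_vg(devs)

# return_value: every call to the patched method yields the same canned
# object, so the code under test runs without any LVM on the machine.
with patch.object(Strategy, 'get_common_vg', return_value={'vg_name': 'fast'}):
    print(Strategy().plan(['/dev/sda']))  # {'vg_name': 'fast'}
```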
@@ -180,37 +183,24 @@ def test_ssd_is_lvm_member_doesnt_fail(self, volumes, stub_vgs, fakedevice, factory, conf_ceph):
         assert result['journal']['percentage'] == 71
         assert result['journal']['human_readable_size'] == '5.00 GB'
 
-    def test_no_common_vg(self, volumes, stub_vgs, fakedevice, factory, conf_ceph):
-        # fast PV, because ssd is an LVM member
-        CephPV1 = lvm.PVolume(vg_name='fast1', pv_name='/dev/sda', pv_tags='')
-        CephPV2 = lvm.PVolume(vg_name='fast2', pv_name='/dev/sdb', pv_tags='')
+    @patch('ceph_volume.api.lvm.get_device_vgs')
+    def test_no_common_vg(self, patched_get_device_vgs, volumes, fakedevice, factory, conf_ceph):
+        patched_get_device_vgs.side_effect = lambda x: [lvm.VolumeGroup(vg_name='{}'.format(x[-1]), vg_tags='')]
         ssd1 = fakedevice(
-            used_by_ceph=False, is_lvm_member=True, rotational=False, sys_api=dict(size=6073740000), pvs_api=[CephPV1]
+            used_by_ceph=False, is_lvm_member=True, rotational=False, sys_api=dict(size=6073740000)
         )
         ssd2 = fakedevice(
-            used_by_ceph=False, is_lvm_member=True, rotational=False, sys_api=dict(size=6073740000), pvs_api=[CephPV2]
+            used_by_ceph=False, is_lvm_member=True, rotational=False, sys_api=dict(size=6073740000)
         )
         hdd = fakedevice(used_by_ceph=False, is_lvm_member=False, rotational=True, sys_api=dict(size=6073740000))
-        # when get_api_vgs() gets called, it will return this one VG
-        stub_vgs([
-            dict(
-                vg_free='7g', vg_name='fast1', lv_name='foo',
-                lv_path='/dev/vg/fast1', lv_tags="ceph.type=data"
-            ),
-            dict(
-                vg_free='7g', vg_name='fast2', lv_name='foo',
-                lv_path='/dev/vg/fast2', lv_tags="ceph.type=data"
-            )
-        ])
 
         conf_ceph(get_safe=lambda *a: '5120')
         args = factory(filtered_devices=[], osds_per_device=1,
                        journal_size=None, osd_ids=[])
         devices = [ssd1, ssd2, hdd]
         with pytest.raises(RuntimeError) as error:
             filestore.MixedType.with_auto_devices(args, devices)
-
-        assert 'Could not find a common VG between devices' in str(error.value)
+        assert 'Could not find a common VG between devices' in str(error.value)
 
     def test_ssd_device_fails_multiple_osds(self, stub_vgs, fakedevice, factory, conf_ceph):
         conf_ceph(get_safe=lambda *a: '15120')
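
Here the test needs a different VG per device, so a single return_value will not do: the side_effect lambda derives the VG name from the last character of the device path, giving /dev/sda and /dev/sdb the distinct VGs 'a' and 'b' and forcing the validator to raise. A standalone illustration of side_effect computing a fresh result from each call's arguments — the dict shape below is a stand-in, not the real VolumeGroup:

```python
from unittest.mock import MagicMock

# side_effect with a callable: the mock invokes it with the call's arguments
# and returns its result, so each device path gets its own fake VG.
fake_get_device_vgs = MagicMock(side_effect=lambda path: [{'vg_name': path[-1]}])

print(fake_get_device_vgs('/dev/sda'))  # [{'vg_name': 'a'}]
print(fake_get_device_vgs('/dev/sdb'))  # [{'vg_name': 'b'}]
```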
