Skip to content

Commit

Permalink
Merge pull request ceph#23532 from alfredodeza/wip-rm24960
Browse files Browse the repository at this point in the history
ceph-volume:  expand auto engine for single type devices on filestore

Reviewed-by: Andrew Schoen <[email protected]>
  • Loading branch information
andrewschoen authored Aug 14, 2018
2 parents b2fe116 + 9066792 commit 2bec876
Show file tree
Hide file tree
Showing 57 changed files with 1,377 additions and 204 deletions.
210 changes: 100 additions & 110 deletions doc/rados/configuration/osd-config-ref.rst

Large diffs are not rendered by default.

55 changes: 13 additions & 42 deletions src/ceph-volume/ceph_volume/devices/lvm/strategies/bluestore.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,31 +5,7 @@
from ceph_volume.api import lvm
from . import validators
from ceph_volume.devices.lvm.create import Create

# TODO: get these templates out so filestore can re-use them

osd_header_template = """
{:-^80}""".format('')


osd_component_titles = """
Type Path LV Size % of device"""


osd_component_template = """
{_type: <15} {path: <25} {size: <15} {percent}%"""


header_template = """
Total OSDs: {total_osds}
"""

vg_template = """
Solid State VG:
Targets: {target: <25} Total size: {total_lv_size: <25}
Total LVs: {total_lvs: <25} Size per LV: {lv_size: <25}
Devices: {block_db_devices}
"""
from ceph_volume.util import templates


class SingleType(object):
Expand All @@ -51,14 +27,14 @@ def report_json(self):

def report_pretty(self):
string = ""
string += header_template.format(
string += templates.total_osds.format(
total_osds=len(self.hdds) or len(self.ssds) * 2
)
string += osd_component_titles
string += templates.osd_component_titles

for osd in self.computed['osds']:
string += osd_header_template
string += osd_component_template.format(
string += templates.osd_header
string += templates.osd_component.format(
_type='[data]',
path=osd['data']['path'],
size=osd['data']['human_readable_size'],
Expand Down Expand Up @@ -159,16 +135,11 @@ def report_pretty(self):
db_size = str(disk.Size(b=(vg_extents['sizes'])))

string = ""
string += header_template.format(
targets='block.db',
total_lv_size=str(self.total_ssd_size),
total_lvs=vg_extents['parts'],
block_lv_size=db_size,
block_db_devices=', '.join([ssd['path'] for ssd in self.ssds]),
lv_size=str(disk.Size(b=(vg_extents['sizes']))),
string += templates.total_osds.format(
total_osds=len(self.hdds)
)
string += vg_template.format(

string += templates.ssd_volume_group.format(
target='block.db',
total_lv_size=str(self.total_ssd_size),
total_lvs=vg_extents['parts'],
Expand All @@ -178,16 +149,16 @@ def report_pretty(self):
total_osds=len(self.hdds)
)

string += osd_component_titles
string += templates.osd_component_titles
for osd in self.computed['osds']:
string += osd_header_template
string += osd_component_template.format(
string += templates.osd_header
string += templates.osd_component.format(
_type='[data]',
path=osd['data']['path'],
size=osd['data']['human_readable_size'],
percent=osd['data']['percentage'])

string += osd_component_template.format(
string += templates.osd_component.format(
_type='[block.db]',
path='(volume-group/lv)',
size=osd['block.db']['human_readable_size'],
Expand Down Expand Up @@ -265,7 +236,7 @@ def validate(self):
# the minimum allowed for block.db
self.total_ssd_size = disk.Size(b=0)
for ssd in self.ssds:
self.total_ssd_size + disk.Size(b=ssd['size'])
self.total_ssd_size += disk.Size(b=ssd['size'])

self.block_db_size = self.total_ssd_size / self.db_lvs
self.vg_extents = lvm.sizing(self.total_ssd_size.b, parts=self.db_lvs)
Expand Down
115 changes: 114 additions & 1 deletion src/ceph-volume/ceph_volume/devices/lvm/strategies/filestore.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,120 @@
from __future__ import print_function
import json
from ceph_volume.util import disk, prepare
from ceph_volume.api import lvm
from . import validators
from ceph_volume.devices.lvm.create import Create
from ceph_volume.util import templates


class SingleType(object):
    """
    Support for all SSDs, or all HDDs, data and journal LVs will be colocated
    in the same device
    """
    # NOTE: a stray ``pass`` statement used to precede this docstring, which
    # made it a dead string expression instead of the class ``__doc__``.

    def __init__(self, devices, args):
        self.args = args
        self.devices = devices
        # 'rotational' comes in as a string flag: '1' spinning disk, '0' SSD
        self.hdds = [device for device in devices if device['rotational'] == '1']
        self.ssds = [device for device in devices if device['rotational'] == '0']
        self.computed = {'osds': [], 'vgs': []}
        self.validate()
        self.compute()

    def report_json(self):
        """Print the computed OSD/VG layout as indented, key-sorted JSON."""
        print(json.dumps(self.computed, indent=4, sort_keys=True))

    def report_pretty(self):
        """Print a human-readable table of the computed OSD layout."""
        string = ""
        # NOTE(review): ``len(self.ssds) * 2`` mirrors the bluestore strategy,
        # but compute() below creates exactly one OSD per device — confirm the
        # multiplier is intended for filestore.
        string += templates.total_osds.format(
            total_osds=len(self.hdds) or len(self.ssds) * 2
        )
        string += templates.osd_component_titles

        for osd in self.computed['osds']:
            string += templates.osd_header
            string += templates.osd_component.format(
                _type='[data]',
                path=osd['data']['path'],
                size=osd['data']['human_readable_size'],
                percent=osd['data']['percentage'],
            )
            string += templates.osd_component.format(
                _type='[journal]',
                path=osd['journal']['path'],
                size=osd['journal']['human_readable_size'],
                percent=osd['journal']['percentage'],
            )

        print(string)

    def validate(self):
        """
        Ensure that the minimum requirements for this type of scenario is
        met, raise an error if the provided devices would not work
        """
        # validate minimum size for all devices
        validators.minimum_device_size(self.devices)

    def compute(self):
        """
        Go through the rules needed to properly size the lvs, return
        a dictionary with the result
        """
        # chose whichever is the one group we have to compute against
        devices = self.hdds or self.ssds
        osds = self.computed['osds']
        vgs = self.computed['vgs']
        for device in devices:
            device_size = disk.Size(b=device['size'])
            journal_size = prepare.get_journal_size(lv_format=False)
            # data gets whatever is left after the journal is carved out
            data_size = device_size - journal_size
            data_percentage = data_size * 100 / device_size
            # two LVs per device: one for data, one for the journal
            vgs.append({'devices': [device['path']], 'parts': 2})
            osd = {'data': {}, 'journal': {}}
            osd['data']['path'] = device['path']
            osd['data']['size'] = data_size.b
            osd['data']['percentage'] = int(data_percentage)
            osd['data']['human_readable_size'] = str(data_size)
            osd['journal']['path'] = device['path']
            osd['journal']['size'] = journal_size.b
            osd['journal']['percentage'] = int(100 - data_percentage)
            osd['journal']['human_readable_size'] = str(journal_size)
            osds.append(osd)

    def execute(self):
        """
        Create vgs/lvs from the incoming set of devices, assign their roles
        (data, journal) and offload the OSD creation to ``lvm create``
        """
        osd_vgs = []

        # create the vgs first, one per device (since this is colocating, it
        # picks the 'data' path)
        for osd in self.computed['osds']:
            vg = lvm.create_vg(osd['data']['path'])
            osd_vgs.append(vg)

        # create the lvs from the vgs captured in the beginning
        for vg in osd_vgs:
            # this is called again, getting us the LVM formatted string
            journal_size = prepare.get_journal_size()
            journal_lv = lvm.create_lv('osd-journal', vg.name, size=journal_size)
            # no extents or size means it will use 100%FREE
            data_lv = lvm.create_lv('osd-data', vg.name)

            command = ['--filestore', '--data']
            command.append('%s/%s' % (vg.name, data_lv.name))
            command.extend(['--journal', '%s/%s' % (vg.name, journal_lv.name)])
            if self.args.dmcrypt:
                command.append('--dmcrypt')
            if self.args.no_systemd:
                command.append('--no-systemd')
            if self.args.crush_device_class:
                command.extend(['--crush-device-class', self.args.crush_device_class])

            Create(command).main()


class MixedType(object):
Expand Down
71 changes: 36 additions & 35 deletions src/ceph-volume/ceph_volume/devices/lvm/zap.py
Original file line number Diff line number Diff line change
Expand Up @@ -46,6 +46,19 @@ class Zap(object):
def __init__(self, argv):
self.argv = argv

def unmount_lv(self, lv):
    """
    Unmount the OSD directory backed by ``lv`` (if mounted) and close its
    dm-crypt mapping when the volume is encrypted.
    """
    cluster = lv.tags.get('ceph.cluster_name')
    osd_id = lv.tags.get('ceph.osd_id')
    if cluster and osd_id:
        # a tagged OSD lv maps to the conventional osd directory
        mount_point = "/var/lib/ceph/osd/{}-{}".format(lv.tags['ceph.cluster_name'], lv.tags['ceph.osd_id'])
    else:
        mount_point = lv.path
    dmcrypt_uuid = lv.lv_uuid
    dmcrypt = lv.encrypted
    if system.path_is_mounted(mount_point):
        mlogger.info("Unmounting %s", mount_point)
        system.unmount(mount_point)
    if dmcrypt and dmcrypt_uuid:
        self.dmcrypt_close(dmcrypt_uuid)

@decorators.needs_root
def zap(self, args):
device = args.device
Expand All @@ -56,64 +69,52 @@ def zap(self, args):
if lv:
# we are zapping a logical volume
path = lv.lv_path
self.unmount_lv(lv)
else:
# we are zapping a partition
#TODO: ensure device is a partition
path = device
# check to if it is encrypted to close
partuuid = disk.get_partuuid(device)
if encryption.status("/dev/mapper/{}".format(partuuid)):
dmcrypt_uuid = partuuid
self.dmcrypt_close(dmcrypt_uuid)

mlogger.info("Zapping: %s", path)

# check if there was a pv created with the
# name of device
pv = api.get_pv(pv_name=device)
if pv:
pvs = api.PVolumes()
pvs.filter(pv_name=device)
vgs = set([pv.vg_name for pv in pvs])
for pv in pvs:
vg_name = pv.vg_name
lv = api.get_lv(vg_name=vg_name)
lv = api.get_lv(vg_name=vg_name, lv_uuid=pv.lv_uuid)

dmcrypt = False
dmcrypt_uuid = None
if lv:
if lv.tags.get('ceph.cluster_name') and lv.tags.get('ceph.osd_id'):
lv_path = "/var/lib/ceph/osd/{}-{}".format(lv.tags['ceph.cluster_name'], lv.tags['ceph.osd_id'])
else:
lv_path = lv.path
dmcrypt_uuid = lv.lv_uuid
dmcrypt = lv.encrypted
if system.path_is_mounted(lv_path):
mlogger.info("Unmounting %s", lv_path)
system.unmount(lv_path)
else:
# we're most likely dealing with a partition here, check to
# see if it was encrypted
partuuid = disk.get_partuuid(device)
if encryption.status("/dev/mapper/{}".format(partuuid)):
dmcrypt_uuid = partuuid
dmcrypt = True
if lv:
self.unmount_lv(lv)

if dmcrypt and dmcrypt_uuid:
dmcrypt_path = "/dev/mapper/{}".format(dmcrypt_uuid)
mlogger.info("Closing encrypted path %s", dmcrypt_path)
encryption.dmcrypt_close(dmcrypt_path)

if args.destroy and pv:
logger.info("Found a physical volume created from %s, will destroy all it's vgs and lvs", device)
vg_name = pv.vg_name
mlogger.info("Destroying volume group %s because --destroy was given", vg_name)
api.remove_vg(vg_name)
if args.destroy:
for vg_name in vgs:
mlogger.info("Destroying volume group %s because --destroy was given", vg_name)
api.remove_vg(vg_name)
mlogger.info("Destroying physical volume %s because --destroy was given", device)
api.remove_pv(device)
elif args.destroy and not pv:
mlogger.info("Skipping --destroy because no associated physical volumes are found for %s", device)

wipefs(path)
zap_data(path)

if lv and not pv:
if lv and not pvs:
# remove all lvm metadata
lv.clear_tags()

terminal.success("Zapping successful for: %s" % path)

def dmcrypt_close(self, dmcrypt_uuid):
    """Close the /dev/mapper device for the given dm-crypt uuid."""
    mapper_path = "/dev/mapper/{}".format(dmcrypt_uuid)
    mlogger.info("Closing encrypted path %s", mapper_path)
    encryption.dmcrypt_close(mapper_path)

def main(self):
sub_command_help = dedent("""
Zaps the given logical volume, raw device or partition for reuse by ceph-volume.
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
---

# ceph-ansible host variables for a functional test of the lvm scenario:
# bluestore OSDs with dmcrypt enabled, installed from the 'dev' repository.
ceph_dev: True
cluster: ceph
public_network: "192.168.3.0/24"
cluster_network: "192.168.4.0/24"
monitor_interface: eth1
osd_objectstore: "bluestore"
osd_scenario: lvm
dmcrypt: true
ceph_origin: 'repository'
ceph_repository: 'dev'
copy_admin_key: false
# raw devices handed to ceph-volume for OSD creation
devices:
  - /dev/sdb
  - /dev/sdc
os_tuning_params:
  - { name: kernel.pid_max, value: 4194303 }
  - { name: fs.file-max, value: 26234859 }
# small pool defaults suitable for a tiny test cluster
ceph_conf_overrides:
  global:
    osd_pool_default_pg_num: 8
    osd_pool_default_size: 1
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@
# Ansible inventory for the functional test: mon0 runs the monitor and
# manager daemons, osd0 runs the OSDs.
[mons]
mon0

[osds]
osd0

[mgrs]
mon0
Loading

0 comments on commit 2bec876

Please sign in to comment.