Merge PR ceph#18192 into master
* refs/pull/18192/head:
	qa/cephfs: test ec data pool
	qa/suites/fs/basic_functional/clusters: more osds

Reviewed-by: Patrick Donnelly <[email protected]>
batrick committed Oct 26, 2017
2 parents 92141dc + d0732fc commit 2bba5d8
Showing 47 changed files with 196 additions and 32 deletions.
4 changes: 2 additions & 2 deletions qa/cephfs/clusters/3-mds.yaml
@@ -1,4 +1,4 @@
 roles:
-- [mon.a, mon.c, mgr.y, mds.a, osd.0, osd.1, osd.2]
-- [mon.b, mgr.x, mds.b, mds.c, osd.3, osd.4, osd.5]
+- [mon.a, mon.c, mgr.y, mds.a, osd.0, osd.1, osd.2, osd.3]
+- [mon.b, mgr.x, mds.b, mds.c, osd.4, osd.5, osd.6, osd.7]
 - [client.0]
4 changes: 2 additions & 2 deletions qa/cephfs/clusters/9-mds.yaml
@@ -1,4 +1,4 @@
 roles:
-- [mon.a, mon.c, mgr.y, mds.a, mds.b, mds.c, mds.d, osd.0, osd.1, osd.2]
-- [mon.b, mgr.x, mds.e, mds.f, mds.g, mds.h, mds.i, osd.3, osd.4, osd.5]
+- [mon.a, mon.c, mgr.y, mds.a, mds.b, mds.c, mds.d, osd.0, osd.1, osd.2, osd.3]
+- [mon.b, mgr.x, mds.e, mds.f, mds.g, mds.h, mds.i, osd.4, osd.5, osd.6, osd.7]
 - [client.0]
28 changes: 28 additions & 0 deletions qa/cephfs/objectstore-ec/bluestore-comp-ec-root.yaml
@@ -0,0 +1,28 @@
overrides:
  thrashosds:
    bdev_inject_crash: 2
    bdev_inject_crash_probability: .5
  ceph:
    fs: xfs
    cephfs_ec_profile:
      - m=2
      - k=2
      - crush-failure-domain=osd
    conf:
      osd:
        osd objectstore: bluestore
        bluestore block size: 96636764160
        debug bluestore: 20
        debug bluefs: 20
        debug rocksdb: 10
        bluestore compression mode: aggressive
        bluestore fsck on mount: true
        # lower the full ratios since we can fill up a 100gb osd so quickly
        mon osd full ratio: .9
        mon osd backfillfull_ratio: .85
        mon osd nearfull ratio: .8
        osd failsafe full ratio: .95

# this doesn't work with failures bc the log writes are not atomic across the two backends
#        bluestore bluefs env mirror: true
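The only CephFS-specific addition in this fragment is the cephfs_ec_profile list under the ceph task overrides; everything else is the usual bluestore boilerplate. As a rough sketch (this is not the teuthology config loader, only an illustration of the shape of the data), the list parses as plain key=value strings:

import yaml

# Hypothetical snippet for illustration only: parse an objectstore-ec fragment
# and pull out the EC profile list that the ceph task later consumes.
fragment = """
overrides:
  ceph:
    cephfs_ec_profile:
      - m=2
      - k=2
      - crush-failure-domain=osd
"""
overrides = yaml.safe_load(fragment)["overrides"]
ec_profile = overrides["ceph"].get("cephfs_ec_profile")
print(ec_profile)  # ['m=2', 'k=2', 'crush-failure-domain=osd']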

23 changes: 23 additions & 0 deletions qa/cephfs/objectstore-ec/bluestore-comp.yaml
@@ -0,0 +1,23 @@
overrides:
  thrashosds:
    bdev_inject_crash: 2
    bdev_inject_crash_probability: .5
  ceph:
    fs: xfs
    conf:
      osd:
        osd objectstore: bluestore
        bluestore block size: 96636764160
        debug bluestore: 20
        debug bluefs: 20
        debug rocksdb: 10
        bluestore compression mode: aggressive
        bluestore fsck on mount: true
        # lower the full ratios since we can fill up a 100gb osd so quickly
        mon osd full ratio: .9
        mon osd backfillfull_ratio: .85
        mon osd nearfull ratio: .8
        osd failsafe full ratio: .95

# this doesn't work with failures bc the log writes are not atomic across the two backends
#        bluestore bluefs env mirror: true
42 changes: 42 additions & 0 deletions qa/cephfs/objectstore-ec/bluestore-ec-root.yaml
@@ -0,0 +1,42 @@
overrides:
  thrashosds:
    bdev_inject_crash: 2
    bdev_inject_crash_probability: .5
  ceph:
    fs: xfs
    cephfs_ec_profile:
      - m=2
      - k=2
      - crush-failure-domain=osd
    conf:
      osd:
        osd objectstore: bluestore
        bluestore block size: 96636764160
        debug bluestore: 20
        debug bluefs: 20
        debug rocksdb: 10
        bluestore fsck on mount: true
        # lower the full ratios since we can fill up a 100gb osd so quickly
        mon osd full ratio: .9
        mon osd backfillfull_ratio: .85
        mon osd nearfull ratio: .8
        osd failsafe full ratio: .95
# this doesn't work with failures bc the log writes are not atomic across the two backends
#        bluestore bluefs env mirror: true
  ceph-deploy:
    fs: xfs
    bluestore: yes
    conf:
      osd:
        osd objectstore: bluestore
        bluestore block size: 96636764160
        debug bluestore: 20
        debug bluefs: 20
        debug rocksdb: 10
        bluestore fsck on mount: true
        # lower the full ratios since we can fill up a 100gb osd so quickly
        mon osd full ratio: .9
        mon osd backfillfull_ratio: .85
        mon osd nearfull ratio: .8
        osd failsafe full ratio: .95

38 changes: 38 additions & 0 deletions qa/cephfs/objectstore-ec/bluestore.yaml
@@ -0,0 +1,38 @@
overrides:
  thrashosds:
    bdev_inject_crash: 2
    bdev_inject_crash_probability: .5
  ceph:
    fs: xfs
    conf:
      osd:
        osd objectstore: bluestore
        bluestore block size: 96636764160
        debug bluestore: 20
        debug bluefs: 20
        debug rocksdb: 10
        bluestore fsck on mount: true
        # lower the full ratios since we can fill up a 100gb osd so quickly
        mon osd full ratio: .9
        mon osd backfillfull_ratio: .85
        mon osd nearfull ratio: .8
        osd failsafe full ratio: .95
# this doesn't work with failures bc the log writes are not atomic across the two backends
#        bluestore bluefs env mirror: true
  ceph-deploy:
    fs: xfs
    bluestore: yes
    conf:
      osd:
        osd objectstore: bluestore
        bluestore block size: 96636764160
        debug bluestore: 20
        debug bluefs: 20
        debug rocksdb: 10
        bluestore fsck on mount: true
        # lower the full ratios since we can fill up a 100gb osd so quickly
        mon osd full ratio: .9
        mon osd backfillfull_ratio: .85
        mon osd nearfull ratio: .8
        osd failsafe full ratio: .95

15 changes: 15 additions & 0 deletions qa/cephfs/objectstore-ec/filestore-xfs.yaml
@@ -0,0 +1,15 @@
overrides:
  ceph:
    fs: xfs
    conf:
      osd:
        osd objectstore: filestore
        osd sloppy crc: true
  ceph-deploy:
    fs: xfs
    filestore: True
    conf:
      osd:
        osd objectstore: filestore
        osd sloppy crc: true

1 change: 0 additions & 1 deletion qa/suites/fs/32bits/objectstore

This file was deleted.

1 change: 1 addition & 0 deletions qa/suites/fs/32bits/objectstore-ec
4 changes: 2 additions & 2 deletions qa/suites/fs/basic_functional/clusters/4-remote-clients.yaml
@@ -1,6 +1,6 @@
 roles:
-- [mon.a, mgr.x, osd.0, mds.a, mds.b, client.1, client.2, client.3]
-- [client.0, osd.1, osd.2]
+- [mon.a, mgr.x, osd.0, osd.1, osd.2, osd.3, mds.a, mds.b, client.1, client.2, client.3]
+- [client.0, osd.4, osd.5, osd.6, osd.7]
 openstack:
 - volumes: # attached to each instance
     count: 2
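The commit subject "qa/suites/fs/basic_functional/clusters: more osds" points at the motivation for these role changes: the previous layout carried only three OSDs, which cannot host a pool using the m=2, k=2, crush-failure-domain=osd profile, because every chunk must land on a distinct OSD. A rough sanity check (hypothetical, not part of the qa suite), with the OSD counts read off this diff:

# With crush-failure-domain=osd, an EC pool needs at least k + m OSDs so that
# each of the k data and m coding chunks can be placed on a different OSD.
def min_osds_for_ec_profile(k, m):
    return k + m

old_osd_count = 3  # previous layout: osd.0, osd.1, osd.2
new_osd_count = 8  # updated layout: osd.0 through osd.7

assert old_osd_count < min_osds_for_ec_profile(k=2, m=2)
assert new_osd_count >= min_osds_for_ec_profile(k=2, m=2)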
1 change: 0 additions & 1 deletion qa/suites/fs/basic_workload/objectstore

This file was deleted.

1 change: 1 addition & 0 deletions qa/suites/fs/basic_workload/objectstore-ec
2 changes: 1 addition & 1 deletion qa/suites/fs/multiclient/clusters/three_clients.yaml
@@ -1,5 +1,5 @@
 roles:
-- [mon.a, mon.b, mon.c, mgr.x, mds.a, osd.0, osd.1, osd.2]
+- [mon.a, mon.b, mon.c, mgr.x, mds.a, osd.0, osd.1, osd.2, osd.3]
 - [client.2]
 - [client.1]
 - [client.0]
2 changes: 1 addition & 1 deletion qa/suites/fs/multiclient/clusters/two_clients.yaml
@@ -1,5 +1,5 @@
 roles:
-- [mon.a, mon.b, mon.c, mgr.x, mds.a, osd.0, osd.1, osd.2]
+- [mon.a, mon.b, mon.c, mgr.x, mds.a, osd.0, osd.1, osd.2, osd.3]
 - [client.1]
 - [client.0]

1 change: 0 additions & 1 deletion qa/suites/fs/multiclient/objectstore

This file was deleted.

1 change: 1 addition & 0 deletions qa/suites/fs/multiclient/objectstore-ec
4 changes: 2 additions & 2 deletions qa/suites/fs/multifs/clusters/2-remote-clients.yaml
@@ -1,6 +1,6 @@
 roles:
-- [mon.a, mgr.x, osd.0, mon.b, mds.a, mds.b, client.1]
-- [mds.c, mds.d, mon.c, client.0, osd.1, osd.2]
+- [mon.a, mgr.x, osd.0, osd.1, osd.2, osd.3, mon.b, mds.a, mds.b, client.1]
+- [mds.c, mds.d, mon.c, client.0, osd.4, osd.5, osd.6, osd.7]
 openstack:
 - volumes: # attached to each instance
     count: 2
1 change: 0 additions & 1 deletion qa/suites/fs/multifs/objectstore

This file was deleted.

1 change: 1 addition & 0 deletions qa/suites/fs/multifs/objectstore-ec
1 change: 0 additions & 1 deletion qa/suites/fs/permission/objectstore

This file was deleted.

1 change: 1 addition & 0 deletions qa/suites/fs/permission/objectstore-ec
1 change: 0 additions & 1 deletion qa/suites/fs/snaps/objectstore

This file was deleted.

1 change: 1 addition & 0 deletions qa/suites/fs/snaps/objectstore-ec
1 change: 0 additions & 1 deletion qa/suites/fs/thrash/objectstore

This file was deleted.

1 change: 1 addition & 0 deletions qa/suites/fs/thrash/objectstore-ec
1 change: 0 additions & 1 deletion qa/suites/fs/traceless/objectstore

This file was deleted.

1 change: 1 addition & 0 deletions qa/suites/fs/traceless/objectstore-ec
1 change: 0 additions & 1 deletion qa/suites/fs/verify/objectstore

This file was deleted.

1 change: 1 addition & 0 deletions qa/suites/fs/verify/objectstore-ec
1 change: 0 additions & 1 deletion qa/suites/kcephfs/cephfs/objectstore

This file was deleted.

1 change: 1 addition & 0 deletions qa/suites/kcephfs/cephfs/objectstore-ec
1 change: 0 additions & 1 deletion qa/suites/kcephfs/mixed-clients/objectstore

This file was deleted.

1 change: 1 addition & 0 deletions qa/suites/kcephfs/mixed-clients/objectstore-ec
4 changes: 2 additions & 2 deletions qa/suites/kcephfs/recovery/clusters/4-remote-clients.yaml
@@ -1,6 +1,6 @@
 roles:
-- [mon.a, osd.0, mds.a, mds.c, client.2]
-- [mgr.x, osd.1, osd.2, mds.b, mds.d, client.3]
+- [mon.a, osd.0, osd.1, osd.2, osd.3, mds.a, mds.c, client.2]
+- [mgr.x, osd.4, osd.5, osd.6, osd.7, mds.b, mds.d, client.3]
 - [client.0]
 - [client.1]
 openstack:
1 change: 0 additions & 1 deletion qa/suites/kcephfs/recovery/objectstore

This file was deleted.

1 change: 1 addition & 0 deletions qa/suites/kcephfs/recovery/objectstore-ec
1 change: 0 additions & 1 deletion qa/suites/kcephfs/thrash/objectstore

This file was deleted.

1 change: 1 addition & 0 deletions qa/suites/kcephfs/thrash/objectstore-ec
1 change: 0 additions & 1 deletion qa/suites/multimds/basic/objectstore

This file was deleted.

1 change: 1 addition & 0 deletions qa/suites/multimds/basic/objectstore-ec
1 change: 0 additions & 1 deletion qa/suites/multimds/thrash/objectstore

This file was deleted.

1 change: 1 addition & 0 deletions qa/suites/multimds/thrash/objectstore-ec
1 change: 0 additions & 1 deletion qa/suites/multimds/verify/objectstore

This file was deleted.

1 change: 1 addition & 0 deletions qa/suites/multimds/verify/objectstore-ec
3 changes: 2 additions & 1 deletion qa/tasks/ceph.py
@@ -367,7 +367,8 @@ def cephfs_setup(ctx, config):
     if mdss.remotes:
         log.info('Setting up CephFS filesystem...')

-        fs = Filesystem(ctx, name='cephfs', create=True)
+        fs = Filesystem(ctx, name='cephfs', create=True,
+                        ec_profile=config.get('cephfs_ec_profile', None))

         is_active_mds = lambda role: 'mds.' in role and not role.endswith('-s') and '-s-' not in role
         all_roles = [item for remote_roles in mdss.remotes.values() for item in remote_roles]
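The ceph task simply forwards an optional cephfs_ec_profile entry from its (possibly overridden) config to Filesystem; when the key is absent, ec_profile stays None and pool creation behaves exactly as before. A rough stand-in for the call site (Filesystem below is a stub for illustration, not the real qa/tasks/cephfs/filesystem.py class):

# Stub for illustration only; shows the config.get() fallback behaviour.
class Filesystem(object):
    def __init__(self, ctx, name=None, create=False, ec_profile=None):
        self.ec_profile = ec_profile

with_ec = {'cephfs_ec_profile': ['m=2', 'k=2', 'crush-failure-domain=osd']}
without_ec = {}

assert Filesystem(None, name='cephfs',
                  ec_profile=with_ec.get('cephfs_ec_profile', None)).ec_profile
assert Filesystem(None, name='cephfs',
                  ec_profile=without_ec.get('cephfs_ec_profile', None)).ec_profile is None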
22 changes: 19 additions & 3 deletions qa/tasks/cephfs/filesystem.py
@@ -374,10 +374,12 @@ class Filesystem(MDSCluster):
     This object is for driving a CephFS filesystem. The MDS daemons driven by
     MDSCluster may be shared with other Filesystems.
     """
-    def __init__(self, ctx, fscid=None, name=None, create=False):
+    def __init__(self, ctx, fscid=None, name=None, create=False,
+                 ec_profile=None):
         super(Filesystem, self).__init__(ctx)

         self.name = name
+        self.ec_profile = ec_profile
         self.id = None
         self.metadata_pool_name = None
         self.metadata_overlay = False
@@ -476,8 +478,22 @@ def create(self):
                                              self.name, self.metadata_pool_name, data_pool_name,
                                              '--allow-dangerous-metadata-overlay')
         else:
-            self.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
-                                             data_pool_name, pgs_per_fs_pool.__str__())
+            if self.ec_profile:
+                log.info("EC profile is %s", self.ec_profile)
+                cmd = ['osd', 'erasure-code-profile', 'set', data_pool_name]
+                cmd.extend(self.ec_profile)
+                self.mon_manager.raw_cluster_cmd(*cmd)
+                self.mon_manager.raw_cluster_cmd(
+                    'osd', 'pool', 'create',
+                    data_pool_name, pgs_per_fs_pool.__str__(), 'erasure',
+                    data_pool_name)
+                self.mon_manager.raw_cluster_cmd(
+                    'osd', 'pool', 'set',
+                    data_pool_name, 'allow_ec_overwrites', 'true')
+            else:
+                self.mon_manager.raw_cluster_cmd(
+                    'osd', 'pool', 'create',
+                    data_pool_name, pgs_per_fs_pool.__str__())
             self.mon_manager.raw_cluster_cmd('fs', 'new',
                                              self.name, self.metadata_pool_name, data_pool_name)
             self.check_pool_application(self.metadata_pool_name)
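Stripped of the test-harness plumbing, the branch taken when an EC profile is configured corresponds roughly to the following monitor commands. This is a sketch, not code from the diff: the pool and profile names are illustrative, the pg count is arbitrary, and it assumes the Luminous-era ceph CLI (later releases may additionally require --force on fs new when the default data pool is erasure-coded):

import subprocess

def ceph(*args):
    # Thin wrapper around the ceph CLI; assumes a reachable cluster.
    subprocess.check_call(['ceph'] + list(args))

profile = ['m=2', 'k=2', 'crush-failure-domain=osd']

# Create the EC profile and an erasure-coded data pool with overwrites enabled,
# then build the filesystem on a replicated metadata pool plus the EC data
# pool, mirroring Filesystem.create() above.
ceph('osd', 'erasure-code-profile', 'set', 'cephfs_data', *profile)
ceph('osd', 'pool', 'create', 'cephfs_data', '8', 'erasure', 'cephfs_data')
ceph('osd', 'pool', 'set', 'cephfs_data', 'allow_ec_overwrites', 'true')
ceph('osd', 'pool', 'create', 'cephfs_metadata', '8')
ceph('fs', 'new', 'cephfs', 'cephfs_metadata', 'cephfs_data')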
