Merge PR ceph#36394 into master
* refs/pull/36394/head:
	qa: add method run ceph cluster command with better interface
	cephfs: allow only "good" characters

Reviewed-by: Varsha Rao <[email protected]>
Reviewed-by: Patrick Donnelly <[email protected]>
batrick committed Aug 24, 2020
2 parents 08a601e + 5309821 commit 0281398
Showing 7 changed files with 96 additions and 57 deletions.
4 changes: 4 additions & 0 deletions PendingReleaseNotes
@@ -106,3 +106,7 @@
* Alpine build related script, documentation and test have been removed since
  the latest APKBUILD script for Ceph is already included in Alpine Linux's
  aports repository.

* fs: Names of new FSs, volumes, subvolumes and subvolume groups can only
contain alphanumeric and ``-``, ``_`` and ``.`` characters. Some commands
or CephX credentials may not work with old FSs with non-conformant names.
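
As a rough sketch of what this restriction means in practice (the regex and the helper name below are illustrative, not Ceph's actual implementation), a conforming name must match a whitelist of characters:

import re

# Allowed characters for new FS/volume/subvolume(-group) names,
# mirroring the goodchars spec [A-Za-z0-9-_.] introduced here.
# The '-' is placed last so the regex treats it as a literal.
GOOD_NAME_RE = re.compile(r'^[A-Za-z0-9_.-]+$')

def is_good_name(name):
    """Return True if 'name' uses only permitted characters."""
    return GOOD_NAME_RE.match(name) is not None

assert is_good_name('cephfs_a-1.prod')
assert not is_good_name('badname@#')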
72 changes: 27 additions & 45 deletions qa/tasks/ceph_manager.py
@@ -1329,61 +1329,43 @@ def tmp(x):
except CommandFailedError:
self.log('Failed to get pg_num from pool %s, ignoring' % pool)

def run_cluster_cmd(self, **kwargs):
"""
Run a Ceph command and return the object representing the process
for the command.
Accepts the same arguments as teuthology.orchestra.run.run().
"""
if self.cephadm:
return shell(self.ctx, self.cluster, self.controller,
args=['ceph'] + list(kwargs['args']),
stdout=StringIO(),
check_status=kwargs.get('check_status', True))

testdir = teuthology.get_testdir(self.ctx)
prefix = ['sudo', 'adjust-ulimits', 'ceph-coverage',
f'{testdir}/archive/coverage', 'timeout', '120', 'ceph',
'--cluster', self.cluster, '--log-early']
kwargs['args'] = prefix + list(kwargs['args'])
return self.controller.run(**kwargs)

def raw_cluster_cmd(self, *args):
"""
Run a Ceph command on the cluster and return its stdout as a string.
"""
if self.cephadm:
proc = shell(self.ctx, self.cluster, self.controller,
args=['ceph'] + list(args),
stdout=StringIO())
else:
testdir = teuthology.get_testdir(self.ctx)
ceph_args = [
'sudo',
'adjust-ulimits',
'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
'timeout',
'120',
'ceph',
'--cluster',
self.cluster,
'--log-early',
]
ceph_args.extend(args)
proc = self.controller.run(
args=ceph_args,
stdout=StringIO(),
)
return proc.stdout.getvalue()
return self.run_cluster_cmd(args=args,
stdout=StringIO()).stdout.getvalue()

def raw_cluster_cmd_result(self, *args, **kwargs):
"""
Run a Ceph command on the cluster and return its exit status.
"""
if self.cephadm:
proc = shell(self.ctx, self.cluster, self.controller,
args=['ceph'] + list(args),
check_status=False)
else:
testdir = teuthology.get_testdir(self.ctx)
ceph_args = [
'sudo',
'adjust-ulimits',
'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
'timeout',
'120',
'ceph',
'--cluster',
self.cluster,
]
ceph_args.extend(args)
kwargs['args'] = ceph_args
kwargs['check_status'] = False
proc = self.controller.run(**kwargs)
return proc.exitstatus
return shell(self.ctx, self.cluster, self.controller,
args=['ceph'] + list(args),
check_status=False).exitstatus

kwargs['args'], kwargs['check_status'] = args, False
return self.run_cluster_cmd(**kwargs).exitstatus
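
With this refactor, raw_cluster_cmd and raw_cluster_cmd_result become thin wrappers over run_cluster_cmd. A usage sketch, assuming mgr is a CephManager instance inside a teuthology task (the variable name is ours):

from io import StringIO

# stdout as a string; raises CommandFailedError on nonzero exit
osd_dump = mgr.raw_cluster_cmd('osd', 'dump')

# exit status only; never raises on nonzero exit
rc = mgr.raw_cluster_cmd_result('fs', 'new', 'bad@name', 'meta', 'data')

# full control over the underlying run() call
proc = mgr.run_cluster_cmd(args=['fs', 'ls'], stdout=StringIO(),
                           check_status=False)
print(proc.exitstatus, proc.stdout.getvalue())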

def run_ceph_w(self, watch_channel=None):
"""
24 changes: 24 additions & 0 deletions qa/tasks/cephfs/test_admin.py
@@ -1,4 +1,5 @@
import json
from io import StringIO

from teuthology.orchestra.run import CommandFailedError

@@ -16,6 +17,29 @@ class TestAdminCommands(CephFSTestCase):
CLIENTS_REQUIRED = 1
MDSS_REQUIRED = 1

def test_fsnames_can_only_be_goodchars(self):
n = 'test_fsnames_can_only_be_goodchars'
metapoolname, datapoolname = n+'-testmetapool', n+'-testdatapool'
badname = n+'badname@#'

self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
metapoolname)
self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
datapoolname)

# test that creating an fs whose name contains non-"goodchars" fails
args = ['fs', 'new', badname, metapoolname, datapoolname]
proc = self.fs.mon_manager.run_cluster_cmd(args=args, stderr=StringIO(),
check_status=False)
self.assertIn('invalid chars', proc.stderr.getvalue().lower())

self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'rm', metapoolname,
metapoolname,
'--yes-i-really-really-mean-it-not-faking')
self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'rm', datapoolname,
datapoolname,
'--yes-i-really-really-mean-it-not-faking')

def test_fs_status(self):
"""
That `ceph fs status` command functions.
18 changes: 18 additions & 0 deletions qa/tasks/cephfs/test_volumes.py
@@ -3284,3 +3284,21 @@ def test_subvolume_ops_on_nonexistent_vol(self):
self.assertEqual(ce.exitstatus, errno.ENOENT)
else:
self.fail("expected the 'fs subvolumegroup snapshot {0}' command to fail".format(op))

def test_names_can_only_be_goodchars(self):
"""
Test that creating vols, subvols and subvolgroups fails when their
names use characters outside [A-Za-z0-9-_.].
"""
volname, badname = 'testvol', 'abcd@#'

with self.assertRaises(CommandFailedError):
self._fs_cmd('volume', 'create', badname)
self._fs_cmd('volume', 'create', volname)

with self.assertRaises(CommandFailedError):
self._fs_cmd('subvolumegroup', 'create', volname, badname)

with self.assertRaises(CommandFailedError):
self._fs_cmd('subvolume', 'create', volname, badname)
self._fs_cmd('volume', 'rm', volname, '--yes-i-really-mean-it')
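
For reference, the _fs_cmd helper used throughout these tests is, to our reading (sketch only), a thin wrapper that prefixes 'fs' onto a mon command, so each bad-name failure above surfaces as a CommandFailedError from raw_cluster_cmd:

def _fs_cmd(self, *args):
    # roughly: run "ceph fs <args...>" and return its stdout;
    # raises CommandFailedError on a nonzero exit status
    return self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", *args)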
23 changes: 16 additions & 7 deletions qa/tasks/vstart_runner.py
@@ -865,23 +865,32 @@ def run_ceph_w(self, watch_channel=None):
proc = self.controller.run(args=args, wait=False, stdout=StringIO())
return proc

def run_cluster_cmd(self, **kwargs):
"""
Run a Ceph command and return the object representing the process
for the command.
Accepts the same arguments as teuthology.orchestra.remote.run().
"""
kwargs['args'] = [os.path.join(BIN_PREFIX, 'ceph')] + list(kwargs['args'])
return self.controller.run(**kwargs)

def raw_cluster_cmd(self, *args, **kwargs):
"""
args like ["osd", "dump"}
return stdout string
"""
proc = self.controller.run(args=[os.path.join(BIN_PREFIX, "ceph")] +\
list(args), **kwargs, stdout=StringIO())
return proc.stdout.getvalue()
kwargs['args'] = args
if kwargs.get('stdout') is None:
kwargs['stdout'] = StringIO()
return self.run_cluster_cmd(**kwargs).stdout.getvalue()

def raw_cluster_cmd_result(self, *args, **kwargs):
"""
like raw_cluster_cmd but don't check status, just return rc
"""
kwargs['check_status'] = False
proc = self.controller.run(args=[os.path.join(BIN_PREFIX, "ceph")] + \
list(args), **kwargs)
return proc.exitstatus
kwargs['args'], kwargs['check_status'] = args, False
return self.run_cluster_cmd(**kwargs).exitstatus

def admin_socket(self, daemon_type, daemon_id, command, check_status=True,
timeout=None, stdout=None):
2 changes: 1 addition & 1 deletion src/mon/MonCommands.h
@@ -363,7 +363,7 @@ COMMAND_WITH_FLAG("mds newfs "
"make new filesystem using pools <metadata> and <data>",
"mds", "rw", FLAG(OBSOLETE))
COMMAND("fs new "
"name=fs_name,type=CephString "
"name=fs_name,type=CephString,goodchars=[A-Za-z0-9-_.] "
"name=metadata,type=CephString "
"name=data,type=CephString "
"name=force,type=CephBool,req=false "
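
A goodchars qualifier on a CephString argument makes the command parser reject values containing characters outside the listed class, which is what produces the 'invalid chars' error asserted by the new test above. A minimal Python sketch of the idea (illustrative only; the monitor's validation is implemented in C++):

import re

def validate_goodchars(value, goodchars='A-Za-z0-9_.-'):
    # Strip every allowed character; anything left over is invalid.
    bad = re.sub('[%s]+' % goodchars, '', value)
    if bad:
        raise ValueError('invalid chars %s in %s' % (bad, value))

validate_goodchars('cephfs-a.1')    # passes silently
# validate_goodchars('badname@#')   # would raise: invalid chars @# in badname@#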
10 changes: 6 additions & 4 deletions src/pybind/mgr/volumes/module.py
@@ -11,6 +11,8 @@

log = logging.getLogger(__name__)

goodchars = '[A-Za-z0-9-_.]'

class VolumesInfoWrapper():
def __init__(self, f, context):
self.f = f
@@ -42,7 +44,7 @@ class Module(orchestrator.OrchestratorClientMixin, MgrModule):
},
{
'cmd': 'fs volume create '
'name=name,type=CephString '
f'name=name,type=CephString,goodchars={goodchars} '
'name=placement,type=CephString,req=false ',
'desc': "Create a CephFS volume",
'perm': 'rw'
@@ -63,7 +65,7 @@ class Module(orchestrator.OrchestratorClientMixin, MgrModule):
{
'cmd': 'fs subvolumegroup create '
'name=vol_name,type=CephString '
'name=group_name,type=CephString '
f'name=group_name,type=CephString,goodchars={goodchars} '
'name=pool_layout,type=CephString,req=false '
'name=uid,type=CephInt,req=false '
'name=gid,type=CephInt,req=false '
@@ -90,7 +92,7 @@ class Module(orchestrator.OrchestratorClientMixin, MgrModule):
{
'cmd': 'fs subvolume create '
'name=vol_name,type=CephString '
'name=sub_name,type=CephString '
f'name=sub_name,type=CephString,goodchars={goodchars} '
'name=size,type=CephInt,req=false '
'name=group_name,type=CephString,req=false '
'name=pool_layout,type=CephString,req=false '
@@ -315,7 +317,7 @@ class Module(orchestrator.OrchestratorClientMixin, MgrModule):
{
'cmd': 'nfs cluster create '
'name=type,type=CephString '
'name=clusterid,type=CephString,goodchars=[A-Za-z0-9-_.] '
f'name=clusterid,type=CephString,goodchars={goodchars} '
'name=placement,type=CephString,req=false ',
'desc': "Create an NFS Cluster",
'perm': 'rw'
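
Hoisting the character class into a module-level goodchars constant keeps the volume, subvolumegroup, subvolume and NFS cluster descriptors in sync. Each f-string expands to the same flat spec format the command table already uses; a quick sanity check:

goodchars = '[A-Za-z0-9-_.]'
spec = f'name=name,type=CephString,goodchars={goodchars} '
assert spec == 'name=name,type=CephString,goodchars=[A-Za-z0-9-_.] '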