Merge pull request ceph#1121 from ceph/wip-cephmetrics

Add a cephmetrics task

vasukulkarni authored Oct 26, 2017
2 parents 34c8274 + 5c14522 commit ac87321

Showing 5 changed files with 154 additions and 32 deletions.
7 changes: 3 additions & 4 deletions requirements.txt
@@ -4,7 +4,7 @@
 #
 # pip-compile --output-file requirements.txt setup.py
 #
-ansible==2.3.0.0
+ansible==2.4.0.0
 apache-libcloud==1.5.0
 appdirs==1.4.3 # via os-client-config
 argparse==1.4.0
@@ -21,7 +21,7 @@ cmd2==0.7.0 # via cliff
 configobj==5.0.6
 configparser==3.5.0
 contextlib2==0.5.4 # via raven
-cryptography==1.8.1 # via paramiko, pyopenssl
+cryptography==1.8.1 # via ansible, paramiko, pyopenssl
 debtcollector==1.13.0 # via oslo.config, oslo.utils, python-keystoneclient, python-neutronclient
 deprecation==1.0 # via openstacksdk
 docopt==0.6.2
@@ -61,7 +61,7 @@ packaging==16.8 # via cryptography
 paramiko==2.1.2
 pbr==2.0.0 # via cliff, debtcollector, keystoneauth1, openstacksdk, osc-lib, oslo.i18n, oslo.serialization, oslo.utils, positional, python-cinderclient, python-glanceclient, python-keystoneclient, python-neutronclient, python-novaclient, python-openstackclient, requestsexceptions, stevedore
 pexpect==4.2.1
-pip-tools==1.9.0
+pip-tools==1.10.1
 pluggy==0.4.0 # via tox
 positional==1.1.1 # via keystoneauth1, python-keystoneclient
 prettytable==0.7.2
@@ -70,7 +70,6 @@ ptyprocess==0.5.1 # via pexpect
 py==1.4.33 # via tox
 pyasn1==0.2.3
 pycparser==2.17 # via cffi
-pycrypto==2.6.1 # via ansible
 pyopenssl==16.2.0
 pyparsing==2.2.0 # via cliff, cmd2, oslo.utils, packaging
 python-cinderclient==2.0.1 # via python-openstackclient
9 changes: 6 additions & 3 deletions teuthology/task/ansible.py
@@ -203,12 +203,15 @@ def generate_hosts_file(self):
         self.inventory = self._write_hosts_file(hosts_str)
         self.generated_inventory = True
 
-    def _write_hosts_file(self, content):
+    def _write_hosts_file(self, content, suffix=''):
         """
         Actually write the hosts file
         """
-        hosts_file = NamedTemporaryFile(prefix="teuth_ansible_hosts_",
-                                        delete=False)
+        hosts_file = NamedTemporaryFile(
+            prefix="teuth_ansible_hosts_",
+            suffix=suffix,
+            delete=False,
+        )
         hosts_file.write(content)
         hosts_file.flush()
         return hosts_file.name
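
A note on the new suffix parameter: its point is to let a caller hint the
inventory format to Ansible through the file extension. Below is a
self-contained sketch of the updated helper plus a hypothetical caller that
writes a YAML inventory; the hosts_dict layout mirrors what
generate_hosts_file() builds, but the host name is invented, and mode='w' is
added here only so the sketch also runs on Python 3 (the code above relies on
Python 2's default bytes mode).

    import yaml
    from tempfile import NamedTemporaryFile

    def write_hosts_file(content, suffix=''):
        # Same shape as _write_hosts_file() above: delete=False keeps the
        # file on disk so ansible-playbook can read it after we return.
        hosts_file = NamedTemporaryFile(mode='w',
                                        prefix="teuth_ansible_hosts_",
                                        suffix=suffix, delete=False)
        hosts_file.write(content)
        hosts_file.flush()
        return hosts_file.name

    hosts_dict = {'mons': {'hosts': {'mon0.example.com': {'ansible_user': 'ubuntu'}}}}
    inventory = write_hosts_file(
        yaml.safe_dump(hosts_dict, default_flow_style=False),
        suffix='.yml',
    )
    print(inventory)  # e.g. /tmp/teuth_ansible_hosts_abc123.yml
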
53 changes: 35 additions & 18 deletions teuthology/task/ceph_ansible.py
@@ -42,6 +42,15 @@ class CephAnsible(Task):
     * Set ``public_network`` for each host if ``public_network`` is unset
     """.format(git_base=teuth_config.ceph_git_base_url)
 
+    groups_to_roles = dict(
+        mons='mon',
+        mgrs='mgr',
+        mdss='mds',
+        osds='osd',
+        rgws='rgw',
+        clients='client',
+    )
+
     def __init__(self, ctx, config):
         super(CephAnsible, self).__init__(ctx, config)
         config = self.config or dict()
@@ -62,6 +71,7 @@ def __init__(self, ctx, config):
             vars['ceph_dev_key'] = 'https://download.ceph.com/keys/autobuild.asc'
         if 'ceph_dev_branch' not in vars:
             vars['ceph_dev_branch'] = ctx.config.get('branch', 'master')
+        self.cluster_name = vars.get('cluster', 'ceph')
 
     def setup(self):
         super(CephAnsible, self).setup()
@@ -114,17 +124,9 @@ def execute_playbook(self):
         self.run_playbook()
 
     def generate_hosts_file(self):
-        groups_to_roles = dict(
-            mons='mon',
-            mgrs='mgr',
-            mdss='mds',
-            osds='osd',
-            rgws='rgw',
-            clients='client',
-        )
         hosts_dict = dict()
-        for group in sorted(groups_to_roles.keys()):
-            role_prefix = groups_to_roles[group]
+        for group in sorted(self.groups_to_roles.keys()):
+            role_prefix = self.groups_to_roles[group]
             want = lambda role: role.startswith(role_prefix)
             for (remote, roles) in self.cluster.only(want).remotes.iteritems():
                 hostname = remote.hostname
@@ -261,7 +263,15 @@ def collect_logs(self):
             log.info('Archiving logs...')
             path = os.path.join(ctx.archive, 'remote')
             os.makedirs(path)
-            for remote in ctx.cluster.remotes.iterkeys():
+
+            def wanted(role):
+                # Only attempt to collect logs from hosts which are part of
+                # the cluster
+                return any(map(
+                    lambda role_stub: role.startswith(role_stub),
+                    self.groups_to_roles.values(),
+                ))
+            for remote in ctx.cluster.only(wanted).remotes.keys():
                 sub = os.path.join(path, remote.shortname)
                 os.makedirs(sub)
                 misc.pull_directory(remote, '/var/log/ceph',
@@ -271,13 +281,21 @@ def wait_for_ceph_health(self):
         with contextutil.safe_while(sleep=15, tries=6,
                                     action='check health') as proceed:
             (remote,) = self.ctx.cluster.only('mon.a').remotes
-            remote.run(args=['sudo', 'ceph', 'osd', 'tree'])
-            remote.run(args=['sudo', 'ceph', '-s'])
+            remote.run(args=[
+                'sudo', 'ceph', '--cluster', self.cluster_name, 'osd', 'tree'
+            ])
+            remote.run(args=[
+                'sudo', 'ceph', '--cluster', self.cluster_name, '-s'
+            ])
             log.info("Waiting for Ceph health to reach HEALTH_OK \
                         or HEALTH WARN")
             while proceed():
                 out = StringIO()
-                remote.run(args=['sudo', 'ceph', 'health'], stdout=out)
+                remote.run(
+                    args=['sudo', 'ceph', '--cluster', self.cluster_name,
+                          'health'],
+                    stdout=out,
+                )
                 out = out.getvalue().split(None, 1)[0]
                 log.info("cluster in state: %s", out)
                 if out in ('HEALTH_OK', 'HEALTH_WARN'):
@@ -454,16 +472,15 @@ def _copy_and_print_config(self):
 
     def _create_rbd_pool(self):
         mon_node = self.ceph_installer
-        cluster_name = 'ceph'
         log.info('Creating RBD pool')
         mon_node.run(
             args=[
-                'sudo', 'ceph', '--cluster', cluster_name,
+                'sudo', 'ceph', '--cluster', self.cluster_name,
                 'osd', 'pool', 'create', 'rbd', '128', '128'],
             check_status=False)
         mon_node.run(
             args=[
-                'sudo', 'ceph', '--cluster', cluster_name,
+                'sudo', 'ceph', '--cluster', self.cluster_name,
                 'osd', 'pool', 'application', 'enable',
                 'rbd', 'rbd', '--yes-i-really-mean-it'
             ],
@@ -476,7 +493,7 @@ def fix_keyring_permission(self):
                 'sudo',
                 'chmod',
                 run.Raw('o+r'),
-                '/etc/ceph/ceph.client.admin.keyring'
+                '/etc/ceph/%s.client.admin.keyring' % self.cluster_name
             ])
 
 
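
Two things in this file are worth seeing concretely. First, promoting
groups_to_roles to a class attribute lets collect_logs() reuse the same
mapping to skip hosts that carry no Ceph role. A self-contained sketch of
that filter follows; the host names and role lists are invented for
illustration:

    # The mapping is the class attribute from the diff; hosts are made up.
    groups_to_roles = dict(mons='mon', mgrs='mgr', mdss='mds',
                           osds='osd', rgws='rgw', clients='client')

    remotes = {
        'host0.example.com': ['mon.a', 'mgr.x', 'osd.0'],
        'host1.example.com': ['osd.1', 'client.0'],
        'host2.example.com': ['grafana.0'],  # no Ceph role: logs are skipped
    }

    def wanted(role):
        # True when the role belongs to the Ceph cluster proper.
        return any(role.startswith(stub) for stub in groups_to_roles.values())

    print(sorted(host for host, roles in remotes.items()
                 if any(wanted(r) for r in roles)))
    # ['host0.example.com', 'host1.example.com']

Second, because self.cluster_name defaults to vars.get('cluster', 'ceph'),
every ceph invocation above (osd tree, -s, health, pool creation, keyring
path) now honors a non-default cluster name supplied through the task's
vars instead of hard-coding 'ceph'.
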
95 changes: 95 additions & 0 deletions teuthology/task/cephmetrics.py
@@ -0,0 +1,95 @@
import logging
import os
import pexpect
import time

from teuthology.config import config as teuth_config
from teuthology.exceptions import CommandFailedError

from .ansible import Ansible, LoggerFile

log = logging.getLogger(__name__)


class CephMetrics(Ansible):
    def __init__(self, ctx, config):
        super(CephMetrics, self).__init__(ctx, config)
        if 'repo' not in self.config:
            self.config['repo'] = os.path.join(
                teuth_config.ceph_git_base_url, 'cephmetrics.git')
        if 'playbook' not in self.config:
            self.config['playbook'] = './ansible/playbook.yml'

    def get_inventory(self):
        return False

    def generate_hosts_file(self):
        groups_to_roles = {
            'mons': 'mon',
            'mgrs': 'mgr',
            'mdss': 'mds',
            'osds': 'osd',
            'rgws': 'rgw',
            'clients': 'client',
            'ceph-grafana': 'cephmetrics',
        }
        hosts_dict = dict()
        for group in sorted(groups_to_roles.keys()):
            role_prefix = groups_to_roles[group]
            want = lambda role: role.startswith(role_prefix)
            if group not in hosts_dict:
                hosts_dict[group] = dict(hosts=dict())
            group_dict = hosts_dict[group]['hosts']
            for (remote, roles) in self.cluster.only(want).remotes.iteritems():
                hostname = remote.hostname
                group_dict[hostname] = dict(
                    ansible_user=remote.user,
                )
            hosts_dict[group]['hosts'] = group_dict
        # It might be preferable to use a YAML inventory file, but
        # that won't work until an ansible release is out with:
        # https://github.com/ansible/ansible/pull/30730
        # Once that is done, we can simply do this:
        # hosts_str = yaml.safe_dump(hosts_dict, default_flow_style=False)
        # And then pass suffix='.yml' to _write_hosts_file().
        hosts_lines = []
        for group in hosts_dict.keys():
            hosts_lines.append('[%s]' % group)
            for host, vars_ in hosts_dict[group]['hosts'].items():
                host_line = ' '.join(
                    [host] + map(
                        lambda tuple_: '='.join(tuple_),
                        vars_.items(),
                    )
                )
                hosts_lines.append(host_line)
            hosts_lines.append('')
        hosts_str = '\n'.join(hosts_lines)
        self.inventory = self._write_hosts_file(hosts_str)
        self.generated_inventory = True

    def begin(self):
        super(CephMetrics, self).begin()
        wait_time = 5 * 60
        self.log.info(
            "Waiting %ss for data collection before running tests...",
            wait_time,
        )
        time.sleep(wait_time)
        self.run_tests()

    def run_tests(self):
        self.log.info("Running tests...")
        command = "tox -e integration %s" % self.inventory
        out, status = pexpect.run(
            command,
            cwd=self.repo_path,
            logfile=LoggerFile(self.log.getChild('tests'), logging.INFO),
            withexitstatus=True,
            timeout=None,
        )
        if status != 0:
            raise CommandFailedError(command, status)


task = CephMetrics
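
Because the YAML form is blocked on the Ansible fix referenced in the
comment above, the inventory falls back to Ansible's INI format. A
condensed, runnable version of the rendering loop makes the output shape
obvious; the hosts_dict below is invented for illustration:

    # Condensed from generate_hosts_file() above; host names are made up.
    hosts_dict = {
        'ceph-grafana': {'hosts': {'grafana0.example.com': {'ansible_user': 'ubuntu'}}},
        'mons': {'hosts': {'mon0.example.com': {'ansible_user': 'ubuntu'}}},
    }

    hosts_lines = []
    for group in sorted(hosts_dict):
        hosts_lines.append('[%s]' % group)
        for host, vars_ in sorted(hosts_dict[group]['hosts'].items()):
            # "host key=value ..." is Ansible's INI inventory host line.
            hosts_lines.append(
                ' '.join([host] + ['='.join(kv) for kv in vars_.items()]))
        hosts_lines.append('')
    print('\n'.join(hosts_lines))
    # [ceph-grafana]
    # grafana0.example.com ansible_user=ubuntu
    #
    # [mons]
    # mon0.example.com ansible_user=ubuntu
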
22 changes: 15 additions & 7 deletions teuthology/test/task/test_ansible.py
@@ -227,8 +227,11 @@ def test_generate_hosts_file(self):
         with patch.object(ansible, 'NamedTemporaryFile') as m_NTF:
             m_NTF.return_value = hosts_file_obj
             task.generate_hosts_file()
-            m_NTF.assert_called_once_with(prefix="teuth_ansible_hosts_",
-                                          delete=False)
+            m_NTF.assert_called_once_with(
+                prefix="teuth_ansible_hosts_",
+                delete=False,
+                suffix='',
+            )
         assert task.generated_inventory is True
         assert task.inventory == hosts_file_path
         hosts_file_obj.seek(0)
@@ -255,9 +258,11 @@ def test_generate_playbook(self):
             task.find_repo()
             task.get_playbook()
             task.generate_playbook()
-            m_NTF.assert_called_once_with(prefix="teuth_ansible_playbook_",
-                                          dir=task.repo_path,
-                                          delete=False)
+            m_NTF.assert_called_once_with(
+                prefix="teuth_ansible_playbook_",
+                dir=task.repo_path,
+                delete=False,
+            )
         assert task.generated_playbook is True
         assert task.playbook_file == playbook_file_obj
         playbook_file_obj.seek(0)
@@ -483,8 +488,11 @@ def test_generate_hosts_file(self):
         with patch.object(ansible, 'NamedTemporaryFile') as m_NTF:
             m_NTF.return_value = hosts_file_obj
             task.generate_hosts_file()
-            m_NTF.assert_called_once_with(prefix="teuth_ansible_hosts_",
-                                          delete=False)
+            m_NTF.assert_called_once_with(
+                prefix="teuth_ansible_hosts_",
+                delete=False,
+                suffix='',
+            )
         assert task.generated_inventory is True
         assert task.inventory == hosts_file_path
         hosts_file_obj.seek(0)