Merge pull request ceph#43763 from rhcs-dashboard/cephadm_box
cephadm/box: Cephadm Docker in Docker dev box

Reviewed-by: Alfonso Martínez <[email protected]>
Reviewed-by: Avan Thakkar <[email protected]>
Reviewed-by: Ernesto Puerta <[email protected]>
alfonsomthd authored Jan 4, 2022
2 parents 74cd624 + 3cc808b commit 9600c73
Showing 14 changed files with 684 additions and 1 deletion.
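
For orientation, the environment added here is driven through src/cephadm/box/box.py (shown below). A typical session, inferred from that file's argument parser, might look like the following sketch; the working directory, the use of sudo, and having Docker plus docker-compose installed on the host are assumptions, not something this commit enforces:

# Hypothetical usage sketch based on the Cluster target defined in box.py below.
# Assumes it is run from src/cephadm/box on a host with Docker and docker-compose.
import subprocess

# Build the ceph and cephadm-box images ('cluster setup').
subprocess.run(['sudo', './box.py', '-v', 'cluster', 'setup'], check=True)
# Start the seed/host containers, create loopback OSD devices and bootstrap ('cluster start').
subprocess.run(['sudo', './box.py', '-v', 'cluster', 'start', '--osds', '3', '--hosts', '3'], check=True)
# Tear everything down again ('cluster down').
subprocess.run(['sudo', './box.py', '-v', 'cluster', 'down'], check=True)
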
2 changes: 2 additions & 0 deletions ceph.spec.in
@@ -464,6 +464,8 @@ Summary: Utility to bootstrap Ceph clusters
BuildArch: noarch
Requires: lvm2
Requires: python%{python3_pkgversion}
Requires: openssh-server
Requires: which
%if 0%{?weak_deps}
Recommends: podman >= 2.0.2
%endif
29 changes: 29 additions & 0 deletions src/cephadm/box/Dockerfile
@@ -0,0 +1,29 @@
# https://developers.redhat.com/blog/2014/05/05/running-systemd-within-docker-container/
FROM centos:8 as centos-systemd
ENV container docker
ENV CEPHADM_PATH=/usr/local/sbin/cephadm
RUN dnf -y install chrony firewalld lvm2 \
openssh-server openssh-clients python3 \
yum-utils sudo which && dnf clean all

RUN systemctl enable chronyd firewalld sshd


FROM centos-systemd as centos-systemd-docker
# To cache cephadm images
RUN yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
RUN dnf -y install docker-ce && \
dnf clean all && systemctl enable docker

# ssh utilities
RUN dnf install epel-release -y && dnf makecache && dnf install sshpass -y

EXPOSE 8443
EXPOSE 22

FROM centos-systemd-docker
WORKDIR /root
# VOLUME /var/run/docker.sock
COPY start /usr/local/bin

CMD [ "/usr/sbin/init" ]
Empty file added src/cephadm/box/__init__.py
Empty file.
263 changes: 263 additions & 0 deletions src/cephadm/box/box.py
@@ -0,0 +1,263 @@
#!/bin/python3
import argparse
import os
import stat
import sys

import host
import osd
from util import (Config, Target, ensure_inside_container,
ensure_outside_container, get_boxes_container_info,
get_host_ips, inside_container, run_cephadm_shell_command,
run_dc_shell_command, run_shell_command)

CEPH_IMAGE = 'quay.ceph.io/ceph-ci/ceph:master'
BOX_IMAGE = 'cephadm-box:latest'

def cleanup_box() -> None:
osd.cleanup()

def image_exists(image_name: str) -> bool:
    # extract tag
    assert image_name.find(':') != -1
    image_name, tag = image_name.split(':')
images = run_shell_command('docker image ls').split('\n')
IMAGE_NAME = 0
TAG = 1
    for image in images:
        image = image.split()
        if len(image) < 2:
            continue
        print(image)
        print(image_name, tag)
        if image[IMAGE_NAME] == image_name and image[TAG] == tag:
return True
return False

def get_ceph_image():
print('Getting ceph image')
run_shell_command(f'docker pull {CEPH_IMAGE}')
    # rebuild the tag with docker/ceph/Dockerfile layered on top (it just adds EXPOSE 8443)
run_shell_command(f'docker build -t {CEPH_IMAGE} docker/ceph')
if not os.path.exists('docker/ceph/image'):
os.mkdir('docker/ceph/image')
image_tar = 'docker/ceph/image/quay.ceph.image.tar'
    if os.path.exists(image_tar):
        os.remove(image_tar)
run_shell_command(f'docker save {CEPH_IMAGE} -o {image_tar}')
print('Ceph image added')

def get_box_image():
print('Getting box image')
run_shell_command('docker build -t cephadm-box -f Dockerfile .')
print('Box image added')


class Cluster(Target):
_help = 'Manage docker cephadm boxes'
actions = ['bootstrap', 'start', 'down', 'list', 'sh', 'setup', 'cleanup']

def set_args(self):
self.parser.add_argument('action', choices=Cluster.actions, help='Action to perform on the box')
self.parser.add_argument('--osds', type=int, default=1, help='Number of osds')
self.parser.add_argument('--hosts', type=int, default=1, help='Number of hosts')
self.parser.add_argument('--skip_deploy_osds', action='store_true', help='skip deploy osd')
self.parser.add_argument('--skip_create_loop', action='store_true', help='skip create loopback device' )
self.parser.add_argument('--skip_monitoring_stack', action='store_true', help='skip monitoring stack')
self.parser.add_argument('--skip_dashboard', action='store_true', help='skip dashboard')

@ensure_outside_container
def setup(self):
get_ceph_image()
get_box_image()

@ensure_outside_container
def cleanup(self):
cleanup_box()

@ensure_inside_container
def bootstrap(self):
print('Running bootstrap on seed')
cephadm_path = os.environ.get('CEPHADM_PATH')
os.symlink('/cephadm/cephadm', cephadm_path)
run_shell_command('systemctl restart docker') # restart to ensure docker is using daemon.json

st = os.stat(cephadm_path)
os.chmod(cephadm_path, st.st_mode | stat.S_IEXEC)

run_shell_command('docker load < /cephadm/box/docker/ceph/image/quay.ceph.image.tar')
        # Work around a cephadm image lookup error: it sometimes tries to use
        # quay.ceph.io/ceph-ci/ceph:<none> instead of master's tag.
        # Set these in this process' environment; an `export` run through
        # run_shell_command would only affect that short-lived subshell.
        os.environ['CEPH_SOURCE_FOLDER'] = '/ceph'
        os.environ['CEPHADM_IMAGE'] = 'quay.ceph.io/ceph-ci/ceph:master'
        run_shell_command('echo "export CEPHADM_IMAGE=quay.ceph.io/ceph-ci/ceph:master" >> ~/.bashrc')

extra_args = []

shared_ceph_folder = os.environ.get('SHARED_CEPH_FOLDER')
if shared_ceph_folder:
extra_args.extend(['--shared_ceph_folder', shared_ceph_folder])

extra_args.append('--skip-pull')

        # cephadm prints its progress to stderr; redirect it to stdout so
        # run_shell_command doesn't treat the warnings as errors
        extra_args.append('2>&1')

extra_args = ' '.join(extra_args)
skip_monitoring_stack = '--skip_monitoring_stack' if Config.get('skip_monitoring_stack') else ''
skip_dashboard = '--skip_dashboard' if Config.get('skip_dashboard') else ''

fsid = Config.get('fsid')
config_folder = Config.get('config_folder')
config = Config.get('config')
mon_config = Config.get('mon_config')
keyring = Config.get('keyring')
if not os.path.exists(config_folder):
os.mkdir(config_folder)

cephadm_bootstrap_command = (
'$CEPHADM_PATH --verbose bootstrap '
'--mon-ip "$(hostname -i)" '
'--allow-fqdn-hostname '
'--initial-dashboard-password admin '
'--dashboard-password-noupdate '
'--shared_ceph_folder /ceph '
'--allow-overwrite '
f'--output-config {config} '
f'--output-keyring {keyring} '
f'--fsid "{fsid}" '
'--log-to-file '
f'{skip_dashboard} '
f'{skip_monitoring_stack} '
f'{extra_args} '
)

print('Running cephadm bootstrap...')
run_shell_command(cephadm_bootstrap_command)
print('Cephadm bootstrap complete')


run_shell_command('sudo vgchange --refresh')
run_shell_command('cephadm ls')
run_shell_command('ln -s /ceph/src/cephadm/box/box.py /usr/bin/box')

hostname = run_shell_command('hostname')
# NOTE: sometimes cephadm in the box takes a while to update the containers
# running in the cluster and it cannot deploy the osds. In this case
# run: box -v osd deploy --vg vg1 to deploy osds again.
if not Config.get('skip_deploy_osds'):
print('Deploying osds...')
osds = Config.get('osds')
for o in range(osds):
osd.deploy_osd(f'vg1/lv{o}', hostname)
print('Osds deployed')
run_cephadm_shell_command('ceph -s')
print('Bootstrap completed!')



@ensure_outside_container
def start(self):
osds = Config.get('osds')
hosts = Config.get('hosts')

# ensure boxes don't exist
run_shell_command('docker-compose down')

print('Checking docker images')
if not image_exists(CEPH_IMAGE):
get_ceph_image()
if not image_exists(BOX_IMAGE):
get_box_image()

if not Config.get('skip_create_loop'):
print('Adding logical volumes (block devices) in loopback device...')
osd.create_loopback_devices(osds)
print(f'Added {osds} logical volumes in a loopback device')

print('Starting containers')

dcflags = '-f docker-compose.yml'
if not os.path.exists('/sys/fs/cgroup/cgroup.controllers'):
dcflags += ' -f docker-compose.cgroup1.yml'
run_shell_command(f'docker-compose {dcflags} up --scale hosts={hosts} -d')

run_shell_command('sudo sysctl net.ipv4.conf.all.forwarding=1')
run_shell_command('sudo iptables -P FORWARD ACCEPT')

        print('Setting up host ssh servers')
ips = get_host_ips()
print(ips)
for h in range(hosts):
host._setup_ssh(h+1)

verbose = '-v' if Config.get('verbose') else ''
skip_deploy = '--skip_deploy_osds' if Config.get('skip_deploy_osds') else ''
skip_monitoring_stack = '--skip_monitoring_stack' if Config.get('skip_monitoring_stack') else ''
skip_dashboard = '--skip_dashboard' if Config.get('skip_dashboard') else ''
        box_bootstrap_command = (
            f'/cephadm/box/box.py {verbose} cluster bootstrap '
            f'--osds {osds} '
            f'--hosts {hosts} '
            f'{skip_deploy} '
            f'{skip_dashboard} '
            f'{skip_monitoring_stack} '
        )
        run_dc_shell_command(box_bootstrap_command, 1, 'seed')

host._copy_cluster_ssh_key(ips)

print('Bootstrap finished successfully')

@ensure_outside_container
def down(self):
run_shell_command('docker-compose down')
cleanup_box()
print('Successfully killed all boxes')

@ensure_outside_container
def list(self):
info = get_boxes_container_info()
for container in info:
print('\t'.join(container))

@ensure_outside_container
def sh(self):
# we need verbose to see the prompt after running shell command
Config.set('verbose', True)
print('Seed bash')
run_shell_command('docker-compose exec seed bash')




targets = {
'cluster': Cluster,
'osd': osd.Osd,
'host': host.Host,
}

def main():
parser = argparse.ArgumentParser()
parser.add_argument('-v', action='store_true', dest='verbose', help='be more verbose')

subparsers = parser.add_subparsers()
target_instances = {}
for name, target in targets.items():
target_instances[name] = target(None, subparsers)

for count, arg in enumerate(sys.argv, 1):
if arg in targets:
instance = target_instances[arg]
if hasattr(instance, 'main'):
instance.argv = sys.argv[count:]
instance.set_args()
args = parser.parse_args()
Config.add_args(vars(args))
instance.main()
sys.exit(0)

parser.print_help()

if __name__ == '__main__':
main()
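
The dispatch in main() above scans argv for a registered target name and hands the remaining arguments to that target, so new functionality is added by registering another Target subclass in the targets dict. Target and the helpers come from util.py, which box.py imports but which is not shown above, so the sketch below is hypothetical: it assumes Target exposes self.parser and dispatches the chosen action to a method of the same name, as Cluster's method names suggest.

# Hypothetical example target; it mirrors the structure of the Cluster class above.
from util import Target, ensure_outside_container, run_shell_command


class Status(Target):
    _help = 'Example: show the status of the box containers'
    actions = ['ps']

    def set_args(self):
        self.parser.add_argument('action', choices=Status.actions)

    @ensure_outside_container
    def ps(self):
        # docker-compose must be invoked from src/cephadm/box, like the other targets
        run_shell_command('docker-compose ps')


# Registering the target would make `./box.py status ps` reachable through main():
# targets['status'] = Status
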
3 changes: 3 additions & 0 deletions src/cephadm/box/daemon.json
@@ -0,0 +1,3 @@
{
"storage-driver": "fuse-overlayfs"
}
10 changes: 10 additions & 0 deletions src/cephadm/box/docker-compose.cgroup1.yml
@@ -0,0 +1,10 @@
version: "2.4"

# If cgroups v2 is disabled then add cgroup fs
services:
  seed:
    volumes:
      - "/sys/fs/cgroup:/sys/fs/cgroup:ro"
  hosts:
    volumes:
      - "/sys/fs/cgroup:/sys/fs/cgroup:ro"
45 changes: 45 additions & 0 deletions src/cephadm/box/docker-compose.yml
@@ -0,0 +1,45 @@
version: "2.4"
services:
  cephadm-host-base:
    build:
      context: .
    environment:
      - CEPH_BRANCH=master
    image: cephadm-box
    # probably not needed with rootless Docker and cgroups v2
    privileged: true
    # cap_add:
    #   - SYS_ADMIN
    #   - NET_ADMIN
    #   - SYS_TIME
    #   - MKNOD
    stop_signal: RTMIN+3
    volumes:
      - ../../../:/ceph
      - ..:/cephadm
      - ./daemon.json:/etc/docker/daemon.json
      # dangerous, maybe just map the loopback
      # https://stackoverflow.com/questions/36880565/why-dont-my-udev-rules-work-inside-of-a-running-docker-container
      - /dev:/dev
    networks:
      - public
    mem_limit: "20g"
    scale: -1
  seed:
    extends:
      service: cephadm-host-base
    ports:
      - "3000:3000"
      - "8443:8443"
      - "9095:9095"
    scale: 1
  hosts:
    extends:
      service: cephadm-host-base
    scale: 3


volumes:
  var-lib-docker:
networks:
  public:
Empty file.
2 changes: 2 additions & 0 deletions src/cephadm/box/docker/ceph/Dockerfile
@@ -0,0 +1,2 @@
FROM quay.ceph.io/ceph-ci/ceph:master
EXPOSE 8443
2 changes: 2 additions & 0 deletions src/cephadm/box/docker/ceph/locale.conf
@@ -0,0 +1,2 @@
LANG="en_US.UTF-8"
LC_ALL="en_US.UTF-8"