Commit 937e898

Merge pull request ceph#28294 from alfredodeza/wip-rm40063
ceph-volume: pass --ssh-config to pytest to resolve hosts when connecting

Reviewed-by: Andrew Schoen <[email protected]>
andrewschoen authored Jun 4, 2019
2 parents a30f6ed + ed708e5 commit 937e898
Showing 3 changed files with 9 additions and 9 deletions.
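
The vagrant_ssh_config file referenced throughout the diff is not created by this change. A plausible generation step, run per scenario from the directory that holds the Vagrantfile (an assumption; the diff does not show where the file comes from), is:

    # hypothetical: dump Vagrant's generated SSH settings for the scenario's VMs
    vagrant ssh-config > vagrant_ssh_config

Passing that file to py.test via --ssh-config lets testinfra resolve the short host names in the Ansible inventory to the addresses, users, and private keys Vagrant assigned when it brought the VMs up.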
8 changes: 4 additions & 4 deletions src/ceph-volume/ceph_volume/tests/functional/batch/tox.ini
@@ -52,20 +52,20 @@ commands=
 # prepare nodes for testing with testinfra
 ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml

 # test cluster state using testinfra
-py.test -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/../tests
+py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests

 # reboot all vms - attempt
 bash {toxinidir}/../scripts/vagrant_reload.sh {env:VAGRANT_UP_FLAGS:"--no-provision"} {posargs:--provider=virtualbox}

 # retest to ensure cluster came back up correctly after rebooting
-py.test -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/../tests
+py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests

 # destroy an OSD, zap it's device and recreate it using it's ID
 ansible-playbook -vv -i {changedir}/hosts {changedir}/test.yml

 # retest to ensure cluster came back up correctly
-py.test -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/../tests
+py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests

 # test zap OSDs by ID
 ansible-playbook -vv -i {changedir}/hosts {changedir}/test_zap.yml
6 changes: 3 additions & 3 deletions src/ceph-volume/ceph_volume/tests/functional/lvm/tox.ini
@@ -57,18 +57,18 @@ commands=
 ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml

 # test cluster state using testinfra
-py.test -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/../tests
+py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests

 # reboot all vms - attempt
 bash {toxinidir}/../scripts/vagrant_reload.sh {env:VAGRANT_UP_FLAGS:"--no-provision"} {posargs:--provider=virtualbox}

 # retest to ensure cluster came back up correctly after rebooting
-py.test -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/../tests
+py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests

 # destroy an OSD, zap it's device and recreate it using it's ID
 ansible-playbook -vv -i {changedir}/hosts {changedir}/test.yml

 # retest to ensure cluster came back up correctly
-py.test -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/../tests
+py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests

 vagrant destroy {env:VAGRANT_DESTROY_FLAGS:"--force"}
4 changes: 2 additions & 2 deletions src/ceph-volume/ceph_volume/tests/functional/simple/tox.ini
@@ -47,7 +47,7 @@ commands=
 ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml

 # test cluster state testinfra
-py.test -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/../tests
+py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests

 # make ceph-volume simple take over all the OSDs that got deployed, disabling ceph-disk
 ansible-playbook -vv -i {changedir}/hosts {changedir}/test.yml
@@ -59,6 +59,6 @@ commands=
 sleep 120

 # retest to ensure cluster came back up correctly after rebooting
-py.test -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/../tests
+py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests

 vagrant destroy {env:VAGRANT_DESTROY_FLAGS:"--force"}
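
For context, vagrant ssh-config emits one block per machine, so the vagrant_ssh_config each scenario passes along would look roughly like the sketch below (host name, address, and key path are purely illustrative, not taken from this commit):

    Host osd0
      HostName 192.168.121.54
      User vagrant
      Port 22
      UserKnownHostsFile /dev/null
      StrictHostKeyChecking no
      IdentityFile .vagrant/machines/osd0/virtualbox/private_key

Without --ssh-config, the py.test runs had only the inventory names to connect with, which is the host-resolution failure this change addresses.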
