Move live_migration test hooks under gate/
This patch resolves a TODO in .zuul.yaml about using common irrelevant-files in our dsvm jobs. To be able to do that we need to move the test hooks from nova/tests/live_migration under gate/.

Change-Id: I4e5352fd1a99ff2b4134a734eac6626be772caf1
Committed by: Matt Riedemann
Parent: eb6fcb2191
Commit: 3c1d9dab85
gate/live_migration/hooks/ceph.sh (new, executable, +170)
@@ -0,0 +1,170 @@
#!/bin/bash
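# prepare_ceph: install and start a Ceph cluster on the primary node using the
# devstack-plugin-ceph helpers, install Ceph on the subnodes over ansible, and
# distribute the admin keyring and ceph.conf to every compute node.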
function prepare_ceph {
    git clone https://opendev.org/openstack/devstack-plugin-ceph /tmp/devstack-plugin-ceph
    source /tmp/devstack-plugin-ceph/devstack/settings
    source /tmp/devstack-plugin-ceph/devstack/lib/ceph
    install_ceph
    configure_ceph
    # install the ceph-common package on compute nodes
    $ANSIBLE subnodes --become -f 5 -i "$WORKSPACE/inventory" -m raw -a "executable=/bin/bash
    source $BASE/new/devstack/functions
    source $BASE/new/devstack/functions-common
    git clone https://opendev.org/openstack/devstack-plugin-ceph /tmp/devstack-plugin-ceph
    source /tmp/devstack-plugin-ceph/devstack/lib/ceph
    install_ceph_remote
    "

    # copy the ceph admin keyring to the compute nodes
    sudo cp /etc/ceph/ceph.client.admin.keyring /tmp/ceph.client.admin.keyring
    sudo chown ${STACK_USER}:${STACK_USER} /tmp/ceph.client.admin.keyring
    sudo chmod 644 /tmp/ceph.client.admin.keyring
    $ANSIBLE subnodes --become -f 5 -i "$WORKSPACE/inventory" -m copy -a "src=/tmp/ceph.client.admin.keyring dest=/etc/ceph/ceph.client.admin.keyring owner=ceph group=ceph"
    sudo rm -f /tmp/ceph.client.admin.keyring
    # copy ceph.conf to the compute nodes
    $ANSIBLE subnodes --become -f 5 -i "$WORKSPACE/inventory" -m copy -a "src=/etc/ceph/ceph.conf dest=/etc/ceph/ceph.conf owner=root group=root"

    start_ceph
}
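# _ceph_configure_glance: create the glance RBD pool and cephx client on the
# primary node, switch glance-api over to the rbd store via ini_file edits,
# and copy the glance keyring to the compute-only subnodes.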
function _ceph_configure_glance {
    GLANCE_API_CONF=${GLANCE_API_CONF:-/etc/glance/glance-api.conf}
    sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${GLANCE_CEPH_POOL} ${GLANCE_CEPH_POOL_PG} ${GLANCE_CEPH_POOL_PGP}
    sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${GLANCE_CEPH_USER} \
        mon "allow r" \
        osd "allow class-read object_prefix rbd_children, allow rwx pool=${GLANCE_CEPH_POOL}" | \
        sudo tee ${CEPH_CONF_DIR}/ceph.client.${GLANCE_CEPH_USER}.keyring
    sudo chown ${STACK_USER}:$(id -g -n $(whoami)) ${CEPH_CONF_DIR}/ceph.client.${GLANCE_CEPH_USER}.keyring

    $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=${GLANCE_API_CONF} section=DEFAULT option=show_image_direct_url value=True"
    $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=${GLANCE_API_CONF} section=glance_store option=default_store value=rbd"
    $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=${GLANCE_API_CONF} section=glance_store option=stores value='file, http, rbd'"
    $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=${GLANCE_API_CONF} section=glance_store option=rbd_store_ceph_conf value=$CEPH_CONF_FILE"
    $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=${GLANCE_API_CONF} section=glance_store option=rbd_store_user value=$GLANCE_CEPH_USER"
    $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=${GLANCE_API_CONF} section=glance_store option=rbd_store_pool value=$GLANCE_CEPH_POOL"

    sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${GLANCE_CEPH_POOL} size ${CEPH_REPLICAS}
    if [[ $CEPH_REPLICAS -ne 1 ]]; then
        sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${GLANCE_CEPH_POOL} crush_ruleset ${RULE_ID}
    fi

    # copy the glance keyring to the compute-only node
    sudo cp /etc/ceph/ceph.client.glance.keyring /tmp/ceph.client.glance.keyring
    sudo chown $STACK_USER:$STACK_USER /tmp/ceph.client.glance.keyring
    $ANSIBLE subnodes --become -f 5 -i "$WORKSPACE/inventory" -m copy -a "src=/tmp/ceph.client.glance.keyring dest=/etc/ceph/ceph.client.glance.keyring"
    sudo rm -f /tmp/ceph.client.glance.keyring
}

function configure_and_start_glance {
    _ceph_configure_glance
    echo 'check processes before glance-api stop'
    $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m shell -a "ps aux | grep glance-api"

    # restart glance
    $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m shell -a "systemctl restart devstack@g-api"

    echo 'check processes after glance-api stop'
    $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m shell -a "ps aux | grep glance-api"
}
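# _ceph_configure_nova: create the nova ephemeral RBD pool and point the
# [libvirt] section of nova.conf on every node at it. The cinder cephx client
# is created here as well, since these tests emulate a deployment where cinder
# itself is not backed by ceph.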
function _ceph_configure_nova {
    # setup ceph for nova; we don't reuse configure_ceph_nova, as we need to
    # emulate the case where cinder is not configured for ceph
    sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${NOVA_CEPH_POOL} ${NOVA_CEPH_POOL_PG} ${NOVA_CEPH_POOL_PGP}
    NOVA_CONF=${NOVA_CPU_CONF:-/etc/nova/nova.conf}
    $ANSIBLE all --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=${NOVA_CONF} section=libvirt option=rbd_user value=${CINDER_CEPH_USER}"
    $ANSIBLE all --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=${NOVA_CONF} section=libvirt option=rbd_secret_uuid value=${CINDER_CEPH_UUID}"
    $ANSIBLE all --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=${NOVA_CONF} section=libvirt option=inject_key value=false"
    $ANSIBLE all --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=${NOVA_CONF} section=libvirt option=inject_partition value=-2"
    $ANSIBLE all --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=${NOVA_CONF} section=libvirt option=disk_cachemodes value='network=writeback'"
    $ANSIBLE all --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=${NOVA_CONF} section=libvirt option=images_type value=rbd"
    $ANSIBLE all --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=${NOVA_CONF} section=libvirt option=images_rbd_pool value=${NOVA_CEPH_POOL}"
    $ANSIBLE all --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=${NOVA_CONF} section=libvirt option=images_rbd_ceph_conf value=${CEPH_CONF_FILE}"

    sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_CEPH_USER} \
        mon "allow r" \
        osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_CEPH_POOL}, allow rwx pool=${NOVA_CEPH_POOL}, allow rwx pool=${GLANCE_CEPH_POOL}" | \
        sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring > /dev/null
    sudo chown ${STACK_USER}:$(id -g -n $(whoami)) ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring

    # copy the cinder keyring to the compute-only node
    sudo cp /etc/ceph/ceph.client.cinder.keyring /tmp/ceph.client.cinder.keyring
    sudo chown ${STACK_USER}:${STACK_USER} /tmp/ceph.client.cinder.keyring
    $ANSIBLE subnodes --become -f 5 -i "$WORKSPACE/inventory" -m copy -a "src=/tmp/ceph.client.cinder.keyring dest=/etc/ceph/ceph.client.cinder.keyring"
    sudo rm -f /tmp/ceph.client.cinder.keyring

    sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${NOVA_CEPH_POOL} size ${CEPH_REPLICAS}
    if [[ $CEPH_REPLICAS -ne 1 ]]; then
        sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${NOVA_CEPH_POOL} crush_ruleset ${RULE_ID}
    fi
}

function configure_and_start_nova {
    _ceph_configure_nova
    # import the secret to libvirt
    _populate_libvirt_secret
    echo 'check compute processes before restart'
    $ANSIBLE all --become -f 5 -i "$WORKSPACE/inventory" -m shell -a "ps aux | grep compute"

    # restart nova-compute
    $ANSIBLE all --become -f 5 -i "$WORKSPACE/inventory" -m shell -a "systemctl restart devstack@n-cpu"

    # test that they are all running again
    $ANSIBLE all --become -f 5 -i "$WORKSPACE/inventory" -m shell -a "ps aux | grep compute"
}
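# _ceph_configure_cinder: create the cinder RBD pool and configure a single
# "ceph" volume backend in cinder.conf on the primary node.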
function _ceph_configure_cinder {
    sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_CEPH_POOL} ${CINDER_CEPH_POOL_PG} ${CINDER_CEPH_POOL_PGP}
    sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_CEPH_POOL} size ${CEPH_REPLICAS}
    if [[ $CEPH_REPLICAS -ne 1 ]]; then
        sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_CEPH_POOL} crush_ruleset ${RULE_ID}
    fi

    CINDER_CONF=${CINDER_CONF:-/etc/cinder/cinder.conf}
    $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=$CINDER_CONF section=ceph option=volume_backend_name value=ceph"
    $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=$CINDER_CONF section=ceph option=volume_driver value=cinder.volume.drivers.rbd.RBDDriver"
    $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=$CINDER_CONF section=ceph option=rbd_ceph_conf value=$CEPH_CONF_FILE"
    $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=$CINDER_CONF section=ceph option=rbd_pool value=$CINDER_CEPH_POOL"
    $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=$CINDER_CONF section=ceph option=rbd_user value=$CINDER_CEPH_USER"
    $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=$CINDER_CONF section=ceph option=rbd_uuid value=$CINDER_CEPH_UUID"
    $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=$CINDER_CONF section=ceph option=rbd_flatten_volume_from_snapshot value=False"
    $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=$CINDER_CONF section=ceph option=rbd_max_clone_depth value=5"
    $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=$CINDER_CONF section=DEFAULT option=default_volume_type value=ceph"
    $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=$CINDER_CONF section=DEFAULT option=enabled_backends value=ceph"
}

function configure_and_start_cinder {
    _ceph_configure_cinder

    # restart cinder
    $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m shell -a "systemctl restart devstack@c-vol"

    source $BASE/new/devstack/openrc

    export OS_USERNAME=admin
    export OS_PROJECT_NAME=admin
    # replace the default lvm volume type with a ceph-backed one
    lvm_type=$(cinder type-list | awk -F "|" 'NR==4{ print $2}')
    cinder type-delete $lvm_type
    openstack volume type create --os-volume-api-version 1 --property volume_backend_name="ceph" ceph
}
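# _populate_libvirt_secret: register the cinder cephx key as a libvirt secret
# on every node so qemu can attach RBD-backed disks; the secret UUID has to
# match the rbd_secret_uuid written to nova.conf above.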
function _populate_libvirt_secret {
    cat > /tmp/secret.xml <<EOF
<secret ephemeral='no' private='no'>
   <uuid>${CINDER_CEPH_UUID}</uuid>
   <usage type='ceph'>
     <name>client.${CINDER_CEPH_USER} secret</name>
   </usage>
</secret>
EOF

    $ANSIBLE subnodes --become -f 5 -i "$WORKSPACE/inventory" -m copy -a "src=/tmp/secret.xml dest=/tmp/secret.xml"
    $ANSIBLE all --become -f 5 -i "$WORKSPACE/inventory" -m shell -a "virsh secret-define --file /tmp/secret.xml"
    local secret=$(sudo ceph -c ${CEPH_CONF_FILE} auth get-key client.${CINDER_CEPH_USER})
    # TODO(tdurakov): remove this escaping once https://github.com/ansible/ansible/issues/13862 is fixed
    secret=${secret//=/'\='}
    $ANSIBLE all --become -f 5 -i "$WORKSPACE/inventory" -m shell -a "virsh secret-set-value --secret ${CINDER_CEPH_UUID} --base64 $secret"
    $ANSIBLE all --become -f 5 -i "$WORKSPACE/inventory" -m file -a "path=/tmp/secret.xml state=absent"
}
gate/live_migration/hooks/nfs.sh (new, executable, +50)
@@ -0,0 +1,50 @@
#!/bin/bash
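# nfs_setup: install the NFS packages, export /opt/stack/data/nova/instances
# from the primary node to every subnode, open the firewall for NFS, and mount
# the export on the subnodes so the instance directory becomes shared storage.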
function nfs_setup {
    if uses_debs; then
        module=apt
    elif is_fedora; then
        module=yum
    fi
    $ANSIBLE all --become -f 5 -i "$WORKSPACE/inventory" -m $module \
        -a "name=nfs-common state=present"
    $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m $module \
        -a "name=nfs-kernel-server state=present"

    $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=/etc/idmapd.conf section=Mapping option=Nobody-User value=nova"

    $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=/etc/idmapd.conf section=Mapping option=Nobody-Group value=nova"

    for SUBNODE in $SUBNODES ; do
        $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m lineinfile -a "dest=/etc/exports line='/opt/stack/data/nova/instances $SUBNODE(rw,fsid=0,insecure,no_subtree_check,async,no_root_squash)'"
    done

    $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m shell -a "exportfs -a"
    $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m service -a "name=nfs-kernel-server state=restarted"
    GetDistro
    if [[ ! ${DISTRO} =~ (xenial) ]]; then
        $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m service -a "name=idmapd state=restarted"
    fi
    $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m shell -a "iptables -A INPUT -p tcp --dport 111 -j ACCEPT"
    $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m shell -a "iptables -A INPUT -p udp --dport 111 -j ACCEPT"
    $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m shell -a "iptables -A INPUT -p tcp --dport 2049 -j ACCEPT"
    $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m shell -a "iptables -A INPUT -p udp --dport 2049 -j ACCEPT"
    $ANSIBLE subnodes --become -f 5 -i "$WORKSPACE/inventory" -m shell -a "mount -t nfs4 -o proto\=tcp,port\=2049 $primary_node:/ /opt/stack/data/nova/instances/"
}

function nfs_configure_tempest {
    $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=$BASE/new/tempest/etc/tempest.conf section=compute-feature-enabled option=block_migration_for_live_migration value=False"
}

function nfs_verify_setup {
    $ANSIBLE subnodes --become -f 5 -i "$WORKSPACE/inventory" -m file -a "path=/opt/stack/data/nova/instances/test_file state=touch"
    if [ ! -e '/opt/stack/data/nova/instances/test_file' ]; then
        die $LINENO "NFS configuration failure"
    fi
}

function nfs_teardown {
    # teardown nfs shared storage
    $ANSIBLE subnodes --become -f 5 -i "$WORKSPACE/inventory" -m shell -a "umount -t nfs4 /opt/stack/data/nova/instances/"
    $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m service -a "name=nfs-kernel-server state=stopped"
}
gate/live_migration/hooks/run_tests.sh (new, executable, +61)
@@ -0,0 +1,61 @@
#!/bin/bash
# The dedicated live migration CI job is responsible for testing different
# environments based on the underlying storage used for ephemeral disks.
# This hook lets the CI job inject the logic for reconfiguring the environment.
# The base scenario is:
#
# 1. test with all local storage (use default for volumes)
# 2. test with NFS for root + ephemeral disks
# 3. test with Ceph for root + ephemeral disks
# 4. test with Ceph for volumes and root + ephemeral disk
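# Assumes this runs on the primary node of a multinode devstack-gate job and
# that $BASE, $WORKSPACE, $ANSIBLE and $TEMPEST_CONCURRENCY are provided by the
# job environment.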
set -xe
cd $BASE/new/tempest

source $BASE/new/devstack/functions
source $BASE/new/devstack/functions-common
source $BASE/new/devstack/lib/nova
source $WORKSPACE/devstack-gate/functions.sh
source $BASE/new/nova/gate/live_migration/hooks/utils.sh
source $BASE/new/nova/gate/live_migration/hooks/nfs.sh
source $BASE/new/nova/gate/live_migration/hooks/ceph.sh
primary_node=$(cat /etc/nodepool/primary_node_private)
SUBNODES=$(cat /etc/nodepool/sub_nodes_private)
SERVICE_HOST=$primary_node
STACK_USER=${STACK_USER:-stack}

echo '1. test with all local storage (use default for volumes)'
echo 'NOTE: test_volume_backed_live_migration is skipped due to https://bugs.launchpad.net/nova/+bug/1524898'
run_tempest "block migration test" "^.*test_live_migration(?!.*(test_volume_backed_live_migration))"

# TODO(mriedem): Run $BASE/new/nova/gate/test_evacuate.sh for local storage

# all tests below this line use shared storage, need to update tempest.conf
echo 'disabling block_migration in tempest'
$ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=$BASE/new/tempest/etc/tempest.conf section=compute-feature-enabled option=block_migration_for_live_migration value=False"

echo '2. NFS testing is skipped due to setup failures with Ubuntu 16.04'
#echo '2. test with NFS for root + ephemeral disks'

#nfs_setup
#nfs_configure_tempest
#nfs_verify_setup
#run_tempest "NFS shared storage test" "live_migration"
#nfs_teardown

echo '3. test with Ceph for root + ephemeral disks'
# Discover and set variables for the OS version so the devstack-plugin-ceph
# scripts can find the correct repository to install the ceph packages.
GetOSVersion
prepare_ceph
GLANCE_API_CONF=${GLANCE_API_CONF:-/etc/glance/glance-api.conf}
configure_and_start_glance

configure_and_start_nova
run_tempest "Ceph nova&glance test" "^.*test_live_migration(?!.*(test_volume_backed_live_migration))"

set +e
#echo '4. test with Ceph for volumes and root + ephemeral disk'

#configure_and_start_cinder
#run_tempest "Ceph nova&glance&cinder test" "live_migration"
gate/live_migration/hooks/utils.sh (new, executable, +11)
@@ -0,0 +1,11 @@
#!/bin/bash
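# run_tempest <message> <test regex>: run the matching tempest tests via tox as
# the tempest user and fail the job if they do not pass, e.g.
#   run_tempest "block migration test" "^.*test_live_migration(?!.*(test_volume_backed_live_migration))"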
function run_tempest {
    local message=$1
    local tempest_regex=$2
    sudo -H -u tempest tox -eall -- $tempest_regex --concurrency=$TEMPEST_CONCURRENCY
    exitcode=$?
    if [[ $exitcode -ne 0 ]]; then
        die $LINENO "$message failure"
    fi
}