diff --git a/README.rst b/README.rst
index 8dbdb88d..8063d078 100644
--- a/README.rst
+++ b/README.rst
@@ -140,9 +140,8 @@ By comparison, a provider settings file for HPCloud::
Note: The image regularly changes as new images are uploaded, for the
specific image name currently used for tests, see
-`nodepool.yaml.erb `_.
+`nodepool.yaml `_.
Source the provider settings, boot a server named "testserver" (chosen
arbitrarily for this example) with your SSH key allowed, and log into
@@ -252,4 +251,8 @@ You can file bugs on the storyboard devstack-gate project::
https://storyboard.openstack.org/#!/project/712
-And you can chat with us on Freenode in #openstack-dev or #openstack-infra.
+And you can chat with us on Freenode in #openstack-qa or #openstack-infra.
+
+It's worth noting that, while devstack-gate is generally licensed under the
+Apache license, `playbooks/plugins/callback/devstack.py` is GPLv3 because it
+is derived from the Ansible source code.
diff --git a/devstack-vm-gate-wrap.sh b/devstack-vm-gate-wrap.sh
index e62b1256..25a522c3 100755
--- a/devstack-vm-gate-wrap.sh
+++ b/devstack-vm-gate-wrap.sh
@@ -28,29 +28,35 @@ GIT_BRANCH=${GIT_BRANCH:-master}
# We're using enough ansible specific features that it's extremely
# possible that new ansible releases can break us. As such we should
# be very deliberate about which ansible we use.
-ANSIBLE_VERSION=${ANSIBLE_VERSION:-2.0.0.2}
+ANSIBLE_VERSION=${ANSIBLE_VERSION:-2.2.0.0}
+export DSTOOLS_VERSION=${DSTOOLS_VERSION:-0.3.0}
# sshd may have been compiled with a default path excluding */sbin
export PATH=$PATH:/usr/local/sbin:/usr/sbin
+# When doing xtrace (set -x / set -o xtrace), provide more debug output
+export PS4='+ ${BASH_SOURCE:-}:${FUNCNAME[0]:-}:L${LINENO:-}: '
+
+# Check to see if the WORKSPACE variable is defined
+if [ -z ${WORKSPACE} ]; then
+ echo "The 'WORKSPACE' variable is undefined. It must be defined for this script to work"
+ exit 1
+fi
source $WORKSPACE/devstack-gate/functions.sh
start_timer
+# Save the PROJECTS variable as it was passed in. This is needed for reproduce.sh
+# in case the job definition contains items that are not in the "global" list
+# below.
+# See: https://bugs.launchpad.net/openstack-gate/+bug/1544827
+JOB_PROJECTS="$PROJECTS"
PROJECTS="openstack-infra/devstack-gate $PROJECTS"
PROJECTS="openstack-dev/devstack $PROJECTS"
PROJECTS="openstack-dev/pbr $PROJECTS"
-PROJECTS="openstack-infra/tripleo-ci $PROJECTS"
-PROJECTS="openstack/automaton $PROJECTS"
PROJECTS="openstack/ceilometer $PROJECTS"
PROJECTS="openstack/ceilometermiddleware $PROJECTS"
PROJECTS="openstack/cinder $PROJECTS"
-PROJECTS="openstack/cliff $PROJECTS"
-PROJECTS="openstack/debtcollector $PROJECTS"
-PROJECTS="openstack/dib-utils $PROJECTS"
-PROJECTS="openstack/diskimage-builder $PROJECTS"
-PROJECTS="openstack/django_openstack_auth $PROJECTS"
-PROJECTS="openstack/futurist $PROJECTS"
PROJECTS="openstack/glance $PROJECTS"
PROJECTS="openstack/glance_store $PROJECTS"
PROJECTS="openstack/heat $PROJECTS"
@@ -60,21 +66,44 @@ PROJECTS="openstack/horizon $PROJECTS"
PROJECTS="openstack/keystone $PROJECTS"
PROJECTS="openstack/keystoneauth $PROJECTS"
PROJECTS="openstack/keystonemiddleware $PROJECTS"
+PROJECTS="openstack/neutron $PROJECTS"
+PROJECTS="openstack/nova $PROJECTS"
+PROJECTS="openstack/requirements $PROJECTS"
+PROJECTS="openstack/swift $PROJECTS"
+PROJECTS="openstack/tempest $PROJECTS"
+PROJECTS="openstack/tempest-lib $PROJECTS"
+# Everything below this line in the PROJECTS list is for non
+# default devstack runs. Over time we should remove items from
+# below and add them explicitly to the jobs that need them. The
+# reason for this is to reduce job runtimes, every git repo
+# has to be cloned and updated and checked out to the proper ref
+# which is not free.
+PROJECTS="openstack-infra/tripleo-ci $PROJECTS"
+PROJECTS="openstack/automaton $PROJECTS"
+PROJECTS="openstack/cliff $PROJECTS"
+PROJECTS="openstack/debtcollector $PROJECTS"
+# The devstack heat plugin uses these repos
+if [[ "$DEVSTACK_GATE_HEAT" -eq "1" ]] ; then
+ PROJECTS="openstack/dib-utils $PROJECTS"
+ PROJECTS="openstack/diskimage-builder $PROJECTS"
+fi
+PROJECTS="openstack/django_openstack_auth $PROJECTS"
+PROJECTS="openstack/futurist $PROJECTS"
PROJECTS="openstack/manila $PROJECTS"
PROJECTS="openstack/manila-ui $PROJECTS"
PROJECTS="openstack/zaqar $PROJECTS"
-PROJECTS="openstack/neutron $PROJECTS"
PROJECTS="openstack/neutron-fwaas $PROJECTS"
PROJECTS="openstack/neutron-lbaas $PROJECTS"
PROJECTS="openstack/octavia $PROJECTS"
PROJECTS="openstack/neutron-vpnaas $PROJECTS"
-PROJECTS="openstack/nova $PROJECTS"
PROJECTS="openstack/os-apply-config $PROJECTS"
PROJECTS="openstack/os-brick $PROJECTS"
+PROJECTS="openstack/os-client-config $PROJECTS"
PROJECTS="openstack/os-cloud-config $PROJECTS"
PROJECTS="openstack/os-collect-config $PROJECTS"
PROJECTS="openstack/os-net-config $PROJECTS"
PROJECTS="openstack/os-refresh-config $PROJECTS"
+PROJECTS="openstack/osc-lib $PROJECTS"
PROJECTS="openstack/oslo.cache $PROJECTS"
PROJECTS="openstack/oslo.concurrency $PROJECTS"
PROJECTS="openstack/oslo.config $PROJECTS"
@@ -93,14 +122,10 @@ PROJECTS="openstack/oslo.service $PROJECTS"
PROJECTS="openstack/oslo.versionedobjects $PROJECTS"
PROJECTS="openstack/oslo.vmware $PROJECTS"
PROJECTS="openstack/pycadf $PROJECTS"
-PROJECTS="openstack/requirements $PROJECTS"
PROJECTS="openstack/sahara $PROJECTS"
PROJECTS="openstack/sahara-dashboard $PROJECTS"
PROJECTS="openstack/stevedore $PROJECTS"
-PROJECTS="openstack/swift $PROJECTS"
PROJECTS="openstack/taskflow $PROJECTS"
-PROJECTS="openstack/tempest $PROJECTS"
-PROJECTS="openstack/tempest-lib $PROJECTS"
PROJECTS="openstack/tooz $PROJECTS"
PROJECTS="openstack/tripleo-heat-templates $PROJECTS"
PROJECTS="openstack/tripleo-image-elements $PROJECTS"
@@ -163,11 +188,36 @@ export DEVSTACK_GATE_TEMPEST_DISABLE_TENANT_ISOLATION=${DEVSTACK_GATE_TEMPEST_DI
# Set to 1 to enable Cinder secure delete.
# False by default to avoid dd problems on Precise.
# https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1023755
+# TODO(mriedem): CINDER_SECURE_DELETE is deprecated in devstack as of liberty.
+# Remove after kilo-eol happens in devstack.
export DEVSTACK_CINDER_SECURE_DELETE=${DEVSTACK_CINDER_SECURE_DELETE:-0}
+# Should cinder perform secure deletion of volumes?
+# Defaults to none to avoid bug 1023755. Can also be set to zero or shred.
+# Only applicable to stable/liberty+ devstack.
+export DEVSTACK_CINDER_VOLUME_CLEAR=${DEVSTACK_CINDER_VOLUME_CLEAR:-none}
+
+# Set this to override the branch selected for testing (in
+# single-branch checkouts; not used for grenade)
+export OVERRIDE_ZUUL_BRANCH=${OVERRIDE_ZUUL_BRANCH:-$ZUUL_BRANCH}
+
+stable_compare="stable/[a-n]"
+
# Set to 1 to run neutron instead of nova network
-# Only applicable to master branch
-export DEVSTACK_GATE_NEUTRON=${DEVSTACK_GATE_NEUTRON:-0}
+# This is a bit complicated to handle the deprecation of nova net across
+# repos with branches from this branchless job runner.
+if [ -n "$DEVSTACK_GATE_NEUTRON" ] ; then
+ # If someone has made a choice externally honor it
+ export DEVSTACK_GATE_NEUTRON=$DEVSTACK_GATE_NEUTRON
+elif [[ "$OVERRIDE_ZUUL_BRANCH" =~ $stable_compare ]] ; then
+ # Default to no neutron on older stable branches because nova net
+ # was the default all that time.
+ export DEVSTACK_GATE_NEUTRON=0
+else
+ # For everything else there is neutron
+ export DEVSTACK_GATE_NEUTRON=1
+fi
+
# Set to 1 to run neutron distributed virtual routing
export DEVSTACK_GATE_NEUTRON_DVR=${DEVSTACK_GATE_NEUTRON_DVR:-0}
@@ -194,7 +244,7 @@ export DEVSTACK_GATE_SAHARA=${DEVSTACK_GATE_SAHARA:-0}
export DEVSTACK_GATE_TROVE=${DEVSTACK_GATE_TROVE:-0}
# Set to 0 to disable config_drive and use the metadata server instead
-export DEVSTACK_GATE_CONFIGDRIVE=${DEVSTACK_GATE_CONFIGDRIVE:-1}
+export DEVSTACK_GATE_CONFIGDRIVE=${DEVSTACK_GATE_CONFIGDRIVE:-0}
# Set to 1 to enable installing test requirements
export DEVSTACK_GATE_INSTALL_TESTONLY=${DEVSTACK_GATE_INSTALL_TESTONLY:-0}
@@ -213,13 +263,9 @@ export DEVSTACK_PROJECT_FROM_GIT=${DEVSTACK_PROJECT_FROM_GIT:-}
# for a stable branch we want to both try to upgrade forward n => n+1 as
# well as upgrade from last n-1 => n.
#
-# i.e. stable/juno:
-# pullup means stable/icehouse => stable/juno
-# forward means stable/juno => master (or stable/kilo if that's out)
-# partial-ncpu means stable/icehouse => stable/juno but keep nova
-# compute at stable/icehouse
-# partial-ironic means stable/icehouse => stable/juno but keep ironic
-# compute at stable/icehouse
+# i.e. stable/ocata:
+# pullup means stable/newton => stable/ocata
+# forward means stable/ocata => master (or stable/pike if that's out)
export DEVSTACK_GATE_GRENADE=${DEVSTACK_GATE_GRENADE:-}
# the branch name for selecting grenade branches
@@ -258,46 +304,41 @@ if [[ -n "$DEVSTACK_GATE_GRENADE" ]]; then
# forward upgrades are an attempt to migrate up from an
# existing stable branch to the next release.
forward)
- if [[ "$GRENADE_BASE_BRANCH" == "stable/icehouse" ]]; then
- export GRENADE_OLD_BRANCH="stable/icehouse"
- export GRENADE_NEW_BRANCH="stable/juno"
- elif [[ "$GRENADE_BASE_BRANCH" == "stable/juno" ]]; then
- export GRENADE_OLD_BRANCH="stable/juno"
- export GRENADE_NEW_BRANCH="stable/kilo"
- elif [[ "$GRENADE_BASE_BRANCH" == "stable/kilo" ]]; then
+ if [[ "$GRENADE_BASE_BRANCH" == "stable/kilo" ]]; then
export GRENADE_OLD_BRANCH="stable/kilo"
export GRENADE_NEW_BRANCH="stable/liberty"
elif [[ "$GRENADE_BASE_BRANCH" == "stable/liberty" ]]; then
export GRENADE_OLD_BRANCH="stable/liberty"
+ export GRENADE_NEW_BRANCH="stable/mitaka"
+ elif [[ "$GRENADE_BASE_BRANCH" == "stable/mitaka" ]]; then
+ export GRENADE_OLD_BRANCH="stable/mitaka"
+ export GRENADE_NEW_BRANCH="stable/newton"
+ elif [[ "$GRENADE_BASE_BRANCH" == "stable/newton" ]]; then
+ export GRENADE_OLD_BRANCH="stable/newton"
+ export GRENADE_NEW_BRANCH="$GIT_BRANCH"
+ elif [[ "$GRENADE_BASE_BRANCH" == "stable/ocata" ]]; then
+ export GRENADE_OLD_BRANCH="stable/ocata"
export GRENADE_NEW_BRANCH="$GIT_BRANCH"
fi
;;
- # partial upgrades are like normal upgrades except they leave
- # certain services behind. We use the base 4 operator ';&'
- # here to fall trhough to the next conditionals
- partial-*)
- if [[ "$DEVSTACK_GATE_GRENADE" == "partial-ncpu" ]]; then
- export DO_NOT_UPGRADE_SERVICES=[n-cpu]
- elif [[ "$DEVSTACK_GATE_GRENADE" == "partial-ironic" ]]; then
- export DO_NOT_UPGRADE_SERVICES=[ir-api,ir-cond]
- fi
- ;&
-
# pullup upgrades are our normal upgrade test. Can you upgrade
# to the current patch from the last stable.
pullup)
- if [[ "$GRENADE_BASE_BRANCH" == "stable/juno" ]]; then
- export GRENADE_OLD_BRANCH="stable/icehouse"
- export GRENADE_NEW_BRANCH="stable/juno"
- elif [[ "$GRENADE_BASE_BRANCH" == "stable/kilo" ]]; then
- export GRENADE_OLD_BRANCH="stable/juno"
- export GRENADE_NEW_BRANCH="stable/kilo"
- elif [[ "$GRENADE_BASE_BRANCH" == "stable/liberty" ]]; then
+ if [[ "$GRENADE_BASE_BRANCH" == "stable/liberty" ]]; then
export GRENADE_OLD_BRANCH="stable/kilo"
export GRENADE_NEW_BRANCH="stable/liberty"
- else # master
+ elif [[ "$GRENADE_BASE_BRANCH" == "stable/mitaka" ]]; then
export GRENADE_OLD_BRANCH="stable/liberty"
+ export GRENADE_NEW_BRANCH="stable/mitaka"
+ elif [[ "$GRENADE_BASE_BRANCH" == "stable/newton" ]]; then
+ export GRENADE_OLD_BRANCH="stable/mitaka"
+ export GRENADE_NEW_BRANCH="stable/newton"
+ elif [[ "$GRENADE_BASE_BRANCH" == "stable/ocata" ]]; then
+ export GRENADE_OLD_BRANCH="stable/newton"
+ export GRENADE_NEW_BRANCH="stable/ocata"
+ else # master
+ export GRENADE_OLD_BRANCH="stable/ocata"
export GRENADE_NEW_BRANCH="$GIT_BRANCH"
fi
;;
@@ -363,10 +404,6 @@ export DEVSTACK_GATE_REMOVE_STACK_SUDO=${DEVSTACK_GATE_REMOVE_STACK_SUDO:-1}
# dependency-only installation.
export DEVSTACK_GATE_UNSTACK=${DEVSTACK_GATE_UNSTACK:-0}
-# Set this to override the branch selected for testing (in
-# single-branch checkouts; not used for grenade)
-export OVERRIDE_ZUUL_BRANCH=${OVERRIDE_ZUUL_BRANCH:-$ZUUL_BRANCH}
-
# Set Ceilometer backend to override the default one. It could be mysql,
# postgresql, mongodb.
export DEVSTACK_GATE_CEILOMETER_BACKEND=${DEVSTACK_GATE_CEILOMETER_BACKEND:-mysql}
@@ -388,6 +425,11 @@ export DEVSTACK_GATE_TOPOLOGY=${DEVSTACK_GATE_TOPOLOGY:-aio}
# for jobs that know exactly which repos they need.
export DEVSTACK_GATE_PROJECTS_OVERRIDE=${DEVSTACK_GATE_PROJECTS_OVERRIDE:-""}
+# Set this to "True" to force devstack to pick python 3.x. "False" will cause
+# devstack to pick python 2.x. We should leave this empty for devstack to
+# pick the default.
+export DEVSTACK_GATE_USE_PYTHON3=${DEVSTACK_GATE_USE_PYTHON3:-""}
+
# Set this to enable remote logging of the console via UDP packets to
# a specified ipv4 ip:port (note; not hostname -- ip address only).
# This can be extremely useful if a host is oopsing or dropping off
@@ -428,34 +470,43 @@ with $DEVSTACK_GATE_TIMEOUT_BUFFER minutes reserved for cleanup."
echo "Available disk space on this host:"
indent df -h
-# Enable tracing while we transition to using ansible to run
-# setup across multiple nodes.
-set -x
# Install ansible
sudo -H pip install virtualenv
virtualenv /tmp/ansible
-/tmp/ansible/bin/pip install ansible==$ANSIBLE_VERSION
+# NOTE(emilien): workaround to avoid installing cryptography
+# https://github.com/ansible/ansible/issues/15665
+/tmp/ansible/bin/pip install paramiko==1.16.0 ansible==$ANSIBLE_VERSION \
+ devstack-tools==$DSTOOLS_VERSION ara
export ANSIBLE=/tmp/ansible/bin/ansible
+export ANSIBLE_PLAYBOOK=/tmp/ansible/bin/ansible-playbook
+export DSCONF=/tmp/ansible/bin/dsconf
# Write inventory file with groupings
COUNTER=1
echo "[primary]" > "$WORKSPACE/inventory"
echo "localhost ansible_connection=local host_counter=$COUNTER" >> "$WORKSPACE/inventory"
echo "[subnodes]" >> "$WORKSPACE/inventory"
-SUBNODES=$(cat /etc/nodepool/sub_nodes_private)
+export SUBNODES=$(cat /etc/nodepool/sub_nodes_private)
for SUBNODE in $SUBNODES ; do
let COUNTER=COUNTER+1
echo "$SUBNODE host_counter=$COUNTER" >> "$WORKSPACE/inventory"
done
+# Write ansible config file
+cat > "$WORKSPACE/ansible.cfg" < "$WORKSPACE/test_env.sh")
+# Copy bootstrap to remote hosts
$ANSIBLE subnodes -f 5 -i "$WORKSPACE/inventory" -m copy \
-a "src='$WORKSPACE/devstack-gate' dest='$WORKSPACE'"
$ANSIBLE subnodes -f 5 -i "$WORKSPACE/inventory" -m copy \
@@ -468,7 +519,7 @@ $ANSIBLE all -f 5 -i "$WORKSPACE/inventory" -m file \
-a "path='$WORKSPACE/logs' state=directory"
# Record a file to reproduce this build
-reproduce
+reproduce "$JOB_PROJECTS"
# Run ansible to do setup_host on all nodes.
echo "Setting up the hosts"
@@ -494,8 +545,8 @@ EOF
}
echo "... this takes a few seconds (logs at logs/devstack-gate-setup-host.txt.gz)"
-$ANSIBLE all -f 5 -i "$WORKSPACE/inventory" -m shell \
- -a "$(run_command setup_host)" &> "$WORKSPACE/logs/devstack-gate-setup-host.txt"
+$ANSIBLE_PLAYBOOK -f 5 -i "$WORKSPACE/inventory" "$WORKSPACE/devstack-gate/playbooks/setup_host.yaml" \
+ &> "$WORKSPACE/logs/devstack-gate-setup-host.txt"
if [ -n "$DEVSTACK_GATE_GRENADE" ]; then
start=$(date +%s)
@@ -552,15 +603,23 @@ fi
# devstack-vm-gate-wrap.sh will not automagically run the hooks on each node.
# Run pre test hook if we have one
with_timeout call_hook_if_defined "pre_test_hook"
+GATE_RETVAL=$?
+if [ $GATE_RETVAL -ne 0 ]; then
+ echo "ERROR: the pre-test setup script run by this job failed - exit code: $GATE_RETVAL"
+fi
# Run the gate function
-echo "Running gate_hook"
-with_timeout "gate_hook"
-GATE_RETVAL=$?
+if [ $GATE_RETVAL -eq 0 ]; then
+ echo "Running gate_hook"
+ with_timeout "gate_hook"
+ GATE_RETVAL=$?
+ if [ $GATE_RETVAL -ne 0 ]; then
+ echo "ERROR: the main setup script run by this job failed - exit code: $GATE_RETVAL"
+ fi
+fi
RETVAL=$GATE_RETVAL
if [ $GATE_RETVAL -ne 0 ]; then
- echo "ERROR: the main setup script run by this job failed - exit code: $GATE_RETVAL"
echo " please look at the relevant log files to determine the root cause"
echo "Running devstack worlddump.py"
sudo $BASE/new/devstack/tools/worlddump.py -d $BASE/logs
@@ -585,7 +644,10 @@ echo "... this takes 3 - 4 minutes (logs at logs/devstack-gate-cleanup-host.txt.
$ANSIBLE all -f 5 -i "$WORKSPACE/inventory" -m shell \
-a "$(run_command cleanup_host)" &> "$WORKSPACE/devstack-gate-cleanup-host.txt"
$ANSIBLE subnodes -f 5 -i "$WORKSPACE/inventory" -m synchronize \
- -a "mode=pull src='$BASE/logs/' dest='$BASE/logs/subnode-{{ host_counter }}'"
+ -a "mode=pull src='$BASE/logs/' dest='$BASE/logs/subnode-{{ host_counter }}' copy_links=yes"
sudo mv $WORKSPACE/devstack-gate-cleanup-host.txt $BASE/logs/
+# Generate ARA report
+/tmp/ansible/bin/ara generate html $BASE/logs/ara
+
exit $RETVAL
diff --git a/devstack-vm-gate.sh b/devstack-vm-gate.sh
index 068a2fe4..2245b83d 100755
--- a/devstack-vm-gate.sh
+++ b/devstack-vm-gate.sh
@@ -20,6 +20,7 @@
# limitations under the License.
set -o errexit
+set -o xtrace
# Keep track of the devstack directory
TOP_DIR=$(cd $(dirname "$0") && pwd)
@@ -29,11 +30,26 @@ TOP_DIR=$(cd $(dirname "$0") && pwd)
# Import common functions
source $TOP_DIR/functions.sh
+# Get access to iniset and friends
+
+# NOTE(sdague): as soon as we put
+# iniget into dsconf, we can remove this.
+source $BASE/new/devstack/inc/ini-config
+
+# redefine localrc_set to use dsconf
+function localrc_set {
+ local lcfile=$1
+ local key=$2
+ local value=$3
+ $DSCONF setlc "$1" "$2" "$3"
+}
echo $PPID > $WORKSPACE/gate.pid
source `dirname "$(readlink -f "$0")"`/functions.sh
+# Need to set FIXED_RANGE for pre-ocata devstack
FIXED_RANGE=${DEVSTACK_GATE_FIXED_RANGE:-10.1.0.0/20}
+IPV4_ADDRS_SAFE_TO_USE=${DEVSTACK_GATE_IPV4_ADDRS_SAFE_TO_USE:-${DEVSTACK_GATE_FIXED_RANGE:-10.1.0.0/20}}
FLOATING_RANGE=${DEVSTACK_GATE_FLOATING_RANGE:-172.24.5.0/24}
PUBLIC_NETWORK_GATEWAY=${DEVSTACK_GATE_PUBLIC_NETWORK_GATEWAY:-172.24.5.1}
# The next two values are used in multinode testing and are related
@@ -50,16 +66,26 @@ PUBLIC_NETWORK_GATEWAY=${DEVSTACK_GATE_PUBLIC_NETWORK_GATEWAY:-172.24.5.1}
FLOATING_HOST_PREFIX=${DEVSTACK_GATE_FLOATING_HOST_PREFIX:-172.24.4}
FLOATING_HOST_MASK=${DEVSTACK_GATE_FLOATING_HOST_MASK:-23}
-EXTERNAL_BRIDGE_MTU=1450
+# Get the smallest local MTU
+LOCAL_MTU=$(ip link show | sed -ne 's/.*mtu \([0-9]\+\).*/\1/p' | sort -n | head -1)
+# 50 bytes is the overhead for vxlan (which is greater than GRE's),
+# allowing us to use either overlay option with this MTU.
+EXTERNAL_BRIDGE_MTU=$((LOCAL_MTU - 50))
function setup_ssh {
+ # Copy the SSH key from /etc/nodepool/id_rsa{.pub} to the specified
+ # directory on 'all' the nodes. 'all' the nodes consists of the primary
+ # node and all of the subnodes.
local path=$1
+ local dest_file=${2:-id_rsa}
$ANSIBLE all --sudo -f 5 -i "$WORKSPACE/inventory" -m file \
-a "path='$path' mode=0700 state=directory"
$ANSIBLE all --sudo -f 5 -i "$WORKSPACE/inventory" -m copy \
-a "src=/etc/nodepool/id_rsa.pub dest='$path/authorized_keys' mode=0600"
$ANSIBLE all --sudo -f 5 -i "$WORKSPACE/inventory" -m copy \
- -a "src=/etc/nodepool/id_rsa dest='$path/id_rsa' mode=0400"
+ -a "src=/etc/nodepool/id_rsa.pub dest='$path/${dest_file}.pub' mode=0600"
+ $ANSIBLE all --sudo -f 5 -i "$WORKSPACE/inventory" -m copy \
+ -a "src=/etc/nodepool/id_rsa dest='$path/${dest_file}' mode=0400"
}
function setup_nova_net_networking {
@@ -76,10 +102,8 @@ function setup_nova_net_networking {
$sub_nodes
ovs_vxlan_bridge "br_flat" $primary_node "False" 128 \
$sub_nodes
- cat <>"$localrc"
-FLAT_INTERFACE=br_flat
-PUBLIC_INTERFACE=br_pub
-EOF
+ localrc_set $localrc "FLAT_INTERFACE" "br_flat"
+ localrc_set $localrc "PUBLIC_INTERFACE" "br_pub"
}
function setup_multinode_connectivity {
@@ -93,41 +117,65 @@ function setup_multinode_connectivity {
# ``old_or_new`` - should the subnodes be computed on the old side
# or new side. For grenade where we don't upgrade them, calculate
# on the old side.
- local localrc=$BASE/new/devstack/localrc
local old_or_new="new"
+ local localconf
+ local devstack_dir
if [[ "$mode" == "grenade" ]]; then
- localrc=$BASE/new/grenade/devstack.localrc
+ localconf=$BASE/new/grenade/devstack.localrc
old_or_new="old"
+ devstack_dir=$BASE/$old_or_new/devstack
+ else
+ devstack_dir=$BASE/$old_or_new/devstack
+ localconf=$devstack_dir/local.conf
fi
# set explicit paths on all conf files we're writing so that
# current working directory doesn't introduce subtle bugs.
- local devstack_dir=$BASE/$old_or_new/devstack
- local sub_localrc=$devstack_dir/sub_localrc
- local localconf=$devstack_dir/local.conf
+ local sub_localconf=$devstack_dir/sub_local.conf
set -x # for now enabling debug and do not turn it off
- setup_localrc $old_or_new "$sub_localrc" "sub"
+ setup_localrc $old_or_new "$sub_localconf" "sub"
local primary_node
primary_node=$(cat /etc/nodepool/primary_node_private)
local sub_nodes
sub_nodes=$(cat /etc/nodepool/sub_nodes_private)
if [[ "$DEVSTACK_GATE_NEUTRON" -ne '1' ]]; then
- setup_nova_net_networking $localrc $primary_node $sub_nodes
- cat <>"$sub_localrc"
-FLAT_INTERFACE=br_flat
-PUBLIC_INTERFACE=br_pub
-MULTI_HOST=True
-EOF
- cat <>"$localrc"
-MULTI_HOST=True
-EOF
+ setup_nova_net_networking $localconf $primary_node $sub_nodes
+ localrc_set $sub_localconf "FLAT_INTERFACE" "br_flat"
+ localrc_set $sub_localconf "PUBLIC_INTERFACE" "br_pub"
+ localrc_set $sub_localconf "MULTI_HOST" "True"
+ # and on the master
+ localrc_set $localconf "MULTI_HOST" "True"
elif [[ "$DEVSTACK_GATE_NEUTRON_DVR" -eq '1' ]]; then
ovs_vxlan_bridge "br-ex" $primary_node "True" 1 \
$FLOATING_HOST_PREFIX $FLOATING_HOST_MASK \
$sub_nodes
fi
+ if [[ "$DEVSTACK_GATE_IRONIC" -eq '1' ]]; then
+ # NOTE(vsaienko) Ironic VMs will be connected to this bridge
+ # in order to have access to VMs on another nodes.
+ ovs_vxlan_bridge "br_ironic_vxlan" $primary_node "False" 128 \
+ $sub_nodes
+
+ localrc_set "$sub_localconf" "HOST_TOPOLOGY" "multinode"
+ localrc_set "$sub_localconf" "HOST_TOPOLOGY_ROLE" "subnode"
+ # NOTE(vsaienko) we assume for now that we using only 1 subnode,
+ # each subnode should have different switch name (bridge) as it is used
+ # by networking-generic-switch to uniquely identify switch.
+ localrc_set "$sub_localconf" "IRONIC_VM_NETWORK_BRIDGE" "sub1brbm"
+ localrc_set "$sub_localconf" "OVS_PHYSICAL_BRIDGE" "sub1brbm"
+ localrc_set "$sub_localconf" "ENABLE_TENANT_TUNNELS" "False"
+ localrc_set "$sub_localconf" "IRONIC_KEY_FILE" "$BASE/new/.ssh/ironic_key"
+
+ localrc_set "$localconf" "HOST_TOPOLOGY" "multinode"
+ localrc_set "$localconf" "HOST_TOPOLOGY_ROLE" "primary"
+ localrc_set "$localconf" "HOST_TOPOLOGY_SUBNODES" "$sub_nodes"
+ localrc_set "$localconf" "IRONIC_KEY_FILE" "$BASE/new/.ssh/ironic_key"
+ localrc_set "$localconf" "GENERIC_SWITCH_KEY_FILE" "$BASE/new/.ssh/ironic_key"
+ localrc_set "$localconf" "ENABLE_TENANT_TUNNELS" "False"
+ fi
+
echo "Preparing cross node connectivity"
setup_ssh $BASE/new/.ssh
setup_ssh ~root/.ssh
@@ -152,11 +200,20 @@ EOF
for NODE in $sub_nodes; do
remote_copy_file /tmp/tmp_hosts $NODE:/tmp/tmp_hosts
remote_command $NODE "cat /tmp/tmp_hosts | sudo tee --append /etc/hosts > /dev/null"
- cp $sub_localrc /tmp/tmp_sub_localrc
- echo "HOST_IP=$NODE" >> /tmp/tmp_sub_localrc
- remote_copy_file /tmp/tmp_sub_localrc $NODE:$devstack_dir/localrc
- remote_copy_file $localconf $NODE:$localconf
+ cp $sub_localconf /tmp/tmp_sub_localconf
+ localrc_set /tmp/tmp_sub_localconf "HOST_IP" "$NODE"
+ remote_copy_file /tmp/tmp_sub_localconf $NODE:$devstack_dir/local.conf
done
+
+ # NOTE(vsaienko) we need to have ssh connection among nodes to manage
+ # VMs from ironic-conductor or setup networking from networking-generic-switch
+ if [[ "$DEVSTACK_GATE_IRONIC" -eq '1' ]]; then
+ echo "Copy ironic key among nodes"
+ # NOTE(vsaienko) setup_ssh() sets 700 on all parent directories when they don't
+ # exist. Keep ironic keys in a directory other than /opt/stack/data to avoid setting
+ # 700 on /opt/stack/data
+ setup_ssh $BASE/new/.ssh ironic_key
+ fi
}
function setup_networking {
@@ -165,16 +222,33 @@ function setup_networking {
# sauce to function.
if [[ "$DEVSTACK_GATE_TOPOLOGY" != "multinode" ]] && \
[[ "$DEVSTACK_GATE_NEUTRON" -ne '1' ]]; then
- local localrc=$BASE/new/devstack/localrc
if [[ "$mode" == "grenade" ]]; then
- localrc=$BASE/new/grenade/devstack.localrc
+ setup_nova_net_networking "$BASE/new/grenade/devstack.local.conf.base" "127.0.0.1"
+ setup_nova_net_networking "$BASE/new/grenade/devstack.local.conf.target" "127.0.0.1"
+ else
+ setup_nova_net_networking "$BASE/new/devstack/local.conf" "127.0.0.1"
fi
- setup_nova_net_networking "$localrc" "127.0.0.1"
elif [[ "$DEVSTACK_GATE_TOPOLOGY" == "multinode" ]]; then
setup_multinode_connectivity $mode
fi
}
+# Discovers compute nodes (subnodes) and maps them to cells.
+# NOTE(mriedem): We want to remove this if/when nova supports auto-registration
+# of computes with cells, but that's not happening in Ocata.
+function discover_hosts {
+ # We have to run this on the primary node AFTER the subnodes have been
+ # setup. Since discover_hosts is really only needed for Ocata, this checks
+ # to see if the script exists in the devstack installation first.
+ # NOTE(danms): This is ||'d with an assertion that the script does not exist,
+ # so that if we actually failed the script, we'll exit nonzero here instead
+ # of ignoring failures along with the case where there is no script.
+ # TODO(mriedem): Would be nice to do this with wrapped lines.
+ $ANSIBLE primary -f 5 -i "$WORKSPACE/inventory" -m shell \
+ -a "cd $BASE/new/devstack/ && (test -f tools/discover_hosts.sh && sudo -H -u stack DSTOOLS_VERSION=$DSTOOLS_VERSION stdbuf -oL -eL ./tools/discover_hosts.sh) || (! test -f tools/discover_hosts.sh)" \
+ &> "$WORKSPACE/logs/devstack-gate-discover-hosts.txt"
+}
+
function setup_localrc {
local localrc_oldnew=$1;
local localrc_file=$2
@@ -206,157 +280,147 @@ function setup_localrc {
sudo yum install -y PyYAML
fi
fi
- MY_ENABLED_SERVICES=`cd $BASE/new/devstack-gate && ./test-matrix.py -b $branch_for_matrix -f $DEVSTACK_GATE_FEATURE_MATRIX`
- local original_enabled_services=$MY_ENABLED_SERVICES
- # TODO(afazekas): Move to the feature grid
- # TODO(afazekas): add c-vol
+ local test_matrix_role='primary'
if [[ $role = sub ]]; then
- MY_ENABLED_SERVICES="n-cpu,ceilometer-acompute,dstat"
- if [[ "$DEVSTACK_GATE_NEUTRON" -eq "1" ]]; then
- MY_ENABLED_SERVICES+=",q-agt"
- if [[ "$DEVSTACK_GATE_NEUTRON_DVR" -eq "1" ]]; then
- # As per reference architecture described in
- # https://wiki.openstack.org/wiki/Neutron/DVR
- # for DVR multi-node, add the following services
- # on all compute nodes (q-fwaas being optional):
- MY_ENABLED_SERVICES+=",q-l3,q-fwaas,q-meta"
- fi
- else
- MY_ENABLED_SERVICES+=",n-net,n-api-meta"
- fi
+ test_matrix_role='subnode'
fi
+ MY_ENABLED_SERVICES=$(cd $BASE/new/devstack-gate && ./test-matrix.py -b $branch_for_matrix -f $DEVSTACK_GATE_FEATURE_MATRIX -r $test_matrix_role)
+ local original_enabled_services=$(cd $BASE/new/devstack-gate && ./test-matrix.py -b $branch_for_matrix -f $DEVSTACK_GATE_FEATURE_MATRIX -r primary)
+ echo "MY_ENABLED_SERVICES: ${MY_ENABLED_SERVICES}"
+ echo "original_enabled_services: ${original_enabled_services}"
+
# Allow optional injection of ENABLED_SERVICES from the calling context
if [[ ! -z $ENABLED_SERVICES ]] ; then
MY_ENABLED_SERVICES+=,$ENABLED_SERVICES
fi
fi
+ if [[ ! -z $DEVSTACK_GATE_USE_PYTHON3 ]] ; then
+ localrc_set $localrc_file "USE_PYTHON3" "$DEVSTACK_GATE_USE_PYTHON3"
+ fi
+
if [[ "$DEVSTACK_GATE_CEPH" == "1" ]]; then
- echo "CINDER_ENABLED_BACKENDS=ceph:ceph" >>"$localrc_file"
- echo "TEMPEST_STORAGE_PROTOCOL=ceph" >>"$localrc_file"
- echo "CEPH_LOOPBACK_DISK_SIZE=8G" >>"$localrc_file"
+ localrc_set $localrc_file "CINDER_ENABLED_BACKENDS" "ceph:ceph"
+ localrc_set $localrc_file "TEMPEST_STORAGE_PROTOCOL" "ceph"
fi
# the exercises we *don't* want to test on for devstack
SKIP_EXERCISES=boot_from_volume,bundle,client-env,euca
if [[ "$DEVSTACK_GATE_NEUTRON" -eq "1" ]]; then
- echo "Q_USE_DEBUG_COMMAND=True" >>"$localrc_file"
- echo "NETWORK_GATEWAY=10.1.0.1" >>"$localrc_file"
+ localrc_set $localrc_file "Q_USE_DEBUG_COMMAND" "True"
+ localrc_set $localrc_file "NETWORK_GATEWAY" "10.1.0.1"
fi
if [[ "$DEVSTACK_GATE_NEUTRON_DVR" -eq "1" ]]; then
if [[ "$DEVSTACK_GATE_TOPOLOGY" != "aio" ]] && [[ $role = sub ]]; then
# The role for L3 agents running on compute nodes is 'dvr'
- echo "Q_DVR_MODE=dvr" >>"$localrc_file"
+ localrc_set $localrc_file "Q_DVR_MODE" "dvr"
else
# The role for L3 agents running on controller nodes is 'dvr_snat'
- echo "Q_DVR_MODE=dvr_snat" >>"$localrc_file"
+ localrc_set $localrc_file "Q_DVR_MODE" "dvr_snat"
fi
fi
- cat <>"$localrc_file"
-USE_SCREEN=False
-DEST=$BASE/$localrc_oldnew
-# move DATA_DIR outside of DEST to keep DEST a bit cleaner
-DATA_DIR=$BASE/data
-ACTIVE_TIMEOUT=90
-BOOT_TIMEOUT=90
-ASSOCIATE_TIMEOUT=60
-TERMINATE_TIMEOUT=60
-MYSQL_PASSWORD=secretmysql
-DATABASE_PASSWORD=secretdatabase
-RABBIT_PASSWORD=secretrabbit
-ADMIN_PASSWORD=secretadmin
-SERVICE_PASSWORD=secretservice
-SERVICE_TOKEN=111222333444
-SWIFT_HASH=1234123412341234
-ROOTSLEEP=0
-# ERROR_ON_CLONE should never be set to FALSE in gate jobs.
-# Setting up git trees must be done by zuul
-# because it needs specific git references directly from gerrit
-# to correctly do testing. Otherwise you are not testing
-# the code you have posted for review.
-ERROR_ON_CLONE=True
-ENABLED_SERVICES=$MY_ENABLED_SERVICES
-SKIP_EXERCISES=$SKIP_EXERCISES
-SERVICE_HOST=127.0.0.1
-# Screen console logs will capture service logs.
-SYSLOG=False
-SCREEN_LOGDIR=$BASE/$localrc_oldnew/screen-logs
-LOGFILE=$BASE/$localrc_oldnew/devstacklog.txt
-VERBOSE=True
-FIXED_RANGE=$FIXED_RANGE
-FLOATING_RANGE=$FLOATING_RANGE
-PUBLIC_NETWORK_GATEWAY=$PUBLIC_NETWORK_GATEWAY
-FIXED_NETWORK_SIZE=4096
-VIRT_DRIVER=$DEVSTACK_GATE_VIRT_DRIVER
-SWIFT_REPLICAS=1
-LOG_COLOR=False
-# Don't reset the requirements.txt files after g-r updates
-UNDO_REQUIREMENTS=False
-CINDER_PERIODIC_INTERVAL=10
-export OS_NO_CACHE=True
-CEILOMETER_BACKEND=$DEVSTACK_GATE_CEILOMETER_BACKEND
-LIBS_FROM_GIT=$DEVSTACK_PROJECT_FROM_GIT
-DATABASE_QUERY_LOGGING=True
-# set this until all testing platforms have libvirt >= 1.2.11
-# see bug #1501558
-EBTABLES_RACE_FIX=True
-EOF
+ localrc_set "$localrc_file" "USE_SCREEN" "False"
+ localrc_set "$localrc_file" "DEST" "$BASE/$localrc_oldnew"
+ # move DATA_DIR outside of DEST to keep DEST a bit cleaner
+ localrc_set "$localrc_file" "DATA_DIR" "$BASE/data"
+ localrc_set "$localrc_file" "ACTIVE_TIMEOUT" "90"
+ localrc_set "$localrc_file" "BOOT_TIMEOUT" "90"
+ localrc_set "$localrc_file" "ASSOCIATE_TIMEOUT" "60"
+ localrc_set "$localrc_file" "TERMINATE_TIMEOUT" "60"
+ localrc_set "$localrc_file" "MYSQL_PASSWORD" "secretmysql"
+ localrc_set "$localrc_file" "DATABASE_PASSWORD" "secretdatabase"
+ localrc_set "$localrc_file" "RABBIT_PASSWORD" "secretrabbit"
+ localrc_set "$localrc_file" "ADMIN_PASSWORD" "secretadmin"
+ localrc_set "$localrc_file" "SERVICE_PASSWORD" "secretservice"
+ localrc_set "$localrc_file" "SERVICE_TOKEN" "111222333444"
+ localrc_set "$localrc_file" "SWIFT_HASH" "1234123412341234"
+ localrc_set "$localrc_file" "ROOTSLEEP" "0"
+ # ERROR_ON_CLONE should never be set to FALSE in gate jobs.
+ # Setting up git trees must be done by zuul
+ # because it needs specific git references directly from gerrit
+ # to correctly do testing. Otherwise you are not testing
+ # the code you have posted for review.
+ localrc_set "$localrc_file" "ERROR_ON_CLONE" "True"
+ # Since git clone can't be used for novnc in gates, force it to install the packages
+ localrc_set "$localrc_file" "NOVNC_FROM_PACKAGE" "True"
+ localrc_set "$localrc_file" "ENABLED_SERVICES" "$MY_ENABLED_SERVICES"
+ localrc_set "$localrc_file" "SKIP_EXERCISES" "$SKIP_EXERCISES"
+ # Screen console logs will capture service logs.
+ localrc_set "$localrc_file" "SYSLOG" "False"
+ localrc_set "$localrc_file" "SCREEN_LOGDIR" "$BASE/$localrc_oldnew/screen-logs"
+ localrc_set "$localrc_file" "LOGFILE" "$BASE/$localrc_oldnew/devstacklog.txt"
+ localrc_set "$localrc_file" "VERBOSE" "True"
+ localrc_set "$localrc_file" "FIXED_RANGE" "$FIXED_RANGE"
+ localrc_set "$localrc_file" "IPV4_ADDRS_SAFE_TO_USE" "$IPV4_ADDRS_SAFE_TO_USE"
+ localrc_set "$localrc_file" "FLOATING_RANGE" "$FLOATING_RANGE"
+ localrc_set "$localrc_file" "PUBLIC_NETWORK_GATEWAY" "$PUBLIC_NETWORK_GATEWAY"
+ localrc_set "$localrc_file" "FIXED_NETWORK_SIZE" "4096"
+ localrc_set "$localrc_file" "VIRT_DRIVER" "$DEVSTACK_GATE_VIRT_DRIVER"
+ localrc_set "$localrc_file" "SWIFT_REPLICAS" "1"
+ localrc_set "$localrc_file" "LOG_COLOR" "False"
+ # Don't reset the requirements.txt files after g-r updates
+ localrc_set "$localrc_file" "UNDO_REQUIREMENTS" "False"
+ localrc_set "$localrc_file" "CINDER_PERIODIC_INTERVAL" "10"
+ localrc_set "$localrc_file" "export OS_NO_CACHE" "True"
+ localrc_set "$localrc_file" "CEILOMETER_BACKEND" "$DEVSTACK_GATE_CEILOMETER_BACKEND"
+ localrc_set "$localrc_file" "LIBS_FROM_GIT" "$DEVSTACK_PROJECT_FROM_GIT"
+ # set this until all testing platforms have libvirt >= 1.2.11
+ # see bug #1501558
+ localrc_set "$localrc_file" "EBTABLES_RACE_FIX" "True"
+
+ if [[ "$DEVSTACK_GATE_TOPOLOGY" == "multinode" ]] && [[ $DEVSTACK_GATE_NEUTRON -eq "1" ]]; then
+ # Reduce the MTU on br-ex to match the MTU of underlying tunnels
+ localrc_set "$localrc_file" "PUBLIC_BRIDGE_MTU" "$EXTERNAL_BRIDGE_MTU"
+ fi
if [[ "$DEVSTACK_CINDER_SECURE_DELETE" -eq "0" ]]; then
- echo "CINDER_SECURE_DELETE=False" >>"$localrc_file"
+ localrc_set "$localrc_file" "CINDER_SECURE_DELETE" "False"
fi
+ localrc_set "$localrc_file" "CINDER_VOLUME_CLEAR" "${DEVSTACK_CINDER_VOLUME_CLEAR}"
if [[ "$DEVSTACK_GATE_TEMPEST_HEAT_SLOW" -eq "1" ]]; then
- echo "HEAT_CREATE_TEST_IMAGE=False" >>"$localrc_file"
+ localrc_set "$localrc_file" "HEAT_CREATE_TEST_IMAGE" "False"
# Use Fedora 20 for heat test image, it has heat-cfntools pre-installed
- echo "HEAT_FETCHED_TEST_IMAGE=Fedora-i386-20-20131211.1-sda" >>"$localrc_file"
+ localrc_set "$localrc_file" "HEAT_FETCHED_TEST_IMAGE" "Fedora-i386-20-20131211.1-sda"
fi
if [[ "$DEVSTACK_GATE_VIRT_DRIVER" == "libvirt" ]]; then
if [[ -n "$DEVSTACK_GATE_LIBVIRT_TYPE" ]]; then
- echo "LIBVIRT_TYPE=${DEVSTACK_GATE_LIBVIRT_TYPE}" >>localrc
+ localrc_set "$localrc_file" "LIBVIRT_TYPE" "${DEVSTACK_GATE_LIBVIRT_TYPE}"
fi
fi
- if [[ "$DEVSTACK_GATE_VIRT_DRIVER" == "openvz" ]]; then
- echo "SKIP_EXERCISES=${SKIP_EXERCISES},volumes" >>"$localrc_file"
- echo "DEFAULT_INSTANCE_TYPE=m1.small" >>"$localrc_file"
- echo "DEFAULT_INSTANCE_USER=root" >>"$localrc_file"
- echo "DEFAULT_INSTANCE_TYPE=m1.small" >>exerciserc
- echo "DEFAULT_INSTANCE_USER=root" >>exerciserc
- fi
-
if [[ "$DEVSTACK_GATE_VIRT_DRIVER" == "ironic" ]]; then
- export TEMPEST_OS_TEST_TIMEOUT=900
- echo "VIRT_DRIVER=ironic" >>"$localrc_file"
- echo "IRONIC_BAREMETAL_BASIC_OPS=True" >>"$localrc_file"
- echo "IRONIC_VM_LOG_DIR=$BASE/$localrc_oldnew/ironic-bm-logs" >>"$localrc_file"
- echo "DEFAULT_INSTANCE_TYPE=baremetal" >>"$localrc_file"
- echo "BUILD_TIMEOUT=600" >>"$localrc_file"
- echo "IRONIC_CALLBACK_TIMEOUT=600" >>"$localrc_file"
- echo "Q_AGENT=openvswitch" >>"$localrc_file"
- echo "Q_ML2_TENANT_NETWORK_TYPE=vxlan" >>"$localrc_file"
+ export TEMPEST_OS_TEST_TIMEOUT=${DEVSTACK_GATE_OS_TEST_TIMEOUT:-1200}
+ localrc_set "$localrc_file" "IRONIC_DEPLOY_DRIVER" "$DEVSTACK_GATE_IRONIC_DRIVER"
+ localrc_set "$localrc_file" "IRONIC_BAREMETAL_BASIC_OPS" "True"
+ localrc_set "$localrc_file" "IRONIC_VM_LOG_DIR" "$BASE/$localrc_oldnew/ironic-bm-logs"
+ localrc_set "$localrc_file" "DEFAULT_INSTANCE_TYPE" "baremetal"
+ localrc_set "$localrc_file" "BUILD_TIMEOUT" "${DEVSTACK_GATE_TEMPEST_BAREMETAL_BUILD_TIMEOUT:-600}"
+ localrc_set "$localrc_file" "IRONIC_CALLBACK_TIMEOUT" "600"
+ localrc_set "$localrc_file" "Q_AGENT" "openvswitch"
+ localrc_set "$localrc_file" "Q_ML2_TENANT_NETWORK_TYPE" "vxlan"
if [[ "$DEVSTACK_GATE_IRONIC_BUILD_RAMDISK" -eq 0 ]]; then
- echo "IRONIC_BUILD_DEPLOY_RAMDISK=False" >>"$localrc_file"
+ localrc_set "$localrc_file" "IRONIC_BUILD_DEPLOY_RAMDISK" "False"
+ else
+ localrc_set "$localrc_file" "IRONIC_BUILD_DEPLOY_RAMDISK" "True"
fi
- if [[ "$DEVSTACK_GATE_IRONIC_DRIVER" == "agent_ssh" ]]; then
- echo "SWIFT_ENABLE_TEMPURLS=True" >>"$localrc_file"
- echo "SWIFT_TEMPURL_KEY=secretkey" >>"$localrc_file"
- echo "IRONIC_ENABLED_DRIVERS=fake,agent_ssh,agent_ipmitool" >>"$localrc_file"
- echo "IRONIC_DEPLOY_DRIVER=agent_ssh" >>"$localrc_file"
+ if [[ -z "${DEVSTACK_GATE_IRONIC_DRIVER%%agent*}" ]]; then
+ localrc_set "$localrc_file" "SWIFT_ENABLE_TEMPURLS" "True"
+ localrc_set "$localrc_file" "SWIFT_TEMPURL_KEY" "secretkey"
+ localrc_set "$localrc_file" "IRONIC_ENABLED_DRIVERS" "fake,agent_ssh,agent_ipmitool"
# agent driver doesn't support ephemeral volumes yet
- echo "IRONIC_VM_EPHEMERAL_DISK=0" >>"$localrc_file"
+ localrc_set "$localrc_file" "IRONIC_VM_EPHEMERAL_DISK" "0"
# agent CoreOS ramdisk is a little heavy
- echo "IRONIC_VM_SPECS_RAM=1024" >>"$localrc_file"
- echo "IRONIC_VM_COUNT=1" >>"$localrc_file"
+ localrc_set "$localrc_file" "IRONIC_VM_SPECS_RAM" "1024"
else
- echo "IRONIC_VM_EPHEMERAL_DISK=1" >>"$localrc_file"
- echo "IRONIC_VM_COUNT=3" >>"$localrc_file"
+ localrc_set "$localrc_file" "IRONIC_ENABLED_DRIVERS" "fake,pxe_ssh,pxe_ipmitool"
+ localrc_set "$localrc_file" "IRONIC_VM_EPHEMERAL_DISK" "1"
fi
fi
@@ -365,44 +429,42 @@ EOF
echo "XenAPI must have DEVSTACK_GATE_XENAPI_DOM0_IP, DEVSTACK_GATE_XENAPI_DOMU_IP and DEVSTACK_GATE_XENAPI_PASSWORD all set"
exit 1
fi
- cat >> "$localrc_file" << EOF
-SKIP_EXERCISES=${SKIP_EXERCISES},volumes
-XENAPI_PASSWORD=${DEVSTACK_GATE_XENAPI_PASSWORD}
-XENAPI_CONNECTION_URL=http://${DEVSTACK_GATE_XENAPI_DOM0_IP}
-VNCSERVER_PROXYCLIENT_ADDRESS=${DEVSTACK_GATE_XENAPI_DOM0_IP}
-VIRT_DRIVER=xenserver
-
-# A separate xapi network is created with this name-label
-FLAT_NETWORK_BRIDGE=vmnet
-
-# A separate xapi network on eth4 serves the purpose of the public network.
-# This interface is added in Citrix's XenServer environment as an internal
-# interface
-PUBLIC_INTERFACE=eth4
-
-# The xapi network "vmnet" is connected to eth3 in domU
-# We need to explicitly specify these, as the devstack/xenserver driver
-# sets GUEST_INTERFACE_DEFAULT
-VLAN_INTERFACE=eth3
-FLAT_INTERFACE=eth3
-
-# Explicitly set HOST_IP, so that it will be passed down to xapi,
-# thus it will be able to reach glance
-HOST_IP=${DEVSTACK_GATE_XENAPI_DOMU_IP}
-SERVICE_HOST=${DEVSTACK_GATE_XENAPI_DOMU_IP}
-
-# Disable firewall
-XEN_FIREWALL_DRIVER=nova.virt.firewall.NoopFirewallDriver
-
-# Disable agent
-EXTRA_OPTS=("xenapi_disable_agent=True")
-
-# Add a separate device for volumes
-VOLUME_BACKING_DEVICE=/dev/xvdb
-
-# Set multi-host config
-MULTI_HOST=1
-EOF
+ localrc_set "$localrc_file" "SKIP_EXERCISES" "${SKIP_EXERCISES},volumes"
+ localrc_set "$localrc_file" "XENAPI_PASSWORD" "${DEVSTACK_GATE_XENAPI_PASSWORD}"
+ localrc_set "$localrc_file" "XENAPI_CONNECTION_URL" "http://${DEVSTACK_GATE_XENAPI_DOM0_IP}"
+ localrc_set "$localrc_file" "VNCSERVER_PROXYCLIENT_ADDRESS" "${DEVSTACK_GATE_XENAPI_DOM0_IP}"
+ localrc_set "$localrc_file" "VIRT_DRIVER" "xenserver"
+
+ # A separate xapi network is created with this name-label
+ localrc_set "$localrc_file" "FLAT_NETWORK_BRIDGE" "vmnet"
+
+ # A separate xapi network on eth4 serves the purpose of the public network.
+ # This interface is added in Citrix's XenServer environment as an internal
+ # interface
+ localrc_set "$localrc_file" "PUBLIC_INTERFACE" "eth4"
+
+ # The xapi network "vmnet" is connected to eth3 in domU
+ # We need to explicitly specify these, as the devstack/xenserver driver
+ # sets GUEST_INTERFACE_DEFAULT
+ localrc_set "$localrc_file" "VLAN_INTERFACE" "eth3"
+ localrc_set "$localrc_file" "FLAT_INTERFACE" "eth3"
+
+ # Explicitly set HOST_IP, so that it will be passed down to xapi,
+ # thus it will be able to reach glance
+ localrc_set "$localrc_file" "HOST_IP" "${DEVSTACK_GATE_XENAPI_DOMU_IP}"
+ localrc_set "$localrc_file" "SERVICE_HOST" "${DEVSTACK_GATE_XENAPI_DOMU_IP}"
+
+ # Disable firewall
+ localrc_set "$localrc_file" "XEN_FIREWALL_DRIVER" "nova.virt.firewall.NoopFirewallDriver"
+
+ # Disable agent
+ localrc_set "$localrc_file" "EXTRA_OPTS" "(\"xenapi_disable_agent=True\")"
+
+ # Add a separate device for volumes
+ localrc_set "$localrc_file" "VOLUME_BACKING_DEVICE" "/dev/xvdb"
+
+ # Set multi-host config
+ localrc_set "$localrc_file" "MULTI_HOST" "1"
fi
if [[ "$DEVSTACK_GATE_TEMPEST" -eq "1" ]]; then
@@ -412,117 +474,159 @@ EOF
#
# The 24G setting is expected to be enough even
# in parallel run.
- echo "VOLUME_BACKING_FILE_SIZE=24G" >> "$localrc_file"
+ localrc_set "$localrc_file" "VOLUME_BACKING_FILE_SIZE" "24G"
# in order to ensure glance http tests don't time out, we
# specify the TEMPEST_HTTP_IMAGE address that's in infra on a
# service we need to be up for anything to work anyway.
- echo "TEMPEST_HTTP_IMAGE=http://git.openstack.org/static/openstack.png" >> "$localrc_file"
+ localrc_set "$localrc_file" "TEMPEST_HTTP_IMAGE" "http://git.openstack.org/static/openstack.png"
fi
if [[ "$DEVSTACK_GATE_TEMPEST_DISABLE_TENANT_ISOLATION" -eq "1" ]]; then
- echo "TEMPEST_ALLOW_TENANT_ISOLATION=False" >>"$localrc_file"
+ localrc_set "$localrc_file" "TEMPEST_ALLOW_TENANT_ISOLATION" "False"
fi
if [[ -n "$DEVSTACK_GATE_GRENADE" ]]; then
if [[ "$localrc_oldnew" == "old" ]]; then
- echo "GRENADE_PHASE=base" >> "$localrc_file"
+ localrc_set "$localrc_file" "GRENADE_PHASE" "base"
else
- echo "GRENADE_PHASE=target" >> "$localrc_file"
+ localrc_set "$localrc_file" "GRENADE_PHASE" "target"
fi
- # services deployed with mod wsgi cannot be upgraded or migrated
- # until https://launchpad.net/bugs/1365105 is resolved.
- case $GRENADE_NEW_BRANCH in
- "stable/icehouse")
- ;&
- "stable/juno")
- echo "KEYSTONE_USE_MOD_WSGI=False" >> "$localrc_file"
- ;;
- "stable/kilo")
- # while both juno and kilo can run under wsgi, they
- # can't run a code only upgrade because the
- # configuration assumes copying python files around
- # during config stage. This might be addressed by
- # keystone team later, hence separate comment and code
- # block.
- echo "KEYSTONE_USE_MOD_WSGI=False" >> "$localrc_file"
- ;;
- esac
- echo "CEILOMETER_USE_MOD_WSGI=False" >> "$localrc_file"
+ localrc_set "$localrc_file" "CEILOMETER_USE_MOD_WSGI" "False"
fi
if [[ "$DEVSTACK_GATE_TEMPEST_LARGE_OPS" -eq "1" ]]; then
# NOTE(danms): Temporary transition to =NUM_RESOURCES
- echo "VIRT_DRIVER=fake" >> "$localrc_file"
- echo "TEMPEST_LARGE_OPS_NUMBER=50" >>"$localrc_file"
+ localrc_set "$localrc_file" "VIRT_DRIVER" "fake"
+ localrc_set "$localrc_file" "TEMPEST_LARGE_OPS_NUMBER" "50"
elif [[ "$DEVSTACK_GATE_TEMPEST_LARGE_OPS" -gt "1" ]]; then
# use fake virt driver and 10 copies of nova-compute
- echo "VIRT_DRIVER=fake" >> "$localrc_file"
+ localrc_set "$localrc_file" "VIRT_DRIVER" "fake"
# To make debugging easier, disabled until bug 1218575 is fixed.
# echo "NUMBER_FAKE_NOVA_COMPUTE=10" >>"$localrc_file"
- echo "TEMPEST_LARGE_OPS_NUMBER=$DEVSTACK_GATE_TEMPEST_LARGE_OPS" >>"$localrc_file"
+ localrc_set "$localrc_file" "TEMPEST_LARGE_OPS_NUMBER" "$DEVSTACK_GATE_TEMPEST_LARGE_OPS"
fi
if [[ "$DEVSTACK_GATE_CONFIGDRIVE" -eq "1" ]]; then
- echo "FORCE_CONFIG_DRIVE=True" >>"$localrc_file"
+ localrc_set "$localrc_file" "FORCE_CONFIG_DRIVE" "True"
else
- echo "FORCE_CONFIG_DRIVE=False" >>"$localrc_file"
+ localrc_set "$localrc_file" "FORCE_CONFIG_DRIVE" "False"
fi
if [[ "$CEILOMETER_NOTIFICATION_TOPICS" ]]; then
# Add specified ceilometer notification topics to localrc
# Set to notifications,profiler to enable profiling
- echo "CEILOMETER_NOTIFICATION_TOPICS=$CEILOMETER_NOTIFICATION_TOPICS" >>"$localrc_file"
+ localrc_set "$localrc_file" "CEILOMETER_NOTIFICATION_TOPICS" "$CEILOMETER_NOTIFICATION_TOPICS"
fi
if [[ "$DEVSTACK_GATE_INSTALL_TESTONLY" -eq "1" ]]; then
# Sometimes we do want the test packages
- echo "INSTALL_TESTONLY_PACKAGES=True" >> "$localrc_file"
+ localrc_set "$localrc_file" "INSTALL_TESTONLY_PACKAGES" "True"
fi
if [[ "$DEVSTACK_GATE_TOPOLOGY" != "aio" ]]; then
- echo "NOVA_ALLOW_MOVE_TO_SAME_HOST=False" >> "$localrc_file"
- echo "export LIVE_MIGRATION_AVAILABLE=True" >> "$localrc_file"
- echo "export USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION=True" >> "$localrc_file"
+ localrc_set "$localrc_file" "NOVA_ALLOW_MOVE_TO_SAME_HOST" "False"
+ localrc_set "$localrc_file" "LIVE_MIGRATION_AVAILABLE" "True"
+ localrc_set "$localrc_file" "USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION" "True"
local primary_node=`cat /etc/nodepool/primary_node_private`
- echo "SERVICE_HOST=$primary_node" >>"$localrc_file"
+ localrc_set "$localrc_file" "SERVICE_HOST" "$primary_node"
if [[ "$role" = sub ]]; then
if [[ $original_enabled_services =~ "qpid" ]]; then
- echo "QPID_HOST=$primary_node" >>"$localrc_file"
+ localrc_set "$localrc_file" "QPID_HOST" "$primary_node"
fi
if [[ $original_enabled_services =~ "rabbit" ]]; then
- echo "RABBIT_HOST=$primary_node" >>"$localrc_file"
+ localrc_set "$localrc_file" "RABBIT_HOST" "$primary_node"
fi
- echo "DATABASE_HOST=$primary_node" >>"$localrc_file"
+ localrc_set "$localrc_file" "DATABASE_HOST" "$primary_node"
if [[ $original_enabled_services =~ "mysql" ]]; then
- echo "DATABASE_TYPE=mysql" >>"$localrc_file"
+ localrc_set "$localrc_file" "DATABASE_TYPE" "mysql"
else
- echo "DATABASE_TYPE=postgresql" >>"$localrc_file"
+ localrc_set "$localrc_file" "DATABASE_TYPE" "postgresql"
fi
- echo "GLANCE_HOSTPORT=$primary_node:9292" >>"$localrc_file"
- echo "Q_HOST=$primary_node" >>"$localrc_file"
+ localrc_set "$localrc_file" "GLANCE_HOSTPORT" "$primary_node:9292"
+ localrc_set "$localrc_file" "Q_HOST" "$primary_node"
# Set HOST_IP in subnodes before copying localrc to each node
else
- echo "HOST_IP=$primary_node" >>"$localrc_file"
+ localrc_set "$localrc_file" "HOST_IP" "$primary_node"
fi
fi
+ # If you specify a section of a project-config job with
+ #
+ # local_conf:
+ # conf: |
+ # [[local|localrc]]
+ # foo=a
+ # [[post-config|$NEUTRON_CONF]]
+ # [DEFAULT]
+ # global_physnet_mtu = 1400
+ #
+ # Then that whole local.conf fragment will get carried through to
+ # this special file, and we'll merge those values into *all*
+ # local.conf files in the job. That includes subnodes, and new &
+ # old in grenade.
+ #
+ # NOTE(sdague): the name of this file should be considered
+ # internal only, and jobs should not write to it directly, they
+ # should only use the project-config stanza.
+ if [[ -e "/tmp/dg-local.conf" ]]; then
+ $DSCONF merge_lc "$localrc_file" "/tmp/dg-local.conf"
+ fi
+
# a way to pass through arbitrary devstack config options so that
# we don't need to add new devstack-gate options every time we
# want to create a new config.
- if [[ -n "$DEVSTACK_LOCAL_CONFIG" ]]; then
- echo "$DEVSTACK_LOCAL_CONFIG" >>"$localrc_file"
+ #
+ # NOTE(sdague): this assumes these are old school "localrc"
+ # sections, we should probably figure out a way to warn over using
+ # these.
+ if [[ "$role" = sub ]]; then
+ # If we are in a multinode environment, we may want to specify 2
+ # different sets of plugins
+ if [[ -n "$DEVSTACK_SUBNODE_CONFIG" ]]; then
+ $DSCONF setlc_raw "$localrc_file" "$DEVSTACK_SUBNODE_CONFIG"
+ else
+ if [[ -n "$DEVSTACK_LOCAL_CONFIG" ]]; then
+ $DSCONF setlc_raw "$localrc_file" "$DEVSTACK_LOCAL_CONFIG"
+ fi
+ fi
+ else
+ if [[ -n "$DEVSTACK_LOCAL_CONFIG" ]]; then
+ $DSCONF setlc_raw "$localrc_file" "$DEVSTACK_LOCAL_CONFIG"
+ fi
+ fi
+
+ # NOTE(sdague): new style local.conf declarations which need to
+ # merge late. Projects like neutron build up a lot of scenarios
+ # based on this, but they have to apply them late.
+ #
+ # TODO(sdague): subnode support.
+ if [[ -n "$DEVSTACK_LOCALCONF" ]]; then
+ local ds_conf_late="/tmp/ds-conf-late.conf"
+ echo "$DEVSTACK_LOCALCONF" > "$ds_conf_late"
+ $DSCONF merge_lc "$localrc_file" "$ds_conf_late"
fi
}
-if [[ -n "$DEVSTACK_GATE_GRENADE" ]]; then
- cd $BASE/old/devstack
- setup_localrc "old" "localrc" "primary"
+# This makes the stack user own the $BASE files and also changes the
+# permissions on the logs directory so we can write to the logs when running
+# devstack or grenade. This must be called AFTER setup_localrc.
+function setup_access_for_stack_user {
+ # Make the workspace owned by the stack user
+ # It is not clear if the ansible file module can do this for us
+ $ANSIBLE all --sudo -f 5 -i "$WORKSPACE/inventory" -m shell \
+ -a "chown -R stack:stack '$BASE'"
+ # allow us to add logs
+ $ANSIBLE all --sudo -f 5 -i "$WORKSPACE/inventory" -m shell \
+ -a "chmod 777 '$WORKSPACE/logs'"
+}
- cd $BASE/new/devstack
- setup_localrc "new" "localrc" "primary"
+if [[ -n "$DEVSTACK_GATE_GRENADE" ]]; then
+ cd $BASE/new/grenade
+ setup_localrc "old" "devstack.local.conf.base" "primary"
+ setup_localrc "new" "devstack.local.conf.target" "primary"
cat <$BASE/new/grenade/localrc
BASE_RELEASE=old
@@ -535,8 +639,8 @@ TARGET_DEVSTACK_DIR=\$TARGET_RELEASE_DIR/devstack
TARGET_DEVSTACK_BRANCH=$GRENADE_NEW_BRANCH
TARGET_RUN_SMOKE=False
SAVE_DIR=\$BASE_RELEASE_DIR/save
-DO_NOT_UPGRADE_SERVICES=$DO_NOT_UPGRADE_SERVICES
TEMPEST_CONCURRENCY=$TEMPEST_CONCURRENCY
+OS_TEST_TIMEOUT=$DEVSTACK_GATE_OS_TEST_TIMEOUT
VERBOSE=False
PLUGIN_DIR=\$TARGET_RELEASE_DIR
EOF
@@ -549,14 +653,14 @@ EOF
fi
if [[ "$DEVSTACK_GATE_TOPOLOGY" == "multinode" ]]; then
- echo -e "[[post-config|\$NOVA_CONF]]\n[libvirt]\ncpu_mode=custom\ncpu_model=gate64" >> local.conf
+ # ensure local.conf exists to remove conditional logic
if [[ $DEVSTACK_GATE_NEUTRON -eq "1" ]]; then
- echo -e "[[post-config|\$NEUTRON_CONF]]\n[DEFAULT]\nnetwork_device_mtu=$EXTERNAL_BRIDGE_MTU" >> local.conf
+ $DSCONF setlc_conf "devstack.local.conf.base" "post-config" "\$NEUTRON_CONF" \
+ "DEFAULT" "global_physnet_mtu" "$EXTERNAL_BRIDGE_MTU"
+ $DSCONF setlc_conf "devstack.local.conf.target" "post-config" "\$NEUTRON_CONF" \
+ "DEFAULT" "global_physnet_mtu" "$EXTERNAL_BRIDGE_MTU"
fi
- # get this in our base config
- cp local.conf $BASE/old/devstack
-
# build the post-stack.sh config, this will be run as stack user so no sudo required
cat > $BASE/new/grenade/post-stack.sh <> local.conf
if [[ $DEVSTACK_GATE_NEUTRON -eq "1" ]]; then
- echo -e "[[post-config|\$NEUTRON_CONF]]\n[DEFAULT]\nnetwork_device_mtu=$EXTERNAL_BRIDGE_MTU" >> local.conf
+ localconf_set "local.conf" "post-config" "\$NEUTRON_CONF" \
+ "DEFAULT" "global_physnet_mtu" "$EXTERNAL_BRIDGE_MTU"
fi
fi
setup_networking
- # Make the workspace owned by the stack user
- # It is not clear if the ansible file module can do this for us
- $ANSIBLE all --sudo -f 5 -i "$WORKSPACE/inventory" -m shell \
- -a "chown -R stack:stack '$BASE'"
- # allow us to add logs
- $ANSIBLE all --sudo -f 5 -i "$WORKSPACE/inventory" -m shell \
- -a "chmod 777 '$WORKSPACE/logs'"
+ setup_access_for_stack_user
echo "Running devstack"
echo "... this takes 10 - 15 minutes (logs in logs/devstacklog.txt.gz)"
start=$(date +%s)
$ANSIBLE primary -f 5 -i "$WORKSPACE/inventory" -m shell \
- -a "cd '$BASE/new/devstack' && sudo -H -u stack stdbuf -oL -eL ./stack.sh executable=/bin/bash" \
+ -a "cd '$BASE/new/devstack' && sudo -H -u stack DSTOOLS_VERSION=$DSTOOLS_VERSION stdbuf -oL -eL ./stack.sh executable=/bin/bash" \
&> "$WORKSPACE/logs/devstack-early.txt"
+ if [ -d "$BASE/data/CA" ] && [ -f "$BASE/data/ca-bundle.pem" ] ; then
+ # Sync any data files which include certificates to be used if
+ # TLS is enabled
+ $ANSIBLE subnodes -f 5 -i "$WORKSPACE/inventory" --sudo -m file \
+ -a "path='$BASE/data' state=directory owner=stack group=stack mode=0755"
+ $ANSIBLE subnodes -f 5 -i "$WORKSPACE/inventory" --sudo -m file \
+ -a "path='$BASE/data/CA' state=directory owner=stack group=stack mode=0755"
+ $ANSIBLE subnodes -f 5 -i "$WORKSPACE/inventory" \
+ --sudo -m synchronize \
+ -a "mode=push src='$BASE/data/ca-bundle.pem' dest='$BASE/data/ca-bundle.pem'"
+ sudo $ANSIBLE subnodes -f 5 -i "$WORKSPACE/inventory" \
+ --sudo -u $USER -m synchronize \
+ -a "mode=push src='$BASE/data/CA' dest='$BASE/data'"
+ fi
# Run non controller setup after controller is up. This is necessary
# because services like nova apparently expect to have the controller in
# place before anything else.
$ANSIBLE subnodes -f 5 -i "$WORKSPACE/inventory" -m shell \
- -a "cd '$BASE/new/devstack' && sudo -H -u stack stdbuf -oL -eL ./stack.sh executable=/bin/bash" \
+ -a "cd '$BASE/new/devstack' && sudo -H -u stack DSTOOLS_VERSION=$DSTOOLS_VERSION stdbuf -oL -eL ./stack.sh executable=/bin/bash" \
&> "$WORKSPACE/logs/devstack-subnodes-early.txt"
end=$(date +%s)
took=$((($end - $start) / 60))
@@ -620,6 +733,9 @@ else
echo "WARNING: devstack run took > 20 minutes, this is a very slow node."
fi
+ # Discover the hosts on a cells v2 deployment.
+ discover_hosts
+
# provide a check that the right db was running
# the path are different for fedora and red hat.
if [[ -f /usr/bin/yum ]]; then
@@ -640,19 +756,6 @@ else
exit 1
fi
fi
-
- if [[ "$DEVSTACK_GATE_TOPOLOGY" != "aio" ]] && [[ $DEVSTACK_GATE_NEUTRON -eq "1" ]]; then
- # NOTE(afazekas): The cirros lp#1301958 does not support MTU setting via dhcp,
- # simplest way the have tunneling working, with dvsm, without increasing the host system MTU
- # is to decreasion the MTU on br-ex
- # TODO(afazekas): Configure the mtu smarter on the devstack side
- MTU_NODES=primary
- if [[ "$DEVSTACK_GATE_NEUTRON_DVR" -eq "1" ]]; then
- MTU_NODES=all
- fi
- $ANSIBLE "$MTU_NODES" -f 5 -i "$WORKSPACE/inventory" -m shell \
- -a "sudo ip link set mtu $EXTERNAL_BRIDGE_MTU dev br-ex"
- fi
fi
if [[ "$DEVSTACK_GATE_UNSTACK" -eq "1" ]]; then
@@ -672,28 +775,7 @@ if [[ "$DEVSTACK_GATE_EXERCISES" -eq "1" ]]; then
-a "cd '$BASE/new/devstack' && sudo -H -u stack ./exercise.sh"
fi
-function load_subunit_stream {
- local stream=$1;
- pushd $BASE/new/tempest/
- sudo testr load --force-init $stream
- popd
-}
-
-
if [[ "$DEVSTACK_GATE_TEMPEST" -eq "1" ]]; then
- #TODO(mtreinish): This if block can be removed after all the nodepool images
- # are built using with streams dir instead
- echo "Loading previous tempest runs subunit streams into testr"
- if [[ -f /opt/git/openstack/tempest/.testrepository/0 ]]; then
- temp_stream=`mktemp`
- subunit-1to2 /opt/git/openstack/tempest/.testrepository/0 > $temp_stream
- load_subunit_stream $temp_stream
- elif [[ -d /opt/git/openstack/tempest/preseed-streams ]]; then
- for stream in /opt/git/openstack/tempest/preseed-streams/* ; do
- load_subunit_stream $stream
- done
- fi
-
# under tempest isolation tempest will need to write .tox dir, log files
if [[ -d "$BASE/new/tempest" ]]; then
sudo chown -R tempest:stack $BASE/new/tempest
@@ -708,14 +790,14 @@ if [[ "$DEVSTACK_GATE_TEMPEST" -eq "1" ]]; then
sudo chmod -R o+rx $BASE/new/devstack/files
fi
- # In the future we might want to increase the number of compute nodes.
+ # In the future we might want to increase the number of compute nodes.
# This will ensure that multinode jobs consist of 2 nodes.
# As a part of tempest configuration, it should be executed
# before the DEVSTACK_GATE_TEMPEST_NOTESTS check, because the DEVSTACK_GATE_TEMPEST
# guarantees that tempest should be configured, no matter should
# tests be executed or not.
if [[ "$DEVSTACK_GATE_TOPOLOGY" == "multinode" ]]; then
- iniset -sudo $BASE/new/tempest/etc/tempest.conf compute min_compute_nodes 2
+ sudo $DSCONF iniset $BASE/new/tempest/etc/tempest.conf compute min_compute_nodes 2
fi
# if set, we don't need to run Tempest at all
@@ -723,6 +805,16 @@ if [[ "$DEVSTACK_GATE_TEMPEST" -eq "1" ]]; then
exit 0
fi
+ # There are some parts of devstack that call the neutron api to verify the
+ # extension. We should not ever trust this for gate testing. This checks to
+ # ensure on master we always are using the default value. (on stable we hard
+ # code a list of available extensions so we can't use this)
+ neutron_extensions=$(iniget "$BASE/new/tempest/etc/tempest.conf" "neutron-feature-enabled" "api_extensions")
+ if [[ $GIT_BRANCH == 'master' && ($neutron_extensions == 'all' || $neutron_extensions == '') ]] ; then
+        echo "Devstack misconfigured tempest and changed the value of api_extensions"
+ exit 1
+ fi
+
# From here until the end we rely on the fact that all the code fails if
# something is wrong, to enforce exit on bad test results.
set -o errexit
@@ -736,14 +828,16 @@ if [[ "$DEVSTACK_GATE_TEMPEST" -eq "1" ]]; then
if [[ "$DEVSTACK_GATE_TEMPEST_REGEX" != "" ]] ; then
if [[ "$DEVSTACK_GATE_TEMPEST_ALL_PLUGINS" -eq "1" ]]; then
echo "Running tempest with plugins and a custom regex filter"
- $TEMPEST_COMMAND -eall-plugin -- --concurrency=$TEMPEST_CONCURRENCY $DEVSTACK_GATE_TEMPEST_REGEX
+ $TEMPEST_COMMAND -eall-plugin -- $DEVSTACK_GATE_TEMPEST_REGEX --concurrency=$TEMPEST_CONCURRENCY
+ sudo -H -u tempest .tox/all-plugin/bin/tempest list-plugins
else
echo "Running tempest with a custom regex filter"
- $TEMPEST_COMMAND -eall -- --concurrency=$TEMPEST_CONCURRENCY $DEVSTACK_GATE_TEMPEST_REGEX
+ $TEMPEST_COMMAND -eall -- $DEVSTACK_GATE_TEMPEST_REGEX --concurrency=$TEMPEST_CONCURRENCY
fi
elif [[ "$DEVSTACK_GATE_TEMPEST_ALL_PLUGINS" -eq "1" ]]; then
echo "Running tempest all-plugins test suite"
$TEMPEST_COMMAND -eall-plugin -- --concurrency=$TEMPEST_CONCURRENCY
+ sudo -H -u tempest .tox/all-plugin/bin/tempest list-plugins
elif [[ "$DEVSTACK_GATE_TEMPEST_ALL" -eq "1" ]]; then
echo "Running tempest all test suite"
$TEMPEST_COMMAND -eall -- --concurrency=$TEMPEST_CONCURRENCY
@@ -756,12 +850,6 @@ if [[ "$DEVSTACK_GATE_TEMPEST" -eq "1" ]]; then
elif [[ "$DEVSTACK_GATE_TEMPEST_STRESS" -eq "1" ]] ; then
echo "Running stress tests"
$TEMPEST_COMMAND -estress -- $DEVSTACK_GATE_TEMPEST_STRESS_ARGS
- elif [[ "$DEVSTACK_GATE_TEMPEST_HEAT_SLOW" -eq "1" ]] ; then
- echo "Running slow heat tests"
- $TEMPEST_COMMAND -eheat-slow -- --concurrency=$TEMPEST_CONCURRENCY
- elif [[ "$DEVSTACK_GATE_TEMPEST_LARGE_OPS" -ge "1" ]] ; then
- echo "Running large ops tests"
- $TEMPEST_COMMAND -elarge-ops -- --concurrency=$TEMPEST_CONCURRENCY
elif [[ "$DEVSTACK_GATE_SMOKE_SERIAL" -eq "1" ]] ; then
echo "Running tempest smoke tests"
$TEMPEST_COMMAND -esmoke-serial
diff --git a/features.yaml b/features.yaml
index 50b35f19..73eaf29a 100644
--- a/features.yaml
+++ b/features.yaml
@@ -1,10 +1,11 @@
config:
default:
- master: [default, ceilometer, glance, horizon, nova, swift, cinder, keystone]
+ master: [default, ceilometer, glance, horizon, nova, placement, swift, cinder, keystone]
+ ocata: [default, ceilometer, glance, horizon, nova, placement, swift, cinder, keystone]
+ newton: [default, ceilometer, glance, horizon, nova, swift, cinder, keystone]
+ mitaka: [default, ceilometer, glance, horizon, nova, swift, cinder, keystone]
liberty: [default, ceilometer, glance, horizon, nova, swift, cinder, keystone]
kilo: [default, ceilometer, glance, horizon, nova, swift, cinder, keystone]
- juno: [default, ceilometer, glance, horizon, nova, swift, cinder, keystone]
- icehouse: [default, ceilometer, glance, horizon, nova, swift, cinder, keystone]
# This can be used by functional jobs that only want their dependencies installed
# and don't need to incur the overhead of installing all services in the process.
no_services: [default]
@@ -15,7 +16,7 @@ config:
features: [postgresql]
# feature changes for different test matrixes
grenade:
- rm-features: [trove, sahara, neutron-adv]
+ rm-features: [trove, sahara, neutron-adv, horizon]
tempest:
features: [tempest]
# feature changes for different configs of existing services
@@ -43,17 +44,23 @@ config:
features: [ceph]
heat:
features: [heat]
+ tlsproxy:
+ features: [tlsproxy]
+ cinder_mn_grenade:
+ features: [cinder-mn-grenade]
+ neutron_dvr:
+ features: [neutron-dvr]
branches:
# The value of ""default" is the name of the "trunk" branch
default: master
- # Normalized branch names only here, e.g. stable/icehouse => icehouse
- allowed: [master, liberty, kilo, juno, icehouse]
+ # Normalized branch names only here, e.g. stable/ocata => ocata
+ allowed: [master, ocata, newton, mitaka, liberty, kilo]
-features:
+primary:
default:
base:
- services: [mysql, rabbit, dstat]
+ services: [mysql, rabbit, dstat, peakmem_tracker]
ceilometer:
base:
@@ -73,9 +80,7 @@ features:
nova:
base:
- services: [n-api, n-cond, n-cpu, n-crt, n-net, n-obj, n-sch]
- icehouse:
- compute-ext:
+ services: [n-api, n-cauth, n-cond, n-cpu, n-crt, n-net, n-novnc, n-obj, n-sch]
nova-md:
base:
@@ -86,18 +91,29 @@ features:
services: [n-cell]
rm-compute-ext: [agregates, hosts]
+ placement:
+ base:
+ services: [placement-api]
+
neutron:
base:
- services: [quantum, q-svc, q-agt, q-dhcp, q-l3, q-meta, q-metering]
+ services: [q-svc, q-agt, q-dhcp, q-l3, q-meta, q-metering]
rm-services: [n-net]
neutron-adv:
base:
- services: [q-lbaas, q-fwaas]
rm-services: [n-net]
+ mitaka:
+ services: [q-lbaas]
+ liberty:
+ services: [q-lbaas]
kilo:
services: [q-vpn]
+ neutron-dvr:
+ base:
+ services: []
+
swift:
base:
services: [s-proxy, s-account, s-container, s-object]
@@ -106,6 +122,12 @@ features:
base:
services: [cinder, c-api, c-vol, c-sch, c-bak]
+ # This will be used to disable c-vol on primary node when running multinode grenade
+ # job that will test compatibility of new c-api, c-sch (primary) and old c-vol and c-bak (sub).
+ cinder-mn-grenade:
+ base:
+ rm-services: [c-vol, c-bak]
+
heat:
base:
services: [heat, h-api, h-api-cfn, h-api-cw, h-eng]
@@ -154,3 +176,89 @@ features:
ceph:
base:
services: [ceph]
+
+ tlsproxy:
+ base:
+ services: [tls-proxy]
+ # TLS proxy didn't work properly until ocata
+ liberty:
+ rm-services: [tls-proxy]
+ mitaka:
+ rm-services: [tls-proxy]
+ newton:
+ rm-services: [tls-proxy]
+
+subnode:
+ default:
+ base:
+ services: [dstat, peakmem_tracker]
+
+ ceilometer:
+ base:
+ services: [ceilometer-acompute]
+
+ cinder:
+ base:
+ services: [c-vol, c-bak]
+
+ cinder-mn-grenade:
+ base:
+ services: []
+
+ glance:
+ base:
+ services: []
+
+ horizon:
+ base:
+ services: []
+
+ ironic:
+ base:
+ rm-services: [c-vol, c-bak]
+ services: [ir-api, ir-cond]
+
+ keystone:
+ base:
+ services: []
+
+ neutron:
+ base:
+ rm-services: [n-net, n-api-meta]
+ services: [q-agt]
+
+ neutron-adv:
+ base:
+ services: []
+
+ neutron-dvr:
+ base:
+ rm-services: [n-net, n-api-meta]
+ services: [q-agt, q-l3, q-meta]
+
+ nova:
+ base:
+ services: [n-cpu, n-net, n-api-meta]
+
+ placement:
+ base:
+ services: [placement-client]
+
+ swift:
+ base:
+ services: []
+
+ tempest:
+ base:
+ services: []
+
+ tlsproxy:
+ base:
+ services: [tls-proxy]
+ # TLS proxy didn't work properly until ocata
+ liberty:
+ rm-services: [tls-proxy]
+ mitaka:
+ rm-services: [tls-proxy]
+ newton:
+ rm-services: [tls-proxy]
diff --git a/functions.sh b/functions.sh
index 6bb9af06..77f20b2b 100644
--- a/functions.sh
+++ b/functions.sh
@@ -99,7 +99,7 @@ function tsfilter {
function _ping_check {
local host=$1
local times=${2:-20}
- echo "Testing ICMP connectivit to $host"
+ echo "Testing ICMP connectivity to $host"
ping -c $times $host
}
@@ -117,24 +117,6 @@ function _http_check {
done
}
-# do a few network tests to baseline how bad we are
-function network_sanity_check {
- echo "Performing network sanity check..."
- PIP_CONFIG_FILE=/etc/pip.conf
- if [[ -f $PIP_CONFIG_FILE ]]; then
- line=$(cat $PIP_CONFIG_FILE|grep --max-count 1 index-url)
- pypi_url=${line#*=}
- pypi_host=$(echo $pypi_url|grep -Po '.*?//\K.*?(?=/)')
-
- _ping_check $pypi_host
- _http_check $pypi_url
- fi
-
- # rax ubuntu mirror
- _ping_check mirror.rackspace.com
- _http_check http://mirror.rackspace.com/ubuntu/dists/trusty/Release.gpg
-}
-
# create the start timer for when the job began
function start_timer {
# first make sure the time is right, so we don't go into crazy land
@@ -161,7 +143,7 @@ function start_timer {
function remaining_time {
local now=`date +%s`
local elapsed=$(((now - START_TIME) / 60))
- REMAINING_TIME=$((DEVSTACK_GATE_TIMEOUT - elapsed - 5))
+ export REMAINING_TIME=$((DEVSTACK_GATE_TIMEOUT - elapsed - 5))
echo "Job timeout set to: $REMAINING_TIME minutes"
if [ ${REMAINING_TIME} -le 0 ]; then
echo "Already timed out."
@@ -171,14 +153,47 @@ function remaining_time {
# Create a script to reproduce this build
function reproduce {
+ local xtrace=$(set +o | grep xtrace)
+ set +o xtrace
+
+ JOB_PROJECTS=$1
cat > $WORKSPACE/logs/reproduce.sh <> $WORKSPACE/logs/reproduce.sh
+ # first get all keys that match our filter and then output the whole line
+ # that will ensure that multi-line env vars are set properly
+ for KEY in $(printenv -0 | grep -z -Z '\(DEVSTACK\|GRENADE_PLUGINRC\|ZUUL\)' | sed -z -n 's/^\([^=]\+\)=.*/\1\n/p'); do
+ echo "declare -x ${KEY}=\"${!KEY}\"" >> $WORKSPACE/logs/reproduce.sh
+ done
+ # If TEMPEST_CONCURRENCY has been explicitly set to 1, then save it in reproduce.sh
+ if [ "${TEMPEST_CONCURRENCY}" -eq 1 ]; then
+ echo "declare -x TEMPEST_CONCURRENCY=\"${TEMPEST_CONCURRENCY}\"" >> $WORKSPACE/logs/reproduce.sh
+ fi
+ if [ -n "$JOB_PROJECTS" ] ; then
+ echo "declare -x PROJECTS=\"$JOB_PROJECTS\"" >> $WORKSPACE/logs/reproduce.sh
+ fi
+ for fun in pre_test_hook gate_hook post_test_hook ; do
+ if function_exists $fun ; then
+ declare -fp $fun >> $WORKSPACE/logs/reproduce.sh
+ fi
+ done
cat >> $WORKSPACE/logs/reproduce.sh </dev/null; then
- echo "Need to add hostname to /etc/hosts"
- sudo bash -c 'echo "127.0.1.1 $HOSTNAME" >>/etc/hosts'
- fi
-
-}
-
function fix_disk_layout {
- # HPCloud and Rackspace performance nodes provide no swap, but do have
- # ephemeral disks we can use. For providers with no ephemeral disks, such
- # as OVH or Internap, create and use a sparse swapfile on the root
- # filesystem.
- # HPCloud also doesn't have enough space on / for two devstack installs,
+ # Don't attempt to fix disk layout more than once
+ [[ -e /etc/fixed_disk_layout ]] && return 0 || sudo touch /etc/fixed_disk_layout
+
+ # Ensure virtual machines from different providers all have at least 8GB of
+ # swap.
+ # Use an ephemeral disk if there is one or create and use a swapfile.
+ # Rackspace also doesn't have enough space on / for two devstack installs,
# so we partition the disk and mount it on /opt, syncing the previous
# contents of /opt over.
- if [ `grep SwapTotal /proc/meminfo | awk '{ print $2; }'` -eq 0 ]; then
+ SWAPSIZE=8192
+ swapcurrent=$(( $(grep SwapTotal /proc/meminfo | awk '{ print $2; }') / 1024 ))
+
+ if [[ $swapcurrent -lt $SWAPSIZE ]]; then
if [ -b /dev/xvde ]; then
DEV='/dev/xvde'
else
@@ -341,7 +350,7 @@ function fix_disk_layout {
sudo umount ${DEV}
fi
sudo parted ${DEV} --script -- mklabel msdos
- sudo parted ${DEV} --script -- mkpart primary linux-swap 1 8192
+ sudo parted ${DEV} --script -- mkpart primary linux-swap 1 ${SWAPSIZE}
sudo parted ${DEV} --script -- mkpart primary ext2 8192 -1
sudo mkswap ${DEV}1
sudo mkfs.ext4 ${DEV}2
@@ -350,15 +359,30 @@ function fix_disk_layout {
sudo find /opt/ -mindepth 1 -maxdepth 1 -exec mv {} /mnt/ \;
sudo umount /mnt
sudo mount ${DEV}2 /opt
+
+ # Sanity check
+ grep -q ${DEV}1 /proc/swaps || exit 1
+ grep -q ${DEV}2 /proc/mounts || exit 1
else
# If no ephemeral devices are available, use root filesystem
# Don't use sparse device to avoid wedging when disk space and
# memory are both unavailable.
local swapfile='/root/swapfile'
- sudo fallocate -l 8192M ${swapfile}
+ sudo touch ${swapfile}
+ swapdiff=$(( $SWAPSIZE - $swapcurrent ))
+
+ if sudo df -T ${swapfile} | grep -q ext ; then
+ sudo fallocate -l ${swapdiff}M ${swapfile}
+ else
+ # Cannot fallocate on filesystems like XFS
+ sudo dd if=/dev/zero of=${swapfile} bs=1M count=${swapdiff}
+ fi
sudo chmod 600 ${swapfile}
sudo mkswap ${swapfile}
sudo swapon ${swapfile}
+
+ # Sanity check
+ grep -q ${swapfile} /proc/swaps || exit 1
fi
fi
@@ -379,7 +403,7 @@ function fix_disk_layout {
sudo sed -i '/vm.swappiness/d' /etc/sysctl.conf
# This sets swappiness low; we really don't want to be relying on
# cloud I/O based swap during our runs
- sudo sysctl -w vm.swappiness=10
+ sudo sysctl -w vm.swappiness=30
}
# Set up a project in accordance with the future state proposed by
@@ -489,9 +513,6 @@ function setup_workspace {
sudo mkdir -p $DEST
sudo chown -R $USER:$USER $DEST
- #TODO(jeblair): remove when this is no longer created by the image
- rm -fr ~/workspace-cache/
-
# The vm template update job should cache the git repos
# Move them to where we expect:
echo "Using branch: $base_branch"
@@ -524,105 +545,6 @@ function setup_workspace {
$xtrace
}
-function copy_mirror_config {
- # The pydistutils.cfg file is added by Puppet. Some CIs may not rely on
- # Puppet to do the base node installation
- if [ -f ~/.pydistutils.cfg ]; then
- sudo install -D -m0644 -o root -g root ~/.pydistutils.cfg ~root/.pydistutils.cfg
-
- sudo install -D -m0644 -o stack -g stack ~/.pydistutils.cfg ~stack/.pydistutils.cfg
-
- sudo install -D -m0644 -o tempest -g tempest ~/.pydistutils.cfg ~tempest/.pydistutils.cfg
- fi
-}
-
-function setup_host {
- # Enabled detailed logging, since output of this function is redirected
- local xtrace=$(set +o | grep xtrace)
- set -o xtrace
-
- echo "What's our kernel?"
- uname -a
-
- # capture # of cpus
- echo "NProc has discovered $(nproc) CPUs"
- cat /proc/cpuinfo
-
- # This is necessary to keep sudo from complaining
- fix_etc_hosts
-
- # We set some home directories under $BASE, make sure it exists.
- sudo mkdir -p $BASE
-
- # Start with a fresh syslog
- if uses_debs; then
- sudo stop rsyslog
- sudo mv /var/log/syslog /var/log/syslog-pre-devstack
- sudo mv /var/log/kern.log /var/log/kern_log-pre-devstack
- sudo touch /var/log/syslog
- sudo chown /var/log/syslog --ref /var/log/syslog-pre-devstack
- sudo chmod /var/log/syslog --ref /var/log/syslog-pre-devstack
- sudo chmod a+r /var/log/syslog
- sudo touch /var/log/kern.log
- sudo chown /var/log/kern.log --ref /var/log/kern_log-pre-devstack
- sudo chmod /var/log/kern.log --ref /var/log/kern_log-pre-devstack
- sudo chmod a+r /var/log/kern.log
- sudo start rsyslog
- elif is_fedora; then
- # save timestamp and use journalctl to dump everything since
- # then at the end
- date +"%Y-%m-%d %H:%M:%S" | sudo tee $BASE/log-start-timestamp.txt
- fi
-
- # Create a stack user for devstack to run as, so that we can
- # revoke sudo permissions from that user when appropriate.
- sudo useradd -U -s /bin/bash -d $BASE/new -m stack
- # Use 755 mode on the user dir regarless to the /etc/login.defs setting
- sudo chmod 755 $BASE/new
- TEMPFILE=`mktemp`
- echo "stack ALL=(root) NOPASSWD:ALL" >$TEMPFILE
- chmod 0440 $TEMPFILE
- sudo chown root:root $TEMPFILE
- sudo mv $TEMPFILE /etc/sudoers.d/50_stack_sh
-
- # Create user's ~/.cache directory with proper permissions, ensuring later
- # 'sudo pip install's do not create it owned by root.
- sudo mkdir -p $BASE/new/.cache
- sudo chown -R stack:stack $BASE/new/.cache
-
- # Create a tempest user for tempest to run as, so that we can
- # revoke sudo permissions from that user when appropriate.
- # NOTE(sdague): we should try to get the state dump to be a
- # neutron API call in Icehouse to remove this.
- sudo useradd -U -s /bin/bash -m tempest
- TEMPFILE=`mktemp`
- echo "tempest ALL=(root) NOPASSWD:/sbin/ip" >$TEMPFILE
- echo "tempest ALL=(root) NOPASSWD:/sbin/iptables" >>$TEMPFILE
- echo "tempest ALL=(root) NOPASSWD:/usr/bin/ovsdb-client" >>$TEMPFILE
- chmod 0440 $TEMPFILE
- sudo chown root:root $TEMPFILE
- sudo mv $TEMPFILE /etc/sudoers.d/51_tempest_sh
-
- # Future useradd calls should strongly consider also updating
- # ~/.pydisutils.cfg in the copy_mirror_config
- # function if tox/pip will be used at all.
-
- # If we will be testing OpenVZ, make sure stack is a member of the vz group
- if [ "$DEVSTACK_GATE_VIRT_DRIVER" == "openvz" ]; then
- sudo usermod -a -G vz stack
- fi
-
- # Ensure that all of the users have the openstack mirror config
- copy_mirror_config
-
- # perform network sanity check so that we can characterize the
- # state of the world
- network_sanity_check
-
- # Disable detailed logging as we return to the main script
- $xtrace
-}
-
function archive_test_artifact {
local filename=$1
@@ -668,6 +590,63 @@ function process_testr_artifacts {
fi
}
+function process_stackviz {
+ local project=$1
+ local path_prefix=${2:-new}
+
+ local project_path=$BASE/$path_prefix/$project
+ local log_path=$BASE/logs
+ if [[ "$path_prefix" != "new" ]]; then
+ log_path=$BASE/logs/$path_prefix
+ fi
+
+ local stackviz_path=/opt/stackviz
+ if [ -d $stackviz_path/build ]; then
+ sudo pip install -U $stackviz_path
+
+ # static html+js should be prebuilt during image creation
+ cp -r $stackviz_path/build $log_path/stackviz
+
+ pushd $project_path
+ if [ -f $log_path/dstat-csv_log.txt ]; then
+ sudo testr last --subunit | stackviz-export \
+ --dstat $log_path/dstat-csv_log.txt \
+ --env --stdin \
+ $log_path/stackviz/data
+ else
+ sudo testr last --subunit | stackviz-export \
+ --env --stdin \
+ $log_path/stackviz/data
+ fi
+ sudo chown -R $USER:$USER $log_path/stackviz
+ popd
+ fi
+}
+
+function save_file {
+ local from=$1
+ local to=$2
+ if [[ -z "$to" ]]; then
+ to=$(basename $from)
+ if [[ "$to" != *.txt ]]; then
+ to=${to/\./_}
+ to="$to.txt"
+ fi
+ fi
+ if [[ -f $from ]]; then
+ sudo cp $from $BASE/logs/$to
+ fi
+}
+
+function save_dir {
+ local from=$1
+ local to=$2
+ if [[ -d $from ]]; then
+ sudo cp -r $from $BASE/logs/$to
+ fi
+}
+
+
function cleanup_host {
# TODO: clean this up to be errexit clean
local errexit=$(set +o | grep errexit)
@@ -683,15 +662,16 @@ function cleanup_host {
sleep 2
# No matter what, archive logs and config files
- if uses_debs; then
- sudo cp /var/log/syslog $BASE/logs/syslog.txt
- sudo cp /var/log/kern.log $BASE/logs/kern_log.txt
- elif is_fedora; then
+ if which journalctl ; then
# the journal gives us syslog() and kernel output, so is like
# a concatenation of the above.
sudo journalctl --no-pager \
--since="$(cat $BASE/log-start-timestamp.txt)" \
| sudo tee $BASE/logs/syslog.txt > /dev/null
+ else
+ # assume rsyslog
+ save_file /var/log/syslog
+ save_file /var/log/kern.log
fi
# apache logs; including wsgi stuff like horizon, keystone, etc.
@@ -703,9 +683,7 @@ function cleanup_host {
sudo cp -r ${apache_logs} $BASE/logs/apache
# rabbitmq logs
- if [ -d /var/log/rabbitmq ]; then
- sudo cp -r /var/log/rabbitmq $BASE/logs
- fi
+ save_dir /var/log/rabbitmq
# db logs
if [ -d /var/log/postgresql ] ; then
@@ -713,30 +691,23 @@ function cleanup_host {
# deleted
sudo cp /var/log/postgresql/*log $BASE/logs/postgres.log
fi
- if [ -f /var/log/mysql.err ] ; then
- sudo cp /var/log/mysql.err $BASE/logs/mysql_err.log
- fi
- if [ -f /var/log/mysql.log ] ; then
- sudo cp /var/log/mysql.log $BASE/logs/
- fi
+ save_file /var/log/mysql.err
+ save_file /var/log/mysql.log
# libvirt
- if [ -d /var/log/libvirt ] ; then
- sudo cp -r /var/log/libvirt $BASE/logs/
- sudo cp -r /usr/share/libvirt/cpu_map.xml $BASE/logs/libvirt/cpu_map.xml
- fi
+ save_dir /var/log/libvirt
# sudo config
- sudo cp -r /etc/sudoers.d $BASE/logs/
- sudo cp /etc/sudoers $BASE/logs/sudoers.txt
+ save_dir /etc/sudoers.d
+ save_file /etc/sudoers
# Archive config files
+ # NOTE(mriedem): 'openstack' is added separately since it's not a project
+ # but it is where clouds.yaml is stored in dsvm runs that use it.
sudo mkdir $BASE/logs/etc/
- for PROJECT in $PROJECTS; do
+ for PROJECT in $PROJECTS openstack; do
proj=`basename $PROJECT`
- if [ -d /etc/$proj ]; then
- sudo cp -r /etc/$proj $BASE/logs/etc/
- fi
+ save_dir /etc/$proj etc/
done
# Archive Apache config files
@@ -761,20 +732,24 @@ function cleanup_host {
# avoid excessively long file-names.
find $BASE/old/screen-logs -type l -print0 | \
xargs -0 -I {} sudo cp {} $BASE/logs/old
- sudo cp $BASE/old/devstacklog.txt $BASE/logs/old/
- sudo cp $BASE/old/devstack/localrc $BASE/logs/old/localrc.txt
- sudo cp $BASE/old/tempest/etc/tempest.conf $BASE/logs/old/tempest_conf.txt
- if -f [ $BASE/old/devstack/tempest.log ] ; then
- sudo cp $BASE/old/devstack/tempest.log $BASE/logs/old/verify_tempest_conf.log
+ save_file $BASE/old/devstacklog.txt old/devstacklog.txt
+ save_file $BASE/old/devstacklog.txt.summary old/devstacklog.summary.txt
+ save_file $BASE/old/devstack/localrc old/localrc.txt
+ save_file $BASE/old/devstack/local.conf old/local_conf.txt
+ save_file $BASE/old/tempest/etc/tempest.conf old/tempest_conf.txt
+ save_file $BASE/old/devstack/tempest.log old/verify_tempest_conf.log
+
+ # Copy Ironic nodes console logs if they exist
+ if [ -d $BASE/old/ironic-bm-logs ] ; then
+ sudo mkdir -p $BASE/logs/old/ironic-bm-logs
+ sudo cp $BASE/old/ironic-bm-logs/*.log $BASE/logs/old/ironic-bm-logs/
fi
# dstat CSV log
- if [ -f $BASE/old/dstat-csv.log ]; then
- sudo cp $BASE/old/dstat-csv.log $BASE/logs/old/
- fi
+ save_file $BASE/old/dstat-csv.log old/
# grenade logs
- sudo cp $BASE/new/grenade/localrc $BASE/logs/grenade_localrc.txt
+ save_file $BASE/new/grenade/localrc grenade_localrc.txt
# grenade saved state files - resources created during upgrade tests
# use this directory to dump arbitrary configuration/state files.
@@ -785,9 +760,7 @@ function cleanup_host {
# grenade pluginrc - external grenade plugins use this file to
# communicate with grenade, capture for posterity
- if -f [ $BASE/new/grenade/pluginrc ]; then
- sudo cp $BASE/new/grenade/pluginrc $BASE/logs/grenade_pluginrc.txt
- fi
+ save_file $BASE/new/grenade/pluginrc grenade_pluginrc.txt
# grenade logs directly and uses similar timestampped files to
# devstack. So temporarily copy out & rename the latest log
@@ -795,27 +768,26 @@ function cleanup_host {
# over time-stampped files and put the interesting logs back at
# top-level for easy access
sudo mkdir -p $BASE/logs/grenade
- sudo cp $BASE/logs/grenade.sh.log $BASE/logs/grenade/
- sudo cp $BASE/logs/grenade.sh.log.summary \
- $BASE/logs/grenade/grenade.sh.summary.log
+ save_file $BASE/logs/grenade.sh.log grenade/grenade.sh.log
+ save_file $BASE/logs/grenade.sh.log.summary \
+ grenade/grenade.sh.summary.log
sudo rm $BASE/logs/grenade.sh.*
sudo mv $BASE/logs/grenade/*.log $BASE/logs
sudo rm -rf $BASE/logs/grenade
- if [ -f $BASE/new/grenade/javelin.log ] ; then
- sudo cp $BASE/new/grenade/javelin.log $BASE/logs/javelin.log
- fi
+ save_file $BASE/new/grenade/javelin.log javelin.log
- NEWLOGTARGET=$BASE/logs/new
+ NEWLOGPREFIX=new/
else
- NEWLOGTARGET=$BASE/logs
+ NEWLOGPREFIX=
fi
+ NEWLOGTARGET=$BASE/logs/$NEWLOGPREFIX
find $BASE/new/screen-logs -type l -print0 | \
xargs -0 -I {} sudo cp {} $NEWLOGTARGET/
- sudo cp $BASE/new/devstacklog.txt $NEWLOGTARGET/
- sudo cp $BASE/new/devstack/localrc $NEWLOGTARGET/localrc.txt
- if [ -f $BASE/new/devstack/tempest.log ]; then
- sudo cp $BASE/new/devstack/tempest.log $NEWLOGTARGET/verify_tempest_conf.log
- fi
+ save_file $BASE/new/devstacklog.txt ${NEWLOGPREFIX}devstacklog.txt
+ save_file $BASE/new/devstacklog.txt.summary ${NEWLOGPREFIX}devstacklog.summary.txt
+ save_file $BASE/new/devstack/localrc ${NEWLOGPREFIX}localrc.txt
+ save_file $BASE/new/devstack/local.conf ${NEWLOGPREFIX}local.conf.txt
+ save_file $BASE/new/devstack/tempest.log ${NEWLOGPREFIX}verify_tempest_conf.log
# Copy failure files if they exist
if [ $(ls $BASE/status/stack/*.failure | wc -l) -gt 0 ]; then
@@ -826,22 +798,27 @@ function cleanup_host {
# Copy Ironic nodes console logs if they exist
if [ -d $BASE/new/ironic-bm-logs ] ; then
sudo mkdir -p $BASE/logs/ironic-bm-logs
- sudo cp $BASE/new/ironic-bm-logs/*.log $BASE/logs/ironic-bm-logs/
+ sudo cp -r $BASE/new/ironic-bm-logs/* $BASE/logs/ironic-bm-logs/
fi
# Copy tempest config file
- sudo cp $BASE/new/tempest/etc/tempest.conf $NEWLOGTARGET/tempest_conf.txt
+ save_file $BASE/new/tempest/etc/tempest.conf ${NEWLOGPREFIX}tempest_conf.txt
+ save_file $BASE/new/tempest/etc/accounts.yaml ${NEWLOGPREFIX}accounts_yaml.txt
# Copy dstat CSV log if it exists
- if [ -f $BASE/new/dstat-csv.log ]; then
- sudo cp $BASE/new/dstat-csv.log $BASE/logs/
- fi
+ save_file $BASE/new/dstat-csv.log
sudo iptables-save > $WORKSPACE/iptables.txt
df -h > $WORKSPACE/df.txt
- pip freeze > $WORKSPACE/pip-freeze.txt
- sudo mv $WORKSPACE/iptables.txt $WORKSPACE/df.txt \
- $WORKSPACE/pip-freeze.txt $BASE/logs/
+ save_file $WORKSPACE/iptables.txt
+ save_file $WORKSPACE/df.txt
+
+ for py_ver in 2 3; do
+ if [[ `which python${py_ver}` ]]; then
+ python${py_ver} -m pip freeze > $WORKSPACE/pip${py_ver}-freeze.txt
+ save_file $WORKSPACE/pip${py_ver}-freeze.txt
+ fi
+ done
if [ `command -v dpkg` ]; then
dpkg -l> $WORKSPACE/dpkg-l.txt
@@ -849,36 +826,57 @@ function cleanup_host {
sudo mv $WORKSPACE/dpkg-l.txt.gz $BASE/logs/
fi
if [ `command -v rpm` ]; then
- rpm -qa > $WORKSPACE/rpm-qa.txt
+ rpm -qa | sort > $WORKSPACE/rpm-qa.txt
gzip -9 rpm-qa.txt
sudo mv $WORKSPACE/rpm-qa.txt.gz $BASE/logs/
fi
+ process_stackviz tempest
+
process_testr_artifacts tempest
process_testr_artifacts tempest old
- if [ -f $BASE/new/tempest/tempest.log ] ; then
- sudo cp $BASE/new/tempest/tempest.log $BASE/logs/tempest.log
- fi
- if [ -f $BASE/old/tempest/tempest.log ] ; then
- sudo cp $BASE/old/tempest/tempest.log $BASE/logs/old/tempest.log
- fi
+ save_file $BASE/new/tempest/tempest.log tempest.log
+ save_file $BASE/old/tempest/tempest.log old/tempest.log
# ceph logs and config
if [ -d /var/log/ceph ] ; then
sudo cp -r /var/log/ceph $BASE/logs/
fi
- if [ -f /etc/ceph/ceph.conf ] ; then
- sudo cp /etc/ceph/ceph.conf $BASE/logs/ceph_conf.txt
- fi
+ save_file /etc/ceph/ceph.conf
if [ -d /var/log/openvswitch ] ; then
sudo cp -r /var/log/openvswitch $BASE/logs/
fi
+ # glusterfs logs and config
+ if [ -d /var/log/glusterfs ] ; then
+ sudo cp -r /var/log/glusterfs $BASE/logs/
+ fi
+ save_file /etc/glusterfs/glusterd.vol glusterd.vol
+
# Make sure the current user can read all the logs and configs
- sudo chown -R $USER:$USER $BASE/logs/
- sudo chmod a+r $BASE/logs/ $BASE/logs/etc
+ sudo chown -RL $USER:$USER $BASE/logs/
+ # (note X not x ... execute/search only if the file is a directory
+ # or already has execute permission for some user)
+ sudo find $BASE/logs/ -exec chmod a+rX {} \;
+ # Remove all broken symlinks, which point to non existing files
+ # They could be copied by rsync
+ sudo find $BASE/logs/ -type l -exec test ! -e {} \; -delete
+
+ # Collect all the deprecation related messages into a single file.
+ # strip out date(s), timestamp(s), pid(s), context information and
+ # remove duplicates as well so we have a limited set of lines to
+ # look through. The fancy awk is used instead of a "sort | uniq -c"
+ # to preserve the order in which we find the lines in a specific
+ # log file.
+ grep -i deprecat $BASE/logs/*.log $BASE/logs/apache/*.log | \
+ sed -r 's/[0-9]{1,2}\:[0-9]{1,2}\:[0-9]{1,2}\.[0-9]{1,3}/ /g' | \
+ sed -r 's/[0-9]{1,2}\:[0-9]{1,2}\:[0-9]{1,2}/ /g' | \
+ sed -r 's/[0-9]{1,4}-[0-9]{1,2}-[0-9]{1,4}/ /g' |
+ sed -r 's/\[.*\]/ /g' | \
+ sed -r 's/\s[0-9]+\s/ /g' | \
+ awk '{if ($0 in seen) {seen[$0]++} else {out[++n]=$0;seen[$0]=1}} END { for (i=1; i<=n; i++) print seen[out[i]]" :: " out[i] }' > $BASE/logs/deprecations.log
# rename files to .txt; this is so that when displayed via
# logs.openstack.org clicking results in the browser shows the
@@ -908,14 +906,6 @@ function cleanup_host {
done
fi
- # glusterfs logs and config
- if [ -d /var/log/glusterfs ] ; then
- sudo cp -r /var/log/glusterfs $BASE/logs/
- fi
- if [ -f /etc/glusterfs/glusterd.vol ] ; then
- sudo cp /etc/glusterfs/glusterd.vol $BASE/logs/
- fi
-
# final memory usage and process list
ps -eo user,pid,ppid,lwp,%cpu,%mem,size,rss,cmd > $BASE/logs/ps.txt
@@ -969,7 +959,7 @@ function enable_netconsole {
# out to the world is specify the default gw as the remote
# destination.
local default_gw=$(ip route | grep default | awk '{print $3}')
- local gw_mac=$(arp $default_gw | grep $default_gw | awk '{print $3}')
+ local gw_mac=$(arp -n $default_gw | grep $default_gw | awk '{print $3}')
local gw_dev=$(ip route | grep default | awk '{print $5}')
# turn up message output
@@ -1022,7 +1012,7 @@ function ovs_vxlan_bridge {
local ovs_package='openvswitch-switch'
local ovs_service='openvswitch-switch'
else
- echo "Unsupported platform, can't determine ntp service"
+ echo "Unsupported platform, can't determine openvswitch service"
exit 1
fi
local install_ovs_deps="source $BASE/new/devstack/functions-common; \
@@ -1041,6 +1031,8 @@ function ovs_vxlan_bridge {
shift 4
fi
local peer_ips=$@
+ # neutron uses 1:1000 with default devstack configuration, avoid overlap
+ local additional_vni_offset=1000000
eval $install_ovs_deps
# create a bridge, just like you would with 'brctl addbr'
# if the bridge exists, --may-exist prevents ovs from returning an error
@@ -1055,8 +1047,10 @@ function ovs_vxlan_bridge {
dev ${bridge_name}
fi
fi
+ sudo ip link set dev $bridge_name up
for node_ip in $peer_ips; do
- (( offset++ ))
+ offset=$(( offset+1 ))
+ vni=$(( offset + additional_vni_offset ))
# For reference on how to setup a tunnel using OVS see:
# http://openvswitch.org/support/config-cookbooks/port-tunneling/
# The command below is equivalent to the sequence of ip/brctl commands
@@ -1070,7 +1064,7 @@ function ovs_vxlan_bridge {
${bridge_name}_${node_ip} \
-- set interface ${bridge_name}_${node_ip} type=vxlan \
options:remote_ip=${node_ip} \
- options:key=${offset} \
+ options:key=${vni} \
options:local_ip=${host_ip}
# Now complete the vxlan tunnel setup for the Compute Node:
# Similarly this establishes the tunnel in the reverse direction
@@ -1081,7 +1075,7 @@ function ovs_vxlan_bridge {
${bridge_name}_${host_ip} \
-- set interface ${bridge_name}_${host_ip} type=vxlan \
options:remote_ip=${host_ip} \
- options:key=${offset} \
+ options:key=${vni} \
options:local_ip=${node_ip}
if [[ "$set_ips" == "True" ]] ; then
if ! remote_command $node_ip sudo ip addr show dev ${bridge_name} | \
@@ -1091,6 +1085,7 @@ function ovs_vxlan_bridge {
dev ${bridge_name}
fi
fi
+ remote_command $node_ip sudo ip link set dev $bridge_name up
done
}
@@ -1107,3 +1102,8 @@ function with_timeout {
function iniset {
$(source $BASE/new/devstack/inc/ini-config; iniset $@)
}
+
+# Iniget imported from devstack
+function iniget {
+ $(source $BASE/new/devstack/inc/ini-config; iniget $@)
+}
diff --git a/help/tempest-logs.html b/help/tempest-logs.html
index e76c339a..a8f2579c 100644
--- a/help/tempest-logs.html
+++ b/help/tempest-logs.html
@@ -73,7 +73,7 @@ Types of logs
screen-q-meta.txt.gz: neutron-metadata-agent
screen-q-metering.txt.gz: neutron-metering-agent
screen-q-svc.txt.gz: neutron-server
- screen-q-vpn.txt.gz: neutron-vpn-agent
+ screen-q-l3.txt.gz: neutron-l3-agent
swift
@@ -107,6 +107,7 @@ Types of logs
- devstack
diff --git a/multinode_setup_info.txt b/multinode_setup_info.txt
index 70b4bc81..31fc7487 100644
--- a/multinode_setup_info.txt
+++ b/multinode_setup_info.txt
@@ -38,9 +38,9 @@ Nova Network
||br_pub| | br_flat| | ||br_pub | |br_flat| | ||br_pub | |br_flat| |
|+--+---+ +---+----+ | |+---+---+ +---+---+ | |+---+---+ +---+---+ |
| | | | | | | | | | | |
-| | +-------------------gre-tunnel--+-----------------gre-tunnel---+ |
+| | +------------------vxlan-tunnel-+-----------------vxlan-tunnel-+ |
| | | | | | | | |
-| +----------gre-tunnel-----------+----------gre-tunnel----------+ |
+| +--------vxlan-tunnel-----------+--------vxlan-tunnel----------+ |
| | | | | |
+--------------------------+ +--------------------------+ +--------------------------+
@@ -72,9 +72,9 @@ This is what it all looks like after you run devstack and boot some nodes.
||br_pub| | br_flat| | ||br_pub | |br_flat| | ||br_pub | |br_flat| |
|+--+---+ +---+----+ | |+---+---+ +---+---+ | |+---+---+ +---+---+ |
| | | | | | | | | | | |
-| | +-------------------gre-tunnel--+-----------------gre-tunnel---+ |
+| | +------------------vxlan-tunnel-+-----------------vxlan-tunnel-+ |
| | | | | | | | |
-| +----------gre-tunnel-----------+----------gre-tunnel----------+ |
+| +--------vxlan-tunnel-----------+--------vxlan-tunnel----------+ |
| | | | | |
+--------------------------+ +--------------------------+ +--------------------------+
@@ -104,7 +104,7 @@ happens here.
|+--+---+ | |+---+---+ | |+---+---+ |
| | | | | | | | |
| | | | | | | | |
-| +----------gre-tunnel-----------+----------gre-tunnel----------+ |
+| +--------vxlan-tunnel-----------+--------vxlan-tunnel----------+ |
| | | | | |
+--------------------------+ +--------------------------+ +--------------------------+
@@ -139,6 +139,6 @@ This is what it all looks like after you run devstack and boot some nodes.
|+--+---+ | |+---+---+ | |+---+---+ |
| | | | | | | | |
| | | | | | | | |
-| +----------gre-tunnel-----------+----------gre-tunnel----------+ |
+| +--------vxlan-tunnel-----------+--------vxlan-tunnel----------+ |
| | | | | |
+--------------------------+ +--------------------------+ +--------------------------+
diff --git a/playbooks/devstack_gate_vars.yaml b/playbooks/devstack_gate_vars.yaml
new file mode 100644
index 00000000..62f5ff21
--- /dev/null
+++ b/playbooks/devstack_gate_vars.yaml
@@ -0,0 +1,6 @@
+---
+BASE: "{{ lookup('env', 'BASE')|default('/opt/stack', true) }}"
+CI_USER: "{{ lookup('env', 'CI_USER')|default(ansible_user_id, true) }}"
+PING_TIMES: 20
+HTTP_TIMES: 10
+PIP_CONFIG_FILE: "{{ lookup('env', 'PIP_CONFIG_FILE')|default('/etc/pip.conf', true) }}"
diff --git a/playbooks/plugins/callback/devstack.py b/playbooks/plugins/callback/devstack.py
new file mode 100644
index 00000000..0433ada6
--- /dev/null
+++ b/playbooks/plugins/callback/devstack.py
@@ -0,0 +1,141 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <https://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from ansible import constants as C
+from ansible.plugins.callback import CallbackBase
+from ansible.vars import strip_internal_keys
+
+import datetime
+import yaml
+
+
+def _get_timestamp():
+ return str(datetime.datetime.now())[:-3]
+
+
+class CallbackModule(CallbackBase):
+
+ '''Callback plugin for devstack-gate.
+
+ Based on the minimal callback plugin from the ansible tree. Adds
+ timestamps to the start of the lines, squishes responses that are only
+ messages, returns facts in yaml not json format and strips facter facts
+ from the reported facts.
+ '''
+
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'stdout'
+ CALLBACK_NAME = 'devstack'
+
+ def _command_generic_msg(self, host, result, task, caption):
+ '''output the result of a command run'''
+
+ if caption == 'SUCCESS':
+ buf = "%s | %s | %s | %s >>\n" % (
+ _get_timestamp(), host, caption, task.get_name().strip())
+ else:
+ buf = "%s | %s | %s | %s | rc=%s >>\n" % (
+ _get_timestamp(), host, caption, task.get_name().strip(),
+ result.get('rc', 0))
+ buf += result.get('stdout', '')
+ buf += result.get('stderr', '')
+ buf += result.get('msg', '')
+
+ return buf + "\n"
+
+ def v2_runner_on_failed(self, result, ignore_errors=False):
+ if 'exception' in result._result:
+ self._display.display(
+ "An exception occurred during task execution."
+ " The full traceback is:\n" + result._result['exception'])
+
+ if result._task.action in C.MODULE_NO_JSON:
+ self._display.display(
+ self._command_generic_msg(
+ result._host.get_name(), result._result, result._task,
+ "FAILED"))
+ else:
+ self._display.display(
+ "%s | %s | FAILED! => %s" % (
+ _get_timestamp(),
+ result._host.get_name(), self._dump_results(
+ result._result, indent=4)))
+
+ def v2_runner_on_ok(self, result):
+ self._clean_results(result._result, result._task.action)
+ if 'ansible_facts' in result._result:
+ return
+ elif 'hostvars[inventory_hostname]' in result._result:
+ facts = result._result['hostvars[inventory_hostname]']
+ facter_keys = [k for k in facts.keys() if k.startswith('facter_')]
+ for key in facter_keys:
+ del facts[key]
+ result._result['ansible_facts'] = facts
+ self._display.display(
+ "%s | %s | Gathered facts:\n%s" % (
+ _get_timestamp(),
+ result._host.get_name(),
+ yaml.safe_dump(facts, default_flow_style=False)))
+ return
+
+ if result._task.action in C.MODULE_NO_JSON:
+ self._display.display(
+ self._command_generic_msg(
+ result._host.get_name(), result._result, result._task,
+ "SUCCESS"))
+ else:
+ if 'changed' in result._result and result._result['changed']:
+ self._display.display(
+ "%s | %s | SUCCESS => %s" % (
+ _get_timestamp(),
+ result._host.get_name(), self._dump_results(
+ result._result, indent=4)))
+ else:
+ abriged_result = strip_internal_keys(result._result)
+ if 'msg' in abriged_result and len(abriged_result.keys()) == 1:
+ result_text = result._result['msg']
+ else:
+ result_text = self._dump_results(result._result, indent=4)
+
+ self._display.display(
+ "%s | %s | %s | %s" % (
+ _get_timestamp(),
+ result._host.get_name(),
+ result._task.get_name().strip(),
+ result_text))
+ self._handle_warnings(result._result)
+
+ def v2_runner_on_skipped(self, result):
+ self._display.display(
+ "%s | %s | SKIPPED" % (
+ _get_timestamp(), result._host.get_name()))
+
+ def v2_runner_on_unreachable(self, result):
+ self._display.display(
+ "%s | %s | UNREACHABLE! => %s" % (
+ _get_timestamp(),
+ result._host.get_name(), self._dump_results(
+ result._result, indent=4)))
+
+ def v2_on_file_diff(self, result):
+ if 'diff' in result._result and result._result['diff']:
+ self._display.display(self._get_diff(result._result['diff']))
diff --git a/playbooks/roles/copy_mirror_config/tasks/main.yaml b/playbooks/roles/copy_mirror_config/tasks/main.yaml
new file mode 100644
index 00000000..385dcd4b
--- /dev/null
+++ b/playbooks/roles/copy_mirror_config/tasks/main.yaml
@@ -0,0 +1,12 @@
+- name: Get status of pydistutils.cfg file
+ stat: path={{ '~' + CI_USER | expanduser }}/.pydistutils.cfg
+ register: st
+- block:
+ - name: Install CI_USER .pydistutils on root home folder
+ command: install -D -m0644 -o root -g root {{ '~' + CI_USER | expanduser }}/.pydistutils.cfg /root/.pydistutils.cfg
+ - name: Install CI_USER .pydistutils on stack home folder
+ command: install -D -m0644 -o stack -g stack {{ '~' + CI_USER | expanduser }}/.pydistutils.cfg {{ BASE }}/new/.pydistutils.cfg
+ - name: Install CI_USER .pydistutils on tempest home folder
+ command: install -D -m0644 -o tempest -g tempest {{ '~' + CI_USER | expanduser }}/.pydistutils.cfg /home/tempest/.pydistutils.cfg
+ when: st.stat.exists
+ become: yes
diff --git a/playbooks/roles/create_base_folder/tasks/main.yaml b/playbooks/roles/create_base_folder/tasks/main.yaml
new file mode 100644
index 00000000..d9dbc068
--- /dev/null
+++ b/playbooks/roles/create_base_folder/tasks/main.yaml
@@ -0,0 +1,3 @@
+- name: Create BASE folder
+ file: path={{ BASE }} state=directory
+ become: yes
diff --git a/playbooks/roles/fix_etc_hosts/tasks/main.yaml b/playbooks/roles/fix_etc_hosts/tasks/main.yaml
new file mode 100644
index 00000000..0eb59df1
--- /dev/null
+++ b/playbooks/roles/fix_etc_hosts/tasks/main.yaml
@@ -0,0 +1,11 @@
+---
+- name: Check whether /etc/hosts contains hostname
+ command: grep {{ ansible_hostname }} /etc/hosts
+ changed_when: False
+ failed_when: False
+ register: grep_out
+
+- name: Add hostname to /etc/hosts
+ lineinfile: dest=/etc/hosts insertafter=EOF line='127.0.1.1 {{ ansible_hostname }}'
+ become: yes
+ when: grep_out.rc != 0
diff --git a/playbooks/roles/gather_host_info/tasks/main.yaml b/playbooks/roles/gather_host_info/tasks/main.yaml
new file mode 100644
index 00000000..09d599c6
--- /dev/null
+++ b/playbooks/roles/gather_host_info/tasks/main.yaml
@@ -0,0 +1,9 @@
+---
+# this is what prints the facts to the logs
+- debug: var=hostvars[inventory_hostname]
+
+- command: locale
+ name: "Gather locale"
+
+- command: cat /proc/cpuinfo
+ name: "Gather kernel cpu info"
diff --git a/playbooks/roles/network_sanity_check/tasks/http_check.yaml b/playbooks/roles/network_sanity_check/tasks/http_check.yaml
new file mode 100644
index 00000000..555d9a41
--- /dev/null
+++ b/playbooks/roles/network_sanity_check/tasks/http_check.yaml
@@ -0,0 +1,5 @@
+- name: Perform HTTP check
+ uri: url={{ url }}
+ register: uri_result
+ until: uri_result['status'] == 200
+ retries: "{{ HTTP_TIMES }}"
diff --git a/playbooks/roles/network_sanity_check/tasks/main.yaml b/playbooks/roles/network_sanity_check/tasks/main.yaml
new file mode 100644
index 00000000..091f91ce
--- /dev/null
+++ b/playbooks/roles/network_sanity_check/tasks/main.yaml
@@ -0,0 +1,28 @@
+---
+- name: Get status of file PIP_CONFIG_FILE
+ stat: path={{ PIP_CONFIG_FILE }}
+ register: st
+- block:
+ - name: Set pypi_url variable
+ set_fact: pypi_url={{ lookup('ini', 'index-url section=global file=' + PIP_CONFIG_FILE) }}
+ - name: Set pypi_host variable
+ set_fact: pypi_host={{ pypi_url.split('/')[2] }}
+ - include: ping_check.yaml host={{ pypi_host }}
+ - include: http_check.yaml url={{ pypi_url }}
+ when: st.stat.exists
+- name: Get NODEPOOL_MIRROR_HOST from /etc/nodepool/provider
+ shell: grep NODEPOOL_MIRROR_HOST /etc/nodepool/provider | cut -d "=" -f2
+ register: grep_mirror_host
+ changed_when: False
+- name: Get NODEPOOL_REGION from /etc/nodepool/provider
+ shell: grep NODEPOOL_REGION /etc/nodepool/provider | cut -d "=" -f2
+ register: grep_region
+ changed_when: False
+- name: Get NODEPOOL_CLOUD from /etc/nodepool/provider
+ shell: grep NODEPOOL_CLOUD /etc/nodepool/provider | cut -d "=" -f2
+ register: grep_cloud
+ changed_when: False
+- name: Build NODEPOOL_MIRROR_HOST variable with region and cloud if undefined
+ set_fact: NODEPOOL_MIRROR_HOST={{ grep_mirror_host.stdout|default("mirror." + grep_region.stdout|lower + "." + grep_cloud.stdout + ".openstack.org", true) }}
+- include: ping_check.yaml host={{ NODEPOOL_MIRROR_HOST }}
+- include: http_check.yaml url=http://{{ NODEPOOL_MIRROR_HOST }}/ubuntu/dists/trusty/Release
diff --git a/playbooks/roles/network_sanity_check/tasks/ping_check.yaml b/playbooks/roles/network_sanity_check/tasks/ping_check.yaml
new file mode 100644
index 00000000..57ae849d
--- /dev/null
+++ b/playbooks/roles/network_sanity_check/tasks/ping_check.yaml
@@ -0,0 +1,3 @@
+- name: Perform ping check
+ command: ping -c {{ PING_TIMES }} {{ host }}
+ changed_when: False
diff --git a/playbooks/roles/setup_stack_user/files/50_stack_sh b/playbooks/roles/setup_stack_user/files/50_stack_sh
new file mode 100644
index 00000000..4c6b46bd
--- /dev/null
+++ b/playbooks/roles/setup_stack_user/files/50_stack_sh
@@ -0,0 +1 @@
+stack ALL=(root) NOPASSWD:ALL
diff --git a/playbooks/roles/setup_stack_user/tasks/main.yaml b/playbooks/roles/setup_stack_user/tasks/main.yaml
new file mode 100644
index 00000000..6748d5a7
--- /dev/null
+++ b/playbooks/roles/setup_stack_user/tasks/main.yaml
@@ -0,0 +1,20 @@
+---
+- name: Create stack group
+ group: name=stack state=present
+ become: yes
+
+- name: Create stack user
+ user: name=stack shell=/bin/bash home={{ BASE }}/new group=stack
+ become: yes
+
+- name: Set home folder permissions
+ file: path={{ BASE }}/new mode=0755
+ become: yes
+
+- name: Copy 50_stack_sh file to /etc/sudoers.d
+ copy: src=50_stack_sh dest=/etc/sudoers.d mode=0440 owner=root group=root
+ become: yes
+
+- name: Create new/.cache folder within BASE
+ file: path={{ BASE }}/new/.cache state=directory owner=stack group=stack
+ become: yes
diff --git a/playbooks/roles/setup_tempest_user/files/51_tempest_sh b/playbooks/roles/setup_tempest_user/files/51_tempest_sh
new file mode 100644
index 00000000..f88ff9f4
--- /dev/null
+++ b/playbooks/roles/setup_tempest_user/files/51_tempest_sh
@@ -0,0 +1,3 @@
+tempest ALL=(root) NOPASSWD:/sbin/ip
+tempest ALL=(root) NOPASSWD:/sbin/iptables
+tempest ALL=(root) NOPASSWD:/usr/bin/ovsdb-client
diff --git a/playbooks/roles/setup_tempest_user/tasks/main.yaml b/playbooks/roles/setup_tempest_user/tasks/main.yaml
new file mode 100644
index 00000000..b6af7ed2
--- /dev/null
+++ b/playbooks/roles/setup_tempest_user/tasks/main.yaml
@@ -0,0 +1,12 @@
+---
+- name: Create tempest group
+ group: name=tempest state=present
+ become: yes
+
+- name: Create tempest user
+ user: name=tempest shell=/bin/bash group=tempest
+ become: yes
+
+- name: Copy 51_tempest_sh to /etc/sudoers.d
+ copy: src=51_tempest_sh dest=/etc/sudoers.d owner=root group=root mode=0440
+ become: yes
diff --git a/playbooks/roles/start_fresh_logging/tasks/main.yaml b/playbooks/roles/start_fresh_logging/tasks/main.yaml
new file mode 100644
index 00000000..500537c1
--- /dev/null
+++ b/playbooks/roles/start_fresh_logging/tasks/main.yaml
@@ -0,0 +1,57 @@
+---
+- name: Check for /bin/journalctl file
+ command: which journalctl
+ changed_when: False
+ failed_when: False
+ register: which_out
+
+- block:
+ - name: Get current date
+ command: date +"%Y-%m-%d %H:%M:%S"
+ register: date_out
+
+ - name: Copy current date to log-start-timestamp.txt
+ copy:
+ dest: "{{ BASE }}/log-start-timestamp.txt"
+ content: "{{ date_out.stdout }}"
+ when: which_out.rc == 0
+ become: yes
+
+- block:
+ - name: Stop rsyslog
+ service: name=rsyslog state=stopped
+
+ - name: Save syslog file prior to devstack run
+ command: mv /var/log/syslog /var/log/syslog-pre-devstack
+
+ - name: Save kern.log file prior to devstack run
+ command: mv /var/log/kern.log /var/log/kern_log-pre-devstack
+
+ - name: Recreate syslog file
+ file: name=/var/log/syslog state=touch
+
+ - name: Recreate syslog file owner and group
+ command: chown /var/log/syslog --ref /var/log/syslog-pre-devstack
+
+ - name: Recreate syslog file permissions
+ command: chmod /var/log/syslog --ref /var/log/syslog-pre-devstack
+
+ - name: Add read permissions to all on syslog file
+ file: name=/var/log/syslog mode=a+r
+
+ - name: Recreate kern.log file
+ file: name=/var/log/kern.log state=touch
+
+ - name: Recreate kern.log file owner and group
+ command: chown /var/log/kern.log --ref /var/log/kern_log-pre-devstack
+
+ - name: Recreate kern.log file permissions
+ command: chmod /var/log/kern.log --ref /var/log/kern_log-pre-devstack
+
+ - name: Add read permissions to all on kern.log file
+ file: name=/var/log/kern.log mode=a+r
+
+ - name: Start rsyslog
+ service: name=rsyslog state=started
+ when: which_out.rc == 1
+ become: yes
diff --git a/playbooks/setup_host.yaml b/playbooks/setup_host.yaml
new file mode 100644
index 00000000..2a140e5d
--- /dev/null
+++ b/playbooks/setup_host.yaml
@@ -0,0 +1,14 @@
+---
+- hosts: all
+ gather_facts: yes
+ vars_files:
+ - devstack_gate_vars.yaml
+ roles:
+ - gather_host_info
+ - fix_etc_hosts
+ - create_base_folder
+ - start_fresh_logging
+ - setup_stack_user
+ - setup_tempest_user
+ - copy_mirror_config
+ - network_sanity_check
diff --git a/test-features.sh b/test-features.sh
index cb1d3116..bdda8024 100755
--- a/test-features.sh
+++ b/test-features.sh
@@ -16,21 +16,13 @@
ERRORS=0
-TEMPEST_FULL_MASTER="n-api,n-crt,n-obj,n-cpu,n-sch,n-cond,g-api,g-reg,key,horizon,c-api,c-vol,c-sch,c-bak,cinder,s-proxy,s-account,s-container,s-object,mysql,rabbit,dstat,tempest,ceilometer-acompute,ceilometer-acentral,ceilometer-collector,ceilometer-api,ceilometer-alarm-notifier,ceilometer-alarm-evaluator,ceilometer-anotification,n-net"
+TEMPEST_FULL_MASTER="n-api,n-crt,n-obj,n-cpu,n-sch,n-cond,n-novnc,n-cauth,g-api,g-reg,key,horizon,c-api,c-vol,c-sch,c-bak,cinder,s-proxy,s-account,s-container,s-object,mysql,rabbit,dstat,peakmem_tracker,tempest,ceilometer-acompute,ceilometer-acentral,ceilometer-collector,ceilometer-api,ceilometer-alarm-notifier,ceilometer-alarm-evaluator,ceilometer-anotification,n-net,placement-api"
-TEMPEST_NEUTRON_MASTER="n-api,n-crt,n-obj,n-cpu,n-sch,n-cond,g-api,g-reg,key,horizon,c-api,c-vol,c-sch,c-bak,cinder,s-proxy,s-account,s-container,s-object,mysql,rabbit,dstat,tempest,ceilometer-acompute,ceilometer-acentral,ceilometer-collector,ceilometer-api,ceilometer-alarm-notifier,ceilometer-alarm-evaluator,ceilometer-anotification,quantum,q-svc,q-agt,q-dhcp,q-l3,q-meta,q-lbaas,q-fwaas,q-metering"
+TEMPEST_NEUTRON_MASTER="n-api,n-crt,n-obj,n-cpu,n-sch,n-cond,n-novnc,n-cauth,g-api,g-reg,key,horizon,c-api,c-vol,c-sch,c-bak,cinder,s-proxy,s-account,s-container,s-object,mysql,rabbit,dstat,peakmem_tracker,tempest,ceilometer-acompute,ceilometer-acentral,ceilometer-collector,ceilometer-api,ceilometer-alarm-notifier,ceilometer-alarm-evaluator,ceilometer-anotification,q-svc,q-agt,q-dhcp,q-l3,q-meta,q-metering,placement-api"
-TEMPEST_NEUTRON_KILO="n-api,n-crt,n-obj,n-cpu,n-sch,n-cond,g-api,g-reg,key,horizon,c-api,c-vol,c-sch,c-bak,cinder,s-proxy,s-account,s-container,s-object,mysql,rabbit,dstat,tempest,ceilometer-acompute,ceilometer-acentral,ceilometer-collector,ceilometer-api,ceilometer-alarm-notifier,ceilometer-alarm-evaluator,ceilometer-anotification,quantum,q-svc,q-agt,q-dhcp,q-l3,q-meta,q-lbaas,q-fwaas,q-metering,q-vpn"
+TEMPEST_HEAT_SLOW_MASTER="n-api,n-crt,n-obj,n-cpu,n-sch,n-cond,n-novnc,n-cauth,g-api,g-reg,key,horizon,c-api,c-vol,c-sch,c-bak,cinder,s-proxy,s-account,s-container,s-object,mysql,rabbit,dstat,peakmem_tracker,tempest,ceilometer-acompute,ceilometer-acentral,ceilometer-collector,ceilometer-api,ceilometer-alarm-notifier,ceilometer-alarm-evaluator,ceilometer-anotification,q-svc,q-agt,q-dhcp,q-l3,q-meta,q-metering,placement-api"
-TEMPEST_HEAT_SLOW_MASTER="n-api,n-crt,n-obj,n-cpu,n-sch,n-cond,g-api,g-reg,key,horizon,c-api,c-vol,c-sch,c-bak,cinder,s-proxy,s-account,s-container,s-object,mysql,rabbit,dstat,tempest,ceilometer-acompute,ceilometer-acentral,ceilometer-collector,ceilometer-api,ceilometer-alarm-notifier,ceilometer-alarm-evaluator,ceilometer-anotification,quantum,q-svc,q-agt,q-dhcp,q-l3,q-meta,q-lbaas,q-fwaas,q-metering"
-
-GRENADE_NEW_MASTER="n-api,n-crt,n-obj,n-cpu,n-sch,n-cond,g-api,g-reg,key,horizon,c-api,c-vol,c-sch,c-bak,cinder,s-proxy,s-account,s-container,s-object,mysql,rabbit,dstat,tempest,n-net,ceilometer-acompute,ceilometer-acentral,ceilometer-collector,ceilometer-api,ceilometer-alarm-notifier,ceilometer-alarm-evaluator,ceilometer-anotification"
-
-GRENADE_JUNO_MASTER="n-api,n-crt,n-obj,n-cpu,n-sch,n-cond,g-api,g-reg,key,horizon,c-api,c-vol,c-sch,c-bak,cinder,s-proxy,s-account,s-container,s-object,mysql,rabbit,dstat,tempest,n-net,ceilometer-acompute,ceilometer-acentral,ceilometer-collector,ceilometer-api,ceilometer-alarm-notifier,ceilometer-alarm-evaluator,ceilometer-anotification"
-
-GRENADE_ICEHOUSE_MASTER="n-api,n-crt,n-obj,n-cpu,n-sch,n-cond,g-api,g-reg,key,horizon,c-api,c-vol,c-sch,c-bak,cinder,s-proxy,s-account,s-container,s-object,mysql,rabbit,dstat,tempest,n-net,ceilometer-acompute,ceilometer-acentral,ceilometer-collector,ceilometer-api,ceilometer-alarm-notifier,ceilometer-alarm-evaluator,ceilometer-anotification"
-
-TEMPEST_FULL_JUNO="n-api,n-crt,n-obj,n-cpu,n-sch,n-cond,g-api,g-reg,key,horizon,c-api,c-vol,c-sch,c-bak,cinder,s-proxy,s-account,s-container,s-object,mysql,rabbit,dstat,tempest,ceilometer-acompute,ceilometer-acentral,ceilometer-collector,ceilometer-api,ceilometer-alarm-notifier,ceilometer-alarm-evaluator,ceilometer-anotification,n-net"
+GRENADE_NEW_MASTER="n-api,n-crt,n-obj,n-cpu,n-sch,n-cond,n-novnc,n-cauth,g-api,g-reg,key,c-api,c-vol,c-sch,c-bak,cinder,s-proxy,s-account,s-container,s-object,mysql,rabbit,dstat,peakmem_tracker,tempest,n-net,ceilometer-acompute,ceilometer-acentral,ceilometer-collector,ceilometer-api,ceilometer-alarm-notifier,ceilometer-alarm-evaluator,ceilometer-anotification,placement-api"
# Utility function for tests
function assert_list_equal {
@@ -57,21 +49,11 @@ function test_full_feature_ec {
assert_list_equal $TEMPEST_FULL_MASTER $results
}
-function test_full_juno {
- local results=$(DEVSTACK_GATE_TEMPEST=1 ./test-matrix.py -b stable/juno)
- assert_list_equal $TEMPEST_FULL_JUNO $results
-}
-
function test_neutron_master {
local results=$(DEVSTACK_GATE_NEUTRON=1 DEVSTACK_GATE_TEMPEST=1 ./test-matrix.py)
assert_list_equal $TEMPEST_NEUTRON_MASTER $results
}
-function test_neutron_kilo {
- local results=$(DEVSTACK_GATE_NEUTRON=1 DEVSTACK_GATE_TEMPEST=1 ./test-matrix.py -b stable/kilo)
- assert_list_equal $TEMPEST_NEUTRON_KILO $results
-}
-
function test_heat_slow_master {
local results=$(DEVSTACK_GATE_TEMPEST_HEAT_SLOW=1 DEVSTACK_GATE_NEUTRON=1 DEVSTACK_GATE_TEMPEST=1 ./test-matrix.py)
assert_list_equal $TEMPEST_HEAT_SLOW_MASTER $results
@@ -82,25 +64,11 @@ function test_grenade_new_master {
assert_list_equal $GRENADE_NEW_MASTER $results
}
-function test_grenade_juno_master {
- local results=$(DEVSTACK_GATE_GRENADE=pullup DEVSTACK_GATE_TEMPEST=1 ./test-matrix.py -b stable/juno)
- assert_list_equal $GRENADE_JUNO_MASTER $results
-}
-
-function test_grenade_icehouse_master {
- local results=$(DEVSTACK_GATE_GRENADE=pullup DEVSTACK_GATE_TEMPEST=1 ./test-matrix.py -b stable/icehouse)
- assert_list_equal $GRENADE_ICEHOUSE_MASTER $results
-}
-
test_full_master
test_full_feature_ec
test_neutron_master
-test_neutron_kilo
test_heat_slow_master
test_grenade_new_master
-test_grenade_juno_master
-test_grenade_icehouse_master
-test_full_juno
if [[ "$ERRORS" -ne 0 ]]; then
echo "Errors detected, job failed"
diff --git a/test-functions.sh b/test-functions.sh
index 80295eec..844e1486 100755
--- a/test-functions.sh
+++ b/test-functions.sh
@@ -515,6 +515,61 @@ function test_call_hook_if_defined {
rm -rf $save_dir
}
+# test that reproduce file is populated correctly
+function test_reproduce {
+ # expected result
+ read -d '' EXPECTED_VARS << EOF
+declare -x ZUUL_VAR="zuul-var"
+declare -x DEVSTACK_VAR="devstack-var"
+declare -x ZUUL_VAR_MULTILINE="zuul-var-setting1
+zuul-var-setting2"
+declare -x DEVSTACK_VAR_MULTILINE="devstack-var-setting1
+devstack-var-setting2"
+gate_hook ()
+{
+ echo "The cake is a lie"
+}
+declare -fx gate_hook
+EOF
+
+ # prepare environment for test
+ WORKSPACE=.
+ export DEVSTACK_VAR=devstack-var
+ export DEVSTACK_VAR_MULTILINE="devstack-var-setting1
+devstack-var-setting2"
+ export ZUUL_VAR=zuul-var
+ export ZUUL_VAR_MULTILINE="zuul-var-setting1
+zuul-var-setting2"
+ function gate_hook {
+ echo "The cake is a lie"
+ }
+ export -f gate_hook
+ JOB_NAME=test-job
+ mkdir $WORKSPACE/logs
+
+ # execute call and assert
+ reproduce
+
+ [[ -e $WORKSPACE/logs/reproduce.sh ]]
+ file_exists=$?
+ assert_equal $file_exists 0
+
+ result_expected=`cat $WORKSPACE/logs/reproduce.sh | grep "$EXPECTED_VARS"`
+ [[ ${#result_expected} -eq "0" ]]
+ assert_equal $? 1
+
+ # clean up environment
+ rm -rf $WORKSPACE/logs
+ rm -rf $WORKSPACE/workspace
+ unset WORKSPACE
+ unset DEVSTACK_VAR
+ unset DEVSTACK_VAR_MULTILINE
+ unset ZUUL_VAR
+ unset ZUUL_VAR_MULTILINE
+ unset JOB_NAME
+ unset gate_hook
+}
+
# Run tests:
#set -o xtrace
test_branch_override
@@ -530,6 +585,7 @@ test_periodic_no_branch
test_two_on_master
test_workspace_branch_arg
test_call_hook_if_defined
+test_reproduce
if [[ ! -z "$ERROR" ]]; then
echo
diff --git a/test-matrix.py b/test-matrix.py
index 48fff355..2210db38 100755
--- a/test-matrix.py
+++ b/test-matrix.py
@@ -65,7 +65,7 @@ def normalize_branch(branch):
def configs_from_env():
configs = []
- for k, v in os.environ.iteritems():
+ for k, v in os.environ.items():
if k.startswith('DEVSTACK_GATE_'):
if v not in FALSE_VALUES:
f = k.split('DEVSTACK_GATE_')[1]
@@ -73,22 +73,24 @@ def configs_from_env():
return configs
-def calc_services(branch, features):
+def calc_services(branch, features, role):
services = set()
for feature in features:
- services.update(GRID['features'][feature]['base'].get('services', []))
- if branch in GRID['features'][feature]:
+ grid_feature = GRID[role][feature]
+ services.update(grid_feature['base'].get('services', []))
+ if branch in grid_feature:
services.update(
- GRID['features'][feature][branch].get('services', []))
+ grid_feature[branch].get('services', []))
# deletes always trump adds
for feature in features:
+ grid_feature = GRID[role][feature]
services.difference_update(
- GRID['features'][feature]['base'].get('rm-services', []))
+ grid_feature['base'].get('rm-services', []))
- if branch in GRID['features'][feature]:
+ if branch in grid_feature:
services.difference_update(
- GRID['features'][feature][branch].get('rm-services', []))
+ grid_feature[branch].get('rm-services', []))
return sorted(list(services))
@@ -127,6 +129,10 @@ def get_opts():
parser.add_argument('-m', '--mode',
default="services",
help="What to return (services, compute-ext)")
+ parser.add_argument('-r', '--role',
+ default='primary',
+ help="What role this node will have",
+ choices=['primary', 'subnode'])
return parser.parse_args()
@@ -137,15 +143,16 @@ def main():
GRID = parse_features(opts.features)
ALLOWED_BRANCHES = GRID['branches']['allowed']
branch = normalize_branch(opts.branch)
+ role = opts.role
features = calc_features(branch, configs_from_env())
LOG.debug("Features: %s " % features)
- services = calc_services(branch, features)
+ services = calc_services(branch, features, role)
LOG.debug("Services: %s " % services)
if opts.mode == "services":
- print ",".join(services)
+ print(",".join(services))
if __name__ == "__main__":
diff --git a/test-requirements.txt b/test-requirements.txt
new file mode 100644
index 00000000..d46548ad
--- /dev/null
+++ b/test-requirements.txt
@@ -0,0 +1 @@
+PyYAML>=3.1.0
diff --git a/tox.ini b/tox.ini
index 303fbcca..4e61e445 100644
--- a/tox.ini
+++ b/tox.ini
@@ -7,10 +7,24 @@ skipsdist = True
install_command = pip install -U {opts} {packages}
setenv = VIRTUAL_ENV={envdir}
+[testenv:run-tests]
+deps = -r{toxinidir}/test-requirements.txt
+commands =
+ bash -c "./run-tests.sh"
+
+[testenv:py3-run-tests]
+basepython = python3
+deps = -r{toxinidir}/test-requirements.txt
+commands =
+ bash -c "./run-tests.sh"
+
[testenv:bashate]
-whitelist_externals =
- bash
deps=
- {env:BASHATE_INSTALL_PATH:bashate==0.3.1}
+ {env:BASHATE_INSTALL_PATH:bashate==0.5.0}
+whitelist_externals=
+ bash
+# bashate options:
+# -i E006 : ignore long lines
+# -e E005 : error if not starting with #!
commands =
- bash -c "ls *.sh | xargs bashate -v {posargs}"
+ bash -c "ls *.sh | xargs bashate -v {posargs} -iE006 -eE005"