Merge "Add retry for cinder volume"
diff --git a/tcp_tests/templates/cookied-bm-contrail40-nfv/core.yaml b/tcp_tests/templates/cookied-bm-contrail40-nfv/core.yaml
new file mode 100644
index 0000000..2675136
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-contrail40-nfv/core.yaml
@@ -0,0 +1,126 @@
+{% from 'cookied-bm-contrail40-nfv/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-bm-contrail40-nfv/underlay.yaml' import HOSTNAME_KVM01 with context %}
+{% from 'cookied-bm-contrail40-nfv/underlay.yaml' import HOSTNAME_KVM02 with context %}
+{% from 'cookied-bm-contrail40-nfv/underlay.yaml' import HOSTNAME_KVM03 with context %}
+
+- description: Install glusterfs
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glusterfs:server' state.sls glusterfs.server.service
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Set up glusterfs on the primary controller
+ cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 30}
+ skip_fail: false
+
+- description: Check the gluster status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+# Install support services
+- description: Install keepalived on ctl01
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keepalived:cluster and *01*' state.sls keepalived
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: true
+
+- description: Install keepalived
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keepalived:cluster' state.sls keepalived
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: true
+
+- description: Install RabbitMQ on ctl01
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install RabbitMQ
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@rabbitmq:server' state.sls rabbitmq
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check the rabbitmq status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install Galera on first server
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@galera:master' state.sls galera
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install Galera on other servers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@galera:slave' state.sls galera -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check mysql status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+- description: Install haproxy
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@haproxy:proxy' state.sls haproxy
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check haproxy status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@haproxy:proxy' service.status haproxy
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install nginx on prx nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@nginx:server' state.sls nginx
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Restart rsyslog
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@haproxy:proxy' service.restart rsyslog
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install memcached on all controllers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@memcached:server' state.sls memcached
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check the OpenStack control VIP
+ cmd: |
+ OPENSTACK_CONTROL_ADDRESS=$(salt --out=newline_values_only "ctl01*" pillar.get _param:cluster_vip_address);
+ echo "_param:cluster_vip_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 10}
+ skip_fail: false
diff --git a/tcp_tests/templates/cookied-bm-contrail40-nfv/lab04-physical-inventory.yaml b/tcp_tests/templates/cookied-bm-contrail40-nfv/lab04-physical-inventory.yaml
new file mode 100644
index 0000000..7e21691
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-contrail40-nfv/lab04-physical-inventory.yaml
@@ -0,0 +1,83 @@
+nodes:
+ cfg01.cookied-bm-contrail40-nfv.local:
+ reclass_storage_name: infra_config_node01
+ roles:
+ - infra_config
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ # Physical nodes
+
+ kvm01.cookied-bm-contrail40-nfv.local:
+ reclass_storage_name: infra_kvm_node01
+ roles:
+ - infra_kvm
+ - linux_system_codename_xenial
+ interfaces:
+ enp9s0f0:
+ role: single_mgm
+ enp9s0f1:
+ role: bond0_ab_ovs_vlan_ctl
+
+ kvm02.cookied-bm-contrail40-nfv.local:
+ reclass_storage_name: infra_kvm_node02
+ roles:
+ - infra_kvm
+ - linux_system_codename_xenial
+ interfaces:
+ enp9s0f0:
+ role: single_mgm
+ enp9s0f1:
+ role: bond0_ab_ovs_vlan_ctl
+
+ kvm03.cookied-bm-contrail40-nfv.local:
+ reclass_storage_name: infra_kvm_node03
+ roles:
+ - infra_kvm
+ - linux_system_codename_xenial
+ interfaces:
+ enp9s0f0:
+ role: single_mgm
+ enp9s0f1:
+ role: bond0_ab_ovs_vlan_ctl
+
+ cmp001.cookied-bm-contrail40-nfv.local:
+ reclass_storage_name: openstack_compute_node01
+ roles:
+ - openstack_compute_dpdk
+ - openstack_compute_sriov
+ - features_lvm_backend
+ - linux_system_codename_xenial
+ interfaces:
+ enp2s0f1:
+ role: single_mgm
+ deploy_address: 172.16.49.73
+ enp5s0f0:
+ role: single_contrail_dpdk_prv
+ tenant_address: 192.168.0.101
+ dpdk_pci: "'0000:05:00.0'"
+ dpdk_mac: '90:e2:ba:19:c2:18'
+ enp2s0f0:
+ role: single_vlan_ctl
+ single_address: 10.167.8.101
+
+ cmp002.cookied-bm-contrail40-nfv.local:
+ reclass_storage_name: openstack_compute_node02
+ roles:
+ - openstack_compute_dpdk
+ - openstack_compute_sriov
+ - features_lvm_backend
+ - linux_system_codename_xenial
+ interfaces:
+ enp2s0f1:
+ role: single_mgm
+ deploy_address: 172.16.49.74
+ enp5s0f0:
+ role: single_contrail_dpdk_prv
+ tenant_address: 192.168.0.102
+ dpdk_pci: "'0000:05:00.0'"
+ dpdk_mac: '00:1b:21:87:21:98'
+ enp2s0f0:
+ role: single_vlan_ctl
+ single_address: 10.167.8.102
diff --git a/tcp_tests/templates/cookied-bm-contrail40-nfv/master_config.sh b/tcp_tests/templates/cookied-bm-contrail40-nfv/master_config.sh
new file mode 100644
index 0000000..0fc5723
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-contrail40-nfv/master_config.sh
@@ -0,0 +1,154 @@
+#!/bin/bash -xe
+
+export SALT_MASTER_DEPLOY_IP=${SALT_MASTER_DEPLOY_IP:-"172.16.49.66"}
+export SALT_MASTER_MINION_ID=${SALT_MASTER_MINION_ID:-"cfg01.cookied-bm-contrail40-nfv.local"}
+export DEPLOY_NETWORK_GW=${DEPLOY_NETWORK_GW:-"172.16.49.65"}
+export DEPLOY_NETWORK_NETMASK=${DEPLOY_NETWORK_NETMASK:-"255.255.255.192"}
+export DNS_SERVERS=${DNS_SERVERS:-"172.18.208.44"}
+export http_proxy=${http_proxy:-""}
+export https_proxy=${https_proxy:-""}
+export PIPELINES_FROM_ISO=${PIPELINES_FROM_ISO:-"false"}
+export PIPELINE_REPO_URL=${PIPELINE_REPO_URL:-"https://github.com/Mirantis"}
+export MCP_VERSION=${MCP_VERSION:-"proposed"}
+export MCP_SALT_REPO_KEY=${MCP_SALT_REPO_KEY:-"http://apt.mirantis.com/public.gpg"}
+export MCP_SALT_REPO_URL=${MCP_SALT_REPO_URL:-"http://apt.mirantis.com/xenial"}
+export MCP_SALT_REPO="deb [arch=amd64] $MCP_SALT_REPO_URL $MCP_VERSION salt"
+export FORMULAS="salt-formula-*"
+# Not available in 2018.4 and earlier.
+export LOCAL_REPOS=false
+# For cloning from the aptly image, use port 8088:
+#export PIPELINE_REPO_URL=http://172.16.47.182:8088
+
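+# Apt workarounds: tune fetch behaviour, disable recommends/suggests and all periodic/unattended updates, then reset the source lists.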
+function _apt_cfg(){
+ # TODO: remove this function after the 2018.4 release
+ echo "Acquire::CompressionTypes::Order gz;" >/etc/apt/apt.conf.d/99compression-workaround-salt
+ echo "Acquire::EnableSrvRecords false;" >/etc/apt/apt.conf.d/99enablesrvrecords-false
+ echo "Acquire::http::Pipeline-Depth 0;" > /etc/apt/apt.conf.d/99aws-s3-mirrors-workaround-salt
+ echo "APT::Install-Recommends false;" > /etc/apt/apt.conf.d/99dont_install_recommends-salt
+ echo "APT::Install-Suggests false;" > /etc/apt/apt.conf.d/99dont_install_suggests-salt
+ echo "Acquire::Languages none;" > /etc/apt/apt.conf.d/99dont_acquire_all_languages-salt
+ echo "APT::Periodic::Update-Package-Lists 0;" > /etc/apt/apt.conf.d/99dont_update_package_list-salt
+ echo "APT::Periodic::Download-Upgradeable-Packages 0;" > /etc/apt/apt.conf.d/99dont_update_download_upg_packages-salt
+ echo "APT::Periodic::Unattended-Upgrade 0;" > /etc/apt/apt.conf.d/99disable_unattended_upgrade-salt
+ echo "INFO: cleaning sources lists"
+ rm -rv /etc/apt/sources.list.d/* || true
+ echo > /etc/apt/sources.list || true
+}
+
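+# Log in to MAAS (new or legacy scheme), restrict the enlist package repo, and import Xenial amd64 boot images.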
+function _post_maas_cfg(){
+ local PROFILE=mirantis
+ # TODO: remove this check and use only the new version after the 2018.4 release
+ if [[ -f /var/lib/maas/.maas_login.sh ]]; then
+ /var/lib/maas/.maas_login.sh
+ else
+ echo "WARNING: Attempt to use old maas login schema.."
+ TOKEN=$(cat /var/lib/maas/.maas_credentials);
+ maas list | cut -d' ' -f1 | xargs -I{} maas logout {}
+ maas login $PROFILE http://127.0.0.1:5240/MAAS/api/2.0/ "${TOKEN}"
+ fi
+ # Disable backports for the MAAS enlist package repo
+ maas ${PROFILE} package-repository update 1 "disabled_pockets=backports"
+ maas ${PROFILE} package-repository update 1 "arches=amd64"
+ # Download the Ubuntu image from the local MAAS mirror
+ if [[ "$LOCAL_REPOS" == "true" ]] ; then
+ maas ${PROFILE} boot-source-selections create 2 os="ubuntu" release="xenial" arches="amd64" subarches="*" labels="*"
+ echo "WARNING: Removing default MAAS stream:"
+ maas ${PROFILE} boot-source read 1
+ maas ${PROFILE} boot-source delete 1
+ maas ${PROFILE} boot-resources import
+ # TODO: wait for the import to finish, then stop it.
+ else
+ maas ${PROFILE} boot-source-selections create 1 os="ubuntu" release="xenial" arches="amd64" subarches="*" labels="*"
+ maas ${PROFILE} boot-resources import
+ fi
+ while [ ! -d /var/lib/maas/boot-resources/current/ubuntu/amd64/generic/xenial ]
+ do
+ sleep 10
+ echo "WARNING: Image is still not ready"
+ done
+}
+
+### Body
+echo "Preparing metadata model"
+mount /dev/cdrom /mnt/
+cp -rT /mnt/model/model /srv/salt/reclass
+chown -R root:root /srv/salt/reclass/*
+chown -R root:root /srv/salt/reclass/.git* || true
+chmod -R 644 /srv/salt/reclass/classes/cluster/* || true
+chmod -R 644 /srv/salt/reclass/classes/system/* || true
+
+echo "Configuring salt"
+#service salt-master restart
+envsubst < /root/minion.conf > /etc/salt/minion.d/minion.conf
+service salt-minion restart
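+# Wait until the salt-master registers its own minion key.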
+while true; do
+ salt-key | grep "$SALT_MASTER_MINION_ID" && break
+ sleep 5
+done
+sleep 5
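+# Remove any accepted minion keys other than the master's own.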
+for i in $(salt-key -l accepted | grep -v Accepted | grep -v "$SALT_MASTER_MINION_ID"); do
+ salt-key -d $i -y
+done
+
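+# Point the Jenkins location configuration at the actual Salt master deploy IP.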
+find /var/lib/jenkins/jenkins.model.JenkinsLocationConfiguration.xml -type f -print0 | xargs -0 sed -i -e 's/10.167.4.15/'$SALT_MASTER_DEPLOY_IP'/g'
+
+echo "updating git repos"
+if [[ "$PIPELINES_FROM_ISO" == "true" ]] ; then
+ cp -r /mnt/mk-pipelines/* /home/repo/mk/mk-pipelines/
+ cp -r /mnt/pipeline-library/* /home/repo/mcp-ci/pipeline-library/
+ umount /dev/cdrom || true
+ chown -R git:www-data /home/repo/mk/mk-pipelines/*
+ chown -R git:www-data /home/repo/mcp-ci/pipeline-library/*
+else
+ umount /dev/cdrom || true
+ git clone --mirror "${PIPELINE_REPO_URL}/mk-pipelines.git" /home/repo/mk/mk-pipelines/
+ git clone --mirror "${PIPELINE_REPO_URL}/pipeline-library.git" /home/repo/mcp-ci/pipeline-library/
+ chown -R git:www-data /home/repo/mk/mk-pipelines/*
+ chown -R git:www-data /home/repo/mcp-ci/pipeline-library/*
+fi
+
+echo "installing formulas"
+_apt_cfg
+curl -s $MCP_SALT_REPO_KEY | sudo apt-key add -
+echo $MCP_SALT_REPO > /etc/apt/sources.list.d/mcp_salt.list
+apt-get update
+apt-get install -y $FORMULAS
+rm -rf /srv/salt/reclass/classes/service/*
+cd /srv/salt/reclass/classes/service/
+ls -1 /usr/share/salt-formulas/reclass/service/ | xargs -I{} ln -s /usr/share/salt-formulas/reclass/service/{}
+cd /root
+
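+# Refresh pillar data, sync modules, and verify that the reclass model renders.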
+salt-call saltutil.refresh_pillar
+salt-call saltutil.sync_all
+if ! reclass -n "${SALT_MASTER_MINION_ID}" > /dev/null ; then
+ echo "ERROR: Reclass render failed!"
+ exit 1
+fi
+
+salt-call state.sls linux.network,linux,openssh,salt
+salt-call -t5 pkg.install salt-master,salt-minion
+sleep 5
+salt-call state.sls salt
+# Sometimes MAAS can get stuck, so retry the states once on failure.
+salt-call state.sls maas.cluster,maas.region || salt-call state.sls maas.cluster,maas.region
+salt-call state.sls reclass,ntp
+
+_post_maas_cfg
+salt-call state.sls maas.cluster,maas.region || salt-call state.sls maas.cluster,maas.region
+
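+# Pre-seed known_hosts so Jenkins jobs can SSH to cfg01 without an interactive prompt.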
+ssh-keyscan cfg01 > /var/lib/jenkins/.ssh/known_hosts || true
+
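+# Run the jenkins.client state only when the jenkins:client pillar defines jobs.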
+pillar=$(salt-call pillar.data jenkins:client)
+
+if [[ $pillar == *"job"* ]]; then
+ salt-call state.sls jenkins.client
+fi
+
diff --git a/tcp_tests/templates/cookied-bm-contrail40-nfv/openstack.yaml b/tcp_tests/templates/cookied-bm-contrail40-nfv/openstack.yaml
new file mode 100644
index 0000000..8675650
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-contrail40-nfv/openstack.yaml
@@ -0,0 +1,321 @@
+{% from 'cookied-bm-contrail40-nfv/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-bm-contrail40-nfv/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'cookied-bm-contrail40-nfv/underlay.yaml' import DOMAIN_NAME with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
+{% set PATTERN = os_env('PATTERN', 'false') %}
+{% set RUN_TEMPEST = os_env('RUN_TEMPEST', 'false') %}
+# Install OpenStack control services
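+# Note: "-b 1" (--batch-size 1) below makes salt apply a state to one node at a time, keeping clustered services available during installation.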
+
+- description: Install glance on all controllers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glance:server' state.sls glance -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install keystone service (note that different fernet keys are created on different nodes)
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' state.sls keystone.server -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 15}
+ skip_fail: false
+
+- description: Restart apache due to PROD-10477
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl restart apache2"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 15}
+ skip_fail: false
+
+- description: Check apache status due to PROD-10477
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl status apache2"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 15}
+ skip_fail: false
+
+- description: Mount glusterfs.client volumes (requires the 'keystone' and 'glusterfs' system users to exist)
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glance:server' state.sls glusterfs.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Update fernet keys for keystone server on the mounted glusterfs volume
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' state.sls keystone.server -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Populate keystone services/tenants/admins
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:client' state.sls keystone.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
+- description: Check keystone service-list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack service list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check glance image-list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonerc; glance image-list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+
+- description: Install nova on all controllers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@nova:controller' state.sls nova -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
+- description: Check nova service-list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonerc; nova --debug service-list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+
+
+- description: Install cinder
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@cinder:controller' state.sls cinder -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install cinder volume
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@cinder:volume' state.sls cinder
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
+- description: Check cinder list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonerc; cinder list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+
+- description: Install neutron service
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@neutron:server' state.sls neutron -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+# Install OpenContrail
+- description: Install docker for Opencontrail
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@opencontrail:database' state.sls docker.host
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 20}
+ skip_fail: false
+
+- description: Install Opencontrail requirements
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@opencontrail:database' state.sls opencontrail exclude=opencontrail.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 20}
+ skip_fail: false
+
+- description: Spawn Opencontrail docker containers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@opencontrail:database' state.sls docker.client && sleep 15;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Finalize opencontrail services
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@opencontrail:database and *01*' state.sls opencontrail.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 30}
+ skip_fail: true
+
+- description: Check contrail status
+ cmd: sleep 15; salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@opencontrail:database' cmd.run 'doctrail all contrail-status'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Prepare Opencontrail computes (run highstate, excluding opencontrail.client)
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@opencontrail:compute' state.highstate exclude=opencontrail.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
+- description: Install heat service
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@heat:server' state.sls heat -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check heat service
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack orchestration resource type list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 5, delay: 10}
+ skip_fail: false
+
+- description: Deploy horizon dashboard
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@horizon:server' state.sls horizon
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+- description: Deploy nginx proxy
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@nginx:server' state.sls nginx
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+
+# Install compute node
+
+- description: Apply formulas for compute nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+- description: Check IP on computes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
+ 'ip a'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 10, delay: 30}
+ skip_fail: false
+
+
+# Upload cirros image
+
+- description: Upload cirros image on ctl01
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ 'wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-i386-disk.img'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 30}
+ skip_fail: false
+
+- description: Register image in glance
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; glance --timeout 120 image-create --name cirros --visibility public --disk-format qcow2 --container-format bare --progress < /root/cirros-0.3.4-i386-disk.img'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create net04
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron net-create net04'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create subnet_net04
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron subnet-create net04 192.168.0.0/24 --name net04__subnet'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create router
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron router-create net04_router01'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Add interface
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
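+# Open the default security group for TCP and ICMP in both directions (test-environment convenience).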
+- description: Allow all
+ cmd: |
+ salt 'ctl01*' cmd.run '. /root/keystonercv3; openstack security group rule create default --egress --protocol tcp';
+ salt 'ctl01*' cmd.run '. /root/keystonercv3; openstack security group rule create default --ingress --protocol tcp';
+ salt 'ctl01*' cmd.run '. /root/keystonercv3; openstack security group rule create default --egress --protocol icmp';
+ salt 'ctl01*' cmd.run '. /root/keystonercv3; openstack security group rule create default --ingress --protocol icmp';
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: true
+
+- description: Sync time
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
+ 'service ntp stop; ntpd -gq; service ntp start'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Install docker.io on ctl
+ cmd: salt "ctl01*" cmd.run 'apt-get install docker.io -y'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Enable forward policy
+ cmd: salt "ctl01*" cmd.run 'iptables --policy FORWARD ACCEPT'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Hack resolv.conf on VCP nodes for internal services access
+ cmd: |
+ salt --hard-crash --state-output=mixed --state-verbose=False -C '* and not kvm* and not cmp* and not gtw* and not cfg*' cmd.run "echo 'nameserver 172.18.208.44' > /etc/resolv.conf;"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Remove crash files from /var/crashes/ left over from vrouter crashes
+ cmd: salt "cmp*" cmd.run "rm -rf /var/crashes/*"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: true
+
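+# Reboot the computes so settings that require a restart (e.g. hugepages for DPDK) take effect, and give them time to come back.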
+- description: Reboot computes
+ cmd: |
+ salt "cmp*" system.reboot;
+ sleep 600;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: true
+
+- description: Finalize contrail
+ cmd: salt -C 'I@opencontrail:compute' state.sls opencontrail.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Check status for contrail services
+ cmd: |
+ sleep 15;
+ salt -C 'I@opencontrail:database' cmd.run 'doctrail all contrail-status'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Workaround to allow using hostnames
+ cmd: |
+ CTL01_ADDRESS=`salt --out=newline_values_only 'ctl01*' network.interface_ip ens2`;
+ echo "$CTL01_ADDRESS ctl01.{{ DOMAIN_NAME }} ctl01" >> /etc/hosts;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: true
diff --git a/tcp_tests/templates/cookied-bm-contrail40-nfv/salt-context-cookiecutter-contrail-nfv.yaml b/tcp_tests/templates/cookied-bm-contrail40-nfv/salt-context-cookiecutter-contrail-nfv.yaml
new file mode 100644
index 0000000..a9e33af
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-contrail40-nfv/salt-context-cookiecutter-contrail-nfv.yaml
@@ -0,0 +1,252 @@
+default_context:
+ backup_private_key: |-
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIEpAIBAAKCAQEApq5WxkagvkNWO85FtS1ByHDKkNWhmFdpY9D49dZrSwuE9XGQ
+ +WW79F2AGwKki2N2j1iyfpMEKRIEIb/5cbl6fZzTGTndhd7Jxkx6xGdhZkX9VM6N
+ qotaO4ckj7NsfiZKTwp58/YSRkz3Ii1XPpt0NQqZLuNAwus4Bl9e1Wk5dNw+gHN3
+ m4JmAczJbQ81lrQURC7f3d2xjoFkXWXC2FKkMS6AOl1j87ATeeSG9xeHLbOvIyBw
+ 7IwP9MFA5vUtHl8DzsdmzWmVRabe2VMtGa1Ya5JTTgK8nXmtYW3dvEQ/DtgzcKPJ
+ 2fO31cze9LRpDSS0E6d/cISBgzsPfBJuUCGHTQIDAQABAoIBAQCmFVVVoA6PRt1o
+ HjMLQpsntGvDQXsRJxhWY2WO4CZs0n+baZvBRgOwjHIXd9ypH2SFlSXWRXuByPfh
+ AT72eJB7FYaqviPjPojjVFWH2lMM63RvypkSdGRmqFRf87KJSHIGrDO0SV8QOaSO
+ o4spURDLwVG9jKd9EY/zmZgPIhgkPazzVrFoGr8YnKE6qSJh5HivscNl8D3+36SN
+ 5uhuElzBTNGd2iU4elLJIGjahetIalEZqL0Fvi1ZzAWoK0YXDmbI8uG8/epJ5Sy4
+ XyyHc7+0Jvm1JWwXczdDFuy+RlL9r66Ja8V9MauuJyigOKnNOJhE2b5/klEcczhC
+ AHA/Hw4pAoGBANcJ/gdouXgcuq3JNXq5Cb4w9lvZbDwQdEtY3+qdHAVndomoGsDT
+ USKq6ZRZzkAAnjiN2YywAQzqFGevoYig+WNLTPd2TdNdlNHfw9Wc4G2iSFb1pIr2
+ uoJ+TQGv4Ck/7LS2NVnWfqNoeo8Iq+Wvnh+F3twv0UIazGI8Bj/xLxvrAoGBAMZu
+ QErf3vzbY4g50HFVbPNi2Nl63A7/P421pEe4JAT1clwIVMyntRpNdVyHKkkKdDWr
+ 98tBOhf71+shgsVPEMkfPyZ2nuiBit7LzZ+EAztG9i3hhm8yIUPXoipo0YCOe+yF
+ r+r03pX97aciXuRMPmMTHH6N1vFaUXHSgVs6Y7OnAoGAP4v1ZO0eug8LX6XxRuX9
+ qhXAB96VrJ5UL5wA980b5cDwd7eUyFzqQittwWhUmfdUynOo0XmFpfJau1VckAq6
+ CAzNnud4Ejk6bFcLAUpNzDhD1mbbDDHjZgK68P+vZ6E7ax/ZXkYTwGh0p2Yxnjuq
+ p7gg5sK+vSE8Ot9wHV9Bw6cCgYEAguPq6PjvgF+/Mfbg9kFhUtKbNCoEyqe4ZmOw
+ 79YZfGPjga3FMhJWNfluNxC55eBNc7HyDFMEXRm0/dbnCfvzmJdR8q9AdyIsVnad
+ NmHAN/PBI9al9OdeZf/xaoQl3eUe/Y/Z0OShhtMvVpYnffSFGplarGgnpqDrJGe1
+ CFZlufUCgYBemuy+C6gLwTOzhcTcCo4Ir5ZiKcXAE6ufk8OIdGnMWJcmTxxmIMY6
+ XyKu0oobWpOBXPiipQ6TmDpI+flxWYRHwPFFzPa+jhCtupRuTdORKrklV2UfdIWZ
+ N4e+J2yCu7lyz0upwa3MkFIVQ1ez0o8X9NRvAz243qi64y1+KOMPmQ==
+ -----END RSA PRIVATE KEY-----
+ backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCmrlbGRqC+Q1Y7zkW1LUHIcMqQ1aGYV2lj0Pj11mtLC4T1cZD5Zbv0XYAbAqSLY3aPWLJ+kwQpEgQhv/lxuXp9nNMZOd2F3snGTHrEZ2FmRf1Uzo2qi1o7hySPs2x+JkpPCnnz9hJGTPciLVc+m3Q1Cpku40DC6zgGX17VaTl03D6Ac3ebgmYBzMltDzWWtBRELt/d3bGOgWRdZcLYUqQxLoA6XWPzsBN55Ib3F4cts68jIHDsjA/0wUDm9S0eXwPOx2bNaZVFpt7ZUy0ZrVhrklNOArydea1hbd28RD8O2DNwo8nZ87fVzN70tGkNJLQTp39whIGDOw98Em5QIYdN
+ bmk_enabled: 'False'
+ ceph_enabled: 'False'
+ openstack_nfv_dpdk_enabled: 'True'
+ openstack_nfv_sriov_enabled: 'True'
+ openstack_nfv_sriov_network: physnet1
+ openstack_nfv_sriov_numvfs: '7'
+ openstack_nfv_sriov_pf_nic: enp5s0f1
+ openstack_nova_compute_hugepages_count: '16'
+ openstack_nova_compute_nfv_req_enabled: 'True'
+ openstack_nova_cpu_pinning: 6,7,8,9,10,11
+
+ cicd_control_node01_address: 10.167.8.91
+ cicd_control_node01_hostname: cid01
+ cicd_control_node02_address: 10.167.8.92
+ cicd_control_node02_hostname: cid02
+ cicd_control_node03_address: 10.167.8.93
+ cicd_control_node03_hostname: cid03
+ cicd_control_vip_address: 10.167.8.90
+ cicd_control_vip_hostname: cid
+ cicd_enabled: 'True'
+ cicd_private_key: |-
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIEowIBAAKCAQEAuBC224XQZFyzqC56EyS7yr/rlpRRYsr2vji77faoWQFmgYbZ
+ oeyqqqm8eSN0Cc0wAnxWsQ7H3ZN9uTnyWVrsogs1vx8597iorZAT4Mu6JDbkWlZh
+ IUHo9P9itWJdUWpxjDavqIvjZo+DmOO1mfv9K1asP8COanQEsgHSyuf+XKMBg0ko
+ kEammAUtS9HRxCAJ47QgLPSCFij5ih/MRWY3HWFUFEF3gRdUodWmeJNmW+7JH7T2
+ wId1kn8oRya7eadKxd6wEaCGm5ILXwwVFmFkOGlEeC8wHnbkatd/A53DxzUfOHBi
+ 27Gaf83DPxKqDWW0aAh7b49EnFhdkuF3ZyXbYwIDAQABAoIBAFtioQbYpyBNDj2f
+ 5af/guUk6Di4pregAWVsEZIR9n9KPLRuWTsVn55f611Rhtke8IkrZnc92WlfQvpl
+ lLdcd0P0wNiFDmi5W7XgZJ4lR+OXBUT8wfibGqgY688WaTJ04K82r3vFCD/xXOrZ
+ k15CR+3ueFKmrY6Yz4P5d8iZ6iXfR47ZYm+wdmx3vmJ+IVfZCRRPAGP25GxqsOs5
+ 3qMl9hV7a1MGVVaVPmVzrq0Xzk6IAW2+0p5udGmezn4y6HFPIvOriUVUkni3mNjX
+ dokrETqVbOjkdFkSw28cMBfP/tO3vyfGh5VX24xvRztWtcpAm6Qr5lKEDSvFv13r
+ 0z/DxRECgYEA8oZ4+w2cqLJz91fKpWutGZKj4m/HEY6FZfjVflsTT2bKTt+nTtRY
+ qAeKGYIbrjZMAyy4dG+RgW7WORFcRHFyeSrS5Aw51zO+JQ0KzuBv83UqcbqNLcsz
+ BAPHPk/7f30W4wuInqgXrWMTiGePz0hQsvNU6aR7MH4Sd2C0ot4W+00CgYEAwkq+
+ UtugC8ywK+F0xZvjXHi3VJRJZf4WLtRxZGy8CimaritSKpZZRG23Sk0ifDE6+4fD
+ VtxeTfTmeZBictg/fEAPVHzhsNPNyDMA8t7t4ZKmMX9DNYAqVX21s5YQ9encH6KT
+ 1q0NRpjvw7QzhfbFfsxeAxHKZFbFlVmROplF+W8CgYAWHVz6x4r5dwxMCZ1Y6DCo
+ nE6FX1vvpedUHRSaqQNhwiXAe3RuI77R054sJUkQ4bKct386XtIN02WFXqfjNdUS
+ Z21DjjnX/cfg6QeLRbvvn0d3h2NIQbctLosEi5aLUYS8v1h93yYJkXc+gPMEG7wA
+ FWAwzebNzTEx4YeXMlk2IQKBgCt8JxTMawm5CkUH9Oa1eTGdIwsfFT5qm/RnP+nG
+ HF/559DLiVxWwiv6kmdi1DEPo6/gNuwd7k1sXpkeo6oolCzu+X9jY+/7t7bzE2dI
+ Vd2CwQebACPdR5xSwnQrRiiD6ux5qrUFjk8as68NieqVzKYQf4oYVUAX26kNnt+K
+ poqpAoGBAINHTGBFVK3XC+fCbu7rhFS8wZAjBmvEDHGnUBp19JREEr3q7a2D84T3
+ 17zo0bwxL09QFnOCDDJcXsh8eGbCONV0hJvJU2o7wGol+lRFSd+v6WYZ37bPEyEx
+ l8kv0xXAElriC1RE1CNtvoOn/uxyRs+2OnNgBVxtAGqUWVdpm6CD
+ -----END RSA PRIVATE KEY-----
+ cicd_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC4ELbbhdBkXLOoLnoTJLvKv+uWlFFiyva+OLvt9qhZAWaBhtmh7Kqqqbx5I3QJzTACfFaxDsfdk325OfJZWuyiCzW/Hzn3uKitkBPgy7okNuRaVmEhQej0/2K1Yl1RanGMNq+oi+Nmj4OY47WZ+/0rVqw/wI5qdASyAdLK5/5cowGDSSiQRqaYBS1L0dHEIAnjtCAs9IIWKPmKH8xFZjcdYVQUQXeBF1Sh1aZ4k2Zb7skftPbAh3WSfyhHJrt5p0rF3rARoIabkgtfDBUWYWQ4aUR4LzAeduRq138DncPHNR84cGLbsZp/zcM/EqoNZbRoCHtvj0ScWF2S4XdnJdtj
+ cluster_domain: cookied-bm-4.0-contrail.local
+ cluster_name: cookied-bm-4.0-contrail
+ opencontrail_version: 4.0
+ linux_repo_contrail_component: oc40
+ compute_bond_mode: active-backup
+ compute_padding_with_zeros: 'True'
+ compute_primary_first_nic: eth1
+ compute_primary_second_nic: eth2
+ context_seed: TFWH0xgUevQkslwhbWVedwwYhBtImHLiGUIExjT9ahxPAUBHh9Kg3QSAIrqTqtvk
+ control_network_netmask: 255.255.255.0
+ control_network_subnet: 10.167.8.0/24
+ control_vlan: '2422'
+ cookiecutter_template_branch: ''
+ cookiecutter_template_credentials: gerrit
+ cookiecutter_template_url: https://gerrit.mcp.mirantis.net/mk/cookiecutter-templates.git
+ deploy_network_gateway: 172.16.49.65
+ deploy_network_netmask: 255.255.255.192
+ deploy_network_subnet: 172.16.49.64/26
+ deployment_type: physical
+ dns_server01: 172.18.208.44
+ dns_server02: 8.8.4.4
+ email_address: sgudz@mirantis.com
+ infra_bond_mode: active-backup
+ infra_deploy_nic: eth0
+ infra_kvm01_control_address: 10.167.8.241
+ infra_kvm01_deploy_address: 172.16.49.67
+ infra_kvm01_hostname: kvm01
+ infra_kvm02_control_address: 10.167.8.242
+ infra_kvm02_deploy_address: 172.16.49.68
+ infra_kvm02_hostname: kvm02
+ infra_kvm03_control_address: 10.167.8.243
+ infra_kvm03_deploy_address: 172.16.49.69
+ infra_kvm03_hostname: kvm03
+ infra_kvm_vip_address: 10.167.8.240
+ infra_primary_first_nic: eth1
+ infra_primary_second_nic: eth2
+ internal_proxy_enabled: 'False'
+ kqueen_custom_mail_enabled: 'False'
+ kqueen_enabled: 'False'
+ kubernetes_enabled: 'False'
+ local_repositories: 'False'
+ maas_deploy_address: 172.16.49.66
+ maas_deploy_cidr: 172.16.49.64/26
+ maas_deploy_gateway: 172.16.49.65
+ maas_deploy_range_end: 172.16.49.119
+ maas_deploy_range_start: 172.16.49.77
+ maas_deploy_vlan: '0'
+ maas_dhcp_enabled: 'True'
+ maas_fabric_name: fabric-51
+ maas_hostname: cfg01
+ maas_manage_deploy_network: 'True'
+ mcp_common_scripts_branch: ''
+ mcp_version: proposed
+ offline_deployment: 'False'
+ opencontrail_analytics_address: 10.167.8.30
+ opencontrail_analytics_hostname: nal
+ opencontrail_analytics_node01_address: 10.167.8.31
+ opencontrail_analytics_node01_hostname: nal01
+ opencontrail_analytics_node02_address: 10.167.8.32
+ opencontrail_analytics_node02_hostname: nal02
+ opencontrail_analytics_node03_address: 10.167.8.33
+ opencontrail_analytics_node03_hostname: nal03
+ opencontrail_compute_iface_mask: '24'
+ opencontrail_control_address: 10.167.8.20
+ opencontrail_control_hostname: ntw
+ opencontrail_control_node01_address: 10.167.8.21
+ opencontrail_control_node01_hostname: ntw01
+ opencontrail_control_node02_address: 10.167.8.22
+ opencontrail_control_node02_hostname: ntw02
+ opencontrail_control_node03_address: 10.167.8.23
+ opencontrail_control_node03_hostname: ntw03
+ opencontrail_enabled: 'True'
+ opencontrail_router01_address: 10.167.8.100
+ opencontrail_router01_hostname: rtr01
+ opencontrail_router02_address: 10.167.8.101
+ opencontrail_router02_hostname: rtr02
+ openssh_groups: ''
+ openstack_benchmark_node01_address: 10.167.8.95
+ openstack_benchmark_node01_hostname: bmk01
+ openstack_cluster_size: compact
+ openstack_compute_count: '2'
+ openstack_compute_rack01_hostname: cmp
+ openstack_compute_rack01_single_subnet: 10.167.8
+ openstack_compute_rack01_tenant_subnet: 192.168.0
+ openstack_control_address: 10.167.8.10
+ openstack_control_hostname: ctl
+ openstack_control_node01_address: 10.167.8.11
+ openstack_control_node01_hostname: ctl01
+ openstack_control_node02_address: 10.167.8.12
+ openstack_control_node02_hostname: ctl02
+ openstack_control_node03_address: 10.167.8.13
+ openstack_control_node03_hostname: ctl03
+ openstack_database_address: 10.167.8.50
+ openstack_database_hostname: dbs
+ openstack_database_node01_address: 10.167.8.51
+ openstack_database_node01_hostname: dbs01
+ openstack_database_node02_address: 10.167.8.52
+ openstack_database_node02_hostname: dbs02
+ openstack_database_node03_address: 10.167.8.53
+ openstack_database_node03_hostname: dbs03
+ openstack_enabled: 'True'
+ openstack_message_queue_address: 10.167.8.40
+ openstack_message_queue_hostname: msg
+ openstack_message_queue_node01_address: 10.167.8.41
+ openstack_message_queue_node01_hostname: msg01
+ openstack_message_queue_node02_address: 10.167.8.42
+ openstack_message_queue_node02_hostname: msg02
+ openstack_message_queue_node03_address: 10.167.8.43
+ openstack_message_queue_node03_hostname: msg03
+ openstack_network_engine: opencontrail
+ openstack_neutron_bgp_vpn: 'False'
+ openstack_neutron_bgp_vpn_driver: bagpipe
+ openstack_nova_compute_reserved_host_memory_mb: '900'
+ openstack_proxy_address: 10.167.8.80
+ openstack_proxy_hostname: prx
+ openstack_proxy_node01_address: 10.167.8.81
+ openstack_proxy_node01_hostname: prx01
+ openstack_proxy_node02_address: 10.167.8.82
+ openstack_proxy_node02_hostname: prx02
+ openstack_upgrade_node01_address: 10.167.8.19
+ openstack_version: pike
+ oss_enabled: 'False'
+ oss_node03_address: ${_param:stacklight_monitor_node03_address}
+ oss_webhook_app_id: '24'
+ oss_webhook_login_id: '13'
+ platform: openstack_enabled
+ public_host: ${_param:openstack_proxy_address}
+ publication_method: email
+ reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+ salt_api_password: BNRhXeGFdgVNx0Ikm2CAMw7eyeHf4grH
+ salt_api_password_hash: $6$jriFnsbZ$eon54Ts/Kn4ywKpexe/W8srpBF64cxr2D8jd0RzTH8zdZVjS3viYt64m1d1VlXenurwpcGLkGzaGmOI0dlOox0
+ salt_master_address: 172.16.49.66
+ salt_master_hostname: cfg01
+ salt_master_management_address: 172.16.49.66
+ shared_reclass_branch: ''
+ shared_reclass_url: https://gerrit.mcp.mirantis.net/salt-models/reclass-system.git
+ stacklight_enabled: 'True'
+ stacklight_log_address: 10.167.8.60
+ stacklight_log_hostname: log
+ stacklight_log_node01_address: 10.167.8.61
+ stacklight_log_node01_hostname: log01
+ stacklight_log_node02_address: 10.167.8.62
+ stacklight_log_node02_hostname: log02
+ stacklight_log_node03_address: 10.167.8.63
+ stacklight_log_node03_hostname: log03
+ stacklight_long_term_storage_type: influxdb
+ stacklight_monitor_address: 10.167.8.70
+ stacklight_monitor_hostname: mon
+ stacklight_monitor_node01_address: 10.167.8.71
+ stacklight_monitor_node01_hostname: mon01
+ stacklight_monitor_node02_address: 10.167.8.72
+ stacklight_monitor_node02_hostname: mon02
+ stacklight_monitor_node03_address: 10.167.8.73
+ stacklight_monitor_node03_hostname: mon03
+ stacklight_telemetry_address: 10.167.8.85
+ stacklight_telemetry_hostname: mtr
+ stacklight_telemetry_node01_address: 10.167.8.86
+ stacklight_telemetry_node01_hostname: mtr01
+ stacklight_telemetry_node02_address: 10.167.8.87
+ stacklight_telemetry_node02_hostname: mtr02
+ stacklight_telemetry_node03_address: 10.167.8.88
+ stacklight_telemetry_node03_hostname: mtr03
+ stacklight_version: '2'
+ static_ips_on_deploy_network_enabled: 'False'
+ tenant_network_gateway: 192.168.0.1
+ tenant_network_netmask: 255.255.255.0
+ tenant_network_subnet: 192.168.0.0/24
+ tenant_vlan: '2423'
+ upstream_proxy_enabled: 'False'
+ use_default_network_scheme: 'True'
+ openldap_domain: cookied-bm-4.0-contrail.local
+ openldap_enabled: 'True'
+ openldap_organisation: ${_param:cluster_name}
diff --git a/tcp_tests/templates/cookied-bm-contrail40-nfv/salt-context-cookiecutter-contrail.yaml b/tcp_tests/templates/cookied-bm-contrail40-nfv/salt-context-cookiecutter-contrail.yaml
new file mode 100644
index 0000000..1e06917
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-contrail40-nfv/salt-context-cookiecutter-contrail.yaml
@@ -0,0 +1,253 @@
+default_context:
+ backup_private_key: |-
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIEpAIBAAKCAQEApq5WxkagvkNWO85FtS1ByHDKkNWhmFdpY9D49dZrSwuE9XGQ
+ +WW79F2AGwKki2N2j1iyfpMEKRIEIb/5cbl6fZzTGTndhd7Jxkx6xGdhZkX9VM6N
+ qotaO4ckj7NsfiZKTwp58/YSRkz3Ii1XPpt0NQqZLuNAwus4Bl9e1Wk5dNw+gHN3
+ m4JmAczJbQ81lrQURC7f3d2xjoFkXWXC2FKkMS6AOl1j87ATeeSG9xeHLbOvIyBw
+ 7IwP9MFA5vUtHl8DzsdmzWmVRabe2VMtGa1Ya5JTTgK8nXmtYW3dvEQ/DtgzcKPJ
+ 2fO31cze9LRpDSS0E6d/cISBgzsPfBJuUCGHTQIDAQABAoIBAQCmFVVVoA6PRt1o
+ HjMLQpsntGvDQXsRJxhWY2WO4CZs0n+baZvBRgOwjHIXd9ypH2SFlSXWRXuByPfh
+ AT72eJB7FYaqviPjPojjVFWH2lMM63RvypkSdGRmqFRf87KJSHIGrDO0SV8QOaSO
+ o4spURDLwVG9jKd9EY/zmZgPIhgkPazzVrFoGr8YnKE6qSJh5HivscNl8D3+36SN
+ 5uhuElzBTNGd2iU4elLJIGjahetIalEZqL0Fvi1ZzAWoK0YXDmbI8uG8/epJ5Sy4
+ XyyHc7+0Jvm1JWwXczdDFuy+RlL9r66Ja8V9MauuJyigOKnNOJhE2b5/klEcczhC
+ AHA/Hw4pAoGBANcJ/gdouXgcuq3JNXq5Cb4w9lvZbDwQdEtY3+qdHAVndomoGsDT
+ USKq6ZRZzkAAnjiN2YywAQzqFGevoYig+WNLTPd2TdNdlNHfw9Wc4G2iSFb1pIr2
+ uoJ+TQGv4Ck/7LS2NVnWfqNoeo8Iq+Wvnh+F3twv0UIazGI8Bj/xLxvrAoGBAMZu
+ QErf3vzbY4g50HFVbPNi2Nl63A7/P421pEe4JAT1clwIVMyntRpNdVyHKkkKdDWr
+ 98tBOhf71+shgsVPEMkfPyZ2nuiBit7LzZ+EAztG9i3hhm8yIUPXoipo0YCOe+yF
+ r+r03pX97aciXuRMPmMTHH6N1vFaUXHSgVs6Y7OnAoGAP4v1ZO0eug8LX6XxRuX9
+ qhXAB96VrJ5UL5wA980b5cDwd7eUyFzqQittwWhUmfdUynOo0XmFpfJau1VckAq6
+ CAzNnud4Ejk6bFcLAUpNzDhD1mbbDDHjZgK68P+vZ6E7ax/ZXkYTwGh0p2Yxnjuq
+ p7gg5sK+vSE8Ot9wHV9Bw6cCgYEAguPq6PjvgF+/Mfbg9kFhUtKbNCoEyqe4ZmOw
+ 79YZfGPjga3FMhJWNfluNxC55eBNc7HyDFMEXRm0/dbnCfvzmJdR8q9AdyIsVnad
+ NmHAN/PBI9al9OdeZf/xaoQl3eUe/Y/Z0OShhtMvVpYnffSFGplarGgnpqDrJGe1
+ CFZlufUCgYBemuy+C6gLwTOzhcTcCo4Ir5ZiKcXAE6ufk8OIdGnMWJcmTxxmIMY6
+ XyKu0oobWpOBXPiipQ6TmDpI+flxWYRHwPFFzPa+jhCtupRuTdORKrklV2UfdIWZ
+ N4e+J2yCu7lyz0upwa3MkFIVQ1ez0o8X9NRvAz243qi64y1+KOMPmQ==
+ -----END RSA PRIVATE KEY-----
+ backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCmrlbGRqC+Q1Y7zkW1LUHIcMqQ1aGYV2lj0Pj11mtLC4T1cZD5Zbv0XYAbAqSLY3aPWLJ+kwQpEgQhv/lxuXp9nNMZOd2F3snGTHrEZ2FmRf1Uzo2qi1o7hySPs2x+JkpPCnnz9hJGTPciLVc+m3Q1Cpku40DC6zgGX17VaTl03D6Ac3ebgmYBzMltDzWWtBRELt/d3bGOgWRdZcLYUqQxLoA6XWPzsBN55Ib3F4cts68jIHDsjA/0wUDm9S0eXwPOx2bNaZVFpt7ZUy0ZrVhrklNOArydea1hbd28RD8O2DNwo8nZ87fVzN70tGkNJLQTp39whIGDOw98Em5QIYdN
+ bmk_enabled: 'False'
+ ceph_enabled: 'False'
+ opencontrail_compute_iface: enp5s0f0
+ openstack_nfv_dpdk_enabled: 'True'
+ openstack_nfv_sriov_enabled: 'True'
+ openstack_nfv_sriov_network: physnet1
+ openstack_nfv_sriov_numvfs: '7'
+ openstack_nfv_sriov_pf_nic: enp5s0f1
+ openstack_nova_compute_hugepages_count: '16'
+ openstack_nova_compute_nfv_req_enabled: 'True'
+ openstack_nova_cpu_pinning: 6,7,8,9,10,11
+
+ cicd_control_node01_address: 10.167.8.91
+ cicd_control_node01_hostname: cid01
+ cicd_control_node02_address: 10.167.8.92
+ cicd_control_node02_hostname: cid02
+ cicd_control_node03_address: 10.167.8.93
+ cicd_control_node03_hostname: cid03
+ cicd_control_vip_address: 10.167.8.90
+ cicd_control_vip_hostname: cid
+ cicd_enabled: 'True'
+ cicd_private_key: |-
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIEowIBAAKCAQEAuBC224XQZFyzqC56EyS7yr/rlpRRYsr2vji77faoWQFmgYbZ
+ oeyqqqm8eSN0Cc0wAnxWsQ7H3ZN9uTnyWVrsogs1vx8597iorZAT4Mu6JDbkWlZh
+ IUHo9P9itWJdUWpxjDavqIvjZo+DmOO1mfv9K1asP8COanQEsgHSyuf+XKMBg0ko
+ kEammAUtS9HRxCAJ47QgLPSCFij5ih/MRWY3HWFUFEF3gRdUodWmeJNmW+7JH7T2
+ wId1kn8oRya7eadKxd6wEaCGm5ILXwwVFmFkOGlEeC8wHnbkatd/A53DxzUfOHBi
+ 27Gaf83DPxKqDWW0aAh7b49EnFhdkuF3ZyXbYwIDAQABAoIBAFtioQbYpyBNDj2f
+ 5af/guUk6Di4pregAWVsEZIR9n9KPLRuWTsVn55f611Rhtke8IkrZnc92WlfQvpl
+ lLdcd0P0wNiFDmi5W7XgZJ4lR+OXBUT8wfibGqgY688WaTJ04K82r3vFCD/xXOrZ
+ k15CR+3ueFKmrY6Yz4P5d8iZ6iXfR47ZYm+wdmx3vmJ+IVfZCRRPAGP25GxqsOs5
+ 3qMl9hV7a1MGVVaVPmVzrq0Xzk6IAW2+0p5udGmezn4y6HFPIvOriUVUkni3mNjX
+ dokrETqVbOjkdFkSw28cMBfP/tO3vyfGh5VX24xvRztWtcpAm6Qr5lKEDSvFv13r
+ 0z/DxRECgYEA8oZ4+w2cqLJz91fKpWutGZKj4m/HEY6FZfjVflsTT2bKTt+nTtRY
+ qAeKGYIbrjZMAyy4dG+RgW7WORFcRHFyeSrS5Aw51zO+JQ0KzuBv83UqcbqNLcsz
+ BAPHPk/7f30W4wuInqgXrWMTiGePz0hQsvNU6aR7MH4Sd2C0ot4W+00CgYEAwkq+
+ UtugC8ywK+F0xZvjXHi3VJRJZf4WLtRxZGy8CimaritSKpZZRG23Sk0ifDE6+4fD
+ VtxeTfTmeZBictg/fEAPVHzhsNPNyDMA8t7t4ZKmMX9DNYAqVX21s5YQ9encH6KT
+ 1q0NRpjvw7QzhfbFfsxeAxHKZFbFlVmROplF+W8CgYAWHVz6x4r5dwxMCZ1Y6DCo
+ nE6FX1vvpedUHRSaqQNhwiXAe3RuI77R054sJUkQ4bKct386XtIN02WFXqfjNdUS
+ Z21DjjnX/cfg6QeLRbvvn0d3h2NIQbctLosEi5aLUYS8v1h93yYJkXc+gPMEG7wA
+ FWAwzebNzTEx4YeXMlk2IQKBgCt8JxTMawm5CkUH9Oa1eTGdIwsfFT5qm/RnP+nG
+ HF/559DLiVxWwiv6kmdi1DEPo6/gNuwd7k1sXpkeo6oolCzu+X9jY+/7t7bzE2dI
+ Vd2CwQebACPdR5xSwnQrRiiD6ux5qrUFjk8as68NieqVzKYQf4oYVUAX26kNnt+K
+ poqpAoGBAINHTGBFVK3XC+fCbu7rhFS8wZAjBmvEDHGnUBp19JREEr3q7a2D84T3
+ 17zo0bwxL09QFnOCDDJcXsh8eGbCONV0hJvJU2o7wGol+lRFSd+v6WYZ37bPEyEx
+ l8kv0xXAElriC1RE1CNtvoOn/uxyRs+2OnNgBVxtAGqUWVdpm6CD
+ -----END RSA PRIVATE KEY-----
+ cicd_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC4ELbbhdBkXLOoLnoTJLvKv+uWlFFiyva+OLvt9qhZAWaBhtmh7Kqqqbx5I3QJzTACfFaxDsfdk325OfJZWuyiCzW/Hzn3uKitkBPgy7okNuRaVmEhQej0/2K1Yl1RanGMNq+oi+Nmj4OY47WZ+/0rVqw/wI5qdASyAdLK5/5cowGDSSiQRqaYBS1L0dHEIAnjtCAs9IIWKPmKH8xFZjcdYVQUQXeBF1Sh1aZ4k2Zb7skftPbAh3WSfyhHJrt5p0rF3rARoIabkgtfDBUWYWQ4aUR4LzAeduRq138DncPHNR84cGLbsZp/zcM/EqoNZbRoCHtvj0ScWF2S4XdnJdtj
+ cluster_domain: cookied-bm-4.0-contrail.local
+ cluster_name: cookied-bm-4.0-contrail
+ opencontrail_version: 4.0
+ linux_repo_contrail_component: oc40
+ compute_bond_mode: active-backup
+ compute_padding_with_zeros: 'True'
+ compute_primary_first_nic: eth1
+ compute_primary_second_nic: eth2
+ context_seed: TFWH0xgUevQkslwhbWVedwwYhBtImHLiGUIExjT9ahxPAUBHh9Kg3QSAIrqTqtvk
+ control_network_netmask: 255.255.255.0
+ control_network_subnet: 10.167.8.0/24
+ control_vlan: '2422'
+ cookiecutter_template_branch: ''
+ cookiecutter_template_credentials: gerrit
+ cookiecutter_template_url: https://gerrit.mcp.mirantis.net/mk/cookiecutter-templates.git
+ deploy_network_gateway: 172.16.49.65
+ deploy_network_netmask: 255.255.255.192
+ deploy_network_subnet: 172.16.49.64/26
+ deployment_type: physical
+ dns_server01: 172.18.208.44
+ dns_server02: 8.8.4.4
+ email_address: sgudz@mirantis.com
+ infra_bond_mode: active-backup
+ infra_deploy_nic: eth0
+ infra_kvm01_control_address: 10.167.8.241
+ infra_kvm01_deploy_address: 172.16.49.67
+ infra_kvm01_hostname: kvm01
+ infra_kvm02_control_address: 10.167.8.242
+ infra_kvm02_deploy_address: 172.16.49.68
+ infra_kvm02_hostname: kvm02
+ infra_kvm03_control_address: 10.167.8.243
+ infra_kvm03_deploy_address: 172.16.49.69
+ infra_kvm03_hostname: kvm03
+ infra_kvm_vip_address: 10.167.8.240
+ infra_primary_first_nic: eth1
+ infra_primary_second_nic: eth2
+ internal_proxy_enabled: 'False'
+ kqueen_custom_mail_enabled: 'False'
+ kqueen_enabled: 'False'
+ kubernetes_enabled: 'False'
+ local_repositories: 'False'
+ maas_deploy_address: 172.16.49.66
+ maas_deploy_cidr: 172.16.49.64/26
+ maas_deploy_gateway: 172.16.49.65
+ maas_deploy_range_end: 172.16.49.119
+ maas_deploy_range_start: 172.16.49.77
+ maas_deploy_vlan: '0'
+ maas_dhcp_enabled: 'True'
+ maas_fabric_name: fabric-51
+ maas_hostname: cfg01
+ maas_manage_deploy_network: 'True'
+ mcp_common_scripts_branch: ''
+ mcp_version: proposed
+ offline_deployment: 'False'
+ opencontrail_analytics_address: 10.167.8.30
+ opencontrail_analytics_hostname: nal
+ opencontrail_analytics_node01_address: 10.167.8.31
+ opencontrail_analytics_node01_hostname: nal01
+ opencontrail_analytics_node02_address: 10.167.8.32
+ opencontrail_analytics_node02_hostname: nal02
+ opencontrail_analytics_node03_address: 10.167.8.33
+ opencontrail_analytics_node03_hostname: nal03
+ opencontrail_compute_iface_mask: '24'
+ opencontrail_control_address: 10.167.8.20
+ opencontrail_control_hostname: ntw
+ opencontrail_control_node01_address: 10.167.8.21
+ opencontrail_control_node01_hostname: ntw01
+ opencontrail_control_node02_address: 10.167.8.22
+ opencontrail_control_node02_hostname: ntw02
+ opencontrail_control_node03_address: 10.167.8.23
+ opencontrail_control_node03_hostname: ntw03
+ opencontrail_enabled: 'True'
+ opencontrail_router01_address: 10.167.8.100
+ opencontrail_router01_hostname: rtr01
+ opencontrail_router02_address: 10.167.8.101
+ opencontrail_router02_hostname: rtr02
+ openssh_groups: ''
+ openstack_benchmark_node01_address: 10.167.8.95
+ openstack_benchmark_node01_hostname: bmk01
+ openstack_cluster_size: compact
+ openstack_compute_count: '2'
+ openstack_compute_rack01_hostname: cmp
+ openstack_compute_rack01_single_subnet: 10.167.8
+ openstack_compute_rack01_tenant_subnet: 192.168.0
+ openstack_control_address: 10.167.8.10
+ openstack_control_hostname: ctl
+ openstack_control_node01_address: 10.167.8.11
+ openstack_control_node01_hostname: ctl01
+ openstack_control_node02_address: 10.167.8.12
+ openstack_control_node02_hostname: ctl02
+ openstack_control_node03_address: 10.167.8.13
+ openstack_control_node03_hostname: ctl03
+ openstack_database_address: 10.167.8.50
+ openstack_database_hostname: dbs
+ openstack_database_node01_address: 10.167.8.51
+ openstack_database_node01_hostname: dbs01
+ openstack_database_node02_address: 10.167.8.52
+ openstack_database_node02_hostname: dbs02
+ openstack_database_node03_address: 10.167.8.53
+ openstack_database_node03_hostname: dbs03
+ openstack_enabled: 'True'
+ openstack_message_queue_address: 10.167.8.40
+ openstack_message_queue_hostname: msg
+ openstack_message_queue_node01_address: 10.167.8.41
+ openstack_message_queue_node01_hostname: msg01
+ openstack_message_queue_node02_address: 10.167.8.42
+ openstack_message_queue_node02_hostname: msg02
+ openstack_message_queue_node03_address: 10.167.8.43
+ openstack_message_queue_node03_hostname: msg03
+ openstack_network_engine: opencontrail
+ openstack_neutron_bgp_vpn: 'False'
+ openstack_neutron_bgp_vpn_driver: bagpipe
+ openstack_nova_compute_reserved_host_memory_mb: '900'
+ openstack_proxy_address: 10.167.8.80
+ openstack_proxy_hostname: prx
+ openstack_proxy_node01_address: 10.167.8.81
+ openstack_proxy_node01_hostname: prx01
+ openstack_proxy_node02_address: 10.167.8.82
+ openstack_proxy_node02_hostname: prx02
+ openstack_upgrade_node01_address: 10.167.8.19
+ openstack_version: pike
+ oss_enabled: 'False'
+ oss_node03_address: ${_param:stacklight_monitor_node03_address}
+ oss_webhook_app_id: '24'
+ oss_webhook_login_id: '13'
+ platform: openstack_enabled
+ public_host: ${_param:openstack_proxy_address}
+ publication_method: email
+ reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+ salt_api_password: BNRhXeGFdgVNx0Ikm2CAMw7eyeHf4grH
+ salt_api_password_hash: $6$jriFnsbZ$eon54Ts/Kn4ywKpexe/W8srpBF64cxr2D8jd0RzTH8zdZVjS3viYt64m1d1VlXenurwpcGLkGzaGmOI0dlOox0
+ salt_master_address: 172.16.49.66
+ salt_master_hostname: cfg01
+ salt_master_management_address: 172.16.49.66
+ shared_reclass_branch: ''
+ shared_reclass_url: https://gerrit.mcp.mirantis.net/salt-models/reclass-system.git
+ stacklight_enabled: 'True'
+ stacklight_log_address: 10.167.8.60
+ stacklight_log_hostname: log
+ stacklight_log_node01_address: 10.167.8.61
+ stacklight_log_node01_hostname: log01
+ stacklight_log_node02_address: 10.167.8.62
+ stacklight_log_node02_hostname: log02
+ stacklight_log_node03_address: 10.167.8.63
+ stacklight_log_node03_hostname: log03
+ stacklight_long_term_storage_type: influxdb
+ stacklight_monitor_address: 10.167.8.70
+ stacklight_monitor_hostname: mon
+ stacklight_monitor_node01_address: 10.167.8.71
+ stacklight_monitor_node01_hostname: mon01
+ stacklight_monitor_node02_address: 10.167.8.72
+ stacklight_monitor_node02_hostname: mon02
+ stacklight_monitor_node03_address: 10.167.8.73
+ stacklight_monitor_node03_hostname: mon03
+ stacklight_telemetry_address: 10.167.8.85
+ stacklight_telemetry_hostname: mtr
+ stacklight_telemetry_node01_address: 10.167.8.86
+ stacklight_telemetry_node01_hostname: mtr01
+ stacklight_telemetry_node02_address: 10.167.8.87
+ stacklight_telemetry_node02_hostname: mtr02
+ stacklight_telemetry_node03_address: 10.167.8.88
+ stacklight_telemetry_node03_hostname: mtr03
+ stacklight_version: '2'
+ static_ips_on_deploy_network_enabled: 'False'
+ tenant_network_gateway: 192.168.0.1
+ tenant_network_netmask: 255.255.255.0
+ tenant_network_subnet: 192.168.0.0/24
+ tenant_vlan: '2423'
+ upstream_proxy_enabled: 'False'
+ use_default_network_scheme: 'True'
+ openldap_domain: cookied-bm-4.0-contrail.local
+ openldap_enabled: 'True'
+ openldap_organisation: ${_param:cluster_name}
diff --git a/tcp_tests/templates/cookied-bm-contrail40-nfv/salt-context-environment.yaml b/tcp_tests/templates/cookied-bm-contrail40-nfv/salt-context-environment.yaml
new file mode 100644
index 0000000..67fc8c3
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-contrail40-nfv/salt-context-environment.yaml
@@ -0,0 +1,271 @@
+nodes:
+ # Virtual Control Plane nodes
+ cid01.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: cicd_control_node01
+ roles:
+ - cicd_control_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ cid02.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: cicd_control_node02
+ roles:
+ - cicd_control_manager
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ cid03.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: cicd_control_node03
+ roles:
+ - cicd_control_manager
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ ctl01.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: openstack_control_node01
+ roles:
+ - openstack_control_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ ctl02.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: openstack_control_node02
+ roles:
+ - openstack_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ ctl03.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: openstack_control_node03
+ roles:
+ - openstack_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ dbs01.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: openstack_database_node01
+ roles:
+ - openstack_database_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ dbs02.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: openstack_database_node02
+ roles:
+ - openstack_database
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ dbs03.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: openstack_database_node03
+ roles:
+ - openstack_database
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ msg01.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: openstack_message_queue_node01
+ roles:
+ - openstack_message_queue
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ msg02.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: openstack_message_queue_node02
+ roles:
+ - openstack_message_queue
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ msg03.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: openstack_message_queue_node03
+ roles:
+ - openstack_message_queue
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ prx01.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: openstack_proxy_node01
+ roles:
+ - openstack_proxy
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ prx02.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: openstack_proxy_node02
+ roles:
+ - openstack_proxy
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ mon01.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: stacklight_server_node01
+ roles:
+ - stacklightv2_server_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ mon02.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: stacklight_server_node02
+ roles:
+ - stacklightv2_server
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ mon03.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: stacklight_server_node03
+ roles:
+ - stacklightv2_server
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ nal01.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: opencontrail_analytics_node01
+ roles:
+ - opencontrail_analytics
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ nal02.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: opencontrail_analytics_node02
+ roles:
+ - opencontrail_analytics
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ nal03.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: opencontrail_analytics_node03
+ roles:
+ - opencontrail_analytics
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ ntw01.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: opencontrail_control_node01
+ roles:
+ - opencontrail_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ ntw02.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: opencontrail_control_node02
+ roles:
+ - opencontrail_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ ntw03.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: opencontrail_control_node03
+ roles:
+ - opencontrail_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ mtr01.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: stacklight_telemetry_node01
+ roles:
+ - stacklight_telemetry
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ mtr02.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: stacklight_telemetry_node02
+ roles:
+ - stacklight_telemetry
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ mtr03.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: stacklight_telemetry_node03
+ roles:
+ - stacklight_telemetry
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ log01.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: stacklight_log_node01
+ roles:
+ - stacklight_log_leader_v2
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ log02.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: stacklight_log_node02
+ roles:
+ - stacklight_log
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ log03.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: stacklight_log_node03
+ roles:
+ - stacklight_log
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+# bmk01.cookied-bm-mcp-ocata-contrail.local:
+# reclass_storage_name: openstack_benchmark_node01
+# roles:
+# - openstack_benchmark
+# - linux_system_codename_xenial
+# interfaces:
+# ens3:
+# role: single_ctl
diff --git a/tcp_tests/templates/cookied-bm-contrail40-nfv/salt.yaml b/tcp_tests/templates/cookied-bm-contrail40-nfv/salt.yaml
new file mode 100644
index 0000000..6e6cfaa
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-contrail40-nfv/salt.yaml
@@ -0,0 +1,186 @@
+{% from 'cookied-bm-contrail40-nfv/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-bm-contrail40-nfv/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookied-bm-contrail40-nfv/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+# See shared-salt.yaml for the other salt model repository parameters
+
+# Name of the context file (without the extension, which is fixed to .yaml) used to render the Environment model
+{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','physical-cookied-bm-contrail40-nfv') %}
+# Path to the context files used to render Cluster and Environment models
+{%- set CLUSTER_CONTEXT_NAME = 'salt-context-cookiecutter-contrail.yaml' %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-environment.yaml','lab04-physical-inventory.yaml'] %}
+{%- set CONTROL_VLAN = os_env('CONTROL_VLAN', '2422') %}
+{%- set TENANT_VLAN = os_env('TENANT_VLAN', '2423') %}
+
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
+
+{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL(CONTROL_VLAN=CONTROL_VLAN, TENANT_VLAN=TENANT_VLAN) }}
+
+{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
+
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "auditd" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "glusterfs" "xtrabackup" "maas" "backupninja" "jenkins" "fluentd" "logrotate"') }}
+
+{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
+
+{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
+
+- description: "Workaround for rack01 compute generator"
+ cmd: |
+ set -e;
+ # Remove rack01 key
+ . /root/venv-reclass-tools/bin/activate;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
+ # Add openstack_compute_node definition from system
+ reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml --merge;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
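+# Illustration only (not executed): after the two reclass-tools calls above,
+# infra/config.yml is expected to contain roughly:
+#   classes:
+#     - system.reclass.storage.system.openstack_compute_multi   # added via --merge
+#   parameters:
+#     reclass:
+#       storage:
+#         node:
+#           ...                # openstack_compute_rack01 entry removed
+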
+- description: "WR for changing the image to proposed"
+ cmd: |
+ set -e;
+ # Point salt_control_xenial_image to the proposed image
+ . /root/venv-reclass-tools/bin/activate;
+ reclass-tools add-key parameters._param.salt_control_xenial_image 'http://ci.mcp.mirantis.net:8085/images/ubuntu-16-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Temporary workaround for removing cinder-volume from CTL nodes
+ cmd: |
+ sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml;
+ sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
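+# For reference, a sketch of the assumed before-state: the two sed calls
+# above strip these entries from openstack/control.yml:
+#   classes:
+#     - system.cinder.volume.single
+#     - system.cinder.volume.notification.messagingv2
+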
+- description: Temporary WR to correct bridge names according to environment templates
+ cmd: |
+ sed -i 's/br\-ctl/br\_ctl/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+ sed -i 's/br\-mgm/br\_mgm/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+{{ SHARED.MACRO_GENERATE_INVENTORY() }}
+
+{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
+
+- description: Update minion information
+ cmd: |
+ salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_grains &&
+ salt --hard-crash --state-output=mixed --state-verbose=False '*' mine.update &&
+ salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar && sleep 10
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Rerun openssh after env model is generated
+ cmd: |
+ salt-call state.sls openssh
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: "WR to put the dpdk pci address in correct quotes"
+ cmd: |
+ set -e;
+ . /root/venv-reclass-tools/bin/activate;
+ reclass-tools add-key parameters._param.compute_vrouter_dpdk_pci '0000:05:00.0' /srv/salt/reclass/nodes/_generated/cmp001.{{ DOMAIN_NAME }}.yml;
+ reclass-tools add-key parameters._param.compute_vrouter_dpdk_pci '0000:05:00.0' /srv/salt/reclass/nodes/_generated/cmp002.{{ DOMAIN_NAME }}.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
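+# Why the quoting matters (illustration): unquoted, a scalar like
+# 0000:05:00.0 matches the YAML 1.1 sexagesimal-float form and can be
+# silently loaded as a number, so the PCI address must end up in the
+# generated node files as a string:
+#   parameters:
+#     _param:
+#       compute_vrouter_dpdk_pci: '0000:05:00.0'
+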
+- description: Execute linux.network.host one more time after salt.minion to apply dynamically registered hosts on the cluster nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@linux:system' state.sls linux.network.host
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 10}
+ skip_fail: false
+
+{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
+
+########################################
+# Spin up Control Plane VMs on KVM nodes
+########################################
+
+- description: Execute 'libvirt' states to create necessary libvirt networks
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'kvm*' state.sls libvirt
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 10}
+ skip_fail: false
+
+- description: Create VMs for control plane
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'kvm*' state.sls salt.control
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 10}
+ skip_fail: false
+
+- description: '*Workaround* to wait until the control-plane VMs appear in salt-key (instead of a sleep)'
+ cmd: |
+ salt-key -l acc| sort > /tmp/current_keys.txt &&
+ salt 'kvm*' cmd.run 'virsh list --name' | grep -v 'kvm'|sort|xargs -I {} fgrep {} /tmp/current_keys.txt
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 20, delay: 30}
+ skip_fail: false
+
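+# How the wait above works (sketch): the first command snapshots the
+# accepted minion keys into /tmp/current_keys.txt; the second requires
+# every VM name reported by 'virsh list --name' on the kvm nodes to be
+# present in that snapshot. The step is retried (up to 20 times, 30s
+# apart) until all control-plane VMs have registered with the master.
+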
+#########################################
+# Configure all running salt minion nodes
+#########################################
+
+- description: Hack resolv.conf on VCP nodes for internal services access
+ cmd: |
+ salt --hard-crash --state-output=mixed --state-verbose=False -C '* and not cfg*' cmd.run "echo 'nameserver 172.18.208.44' > /etc/resolv.conf;"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Refresh pillars on all minions
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Sync all salt resources
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all && sleep 5
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Show reclass-salt --top for generated nodes
+ cmd: reclass-salt --top -u /srv/salt/reclass/nodes/_generated/
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
+
+{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
+
+{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
+
+- description: "Lab04 workaround: Give each node root access with the key from cfg01"
+ cmd: |
+ set -e;
+ set -x;
+ key=$(ssh-keygen -y -f /root/.ssh/id_rsa);
+ salt '*' cmd.run "echo $key >> /root/.ssh/authorized_keys";
+ salt '*' cmd.run "service sshd restart"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+- description: "Lab04 workaround: Access the control network from cfg01 using sshuttle via kvm01"
+ cmd: |
+ set -e;
+ set -x;
+ KVM01_DEPLOY_ADDRESS=$(salt-call --out=newline_values_only pillar.get _param:infra_kvm_node01_deploy_address);
+ apt-get install -y sshuttle;
+ sshuttle -r ${KVM01_DEPLOY_ADDRESS} 10.167.8.0/24 -D >/dev/null;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
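+
+# Note on the step above: sshuttle acts as a lightweight VPN, forwarding
+# the whole control subnet (10.167.8.0/24) from cfg01 through kvm01 over
+# SSH so that cfg01 can reach the VCP VMs; '-D' runs it as a daemon.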
diff --git a/tcp_tests/templates/cookied-bm-contrail40-nfv/sl.yaml b/tcp_tests/templates/cookied-bm-contrail40-nfv/sl.yaml
new file mode 100644
index 0000000..df0fbae
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-contrail40-nfv/sl.yaml
@@ -0,0 +1,266 @@
+{% from 'cookied-bm-contrail40-nfv/underlay.yaml' import HOSTNAME_CFG01 with context %}
+
+# Install docker swarm
+- description: Configure docker service
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install docker swarm on master node
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 10}
+ skip_fail: false
+
+- description: Send grains to the swarm slave nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls salt.minion.grains
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Update mine
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' mine.update
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Refresh modules
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Rerun swarm on slaves for proper token population
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 10}
+ skip_fail: false
+
+- description: Configure slave nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 10}
+ skip_fail: false
+
+- description: List registered Docker swarm nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install keepalived on mon nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'mon*' state.sls keepalived
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Check the VIP on mon nodes
+ cmd: |
+ SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
+ echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
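+# Sketch of what the check above asserts: the keepalived VIP, taken from
+# the _param:stacklight_monitor_address pillar, must be bound on at least
+# one 'mon*' node; the trailing 'grep -B1' fails the step otherwise.
+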
+# Install StackLight v2 (slv2) infra
+# Launch containers
+- description: Install Mongo if target matches
+ cmd: |
+ if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' match.pillar 'mongodb:server' ; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb
+ fi
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 4, delay: 40}
+ skip_fail: false
+
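+# The 'if ... match.pillar' wrapper used by the Mongo step above and
+# several later steps is a guard (sketch): the state runs only when at
+# least one targeted minion actually carries the pillar, so optional
+# services are skipped on models that do not define them:
+#   if salt -C 'I@mongodb:server' match.pillar 'mongodb:server' ; then
+#     salt -C 'I@mongodb:server' state.sls mongodb
+#   fi
+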
+- description: Configure Alerta if it exists
+ cmd: |
+ if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:alerta' match.pillar 'prometheus:alerta' ; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:alerta' state.sls prometheus.alerta
+ fi
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 4, delay: 10}
+ skip_fail: false
+
+- description: Launch Prometheus containers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master and I@prometheus:server' state.sls docker.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 10}
+ skip_fail: false
+
+- description: Check docker ps
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' cmd.run "docker ps"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 10}
+ skip_fail: false
+
+- description: Install telegraf
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 10}
+ skip_fail: false
+
+- description: Configure Prometheus exporters, if pillar 'prometheus:exporters' exists on any server
+ cmd: |
+ if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus
+ fi
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Configure Prometheus collector, if pillar 'prometheus:collector' exists on any server
+ cmd: |
+ if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:collector' match.pillar 'prometheus:collector' ; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:collector' state.sls prometheus.collector
+ fi
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install elasticsearch server
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install kibana server
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install elasticsearch client
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 30}
+ skip_fail: false
+
+- description: Install kibana client
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:client' state.sls kibana.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Check influxdb
+ cmd: |
+ INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
+ echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
+ if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
+ fi
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+# Install Prometheus LTS (optional, if set in the model)
+- description: Prometheus LTS (optional, if set in the model)
+ cmd: |
+ PROMETHEUS_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' test.ping 1>/dev/null 2>&1 && echo true`;
+ echo "PROMETHEUS rely service presence: ${PROMETHEUS_SERVICE}";
+ if [[ "$PROMETHEUS_SERVICE" == "true" ]]; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' state.sls prometheus
+ fi
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+# Install services for log collection
+- description: Configure fluentd
+ cmd: |
+ FLUENTD_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' test.ping 1>/dev/null 2>&1 && echo true`;
+ echo "Fluentd service presence: ${FLUENTD_SERVICE}";
+ if [[ "$FLUENTD_SERVICE" == "true" ]]; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' state.sls fluentd
+ else
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:log_collector' state.sls heka.log_collector
+ fi
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+# Install heka ceilometer collector
+- description: Install heka ceilometer collector if it exists
+ cmd: |
+ CEILO=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' test.ping 1>/dev/null 2>&1 && echo true`;
+ echo "Ceilometer service presence: ${CEILO}";
+ if [[ "$CEILO" == "true" ]]; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' state.sls heka.ceilometer_collector;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' service.restart ceilometer_collector
+ fi
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+# Collect grains needed to configure the services
+
+- description: Get grains
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Sync modules
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Update mine
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 5, delay: 15}
+ skip_fail: false
+
+# Configure the services running in Docker Swarm
+- description: Configure Prometheus in Docker Swarm
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' state.sls prometheus
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Configure Remote Collector in Docker Swarm for OpenStack deployments
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' state.sls heka.remote_collector
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install sphinx
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@sphinx:server' state.sls sphinx
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+#- description: Install prometheus alertmanager
+# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 1, delay: 10}
+# skip_fail: false
+
+#- description: run docker state
+# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 1, delay: 10}
+# skip_fail: false
+#
+#- description: docker ps
+# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 1, delay: 10}
+# skip_fail: false
+
+- description: Configure Grafana dashboards and datasources
+ cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 10}
+ skip_fail: false
+
+- description: Run salt minion to create cert files
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 15}
+ skip_fail: false
diff --git a/tcp_tests/templates/cookied-bm-contrail40-nfv/underlay--meta-data.yaml b/tcp_tests/templates/cookied-bm-contrail40-nfv/underlay--meta-data.yaml
new file mode 100644
index 0000000..a594a53
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-contrail40-nfv/underlay--meta-data.yaml
@@ -0,0 +1,4 @@
+| # All the data below will be stored as a string object
+ instance-id: iid-local1
+ hostname: {hostname}
+ local-hostname: {hostname}
diff --git a/tcp_tests/templates/cookied-bm-contrail40-nfv/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-bm-contrail40-nfv/underlay--user-data-cfg01.yaml
new file mode 100644
index 0000000..6c9e48f
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-contrail40-nfv/underlay--user-data-cfg01.yaml
@@ -0,0 +1,102 @@
+| # All the data below will be stored as a string object
+ #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ ssh_pwauth: True
+ users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ {% for key in config.underlay.ssh_keys %}
+ - ssh-rsa {{ key['public'] }}
+ {% endfor %}
+
+ disable_root: false
+ chpasswd:
+ list: |
+ root:r00tme
+ expire: False
+
+ bootcmd:
+ # # Block access to SSH while node is preparing
+ # - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+ output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+ runcmd:
+ # Configure dhclient
+ - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+ - sudo resolvconf -u
+
+ # Enable grub menu using updated config below
+ - update-grub
+
+ # Prepare network connection
+ - sudo ifdown ens3
+ - sudo ip r d default || true # remove existing default route to get it from dhcp
+ - sudo ifup ens3
+ #- sudo route add default gw {gateway} {interface_name}
+
+ # Create swap
+ - fallocate -l 4G /swapfile
+ - chmod 600 /swapfile
+ - mkswap /swapfile
+ - swapon /swapfile
+ - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+
+ ############## TCP Cloud cfg01 node ##################
+ #- sleep 120
+ # - echo "Preparing base OS"
+
+ - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
+ # - which wget >/dev/null || (apt-get update; apt-get install -y wget);
+
+ # Configure Ubuntu mirrors
+ # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
+ # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
+ # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
+
+ # - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
+ # - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
+ # - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
+ # - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
+
+ # - apt-get clean
+ # - apt-get update
+
+ # Install common packages
+ # - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
+
+ # Install salt-minion and stop it until it is configured
+ # - eatmydata apt-get install -y salt-minion && service salt-minion stop
+
+ ########################################################
+ # Node is ready, allow SSH access
+ # - echo "Allow SSH access ..."
+ # - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+ ########################################################
+
+ write_files:
+ - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+ content: |
+ GRUB_RECORDFAIL_TIMEOUT=30
+ GRUB_TIMEOUT=3
+ GRUB_TIMEOUT_STYLE=menu
+
+ - path: /etc/network/interfaces
+ content: |
+ auto ens3
+ iface ens3 inet dhcp
+
+ - path: /root/.ssh/config
+ owner: root:root
+ permissions: '0600'
+ content: |
+ Host *
+ ServerAliveInterval 300
+ ServerAliveCountMax 10
+ StrictHostKeyChecking no
+ UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/cookied-bm-contrail40-nfv/underlay--user-data1604-hwe.yaml b/tcp_tests/templates/cookied-bm-contrail40-nfv/underlay--user-data1604-hwe.yaml
new file mode 100644
index 0000000..106c3d5
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-contrail40-nfv/underlay--user-data1604-hwe.yaml
@@ -0,0 +1,99 @@
+| # All the data below will be stored as a string object
+ #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ ssh_pwauth: True
+ users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ {% for key in config.underlay.ssh_keys %}
+ - ssh-rsa {{ key['public'] }}
+ {% endfor %}
+
+ disable_root: false
+ chpasswd:
+ list: |
+ root:r00tme
+ expire: False
+
+ bootcmd:
+ # # Block access to SSH while node is preparing
+ # - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+ output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+ runcmd:
+ - export TERM=linux
+ - export LANG=C
+ # Configure dhclient
+ - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+ - sudo resolvconf -u
+
+ # Enable grub menu using updated config below
+ - update-grub
+
+ # Prepare network connection
+ - sudo ifup {interface_name}
+ #- sudo route add default gw {gateway} {interface_name}
+
+ # Create swap
+ - fallocate -l 4G /swapfile
+ - chmod 600 /swapfile
+ - mkswap /swapfile
+ - swapon /swapfile
+ - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+
+
+ ############## TCP Cloud cfg01 node ##################
+ #- sleep 120
+ # - echo "Preparing base OS"
+ - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
+ # - which wget >/dev/null || (apt-get update; apt-get install -y wget)
+
+ # Configure Ubuntu mirrors
+ # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
+ # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
+ # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
+
+ # - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
+ # - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
+ # - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
+ # - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
+
+ # - apt-get clean
+ # - eatmydata apt-get update && apt-get -y upgrade
+
+ # Install common packages
+ # - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
+
+ # Install salt-minion and stop it until it is configured
+ # - eatmydata apt-get install -y salt-minion && service salt-minion stop
+
+ # Install latest kernel
+ # - eatmydata apt-get install -y {{ os_env('LINUX_KERNEL_HWE_PACKAGE_NAME', 'linux-image-extra-4.10.0-42-generic') }}
+
+ ########################################################
+ # Node is ready, allow SSH access
+ #- echo "Allow SSH access ..."
+ #- sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+ # - reboot
+ ########################################################
+
+ write_files:
+ - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+ content: |
+ GRUB_RECORDFAIL_TIMEOUT=30
+ GRUB_TIMEOUT=3
+ GRUB_TIMEOUT_STYLE=menu
+
+ - path: /etc/network/interfaces
+ content: |
+ # The loopback network interface
+ auto lo
+ iface lo inet loopback
+ auto {interface_name}
+ iface {interface_name} inet dhcp
diff --git a/tcp_tests/templates/cookied-bm-contrail40-nfv/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-bm-contrail40-nfv/underlay--user-data1604.yaml
new file mode 100644
index 0000000..915981e
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-contrail40-nfv/underlay--user-data1604.yaml
@@ -0,0 +1,95 @@
+| # All the data below will be stored as a string object
+ #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ ssh_pwauth: True
+ users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ {% for key in config.underlay.ssh_keys %}
+ - ssh-rsa {{ key['public'] }}
+ {% endfor %}
+
+ disable_root: false
+ chpasswd:
+ list: |
+ root:r00tme
+ expire: False
+
+ bootcmd:
+ # Block access to SSH while node is preparing
+ # - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+ output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+ runcmd:
+ - export TERM=linux
+ - export LANG=C
+ # Configure dhclient
+ - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+ - sudo resolvconf -u
+
+ # Enable grub menu using updated config below
+ - update-grub
+
+ # Prepare network connection
+ - sudo ifup {interface_name}
+ #- sudo route add default gw {gateway} {interface_name}
+
+ # Create swap
+ - fallocate -l 4G /swapfile
+ - chmod 600 /swapfile
+ - mkswap /swapfile
+ - swapon /swapfile
+ - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+
+
+ ############## TCP Cloud cfg01 node ##################
+ #- sleep 120
+ # - echo "Preparing base OS"
+ - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
+ # - which wget >/dev/null || (apt-get update; apt-get install -y wget)
+
+ # Configure Ubuntu mirrors
+ # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
+ # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
+ # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
+
+ # - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
+ # - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
+ # - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
+ # - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
+
+ # - apt-get clean
+ # - eatmydata apt-get update && apt-get -y upgrade
+
+ # Install common packages
+ # - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
+
+ # Install salt-minion and stop it until it is configured
+ # - eatmydata apt-get install -y salt-minion && service salt-minion stop
+
+ ########################################################
+ # Node is ready, allow SSH access
+ # - echo "Allow SSH access ..."
+ # - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+ ########################################################
+
+ write_files:
+ - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+ content: |
+ GRUB_RECORDFAIL_TIMEOUT=30
+ GRUB_TIMEOUT=3
+ GRUB_TIMEOUT_STYLE=menu
+
+ - path: /etc/network/interfaces
+ content: |
+ # The loopback network interface
+ auto lo
+ iface lo inet loopback
+ auto {interface_name}
+ iface {interface_name} inet dhcp
diff --git a/tcp_tests/templates/cookied-bm-contrail40-nfv/underlay.yaml b/tcp_tests/templates/cookied-bm-contrail40-nfv/underlay.yaml
new file mode 100644
index 0000000..e99cee9
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-contrail40-nfv/underlay.yaml
@@ -0,0 +1,570 @@
+# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'proposed') %}
+
+#{% set DOMAIN_NAME = os_env('LAB_CONFIG_NAME', 'physical_mcp11_ovs_dpdk') + '.local' %}
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-bm-contrail40-nfv') %}
+{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
+{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM01 = os_env('HOSTNAME_KVM01', 'kvm01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM02 = os_env('HOSTNAME_KVM02', 'kvm02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM03', 'kvm03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP001 = os_env('HOSTNAME_CMP001', 'cmp001.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP002 = os_env('HOSTNAME_CMP002', 'cmp002.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP003 = os_env('HOSTNAME_CMP003', 'cmp003.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM04 = os_env('HOSTNAME_KVM04', 'kvm04.' + DOMAIN_NAME) %}
+
+{% set ETH1_IP_ADDRESS_CFG01 = os_env('ETH1_IP_ADDRESS_CFG01', '172.16.49.66') %}
+{% set ETH0_IP_ADDRESS_KVM01 = os_env('ETH0_IP_ADDRESS_KVM01', '172.16.49.67') %}
+{% set ETH0_IP_ADDRESS_KVM02 = os_env('ETH0_IP_ADDRESS_KVM02', '172.16.49.68') %}
+{% set ETH0_IP_ADDRESS_KVM03 = os_env('ETH0_IP_ADDRESS_KVM03', '172.16.49.69') %}
+{% set ETH0_IP_ADDRESS_CMP001 = os_env('ETH0_IP_ADDRESS_CMP001', '172.16.49.73') %}
+{% set ETH0_IP_ADDRESS_CMP002 = os_env('ETH0_IP_ADDRESS_CMP002', '172.16.49.74') %}
+{% set ETH0_IP_ADDRESS_CMP003 = os_env('ETH0_IP_ADDRESS_CMP003', '172.16.49.121') %}
+{% set ETH0_IP_ADDRESS_KVM04 = os_env('ETH0_IP_ADDRESS_KVM04', '172.16.49.122') %}
+# {% set ETH0_IP_ADDRESS_GTW02 = os_env('ETH0_IP_ADDRESS_GTW02', '172.16.49.4') %}
+
+{% import 'cookied-bm-contrail40-nfv/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
+{% import 'cookied-bm-contrail40-nfv/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
+{% import 'cookied-bm-contrail40-nfv/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA with context %}
+{% import 'cookied-bm-contrail40-nfv/underlay--user-data1604-hwe.yaml' as CLOUDINIT_USER_DATA_HWE with context %}
+
+---
+aliases:
+ - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
+ - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
+ - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
+ - &cloudinit_user_data {{ CLOUDINIT_USER_DATA }}
+ - &cloudinit_user_data_hwe {{ CLOUDINIT_USER_DATA_HWE }}
+
+
+template:
+ devops_settings:
+ env_name: {{ os_env('ENV_NAME', 'cookied-bm-contrail40-nfv_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+
+ address_pools:
+ admin-pool01:
+ net: {{ os_env('ADMIN_ADDRESS_POOL01', '172.16.49.64/26:26') }}
+ params:
+ ip_reserved:
+ gateway: +62
+ l2_network_device: +61
+ default_{{ HOSTNAME_CFG01 }}: {{ ETH1_IP_ADDRESS_CFG01 }}
+ default_{{ HOSTNAME_KVM01 }}: {{ ETH0_IP_ADDRESS_KVM01 }}
+ default_{{ HOSTNAME_KVM02 }}: {{ ETH0_IP_ADDRESS_KVM02 }}
+ default_{{ HOSTNAME_KVM03 }}: {{ ETH0_IP_ADDRESS_KVM03 }}
+ default_{{ HOSTNAME_CMP001 }}: {{ ETH0_IP_ADDRESS_CMP001 }}
+ default_{{ HOSTNAME_CMP002 }}: {{ ETH0_IP_ADDRESS_CMP002 }}
+ default_{{ HOSTNAME_CMP003 }}: {{ ETH0_IP_ADDRESS_CMP003 }}
+ default_{{ HOSTNAME_KVM04 }}: {{ ETH0_IP_ADDRESS_KVM04 }}
+ # default_{{ HOSTNAME_GTW02 }}: {{ ETH0_IP_ADDRESS_GTW02 }}
+ virtual_{{ HOSTNAME_CFG01 }}: {{ ETH1_IP_ADDRESS_CFG01 }}
+ virtual_{{ HOSTNAME_KVM01 }}: {{ ETH0_IP_ADDRESS_KVM01 }}
+ virtual_{{ HOSTNAME_KVM02 }}: {{ ETH0_IP_ADDRESS_KVM02 }}
+ virtual_{{ HOSTNAME_KVM03 }}: {{ ETH0_IP_ADDRESS_KVM03 }}
+ virtual_{{ HOSTNAME_CMP001 }}: {{ ETH0_IP_ADDRESS_CMP001 }}
+ virtual_{{ HOSTNAME_CMP002 }}: {{ ETH0_IP_ADDRESS_CMP002 }}
+ virtual_{{ HOSTNAME_CMP003 }}: {{ ETH0_IP_ADDRESS_CMP003 }}
+ virtual_{{ HOSTNAME_KVM04 }}: {{ ETH0_IP_ADDRESS_KVM04 }}
+ # virtual_{{ HOSTNAME_GTW02 }}: {{ ETH0_IP_ADDRESS_GTW02 }}
+ #ip_ranges:
+ # dhcp: [+2, -4]
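+ # Note (illustration): 'gateway: +62' and 'l2_network_device: +61' above
+ # are offsets from the pool's network address; with the default
+ # 172.16.49.64/26 they resolve to 172.16.49.126 and 172.16.49.125.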
+ private-pool01:
+ net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.167.8.0/24:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+
+ tenant-pool01:
+ net: {{ os_env('TENANT_ADDRESS_POOL01', '10.167.10.0/24:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+
+ external-pool01:
+ net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '172.17.42.192/26:26') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: -2
+
+ groups:
+
+ - name: virtual
+ driver:
+ name: devops.driver.libvirt
+ params:
+ connection_string: !os_env CONNECTION_STRING, qemu:///system
+ storage_pool_name: !os_env STORAGE_POOL_NAME, default
+ stp: False
+ hpet: False
+ enable_acpi: true
+ use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
+
+ network_pools:
+ admin: admin-pool01
+
+ l2_network_devices:
+ # Ironic management interface
+ admin:
+ address_pool: admin-pool01
+ dhcp: false
+ parent_iface:
+ phys_dev: !os_env IRONIC_LAB_PXE_IFACE_0
+
+ group_volumes:
+ - name: cloudimage1604 # This name is used for the 'backing_store' option of node volumes.
+ source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+ format: qcow2
+ - name: cfg01_day01_image # Pre-configured day01 image
+ source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
+ format: qcow2
+
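+ # Note: 'backing_store' below points a node's qcow2 system volume at one
+ # of these images, so each VM gets a copy-on-write overlay instead of a
+ # full image copy (an illustration of the fuel-devops convention).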
+ nodes:
+ - name: {{ HOSTNAME_CFG01 }}
+ role: salt_master
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 4
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cfg01_day01_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_cfg01
+
+ interfaces:
+ - label: ens3
+ l2_network_device: admin
+ interface_model: *interface_model
+ mac_address: !os_env ETH1_MAC_ADDRESS_CFG01
+ #- label: ens4
+ # l2_network_device: private
+ # interface_model: *interface_model
+ network_config:
+ ens3:
+ networks:
+ - admin
+ #ens4:
+ # networks:
+ # - private
+
+
+ - name: default
+ driver:
+ name: devops_driver_ironic
+ params:
+ os_auth_token: fake-token
+ ironic_url: !os_env IRONIC_URL # URL that will be used by fuel-devops
+ # to access the Ironic API
+ # Agent URL that is accessible from the deploying node when nodes
+ # are bootstrapped with PXE. Usually a PXE/provision network address is used.
+ agent_kernel_url: !os_env IRONIC_AGENT_KERNEL_URL
+ agent_ramdisk_url: !os_env IRONIC_AGENT_RAMDISK_URL
+
+ network_pools:
+ admin: admin-pool01
+
+ nodes:
+
+ # - name: {{ HOSTNAME_CFG01 }}
+ # role: salt_master
+ # params:
+ # ipmi_user: !os_env IPMI_USER
+ # ipmi_password: !os_env IPMI_PASSWORD
+ # ipmi_previlegies: OPERATOR
+ # ipmi_host: !os_env IPMI_HOST_CFG01 # hostname or IP address
+ # ipmi_lan_interface: lanplus
+ # ipmi_port: 623
+
+ # root_volume_name: system # see 'volumes' below
+ # cloud_init_volume_name: iso # see 'volumes' below
+ # cloud_init_iface_up: enp3s0f1 # see 'interfaces' below.
+ # volumes:
+ # - name: system
+ # capacity: !os_env NODE_VOLUME_SIZE, 200
+
+ # # As with the agent URL, this is a URL to the image that should be
+ # # used to deploy the node. It should also be accessible from the
+ # # deploying node when nodes are provisioned by the agent. Usually a PXE/provision network address is used.
+ # source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+ # source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+ # - name: iso # Volume with name 'iso' will be used
+ # # to store the image with cloud-init metadata.
+
+ # cloudinit_meta_data: *cloudinit_meta_data
+ # cloudinit_user_data: *cloudinit_user_data_cfg01
+
+ # interfaces:
+ # - label: enp3s0f0 # Infra interface
+ # mac_address: !os_env ETH0_MAC_ADDRESS_CFG01
+ # - label: enp3s0f1
+ # l2_network_device: admin
+ # mac_address: !os_env ETH1_MAC_ADDRESS_CFG01
+
+ # network_config:
+ # enp3s0f0:
+ # networks:
+ # - infra
+ # enp3s0f1:
+ # networks:
+ # - admin
+
+ - name: {{ HOSTNAME_KVM01 }}
+ role: salt_minion
+ params:
+ ipmi_user: !os_env IPMI_USER
+ ipmi_password: !os_env IPMI_PASSWORD
+ ipmi_previlegies: OPERATOR
+ ipmi_host: !os_env IPMI_HOST_KVM01 # hostname or IP address
+ ipmi_lan_interface: lanplus
+ ipmi_port: 623
+
+ root_volume_name: system # see 'volumes' below
+ cloud_init_volume_name: iso # see 'volumes' below
+ cloud_init_iface_up: enp9s0f0 # see 'interfaces' below.
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 200
+
+ # As with the agent URL, this is a URL to the image that should be
+ # used to deploy the node. It should also be accessible from the
+ # deploying node when nodes are provisioned by the agent. Usually a PXE/provision network address is used.
+ source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+ source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data
+
+ interfaces:
+ - label: enp9s0f0
+ l2_network_device: admin
+ mac_address: !os_env ETH0_MAC_ADDRESS_KVM01
+ - label: enp9s0f1
+ mac_address: !os_env ETH1_MAC_ADDRESS_KVM01
+
+ network_config:
+ enp9s0f0:
+ networks:
+ - admin
+ bond0:
+ networks:
+ - control
+ aggregation: active-backup
+ parents:
+ - enp9s0f1
+
+ - name: {{ HOSTNAME_KVM02 }}
+ role: salt_minion
+ params:
+ ipmi_user: !os_env IPMI_USER
+ ipmi_password: !os_env IPMI_PASSWORD
+ ipmi_previlegies: OPERATOR
+ ipmi_host: !os_env IPMI_HOST_KVM02 # hostname or IP address
+ ipmi_lan_interface: lanplus
+ ipmi_port: 623
+
+ root_volume_name: system # see 'volumes' below
+ cloud_init_volume_name: iso # see 'volumes' below
+ cloud_init_iface_up: enp9s0f0 # see 'interfaces' below.
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 200
+
+ # As with the agent URL, this is a URL to the image that should be
+ # used to deploy the node. It should also be accessible from the
+ # deploying node when nodes are provisioned by the agent. Usually a PXE/provision network address is used.
+ source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+ source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data
+
+ interfaces:
+ - label: enp9s0f0
+ l2_network_device: admin
+ mac_address: !os_env ETH0_MAC_ADDRESS_KVM02
+ - label: enp9s0f1
+ mac_address: !os_env ETH1_MAC_ADDRESS_KVM02
+
+ network_config:
+ enp9s0f0:
+ networks:
+ - admin
+ bond0:
+ networks:
+ - control
+ aggregation: active-backup
+ parents:
+ - enp9s0f1
+
+ - name: {{ HOSTNAME_KVM03 }}
+ role: salt_minion
+ params:
+ ipmi_user: !os_env IPMI_USER
+ ipmi_password: !os_env IPMI_PASSWORD
+ ipmi_previlegies: OPERATOR
+ ipmi_host: !os_env IPMI_HOST_KVM03 # hostname or IP address
+ ipmi_lan_interface: lanplus
+ ipmi_port: 623
+
+ root_volume_name: system # see 'volumes' below
+ cloud_init_volume_name: iso # see 'volumes' below
+ # cloud_init_iface_up: eno1 # see 'interfaces' below.
+ cloud_init_iface_up: enp9s0f0 # see 'interfaces' below.
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 200
+
+ # As with the agent URL, this is a URL to the image that should be
+ # used to deploy the node. It should also be accessible from the
+ # deploying node when nodes are provisioned by the agent. Usually a PXE/provision network address is used.
+ source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+ source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data
+
+ interfaces:
+ # - label: eno1
+ - label: enp9s0f0
+ l2_network_device: admin
+ mac_address: !os_env ETH0_MAC_ADDRESS_KVM03
+ # - label: eno2
+ - label: enp9s0f1
+ mac_address: !os_env ETH1_MAC_ADDRESS_KVM03
+
+ network_config:
+ # eno1:
+ enp9s0f0:
+ networks:
+ - admin
+ bond0:
+ networks:
+ - control
+ aggregation: active-backup
+ parents:
+ - enp9s0f1
+
+ # - name: {{ HOSTNAME_KVM04 }}
+ # role: salt_minion
+ # params:
+ # ipmi_user: !os_env IPMI_USER
+ # ipmi_password: !os_env IPMI_PASSWORD
+ # ipmi_previlegies: OPERATOR
+ # ipmi_host: !os_env IPMI_HOST_KVM04 # hostname or IP address
+ # ipmi_lan_interface: lanplus
+ # ipmi_port: 623
+ #
+ # root_volume_name: system # see 'volumes' below
+ # cloud_init_volume_name: iso # see 'volumes' below
+ # # cloud_init_iface_up: eno1 # see 'interfaces' below.
+ # cloud_init_iface_up: enp2s0f0 # see 'interfaces' below.
+ # volumes:
+ # - name: system
+ # capacity: !os_env NODE_VOLUME_SIZE, 200
+ #
+ # # As with the agent URL, this is a URL to the image that should be
+ # # used to deploy the node. It should also be accessible from the
+ # # deploying node when nodes are provisioned by the agent. Usually a PXE/provision network address is used.
+ # source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+ # source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+ #
+ # - name: iso # Volume with name 'iso' will be used
+ # # to store the image with cloud-init metadata.
+ #
+ # cloudinit_meta_data: *cloudinit_meta_data
+ # cloudinit_user_data: *cloudinit_user_data
+ #
+ # interfaces:
+ # # - label: eno1
+ # - label: enp2s0f0
+ # l2_network_device: admin
+ # mac_address: !os_env ETH0_MAC_ADDRESS_KVM04
+ # # - label: eno2
+ # - label: enp2s0f1
+ # mac_address: !os_env ETH1_MAC_ADDRESS_KVM04
+ #
+ # network_config:
+ # # eno1:
+ # enp2s0f0:
+ # networks:
+ # - admin
+ # bond0:
+ # networks:
+ # - control
+ # aggregation: active-backup
+ # parents:
+ # - enp2s0f1
+ #
+ - name: {{ HOSTNAME_CMP001 }}
+ role: salt_minion
+ params:
+ ipmi_user: !os_env IPMI_USER
+ ipmi_password: !os_env IPMI_PASSWORD
+ ipmi_previlegies: OPERATOR
+ ipmi_host: !os_env IPMI_HOST_CMP001 # hostname or IP address
+ ipmi_lan_interface: lanplus
+ ipmi_port: 623
+
+ root_volume_name: system # see 'volumes' below
+ cloud_init_volume_name: iso # see 'volumes' below
+ # cloud_init_iface_up: enp3s0f0 # see 'interfaces' below.
+ cloud_init_iface_up: enp2s0f1 # see 'interfaces' below.
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 200
+
+ # As with the agent URL, this is a URL to the image that should be
+ # used to deploy the node. It should also be accessible from the
+ # deploying node when nodes are provisioned by the agent. Usually a PXE/provision network address is used.
+ source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+ source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_hwe
+
+ interfaces:
+ - label: enp2s0f0
+ mac_address: !os_env ETH0_MAC_ADDRESS_CMP001
+ - label: enp2s0f1
+ l2_network_device: admin
+ mac_address: !os_env ETH1_MAC_ADDRESS_CMP001
+ - label: enp5s0f0
+ mac_address: !os_env ETH2_MAC_ADDRESS_CMP001
+ features: ['dpdk', 'dpdk_pci: 0000:05:00.0']
+ - label: enp5s0f1
+ mac_address: !os_env ETH3_MAC_ADDRESS_CMP001
+ features: ['dpdk', 'dpdk_pci: 0000:05:00.1']
+ # - label: enp5s0f2
+ # mac_address: !os_env ETH4_MAC_ADDRESS_CMP001
+ # features: ['dpdk', 'dpdk_pci: 0000:05:00.2']
+
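+ # Assumption for illustration: the 'dpdk' feature plus 'dpdk_pci: ...'
+ # entries mark NICs handed over to DPDK; the first PCI id here
+ # (0000:05:00.0) matches the compute_vrouter_dpdk_pci value injected
+ # for cmp001 in salt.yaml.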
+ network_config:
+ enp2s0f1:
+ networks:
+ - admin
+
+ - name: {{ HOSTNAME_CMP002 }}
+ role: salt_minion
+ params:
+ ipmi_user: !os_env IPMI_USER
+ ipmi_password: !os_env IPMI_PASSWORD
+ ipmi_previlegies: OPERATOR
+ ipmi_host: !os_env IPMI_HOST_CMP002 # hostname or IP address
+ ipmi_lan_interface: lanplus
+ ipmi_port: 623
+
+ root_volume_name: system # see 'volumes' below
+ cloud_init_volume_name: iso # see 'volumes' below
+ # cloud_init_iface_up: eno1 # see 'interfaces' below.
+ cloud_init_iface_up: enp2s0f1 # see 'interfaces' below.
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 200
+
+ # As with the agent URL, this is a URL to the image that should be
+ # used to deploy the node. It should also be accessible from the
+ # deploying node when nodes are provisioned by the agent. Usually a PXE/provision network address is used.
+ source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+ source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_hwe
+
+ interfaces:
+ # - label: eno1
+ - label: enp2s0f0
+ mac_address: !os_env ETH0_MAC_ADDRESS_CMP002
+ # - label: eth0
+ - label: enp2s0f1
+ l2_network_device: admin
+ mac_address: !os_env ETH1_MAC_ADDRESS_CMP002
+ # - label: eth3
+ - label: enp5s0f0
+ mac_address: !os_env ETH2_MAC_ADDRESS_CMP002
+ features: ['dpdk', 'dpdk_pci: 0000:05:00.0']
+ # - label: eth2
+ - label: enp5s0f1
+ mac_address: !os_env ETH3_MAC_ADDRESS_CMP002
+ features: ['dpdk', 'dpdk_pci: 0000:05:00.1']
+ # - label: eth4
+ # mac_address: !os_env ETH4_MAC_ADDRESS_CMP002
+ # features: ['dpdk', 'dpdk_pci: 0000:0b:00.0']
+
+ network_config:
+ enp2s0f1:
+ networks:
+ - admin
+
+ # - name: {{ HOSTNAME_CMP003 }}
+ # role: salt_minion
+ # params:
+ # ipmi_user: !os_env IPMI_USER
+ # ipmi_password: !os_env IPMI_PASSWORD
+ # ipmi_previlegies: OPERATOR
+ # ipmi_host: !os_env IPMI_HOST_CMP003 # hostname or IP address
+ # ipmi_lan_interface: lanplus
+ # ipmi_port: 623
+ #
+ # root_volume_name: system # see 'volumes' below
+ # cloud_init_volume_name: iso # see 'volumes' below
+ # # cloud_init_iface_up: eno1 # see 'interfaces' below.
+ # cloud_init_iface_up: enp2s0f0 # see 'interfaces' below.
+ # volumes:
+ # - name: system
+ # capacity: !os_env NODE_VOLUME_SIZE, 200
+ #
+ # # As with the agent URL, this is a URL to the image that should be
+ # # used to deploy the node. It should also be accessible from the
+ # # deploying node when nodes are provisioned by the agent. Usually a PXE/provision network address is used.
+ # source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+ # source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+ #
+ # - name: iso # Volume with name 'iso' will be used
+ # # to store the image with cloud-init metadata.
+ #
+ # cloudinit_meta_data: *cloudinit_meta_data
+ # cloudinit_user_data: *cloudinit_user_data_hwe
+ #
+ # interfaces:
+ # # - label: eno1
+ # - label: enp2s0f1
+ # mac_address: !os_env ETH1_MAC_ADDRESS_CMP003
+ # # - label: eth0
+ # - label: enp2s0f0
+ # l2_network_device: admin
+ # mac_address: !os_env ETH0_MAC_ADDRESS_CMP003
+ #
+ # network_config:
+ # enp2s0f0:
+ # networks:
+ # - admin
diff --git a/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt-context-environment.yaml b/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt-context-environment.yaml
index ff8340b..f8f1a2d 100644
--- a/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt-context-environment.yaml
+++ b/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt-context-environment.yaml
@@ -62,6 +62,8 @@
enp3s0f1:
role: bond_dpdk_prv_lacp
dpdk_pci: "0000:03:00.1"
+ # Remove this interface after switching to reclass 1.5.x
+ # in favor of system.nova.compute.nfv.sriov
enp5s0f1:
role: sriov
@@ -83,6 +85,8 @@
enp3s0f1:
role: bond_dpdk_prv_lacp
dpdk_pci: "0000:03:00.1"
+ # Remove this interface after switching to reclass 1.5.x
+ # in favor of system.nova.compute.nfv.sriov
enp5s0f1:
role: sriov
diff --git a/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt.yaml b/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt.yaml
index 13e465f..69d67ea 100644
--- a/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt.yaml
@@ -14,6 +14,19 @@
{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
+- description: "Workaround for PROD-22201 (remove after switching to reclass 1.5.x) - Remove linux.network.interface object from the system models and use fixed 'environment' model instead"
+ cmd: |
+ set -e;
+ apt-get -y install python-virtualenv python-pip build-essential python-dev libssl-dev;
+ [[ -d /root/venv-reclass-tools ]] || virtualenv /root/venv-reclass-tools;
+ . /root/venv-reclass-tools/bin/activate;
+ pip install git+https://github.com/dis-xcom/reclass-tools;
+ reclass-tools del-key parameters.linux.network.interface /srv/salt/reclass/classes/system/;
+
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "fluentd" "runtest" "maas" "jenkins" "glusterfs" "backupninja" "auditd" "logrotate"') }}
{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
diff --git a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt-context-environment.yaml b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt-context-environment.yaml
index 802c2ee..0bdcf96 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt-context-environment.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt-context-environment.yaml
@@ -62,6 +62,8 @@
enp3s0f1:
role: bond_dpdk_prv_lacp
dpdk_pci: "0000:03:00.1"
+ # Remove this interface after switching to reclass 1.5.x
+ # in favor of system.nova.compute.nfv.sriov
enp5s0f1:
role: sriov
@@ -83,6 +85,8 @@
enp3s0f1:
role: bond_dpdk_prv_lacp
dpdk_pci: "0000:03:00.1"
+ # Remove this interface after switching to reclass 1.5.x
+ # in favor of system.nova.compute.nfv.sriov
enp5s0f1:
role: sriov
diff --git a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt.yaml b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt.yaml
index 7f20040..93f5a14 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt.yaml
@@ -14,6 +14,19 @@
{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
+- description: "Workaround for PROD-22201 (remove after switching to reclass 1.5.x) - Remove linux.network.interface object from the system models and use fixed 'environment' model instead"
+ cmd: |
+ set -e;
+ apt-get -y install python-virtualenv python-pip build-essential python-dev libssl-dev;
+ [[ -d /root/venv-reclass-tools ]] || virtualenv /root/venv-reclass-tools;
+ . /root/venv-reclass-tools/bin/activate;
+ pip install git+https://github.com/dis-xcom/reclass-tools;
+ reclass-tools del-key parameters.linux.network.interface /srv/salt/reclass/classes/system/;
+
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "fluentd" "runtest" "maas" "jenkins" "glusterfs" "backupninja" "auditd" "logrotate"') }}
{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-dpdk-pipeline.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-dpdk-pipeline.yaml
index 3cd7ae5..a42b5f0 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-dpdk-pipeline.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-dpdk-pipeline.yaml
@@ -34,8 +34,8 @@
# Add openstack_compute_node definition from system
reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml --merge;
# Workaround for compute nodes addresses
- reclass-tools add-key parameters._param.openstack_compute_node01_address ${_param:openstack_compute_rack01_single_subnet}.105 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools add-key parameters._param.openstack_compute_node02_address ${_param:openstack_compute_rack01_single_subnet}.106 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools add-key parameters._param.openstack_compute_node01_address '${_param:openstack_compute_rack01_single_subnet}'.105 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools add-key parameters._param.openstack_compute_node02_address '${_param:openstack_compute_rack01_single_subnet}'.106 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
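The only change in this and the three model-generator hunks that follow is quoting: the `cmd: |` block runs under a shell, so an unquoted `${_param:openstack_compute_rack01_single_subnet}` is parsed as shell parameter expansion rather than passed through as a reclass interpolation token, and the placeholder never reaches the YAML file. Single quotes make the shell hand it to `reclass-tools` verbatim. A small demonstration, assuming bash (other shells may instead abort with a "bad substitution" error, which under the templates' `set -e` would fail the step):

```python
import subprocess

def sh(cmd):
    """Run a command under bash and return its stdout."""
    return subprocess.run(['bash', '-c', cmd],
                          capture_output=True, text=True).stdout.strip()

# Unquoted: bash treats ${_param:...} as a substring expansion of the
# (unset) variable _param, so the placeholder silently collapses.
print(sh('echo ${_param:openstack_compute_rack01_single_subnet}.105'))
# -> .105

# Single-quoted: the reclass placeholder survives intact.
print(sh("echo '${_param:openstack_compute_rack01_single_subnet}'.105"))
# -> ${_param:openstack_compute_rack01_single_subnet}.105
```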
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-dvr-vxlan.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-dvr-vxlan.yaml
index 9c7716e..7ec7a36 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-dvr-vxlan.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-dvr-vxlan.yaml
@@ -30,8 +30,8 @@
# Add openstack_compute_node definition from system
reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml --merge;
# Workaround for compute nodes addresses
- reclass-tools add-key parameters._param.openstack_compute_node01_address ${_param:openstack_compute_rack01_single_subnet}.105 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools add-key parameters._param.openstack_compute_node02_address ${_param:openstack_compute_rack01_single_subnet}.106 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools add-key parameters._param.openstack_compute_node01_address '${_param:openstack_compute_rack01_single_subnet}'.105 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools add-key parameters._param.openstack_compute_node02_address '${_param:openstack_compute_rack01_single_subnet}'.106 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-ocata-contrail.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-ocata-contrail.yaml
index 6cbffdd..62da3ec 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-ocata-contrail.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-ocata-contrail.yaml
@@ -36,8 +36,8 @@
# Add openstack_compute_node definition from system
reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml --merge;
# Workaround for compute nodes addresses
- reclass-tools add-key parameters._param.openstack_compute_node01_address ${_param:openstack_compute_rack01_single_subnet}.105 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools add-key parameters._param.openstack_compute_node02_address ${_param:openstack_compute_rack01_single_subnet}.106 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools add-key parameters._param.openstack_compute_node01_address '${_param:openstack_compute_rack01_single_subnet}'.105 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools add-key parameters._param.openstack_compute_node02_address '${_param:openstack_compute_rack01_single_subnet}'.106 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-ovs-dpdk.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-ovs-dpdk.yaml
index 2ff88bd..c727bb1 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-ovs-dpdk.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-ovs-dpdk.yaml
@@ -34,8 +34,8 @@
# Add openstack_compute_node definition from system
reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml --merge;
# Workaround for compute nodes addresses
- reclass-tools add-key parameters._param.openstack_compute_node01_address ${_param:openstack_compute_rack01_single_subnet}.105 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools add-key parameters._param.openstack_compute_node02_address ${_param:openstack_compute_rack01_single_subnet}.106 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools add-key parameters._param.openstack_compute_node01_address '${_param:openstack_compute_rack01_single_subnet}'.105 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools add-key parameters._param.openstack_compute_node02_address '${_param:openstack_compute_rack01_single_subnet}'.106 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
diff --git a/tcp_tests/tests/system/test_install_mcp_sl_os.py b/tcp_tests/tests/system/test_install_mcp_sl_os.py
index 6e5452c..58ad9c7 100644
--- a/tcp_tests/tests/system/test_install_mcp_sl_os.py
+++ b/tcp_tests/tests/system/test_install_mcp_sl_os.py
@@ -47,6 +47,28 @@
@pytest.mark.grab_versions
@pytest.mark.fail_snapshot
+ def test_mcp_os_mitaka_install(self, underlay, openstack_deployed,
+ openstack_actions):
+ """Test for deploying an mcp environment and check it
+ Scenario:
+ 1. Prepare salt on hosts
+ 2. Setup controller nodes
+ 3. Setup compute nodes
+ 4. Run tempest
+
+ """
+ openstack_actions._salt.local(
+ tgt='*', fun='cmd.run',
+ args='service ntp stop; ntpd -gq; service ntp start')
+
+ if settings.RUN_TEMPEST:
+ openstack_actions.run_tempest(pattern=settings.PATTERN,
+ conf_name='lvm_mcp_mitaka.conf')
+ openstack_actions.download_tempest_report()
+ LOG.info("*************** DONE **************")
+
+ @pytest.mark.grab_versions
+ @pytest.mark.fail_snapshot
def test_mcp_sl_os_install(self, underlay, config, openstack_deployed,
stacklight_deployed, openstack_actions):
"""Test for deploying an mcp environment and check it
diff --git a/tcp_tests/tests/system/test_install_mcp_trusty.py b/tcp_tests/tests/system/test_install_mcp_trusty.py
index c86c79c..c91d3a4 100644
--- a/tcp_tests/tests/system/test_install_mcp_trusty.py
+++ b/tcp_tests/tests/system/test_install_mcp_trusty.py
@@ -42,7 +42,8 @@
if settings.RUN_TEMPEST:
openstack_actions.run_tempest(pattern=settings.PATTERN,
- target='cfg01')
+ target='cfg01',
+ conf_name='lvm_mcp_mitaka.conf')
openstack_actions.download_tempest_report(stored_node='cfg01')
LOG.info("*************** DONE **************")