Add virtual-mcp-ocata-ovs deploy templates
diff --git a/tcp_tests/templates/common-services/virtual-mcp-ocata-ovs-common-services.yaml b/tcp_tests/templates/common-services/virtual-mcp-ocata-ovs-common-services.yaml
new file mode 100644
index 0000000..3d2789a
--- /dev/null
+++ b/tcp_tests/templates/common-services/virtual-mcp-ocata-ovs-common-services.yaml
@@ -0,0 +1,104 @@
+{% from 'virtual-mcp-ocata-ovs.jinja' import HOSTNAME_CFG01 with context %}
+
+# Install support services
+
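+# Each step below shares the same schema: 'cmd' is executed on 'node_name'
+# (the salt master, cfg01), retried according to 'retry', and a failure is
+# fatal unless 'skip_fail: true'. Targeting uses salt compound matchers:
+# -C 'I@keepalived:cluster' selects minions whose pillar contains the
+# keepalived:cluster key, and '-b 1' applies the state to one minion at a
+# time so a clustered service is never restarted everywhere at once.
+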
+- description: Install keepalived
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keepalived:cluster' state.sls keepalived -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: true
+
+- description: Check the VIP
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keepalived:cluster' cmd.run 'ip a | grep 172.16.10.2' | grep -B1 172.16.10.2
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
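+# The outer grep on the salt output succeeds only if at least one
+# keepalived node actually holds the 172.16.10.2 VIP, so a missing VIP
+# stops the deployment here.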
+
+
+- description: Install glusterfs
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glusterfs:server' state.sls glusterfs.server.service
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Set up glusterfs on primary controller
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check the gluster status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+
+- description: Install RabbitMQ
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@rabbitmq:server' state.sls rabbitmq
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check the rabbitmq status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install Galera on first server
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@galera:master' state.sls galera
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install Galera on other servers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@galera:slave' state.sls galera
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check mysql status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
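+# Galera bootstrap order matters: the master state initialises a new
+# cluster, then the slaves join it. The status check greps for
+# wsrep_cluster_size and wsrep_incoming_addresses to show whether all
+# controllers joined; it is informational only, hence skip_fail: true.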
+
+
+- description: Install haproxy
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@haproxy:proxy' state.sls haproxy
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check haproxy status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@haproxy:proxy' service.status haproxy
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Restart rsyslog
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@haproxy:proxy' service.restart rsyslog
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
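+# haproxy logs through the local syslog socket, so rsyslog is restarted
+# here, presumably to pick up the logging configuration installed by the
+# haproxy state.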
+
+- description: Install memcached on all controllers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@memcached:server' state.sls memcached
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
diff --git a/tcp_tests/templates/openstack/virtual-mcp-ocata-ovs-openstack.yaml b/tcp_tests/templates/openstack/virtual-mcp-ocata-ovs-openstack.yaml
new file mode 100644
index 0000000..d9b0202
--- /dev/null
+++ b/tcp_tests/templates/openstack/virtual-mcp-ocata-ovs-openstack.yaml
@@ -0,0 +1,158 @@
+{% from 'virtual-mcp-ocata-ovs.jinja' import HOSTNAME_CFG01 with context %}
+
+# Install OpenStack control services
+
+- description: Install keystone service
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' state.sls keystone.server -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Populate keystone services/tenants/admins
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:client' state.sls keystone.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check keystone service-list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonerc; keystone service-list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+
+- description: Install glance on all controllers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glance:server' state.sls glance -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Configure glusterfs.client on all controllers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glance:server' state.sls glusterfs.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Update fernet tokens for keystone server
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' state.sls keystone.server -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
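+# Re-running keystone.server after glusterfs.client is configured likely
+# serves to regenerate the fernet token keys on the shared storage, so
+# every controller can validate tokens issued by the others.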
+
+- description: Check glance image-list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonerc; glance image-list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+
+- description: Install nova on all controllers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@nova:controller' state.sls nova -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check nova service-list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonerc; nova service-list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+
+- description: Install cinder
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@cinder:controller' state.sls cinder -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check cinder list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonerc; cinder list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+
+- description: Install neutron service
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@neutron:server' state.sls neutron -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install neutron on gtw node
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@neutron:gateway' state.sls neutron
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+
+- description: Check neutron agent-list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonerc; neutron agent-list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+
+- description: Install heat service
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@heat:server' state.sls heat -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check heat service
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonerc; heat resource-type-list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+
+- description: Deploy horizon dashboard
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@horizon:server' state.sls horizon
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+- description: Deploy nginx proxy
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@nginx:server' state.sls nginx
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+
+# Install compute node
+
+- description: Apply formulas for compute node
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+- description: Re-apply (as in the deployment docs) formulas for compute node
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
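+# Both state.apply runs are allowed to fail: the first pass may break
+# minion connectivity while reconfiguring compute networking, and the
+# second pass (re-applied as in the deployment docs) converges the
+# remaining states. The 'ip a' check below, retried up to 10 times, is
+# the actual gate that the computes came back reachable.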
+
+- description: Check IP on computes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
+ 'ip a'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 10, delay: 30}
+ skip_fail: false
diff --git a/tcp_tests/templates/salt/virtual-mcp-ocata-ovs-salt.yaml b/tcp_tests/templates/salt/virtual-mcp-ocata-ovs-salt.yaml
new file mode 100644
index 0000000..e382eb5
--- /dev/null
+++ b/tcp_tests/templates/salt/virtual-mcp-ocata-ovs-salt.yaml
@@ -0,0 +1,319 @@
+{% from 'virtual-mcp-ocata-ovs.jinja' import HOSTNAME_CFG01 with context %}
+
+# Install salt on the config node
+
+
+- description: Configure repository on the cfg01 node
+ cmd:
+ echo "172.18.248.114 jenkins.mcp.mirantis.net gerrit.mcp.mirantis.net" >> /etc/hosts;
+ echo "185.135.196.10 apt-mk.mirantis.com" >> /etc/hosts;
+ echo "nameserver 172.18.208.44 >> /etc/resolv.conf;
+ echo "nameserver 8.8.8.8 >> /etc/resolv.conf;
+ which wget >/dev/null || (apt-get update; apt-get install -y wget);
+ echo "deb [arch=amd64] http://apt-mk.mirantis.com/xenial nightly salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
+ wget -O - http://apt-mk.mirantis.com/public.gpg | apt-key add -;
+ echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
+ wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 1}
+ skip_fail: false
+
+- description: Update packages on cfg01
+ cmd: apt-get clean; apt-get update
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 1}
+ skip_fail: false
+
+- description: Install salt-master on cfg01
+ cmd: apt-get install -y reclass git; apt-get install -y salt-master
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 1}
+ skip_fail: false
+
+- description: Install common packages on cfg01
+ cmd: apt-get install -y python-pip wget curl tmux byobu iputils-ping traceroute htop tree
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 1}
+ skip_fail: false
+
+- description: Configure salt-master on cfg01
+ cmd: |
+ cat << 'EOF' >> /etc/salt/master.d/master.conf
+ file_roots:
+ base:
+ - /usr/share/salt-formulas/env
+ pillar_opts: False
+ open_mode: True
+ reclass: &reclass
+ storage_type: yaml_fs
+ inventory_base_uri: /srv/salt/reclass
+ ext_pillar:
+ - reclass: *reclass
+ master_tops:
+ reclass: *reclass
+ EOF
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 1}
+ skip_fail: false
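+# '&reclass' / '*reclass' above are ordinary YAML anchors: the same
+# reclass settings block is reused for both the ext_pillar and
+# master_tops entries, so the master renders pillar data and top data
+# from the same /srv/salt/reclass inventory.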
+
+- description: Configure GIT settings and certificates
+ cmd: touch /root/.git_trusted_certs.pem;
+ for server in git.tcpcloud.eu github.com; do
+ openssl s_client -showcerts -connect $server:443 </dev/null
+ | openssl x509 -outform PEM
+ >> /root/.git_trusted_certs.pem;
+ done;
+ HOME=/root git config --global http.sslCAInfo /root/.git_trusted_certs.pem;
+ HOME=/root git config --global user.email "tcp-qa@example.com";
+ HOME=/root git config --global user.name "TCP QA";
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 1}
+ skip_fail: false
+
+
+- description: Clone reclass models with submodules
+ cmd: |
+ ssh-keyscan -H github.com >> ~/.ssh/known_hosts;
+ git clone -b master --recurse-submodules https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab /srv/salt/reclass;
+ mkdir -p /srv/salt/reclass/classes/service;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 1}
+ skip_fail: false
+
+- description: Configure reclass
+ cmd: |
+ FORMULA_PATH=${FORMULA_PATH:-/usr/share/salt-formulas};
+ FORMULA_REPOSITORY=${FORMULA_REPOSITORY:-deb [arch=amd64] http://apt.tcpcloud.eu/nightly xenial tcp-salt};
+ FORMULA_GPG=${FORMULA_GPG:-http://apt.tcpcloud.eu/public.gpg};
+ which wget > /dev/null || (apt-get update; apt-get install -y wget);
+ echo "${FORMULA_REPOSITORY}" > /etc/apt/sources.list.d/tcpcloud_salt.list;
+ wget -O - "${FORMULA_GPG}" | apt-key add -;
+ apt-get clean; apt-get update;
+ [ ! -d /srv/salt/reclass/classes/service ] && mkdir -p /srv/salt/reclass/classes/service;
+ declare -a formula_services=("linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon");
+ echo -e "\nInstalling all required salt formulas\n";
+ apt-get install -y "${formula_services[@]/#/salt-formula-}";
+ for formula_service in "${formula_services[@]}"; do
+ echo -e "\nLink service metadata for formula ${formula_service} ...\n";
+ [ ! -L "/srv/salt/reclass/classes/service/${formula_service}" ] && ln -s ${FORMULA_PATH}/reclass/service/${formula_service} /srv/salt/reclass/classes/service/${formula_service};
+ done;
+ [ ! -d /srv/salt/env ] && mkdir -p /srv/salt/env;
+ [ ! -L /srv/salt/env/prd ] && ln -s ${FORMULA_PATH}/env /srv/salt/env/prd;
+ [ ! -d /etc/reclass ] && mkdir /etc/reclass;
+
+ cat << 'EOF' >> /etc/reclass/reclass-config.yml
+ storage_type: yaml_fs
+ pretty_print: True
+ output: yaml
+ inventory_base_uri: /srv/salt/reclass
+ EOF
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 1}
+ skip_fail: false
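+# Each salt-formula-<name> package installs under /usr/share/salt-formulas;
+# the loop then symlinks its reclass service metadata into
+# /srv/salt/reclass/classes/service/<name>, which is what lets the cluster
+# model include the formula's classes as service.<name>.*.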
+
+- description: Configure salt-minion on cfg01
+ cmd: |
+ [ ! -d /etc/salt/minion.d ] && mkdir -p /etc/salt/minion.d;
+ cat << "EOF" >> /etc/salt/minion.d/minion.conf
+ id: {{ HOSTNAME_CFG01 }}
+ master: 127.0.0.1
+ EOF
+ apt-get install -y salt-minion;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 1}
+ skip_fail: false
+
+- description: Configure salt adapters on cfg01
+ cmd: |
+ ln -s /usr/lib/python2.7/dist-packages/reclass/adapters/salt.py /usr/local/sbin/reclass-salt;
+ chmod +x /usr/lib/python2.7/dist-packages/reclass/adapters/salt.py
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 1}
+ skip_fail: false
+
+- description: Restart services
+ cmd: |
+ systemctl restart salt-master;
+ systemctl restart salt-minion;
+ echo "Showing system info and metadata ...";
+ salt-call --no-color grains.items;
+ salt-call --no-color pillar.data;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+# Prepare salt services and nodes settings
+- description: Run 'linux' formula on cfg01
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@salt:master' state.sls linux;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Run 'openssh' formula on cfg01
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@salt:master' state.sls openssh;
+ salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@salt:master' cmd.run "sed -i 's/PasswordAuthentication no/PasswordAuthentication
+ yes/' /etc/ssh/sshd_config && service ssh restart";
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+
+- description: '*Workaround* of the bug https://mirantis.jira.com/browse/PROD-7962'
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ '*' cmd.run "echo ' StrictHostKeyChecking no' >> /root/.ssh/config"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 1}
+ skip_fail: false
+
+- description: Run 'reclass' formula on cfg01
+ cmd: timeout 120 salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@salt:master' state.sls reclass;
+ salt-call --no-color state.sls salt.master;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+
+- description: Run 'salt' formula on cfg01
+ cmd: timeout 120 salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@salt:master' state.sls salt.master.service;
+ salt-call --no-color state.sls salt.master,salt.api,salt.minion.ca;
+ systemctl restart salt-minion;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+- description: Accept salt keys from all the nodes
+ cmd: salt-key -A -y
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Generate inventory for all the nodes under /srv/salt/reclass/nodes/_generated
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@salt:master' state.sls reclass
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Refresh pillars on all minions
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Sync all salt resources
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Show reclass-salt --top
+ cmd: reclass-salt --top; salt-call --no-color state.sls salt.minion.cert -l info;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+
+# Bootstrap all nodes
+
+- description: Configure linux on controllers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' state.sls
+ linux
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Configure linux on proxy
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'prx*' state.sls
+ linux
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Configure linux on gtw
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'gtw*' state.sls
+ linux
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 20}
+ skip_fail: false
+
+- description: Configure linux on cmp
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.sls
+ linux
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 20}
+ skip_fail: false
+
+- description: Configure openssh on all nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C '* and not
+ cfg*' state.sls openssh;salt --hard-crash --state-output=mixed --state-verbose=False
+ -C '* and not cfg*' cmd.run "sed -i 's/PasswordAuthentication no/PasswordAuthentication
+ yes/' /etc/ssh/sshd_config && service ssh restart"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Configure salt.minion on ctl
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' state.sls salt.minion
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+
+- description: Configure salt.minion on prx
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'prx*' state.sls salt.minion
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+
+
+- description: Configure salt.minion on gtw
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'gtw*' state.sls salt.minion
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+
+- description: Configure salt.minion on cmp
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.sls salt.minion
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+
+- description: Check salt minion versions on slaves
+ cmd: salt '*' test.version
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check salt top states on nodes
+ cmd: salt '*' state.show_top
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Configure ntp and rsyslog on nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' state.sls ntp,rsyslog
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
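+# The addresses removed below (.110, .105 and .106 on ens4) are the ones
+# assigned via the underlay's private network DHCP (see the default_
+# offsets in virtual-mcp-ocata-ovs.yaml). Flushing ens4 clears them so
+# the salt-managed network configuration can take over the interface.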
+- description: Hack gtw node
+ cmd: salt 'gtw*' cmd.run "ip addr del 172.16.10.110/24 dev ens4; ip addr flush dev ens4";
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Hack cmp01 node
+ cmd: salt 'cmp01*' cmd.run "ip addr del 172.16.10.105/24 dev ens4; ip addr flush dev ens4";
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Hack cmp02 node
+ cmd: salt 'cmp02*' cmd.run "ip addr del 172.16.10.106/24 dev ens4; ip addr flush dev ens4";
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
diff --git a/tcp_tests/templates/underlay/virtual-mcp-ocata-ovs--meta-data.yaml b/tcp_tests/templates/underlay/virtual-mcp-ocata-ovs--meta-data.yaml
new file mode 100644
index 0000000..3699401
--- /dev/null
+++ b/tcp_tests/templates/underlay/virtual-mcp-ocata-ovs--meta-data.yaml
@@ -0,0 +1,4 @@
+| # All the data below will be stored as a string object
+ instance-id: iid-local1
+ hostname: {hostname}
+ local-hostname: {hostname}
diff --git a/tcp_tests/templates/underlay/virtual-mcp-ocata-ovs--user-data-cfg01.yaml b/tcp_tests/templates/underlay/virtual-mcp-ocata-ovs--user-data-cfg01.yaml
new file mode 100644
index 0000000..443e50c
--- /dev/null
+++ b/tcp_tests/templates/underlay/virtual-mcp-ocata-ovs--user-data-cfg01.yaml
@@ -0,0 +1,98 @@
+| # All the data below will be stored as a string object
+ #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ ssh_pwauth: True
+ users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCp0evjOaK8c8SKYK4r2+0BN7g+8YSvQ2n8nFgOURCyvkJqOHi1qPGZmuN0CclYVdVuZiXbWw3VxRbSW3EH736VzgY1U0JmoTiSamzLHaWsXvEIW8VCi7boli539QJP0ikJiBaNAgZILyCrVPN+A6mfqtacs1KXdZ0zlMq1BPtFciR1JTCRcVs5vP2Wwz5QtY2jMIh3aiwkePjMTQPcfmh1TkOlxYu5IbQyZ3G1ahA0mNKI9a0dtF282av/F6pwB/N1R1nEZ/9VtcN2I1mf1NW/tTHEEcTzXYo1R/8K9vlqAN8QvvGLZtZduGviNVNoNWvoxaXxDt8CPv2B2NCdQFZp
+
+ disable_root: false
+ chpasswd:
+ list: |
+ root:r00tme
+ expire: False
+
+ bootcmd:
+ # Block access to SSH while node is preparing
+ - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+ output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+ runcmd:
+ # Configure dhclient
+ - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+ - sudo resolvconf -u
+
+ # Prepare network connection
+ - sudo ifup ens3
+ #- sudo route add default gw {gateway} {interface_name}
+ - sudo ifup ens4
+
+ # Create swap
+ - fallocate -l 4G /swapfile
+ - chmod 600 /swapfile
+ - mkswap /swapfile
+ - swapon /swapfile
+ - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+
+ ########################################################
+ # Node is ready, allow SSH access
+ - echo "Allow SSH access ..."
+ - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+ ########################################################
+
+ write_files:
+ - path: /etc/network/interfaces
+ content: |
+ auto ens3
+ iface ens3 inet dhcp
+ auto ens4
+ iface ens4 inet dhcp
+
+ - path: /root/.ssh/id_rsa
+ owner: root:root
+ permissions: '0600'
+ content: |
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIEogIBAAKCAQEAqdHr4zmivHPEimCuK9vtATe4PvGEr0Np/JxYDlEQsr5Cajh4
+ tajxmZrjdAnJWFXVbmYl21sN1cUW0ltxB+9+lc4GNVNCZqE4kmpsyx2lrF7xCFvF
+ Qou26JYud/UCT9IpCYgWjQIGSC8gq1TzfgOpn6rWnLNSl3WdM5TKtQT7RXIkdSUw
+ kXFbObz9lsM+ULWNozCId2osJHj4zE0D3H5odU5DpcWLuSG0MmdxtWoQNJjSiPWt
+ HbRdvNmr/xeqcAfzdUdZxGf/VbXDdiNZn9TVv7UxxBHE812KNUf/Cvb5agDfEL7x
+ i2bWXbhr4jVTaDVr6MWl8Q7fAj79gdjQnUBWaQIDAQABAoIBAFU3kU6yIna9BViH
+ UX+S2ijtRBjZ68JjavEnp4xvo5h+nydcdT57q9lv/0nAi3g3gmXm/oJH+/ZU87HV
+ zy+zP+t+umDSChUkPBZFL5jxpKyN7BhMrP1KzRuEGYd6vJE/nfY5g095P5vDgnpX
+ o+SNg/YqrY1u8zgr/hnfRaV2/XyIDEEcQXTHseWTnnMQnULFU88xL8yq8ACT5GhK
+ 7A9m5ukfcU6d/fs/psz5Yqw5IQsWbv1yJ3/FKufPHlo2Nzh3/3eDAZUXvaBgf1so
+ FWFpHtkry3OXOGaZ98HgF9hL0twS0pzMvuypdGUQAt6nyB1N5re4LK/MAOddqwEc
+ 1+NQzfECgYEA2ryEf0GLJdtiYs3F4HbwTwJVIXdyWv7kjYGeMkutzzAjXl6wx8aq
+ kfqLJ7x7UkR5unZ1ajEbKBciAlSuFA+Gikn6a4Lv8h87aSnHpPd/2VSitRlI/gW7
+ w4U4CL3Br1JyonU5WA7VYfTow7KnHBhdwm27RMA9uosyIpveQRpqSG0CgYEAxsAS
+ wCQKrhuPq2YtGtFR7K4BL+N+0E1Vq6h49u1ukcgUe0GHVD3VzBypNCv7rWEVHzAg
+ biCVi7PCjzZYW4fYZmzVD4JbFLVGOUu7aJwLaE4wDe72DNr6YZhcS+Ta98BP+x0q
+ Wt34JNPDabRPfhXfhiCqnWjjod+4Zqx4VJVNgG0CgYB5EXL8xJhyAbW5Hk/x56Mm
+ +BGKjoR7HS3/rMiU6hJv5SMObrbGPI3YcqZm/gn8BO6jaEGg30E6tWMbiyc270j2
+ be/vZe/NQcAuevOHuX3IGvJb7nzaLO46UBgtrmnv0mCkzuFIfh1ZNKdI+i9Ie6wZ
+ m4bVjNod0EGVqlQgELDXGQKBgB+NNmzSS++/6FrpaZesSzkrlnynvOYMoOETacCp
+ iLgT70xx5q308w/oLORfZyDrHJNK7JsPCS6YZvadRgGh2zTHajuAEj2DWZaW8zV0
+ MEtqvi44FU+NI9qCeYSC3FAgc5IF20d5nX8bLxaEzWnSxx1f6jX7BMgZ4AhMsP2c
+ hiUxAoGAFaxn+t9blIjqUiuh0smSYFhLBVPZveYHQDmQYERjktptBd3X95fGnSKh
+ iDe2iPGyud2+Yu4X/VjHLh/MRru+ZXvPXw1XwEqX93q8a1n283ul0Rl9+KKKOVHR
+ eecTjI/BfXBf33mPRKny3xuHw6uwta2T3OXky9IhqYS1kkHiZWA=
+ -----END RSA PRIVATE KEY-----
+
+ - path: /root/.ssh/config
+ owner: root:root
+ permissions: '0600'
+ content: |
+ Host *
+ ServerAliveInterval 300
+ ServerAliveCountMax 10
+ StrictHostKeyChecking no
+ UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/underlay/virtual-mcp-ocata-ovs--user-data1604.yaml b/tcp_tests/templates/underlay/virtual-mcp-ocata-ovs--user-data1604.yaml
new file mode 100644
index 0000000..1013571
--- /dev/null
+++ b/tcp_tests/templates/underlay/virtual-mcp-ocata-ovs--user-data1604.yaml
@@ -0,0 +1,98 @@
+| # All the data below will be stored as a string object
+ #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ ssh_pwauth: True
+ users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDGwjUlYn9UsmWmAGSuEA2sICad7WqxgsJR0HKcMbbxi0tn96h4Cq2iGYmzlJ48egLm5R5pxyWnFvL4b/2zb+kKTPCMwRc9nv7xEGosEFNQEoSDd+gYu2CO0dgS2bX/7m2DXmzvhqPjxWQUXXsb0OYAS1r9Es65FE8y4rLaegz8V35xfH45bTCA0W8VSKh264XtGz12hacqsttE/UvyjJTZe+/XV+xJy3WAWxe8J/MuW1VqbqNewTmpTE/LJU8i6pG4msU6+wH99UvsGAOKQOduynUHKWG3VZg5YCjpbbV/t/pfW/vHB3b3jiifQmNhulyiG/CNnSQ5BahtV/7qPsYt vagrant@cfg01
+
+ disable_root: false
+ chpasswd:
+ list: |
+ root:r00tme
+ expire: False
+
+ bootcmd:
+ # Block access to SSH while node is preparing
+ - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+ output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+ runcmd:
+ - export TERM=linux
+ - export LANG=C
+ # Configure dhclient
+ - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+ - sudo resolvconf -u
+
+ # Prepare network connection
+ - sudo ifup ens3
+ #- sudo route add default gw {gateway} {interface_name}
+ - sudo ifup ens4
+
+ # Create swap
+ - fallocate -l 4G /swapfile
+ - chmod 600 /swapfile
+ - mkswap /swapfile
+ - swapon /swapfile
+ - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+
+ ########################################################
+ # Node is ready, allow SSH access
+ - echo "Allow SSH access ..."
+ - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+ ########################################################
+
+ ############## TCP Cloud cfg01 node ##################
+ #- sleep 120
+ - echo "Preparing base OS"
+ - which wget >/dev/null || (apt-get update; apt-get install -y wget)
+ - echo "deb [arch=amd64] http://apt.tcpcloud.eu/nightly/ xenial main security extra tcp tcp-salt" > /etc/apt/sources.list
+ - wget -O - http://apt.tcpcloud.eu/public.gpg | apt-key add -
+ # saltstack repo is for minions that have the same version in the xenial and trusty (2016.3.3)
+ #- echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/latest xenial main" > /etc/apt/sources.list.d/saltstack.list
+ #- wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/latest/SALTSTACK-GPG-KEY.pub | apt-key add -
+ - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list
+ - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -
+
+ - apt-get clean
+ - apt-get update
+ - apt-get -y upgrade
+
+ # Install common packages
+ - apt-get install -y python-pip git
+ - apt-get install -y curl tmux byobu iputils-ping traceroute htop tree
+
+ - apt-get install -y salt-minion
+
+ # To be configured from inventory/fuel-devops by operator or autotests
+ - 'echo "id: {hostname}" >> /etc/salt/minion'
+ - 'echo "master: 192.168.10.100" >> /etc/salt/minion'
+
+ - echo "Restarting minion service with workarounds..."
+ - rm -f /etc/salt/pki/minion/minion_master.pub
+ - service salt-minion restart
+ - sleep 5
+ - rm -f /etc/salt/pki/minion/minion_master.pub
+ - service salt-minion restart
+
+ #- echo "Showing node metadata..."
+ #- salt-call pillar.data
+
+ #- echo "Running complete state ..."
+ #- salt-call state.sls linux,openssh,salt
+
+ write_files:
+ - path: /etc/network/interfaces
+ content: |
+ auto ens3
+ iface ens3 inet dhcp
+ auto ens4
+ iface ens4 inet dhcp
+
diff --git a/tcp_tests/templates/underlay/virtual-mcp-ocata-ovs.yaml b/tcp_tests/templates/underlay/virtual-mcp-ocata-ovs.yaml
new file mode 100644
index 0000000..0a27a36
--- /dev/null
+++ b/tcp_tests/templates/underlay/virtual-mcp-ocata-ovs.yaml
@@ -0,0 +1,405 @@
+---
+aliases:
+ default_interface_model:
+ - &interface_model !os_env INTERFACE_MODEL, virtio
+
+{% set DOMAIN_NAME = os_env('LAB_CONFIG_NAME', 'virtual-mcp-ocata-ovs') + '.local' %}
+{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
+
+template:
+ devops_settings:
+ env_name: {{ os_env('ENV_NAME', 'virtual-mcp-ocata-ovs') }}
+
+ address_pools:
+ private-pool01:
+ net: 172.16.10.0/24:24
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ default_{{ HOSTNAME_CFG01 }}: +100
+ default_{{ HOSTNAME_CTL01 }}: +101
+ default_{{ HOSTNAME_CTL02 }}: +102
+ default_{{ HOSTNAME_CTL03 }}: +103
+ default_{{ HOSTNAME_CMP01 }}: +105
+ default_{{ HOSTNAME_CMP02 }}: +106
+ default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_PRX01 }}: +121
+ ip_ranges:
+ dhcp: [+90, -10]
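+      # Pool entries are offsets from the network address: with
+      # 172.16.10.0/24, 'gateway: +1' is .1 and 'default_{{ HOSTNAME_CFG01 }}: +100'
+      # is .100, while 'dhcp: [+90, -10]' spans from .90 to ten addresses
+      # before the end of the range. The pools below repeat the same offsets.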
+
+ admin-pool01:
+ net: 192.168.10.0/24:24
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ default_{{ HOSTNAME_CFG01 }}: +100
+ default_{{ HOSTNAME_CTL01 }}: +101
+ default_{{ HOSTNAME_CTL02 }}: +102
+ default_{{ HOSTNAME_CTL03 }}: +103
+ default_{{ HOSTNAME_CMP01 }}: +105
+ default_{{ HOSTNAME_CMP02 }}: +106
+ default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_PRX01 }}: +121
+ ip_ranges:
+ dhcp: [+90, -10]
+
+ tenant-pool01:
+ net: 10.1.0.0/24:24
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ default_{{ HOSTNAME_CFG01 }}: +100
+ default_{{ HOSTNAME_CTL01 }}: +101
+ default_{{ HOSTNAME_CTL02 }}: +102
+ default_{{ HOSTNAME_CTL03 }}: +103
+ default_{{ HOSTNAME_CMP01 }}: +105
+ default_{{ HOSTNAME_CMP02 }}: +106
+ default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_PRX01 }}: +121
+ ip_ranges:
+ dhcp: [+10, -10]
+
+ external-pool01:
+ net: 10.16.0.0/24:24
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ default_{{ HOSTNAME_CFG01 }}: +100
+ default_{{ HOSTNAME_CTL01 }}: +101
+ default_{{ HOSTNAME_CTL02 }}: +102
+ default_{{ HOSTNAME_CTL03 }}: +103
+ default_{{ HOSTNAME_CMP01 }}: +105
+ default_{{ HOSTNAME_CMP02 }}: +106
+ default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_PRX01 }}: +121
+ ip_ranges:
+ dhcp: [+10, -10]
+
+
+ groups:
+ - name: default
+ driver:
+ name: devops.driver.libvirt
+ params:
+ connection_string: !os_env CONNECTION_STRING, qemu:///system
+ storage_pool_name: !os_env STORAGE_POOL_NAME, default
+ stp: False
+ hpet: False
+ enable_acpi: true
+ use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
+
+ network_pools:
+ admin: admin-pool01
+ private: private-pool01
+ tenant: tenant-pool01
+ external: external-pool01
+
+ l2_network_devices:
+ private:
+ address_pool: private-pool01
+ dhcp: true
+
+ admin:
+ address_pool: admin-pool01
+ dhcp: true
+ forward:
+ mode: nat
+
+ tenant:
+ address_pool: tenant-pool01
+ dhcp: true
+
+ external:
+ address_pool: external-pool01
+ dhcp: true
+ forward:
+ mode: nat
+
+
+ group_volumes:
+ - name: cloudimage1604 # This name is used for 'backing_store' option for node volumes.
+ source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img or
+ # http://apt.tcpcloud.eu/images/ubuntu-16-04-x64-201608231004.qcow2
+ format: qcow2
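+    # Node system volumes below reference this image via 'backing_store',
+    # i.e. each node gets a qcow2 copy-on-write overlay, so the base cloud
+    # image is fetched once and shared by every VM.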
+
+ nodes:
+ - name: {{ HOSTNAME_CFG01 }}
+ role: salt_master
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 4
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: !include virtual-mcp-ocata-ovs--meta-data.yaml
+ cloudinit_user_data: !include virtual-mcp-ocata-ovs--user-data-cfg01.yaml
+
+ interfaces:
+ - label: ens3
+ l2_network_device: admin
+ interface_model: *interface_model
+ - label: ens4
+ l2_network_device: private
+ interface_model: *interface_model
+ network_config:
+ ens3:
+ networks:
+ - admin
+ ens4:
+ networks:
+ - private
+
+ - name: {{ HOSTNAME_CTL01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 4
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: !include virtual-mcp-ocata-ovs--meta-data.yaml
+ cloudinit_user_data: !include virtual-mcp-ocata-ovs--user-data1604.yaml
+
+ interfaces: &interfaces
+ - label: ens3
+ l2_network_device: admin
+ interface_model: *interface_model
+ - label: ens4
+ l2_network_device: private
+ interface_model: *interface_model
+ network_config: &network_config
+ ens3:
+ networks:
+ - admin
+ ens4:
+ networks:
+ - private
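+      # '&interfaces' / '&network_config' are YAML anchors: ctl02, ctl03
+      # and prx01 below reuse this two-NIC (admin + private) layout via
+      # '*interfaces' and '*network_config' instead of repeating it.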
+
+ - name: {{ HOSTNAME_CTL02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 4
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: !include virtual-mcp-ocata-ovs--meta-data.yaml
+ cloudinit_user_data: !include virtual-mcp-ocata-ovs--user-data1604.yaml
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_CTL03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 4
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: !include virtual-mcp-ocata-ovs--meta-data.yaml
+ cloudinit_user_data: !include virtual-mcp-ocata-ovs--user-data1604.yaml
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_PRX01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 4
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: !include virtual-mcp-ocata-ovs--meta-data.yaml
+ cloudinit_user_data: !include virtual-mcp-ocata-ovs--user-data1604.yaml
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+
+ - name: {{ HOSTNAME_CMP01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 4
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: !include virtual-mcp-ocata-ovs--meta-data.yaml
+ cloudinit_user_data: !include virtual-mcp-ocata-ovs--user-data1604.yaml
+
+
+ interfaces: &all_interfaces
+ - label: ens3
+ l2_network_device: admin
+ interface_model: *interface_model
+ - label: ens4
+ l2_network_device: private
+ interface_model: *interface_model
+ - label: ens5
+ l2_network_device: tenant
+ interface_model: *interface_model
+ - label: ens6
+ l2_network_device: external
+ interface_model: *interface_model
+ network_config: &all_network_config
+ ens3:
+ networks:
+ - admin
+ ens4:
+ networks:
+ - private
+ ens5:
+ networks:
+ - tenant
+ ens6:
+ networks:
+ - external
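+      # Compute and gateway nodes use the four-NIC '&all_interfaces'
+      # layout (admin, private, tenant, external); the tenant and external
+      # networks are only wired to the nodes that carry data-plane traffic.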
+
+ - name: {{ HOSTNAME_CMP02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 4
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: !include virtual-mcp-ocata-ovs--meta-data.yaml
+ cloudinit_user_data: !include virtual-mcp-ocata-ovs--user-data1604.yaml
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
+
+ - name: {{ HOSTNAME_GTW01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: !include virtual-mcp-ocata-ovs--meta-data.yaml
+ cloudinit_user_data: !include virtual-mcp-ocata-ovs--user-data1604.yaml
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs.jinja b/tcp_tests/templates/virtual-mcp-ocata-ovs.jinja
new file mode 100644
index 0000000..69aa84c
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-ocata-ovs.jinja
@@ -0,0 +1,2 @@
+{% set DOMAIN_NAME = os_env('LAB_CONFIG_NAME', 'virtual-mcp-ocata-ovs') + '.local' %}
+{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}