Add model cookied-bm-contrail with MaaS
Change-Id: I8f29d684a5de003f2f1788cfb8c56a8ef819eae0
diff --git a/tcp_tests/settings.py b/tcp_tests/settings.py
index f90000c..24c6c0b 100644
--- a/tcp_tests/settings.py
+++ b/tcp_tests/settings.py
@@ -59,3 +59,4 @@
PATTERN = os.environ.get('PATTERN', None)
RUN_TEMPEST = get_var_as_bool('RUN_TEMPEST', False)
+RUN_SL_TESTS = get_var_as_bool('RUN_SL_TESTS', False)
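+# Usage sketch (assumption about accepted values): export RUN_SL_TESTS=true
+# before a run to enable the StackLight tests step; get_var_as_bool parses
+# the environment variable into a boolean.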
diff --git a/tcp_tests/templates/cookied-bm-contrail-maas/common-services.yaml b/tcp_tests/templates/cookied-bm-contrail-maas/common-services.yaml
new file mode 100644
index 0000000..0aebf89
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-contrail-maas/common-services.yaml
@@ -0,0 +1,118 @@
+{% from 'cookied-bm-contrail-maas/underlay.yaml' import HOSTNAME_CFG01 with context %}
+
+# Install support services
+- description: Install keepalived on ctl01
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keepalived:cluster and *01*' state.sls keepalived
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: true
+
+- description: Install keepalived
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keepalived:cluster' state.sls keepalived
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: true
+
+- description: Check the VIP
+ cmd: |
+ OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
+ echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+
+- description: Install glusterfs
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glusterfs:server' state.sls glusterfs.server.service
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Setup glusterfs on primary controller
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
+- description: Check the gluster status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install RabbitMQ on ctl01
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install RabbitMQ
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@rabbitmq:server' state.sls rabbitmq
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check the rabbitmq status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install Galera on first server
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@galera:master' state.sls galera
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install Galera on other servers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@galera:slave' state.sls galera -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check mysql status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
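+# With the compact three-node database layout, a healthy cluster is expected
+# to report wsrep_cluster_size 3 and all dbs nodes listed in
+# wsrep_incoming_addresses (expectation only; the step above is not a hard check).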
+
+
+- description: Install haproxy
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@haproxy:proxy' state.sls haproxy
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check haproxy status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@haproxy:proxy' service.status haproxy
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Restart rsyslog
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@haproxy:proxy' service.restart rsyslog
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install memcached on all controllers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@memcached:server' state.sls memcached
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
diff --git a/tcp_tests/templates/cookied-bm-contrail-maas/lab04-physical-inventory.yaml b/tcp_tests/templates/cookied-bm-contrail-maas/lab04-physical-inventory.yaml
new file mode 100644
index 0000000..69a9df0
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-contrail-maas/lab04-physical-inventory.yaml
@@ -0,0 +1,77 @@
+nodes:
+ cfg01.cookied-bm-contrail-maas.local:
+ reclass_storage_name: infra_config_node01
+ roles:
+ - infra_config
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ # Physical nodes
+
+ kvm01.cookied-bm-contrail-maas.local:
+ reclass_storage_name: infra_kvm_node01
+ roles:
+ - infra_kvm
+ - linux_system_codename_xenial
+ interfaces:
+ enp9s0f0:
+ role: single_mgm
+ enp9s0f1:
+ role: bond0_ab_ovs_vlan_ctl
+
+ kvm02.cookied-bm-contrail-maas.local:
+ reclass_storage_name: infra_kvm_node02
+ roles:
+ - infra_kvm
+ - linux_system_codename_xenial
+ interfaces:
+ enp9s0f0:
+ role: single_mgm
+ enp9s0f1:
+ role: bond0_ab_ovs_vlan_ctl
+
+ kvm03.cookied-bm-contrail-maas.local:
+ reclass_storage_name: infra_kvm_node03
+ roles:
+ - infra_kvm
+ - linux_system_codename_xenial
+ interfaces:
+ enp9s0f0:
+ role: single_mgm
+ enp9s0f1:
+ role: bond0_ab_ovs_vlan_ctl
+
+ cmp001.cookied-bm-contrail-maas.local:
+ reclass_storage_name: openstack_compute_node01
+ roles:
+ - openstack_compute
+ - features_lvm_backend
+ - linux_system_codename_xenial
+ interfaces:
+ enp2s0f1:
+ role: single_mgm
+ deploy_address: 172.16.49.73
+ enp5s0f0:
+ role: bond0_ab_contrail
+ tenant_address: 192.168.0.101
+ enp5s0f1:
+ role: single_vlan_ctl
+ single_address: 10.167.8.101
+
+ cmp002.cookied-bm-contrail-maas.local:
+ reclass_storage_name: openstack_compute_node02
+ roles:
+ - openstack_compute
+ - features_lvm_backend
+ - linux_system_codename_xenial
+ interfaces:
+ enp2s0f1:
+ role: single_mgm
+ deploy_address: 172.16.49.74
+ enp5s0f0:
+ role: bond0_ab_contrail
+ tenant_address: 192.168.0.102
+ enp5s0f1:
+ role: single_vlan_ctl
+ single_address: 10.167.8.102
diff --git a/tcp_tests/templates/cookied-bm-contrail-maas/maas.yml b/tcp_tests/templates/cookied-bm-contrail-maas/maas.yml
new file mode 100644
index 0000000..ed0191c
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-contrail-maas/maas.yml
@@ -0,0 +1,111 @@
+classes:
+- system.linux.system.repo.mcp.apt_mirantis.maas
+- system.maas.region.single
+parameters:
+ _param:
+ maas_region_port: 5240
+ maas_cluster_region_port: ${_param:maas_region_port}
+ power_user: mcp-qa
+ power_password: password
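+ # With these parameters the MAAS API endpoint resolves to
+ # http://${_param:deploy_address}:5240/MAAS, the URL that the
+ # "Configure dhcp for fabric" step in salt.yaml logs into.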
+ maas:
+ cluster:
+ region:
+ host: ${_param:deploy_address}:${_param:maas_cluster_region_port}
+ region:
+ bind:
+ host: ${_param:deploy_address}:${_param:maas_region_port}
+ subnets:
+ 172.16.49.64/26:
+ cidr: 172.16.49.64/26
+ fabric: fabric-51
+ gateway_ip: 172.16.49.65
+ iprange:
+ end: 172.16.49.119
+ start: 172.16.49.77
+ fabrics:
+ fabric-51:
+ description: Fabric for deploy
+ #commissioning_scripts:
+ # 00-maas-05-simplify-network-interfaces: /etc/maas/files/commisioning_scripts/00-maas-05-simplify-network-interfaces
+ machines:
+ kvm01: # cz7341-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ pxe_interface_mac: "0c:c4:7a:33:1f:e4"
+ interfaces:
+ one1:
+ mac: "0c:c4:7a:33:1f:e4"
+ mode: "static"
+ ip: ${_param:infra_kvm_node01_deploy_address}
+ subnet: "10.10.0.0/16" # create it manually... in UI
+ gateway: ${_param:deploy_network_gateway}
+ power_parameters:
+ power_address: "185.8.59.161"
+ power_password: ${_param:power_password}
+ power_type: ipmi
+ power_user: ${_param:power_user}
+ kvm02: # cz7342-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ pxe_interface_mac: "0c:c4:7a:33:20:fc"
+ interfaces:
+ one1:
+ mac: "0c:c4:7a:33:20:fc"
+ mode: "static"
+ ip: ${_param:infra_kvm_node02_deploy_address}
+ subnet: "10.10.0.0/16" # create it manually... in UI
+ gateway: ${_param:deploy_network_gateway}
+ power_parameters:
+ power_address: "185.8.59.162"
+ power_password: ${_param:power_password}
+ power_type: ipmi
+ power_user: ${_param:power_user}
+ kvm03: # cz7343-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ pxe_interface_mac: "0c:c4:7a:31:fb:b6"
+ interfaces:
+ one1:
+ mac: "0c:c4:7a:31:fb:b6"
+ mode: "static"
+ ip: ${_param:infra_kvm_node03_deploy_address}
+ subnet: "10.10.0.0/16" # create it manually... in UI
+ gateway: ${_param:deploy_network_gateway}
+ power_parameters:
+ power_address: "185.8.59.163"
+ power_password: ${_param:power_password}
+ power_type: ipmi
+ power_user: ${_param:power_user}
+ cmp001: # cz7345-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ pxe_interface_mac: "0c:c4:7a:31:f0:12"
+ interfaces:
+ one1:
+ mac: "0c:c4:7a:31:f0:12"
+ mode: "static"
+ ip: ${_param:infra_kvm_node04_deploy_address}
+ subnet: "10.10.0.0/16" # create it manually... in UI
+ gateway: ${_param:deploy_network_gateway}
+ power_parameters:
+ power_address: "185.8.59.17"
+ power_password: ${_param:power_password}
+ power_type: ipmi
+ power_user: ${_param:power_user}
+ cmp002: # cz7346-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ pxe_interface_mac: "0c:c4:7a:31:ef:bc"
+ interfaces:
+ one1:
+ mac: "0c:c4:7a:31:ef:bc"
+ mode: "static"
+ ip: ${_param:infra_kvm_node05_deploy_address}
+ subnet: "10.10.0.0/16" # create it manually... in UI
+ gateway: ${_param:deploy_network_gateway}
+ power_parameters:
+ power_address: "185.8.59.18"
+ power_password: ${_param:power_password}
+ power_type: ipmi
+ power_user: ${_param:power_user}
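+ # Sketch for registering one more machine; placeholders in <...> are
+ # hypothetical, not values from this lab:
+ # cmp003:
+ # distro_series: "xenial"
+ # pxe_interface_mac: "<pxe-nic mac>"
+ # interfaces:
+ # one1:
+ # mac: "<pxe-nic mac>"
+ # mode: "static"
+ # ip: <deploy address>
+ # subnet: "10.10.0.0/16"
+ # gateway: ${_param:deploy_network_gateway}
+ # power_parameters:
+ # power_address: "<ipmi address>"
+ # power_password: ${_param:power_password}
+ # power_type: ipmi
+ # power_user: ${_param:power_user}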
diff --git a/tcp_tests/templates/cookied-bm-contrail-maas/openstack.yaml b/tcp_tests/templates/cookied-bm-contrail-maas/openstack.yaml
new file mode 100644
index 0000000..0fed1c0
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-contrail-maas/openstack.yaml
@@ -0,0 +1,294 @@
+{% from 'cookied-bm-contrail-maas/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-bm-contrail-maas/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
+# Install OpenStack control services
+
+- description: Install glance on all controllers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glance:server' state.sls glance -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install keystone service (note that different fernet keys are created on different nodes)
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' state.sls keystone.server -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 15}
+ skip_fail: false
+
+- description: Restart apache due to PROD-10477
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl restart apache2"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 15}
+ skip_fail: false
+
+- description: Check apache status due to PROD-10477
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl status apache2"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 15}
+ skip_fail: false
+
+- description: Mount glusterfs.client volumes (requires the 'keystone' and 'glusterfs' system users to be created)
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glance:server' state.sls glusterfs.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Update fernet keys for keystone server on the mounted glusterfs volume
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' state.sls keystone.server -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Populate keystone services/tenants/admins
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:client' state.sls keystone.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
+- description: Check keystone service-list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack service list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check glance image-list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonerc; glance image-list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+
+- description: Install nova on all controllers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@nova:controller' state.sls nova -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
+- description: Check nova service-list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonerc; nova --debug service-list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+
+
+- description: Install cinder
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@cinder:controller' state.sls cinder -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install cinder volume
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@cinder:volume' state.sls cinder
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check cinder list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonerc; cinder list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+
+- description: Install neutron service
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@neutron:server' state.sls neutron -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+# Install contrail
+- description: Install Opencontrail db on ctl01
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@opencontrail:database and *01*' state.sls opencontrail.database
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 20}
+ skip_fail: false
+
+- description: Install Opencontrail db on all nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@opencontrail:database' state.sls opencontrail.database
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 20}
+ skip_fail: false
+
+- description: Install Opencontrail control on ctl01
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@opencontrail:control and *01*' state.sls opencontrail exclude=opencontrail.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install Opencontrail control on all nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@opencontrail:control' state.sls opencontrail exclude=opencontrail.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install Opencontrail on collector
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@opencontrail:collector' state.sls opencontrail exclude=opencontrail.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Workaround for https://mirantis.jira.com/browse/PROD-12798
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@opencontrail:control' service.restart 'keepalived'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+# OpenContrail vrouters
+- description: Install Opencontrail client
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@opencontrail:database:id:1' state.sls 'opencontrail.client'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install Opencontrail client on computes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@opencontrail:compute' state.sls 'opencontrail.client'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
+- description: Install Opencontrail on computes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@opencontrail:compute' state.sls 'opencontrail'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
+- description: Test Opencontrail
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@opencontrail:control' cmd.run 'contrail-status'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install heat service
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@heat:server' state.sls heat -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check heat service
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack orchestration resource type list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 5, delay: 10}
+ skip_fail: false
+
+
+- description: Deploy horizon dashboard
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@horizon:server' state.sls horizon
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+- description: Deploy nginx proxy
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@nginx:server' state.sls nginx
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+
+# Install compute node
+
+- description: Apply formulas for compute node
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+- description: Re-apply (as in the docs) formulas for compute node
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check IP on computes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
+ 'ip a'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 10, delay: 30}
+ skip_fail: false
+
+
+# Upload cirros image
+
+- description: Upload cirros image on ctl01
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ 'wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-i386-disk.img'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 30}
+ skip_fail: false
+
+- description: Register image in glance
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; glance --timeout 120 image-create --name cirros --visibility public --disk-format qcow2 --container-format bare --progress < /root/cirros-0.3.4-i386-disk.img'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Sync time
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
+ 'service ntp stop; ntpd -gq; service ntp start'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
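+# Note: the step above stops ntpd, runs "ntpd -gq" (-g permits a large initial
+# clock offset, -q sets the time once and exits), then restarts the service.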
+
+- description: Install docker.io on ctl
+ cmd: salt "ctl01*" cmd.run 'apt-get install docker.io -y'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Enable forward policy
+ cmd: salt "ctl01*" cmd.run 'iptables --policy FORWARD ACCEPT'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Hack vrouter (delete default mount point)
+ cmd: salt "cmp*" cmd.run "sed -i 's/exit 0//g' /etc/rc.local; echo 'umount /dev/hugepages; service supervisor-vrouter restart' >> /etc/rc.local; echo 'exit 0' >> /etc/rc.local"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: true
+
+- description: Temporary workaround to set the correct PCI address in vrouter.conf
+ cmd: salt "cmp*" cmd.run "sed -i 's/physical\_interface\_address\=.*/physical\_interface\_address=0000\:05\:00\.0/g' /etc/contrail/contrail-vrouter-agent.conf"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: true
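+# Note: the sed above pins physical_interface_address to PCI 0000:05:00.0,
+# assumed to correspond to the enp5s0f0 vRouter interface from the physical
+# inventory.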
+
+- description: Remove crash files from /var/crashes/ left over from vrouter crashes
+ cmd: salt "cmp*" cmd.run "rm -rf /var/crashes/*"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: true
+
+- description: Reboot computes
+ cmd: salt --timeout=600 "cmp*" system.reboot
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: true
diff --git a/tcp_tests/templates/cookied-bm-contrail-maas/overrides-policy.yml b/tcp_tests/templates/cookied-bm-contrail-maas/overrides-policy.yml
new file mode 100644
index 0000000..1f35a6b
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-contrail-maas/overrides-policy.yml
@@ -0,0 +1,40 @@
+parameters:
+ nova:
+ controller:
+ policy:
+ context_is_admin: 'role:admin or role:administrator'
+ 'compute:create': 'rule:admin_or_owner'
+ 'compute:create:attach_network':
+ cinder:
+ controller:
+ policy:
+ 'volume:delete': 'rule:admin_or_owner'
+ 'volume:extend':
+ neutron:
+ server:
+ policy:
+ create_subnet: 'rule:admin_or_network_owner'
+ 'get_network:queue_id': 'rule:admin_only'
+ 'create_network:shared':
+ glance:
+ server:
+ policy:
+ publicize_image: "role:admin"
+ add_member:
+ keystone:
+ server:
+ policy:
+ admin_or_token_subject: 'rule:admin_required or rule:token_subject'
+ heat:
+ server:
+ policy:
+ context_is_admin: 'role:admin and is_admin_project:True'
+ deny_stack_user: 'not role:heat_stack_user'
+ deny_everybody: '!'
+ 'cloudformation:ValidateTemplate': 'rule:deny_everybody'
+ 'cloudformation:DescribeStackResources':
+ ceilometer:
+ server:
+ policy:
+ segregation: 'rule:context_is_admin'
+ 'telemetry:get_resource':
diff --git a/tcp_tests/templates/cookied-bm-contrail-maas/salt-context-cookiecutter-contrail.yaml b/tcp_tests/templates/cookied-bm-contrail-maas/salt-context-cookiecutter-contrail.yaml
new file mode 100644
index 0000000..e053de3
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-contrail-maas/salt-context-cookiecutter-contrail.yaml
@@ -0,0 +1,202 @@
+default_context:
+ backup_private_key: |-
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIEpAIBAAKCAQEA1Hbf7nJ3VGyRxqwPNqnDcspyuJXf0WwuEJJyDxATV0JTuZSz
+ jcT4A1XLN/WG8diN0Q5/tYRpuSxNKaz3nPxXjTqK1byrB7jGBhNQeWYHMNoBH3VR
+ Kzm4yBGIKaG6k5wYB5kS950zxNHIKQ7Mo4+0WloIlgZTkMrQo98FaD1X9jyVJm7O
+ TfIqTwYSJb4TJf/hgL8xntpGC6gRjZDQzlxB25bSP2+u3KzfUyDiaK8hs5PWwnNn
+ KfvZmYZmsgA4+D+dQh4YkyfDN6hQL4ttW+2SRpZ/js+KTkqU1Vyn5P5cbf2RwH0G
+ VewWyYA2ZK7nXPYH7+ia1rPj6wO3UXEFnxVzowIDAQABAoIBAQDCgenURFrWoWZ7
+ ub1bz+MszgZk0mDLOvyZE1j0TUVHl2NK/MW8vlxHwV2AZ3kZI6YBhAKKzSR08Afc
+ ZYty3tnQY44CyuzQ7unrWfdMjIl8wbhRcnfS2M8/6jz70CIdTMP7ALqKkhJ4140l
+ eXUDMGZuaQp/Pl92qRaFT4GFwtMqivQobCX5/yehd3+mMu8CkK+1U0T/9gWacEdv
+ JISAkfpOGkXLmZ/ekkqNFfv6SrNaefaYjqMeGk3ZrFmPAEstqxo0tiUjV5BYXCME
+ SsJCF/EnDHxMYzwkqCCGrXY0DbXGqF1B+dO8MqeYTIYyohEJ82vkezP5BqM4L/3Z
+ Lec3ypPhAoGBAPh0eBKNgLOhMz+h1DnjdTJAcsa58ZQCH6r+pTVIhwmbCT/3RTJT
+ oizYVhtlQyi0lHsxMwtYx+oj1Qacs7jb6UH24d6+oX0JZr/lfxqZVvgmTOoNED3l
+ ZdX3xU4GOMhWXS7IEhk750LFGF1k7QLcoULx3u/8dZNgW0kNZUsptkxpAoGBANrq
+ m9CFnbzSm20EicfT8FX7Hu6Wl3Lgwnsgc+7+dkm8uST+DdDWjUymkTjeIq/8Z/va
+ I+rhURp+r7tLFAy+sC6YeLZ7oHcUIfUwXOdlIJOpdVayF5oiZ5qCSkNcT2oJfFdJ
+ Uf3/gwQDxg6CKJIKdnyK1njnfldfoFLZz9Z1Ze4rAoGBAMhfde7Qe/liiihJZRUC
+ oiPS4j3u/Ct3wv5uu+JLCczvYfhafU3nMSWlm1wgwJb1e8IWnaoLAb+NAmKAwljV
+ 0jrG1saDS03B5UHh3i4feIpMqT8hJfYlKYn0dwVD80tui1wNMrtzGkE5HztDB/qE
+ 4PFSi49UNaaT0UsLKKQDkefxAoGAf91+iwowOuTsoX2AGHajLyVRSNwus4uyLIal
+ EJgScTlJDuFRIoTe3UGBGy0sJ4yPE9yzE/LtE0Oh0wykNll+wIiQIU4OSN86gmLw
+ MLuxjm3xOmUlQgMMboPhanzVacMGnFkYCfqfBM5LdZfyqHJyCIZzhQT5l4EkPKA6
+ NDI4CicCgYAtaC63kRjv8NNWo1iuovOLF4pdnYEviT37s24zlf18IKLZTW66AGbX
+ 2lLHBiS7SyLEvIpn8/Vwh185CbTitlfRpU5bPzwk4dvKPgUa0eiov/Voe+WPWafQ
+ +uErJ3mt7l+dThL1q70aD6Dl1pbMjG5xbIKSmXNrmrrMVN8+pM2BJQ==
+ -----END RSA PRIVATE KEY-----
+ backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDUdt/ucndUbJHGrA82qcNyynK4ld/RbC4QknIPEBNXQlO5lLONxPgDVcs39Ybx2I3RDn+1hGm5LE0prPec/FeNOorVvKsHuMYGE1B5Zgcw2gEfdVErObjIEYgpobqTnBgHmRL3nTPE0cgpDsyjj7RaWgiWBlOQytCj3wVoPVf2PJUmbs5N8ipPBhIlvhMl/+GAvzGe2kYLqBGNkNDOXEHbltI/b67crN9TIOJoryGzk9bCc2cp+9mZhmayADj4P51CHhiTJ8M3qFAvi21b7ZJGln+Oz4pOSpTVXKfk/lxt/ZHAfQZV7BbJgDZkrudc9gfv6JrWs+PrA7dRcQWfFXOj
+ bmk_enabled: 'False'
+ ceph_enabled: 'False'
+ cicd_enabled: 'False'
+ cluster_domain: cookied-bm-contrail-maas.local
+ cluster_name: cookied-bm-contrail-maas
+ compute_bond_mode: active-backup
+ compute_padding_with_zeros: 'True'
+ compute_primary_first_nic: eth1
+ compute_primary_second_nic: eth2
+ context_seed: ADkYzQUNzQcfbBH3McX2MLVK7EkgliUBuVRvaw6e4pFcRtbkawgc9FTHFaw1L5Eh
+ control_network_netmask: 255.255.255.0
+ control_network_subnet: 10.167.8.0/24
+ control_vlan: '2422'
+ cookiecutter_template_branch: ''
+ cookiecutter_template_credentials: gerrit
+ cookiecutter_template_url: https://gerrit.mcp.mirantis.net/mk/cookiecutter-templates.git
+ deploy_network_gateway: 172.16.49.65
+ deploy_network_netmask: 255.255.255.192
+ deploy_network_subnet: 172.16.49.64/26
+ deployment_type: physical
+ dns_server01: 172.18.208.44
+ dns_server02: 8.8.4.4
+ email_address: sgudz@mirantis.com
+ infra_bond_mode: active-backup
+ infra_deploy_nic: eth0
+ infra_kvm01_control_address: 10.167.8.241
+ infra_kvm01_deploy_address: 172.16.49.67
+ infra_kvm01_hostname: kvm01
+ infra_kvm02_control_address: 10.167.8.242
+ infra_kvm02_deploy_address: 172.16.49.68
+ infra_kvm02_hostname: kvm02
+ infra_kvm03_control_address: 10.167.8.243
+ infra_kvm03_deploy_address: 172.16.49.69
+ infra_kvm03_hostname: kvm03
+ infra_kvm_vip_address: 10.167.8.240
+ infra_primary_first_nic: eth1
+ infra_primary_second_nic: eth2
+ internal_proxy_enabled: 'False'
+ kubernetes_enabled: 'False'
+ local_repositories: 'False'
+ maas_deploy_address: 172.16.49.66
+ maas_deploy_cidr: 172.16.49.64/26
+ maas_deploy_gateway: 172.16.49.65
+ maas_deploy_range_end: 172.16.49.119
+ maas_deploy_range_start: 172.16.49.77
+ maas_deploy_vlan: '0'
+ maas_dhcp_enabled: 'True'
+ maas_fabric_name: fabric-51
+ maas_hostname: cfg01
+ maas_manage_deploy_network: 'True'
+ mcp_common_scripts_branch: ''
+ mcp_version: proposed
+ offline_deployment: 'False'
+ opencontrail_analytics_address: 10.167.8.30
+ opencontrail_analytics_hostname: nal
+ opencontrail_analytics_node01_address: 10.167.8.31
+ opencontrail_analytics_node01_hostname: nal01
+ opencontrail_analytics_node02_address: 10.167.8.32
+ opencontrail_analytics_node02_hostname: nal02
+ opencontrail_analytics_node03_address: 10.167.8.33
+ opencontrail_analytics_node03_hostname: nal03
+ opencontrail_compute_iface_mask: '24'
+ opencontrail_control_address: 10.167.8.20
+ opencontrail_control_hostname: ntw
+ opencontrail_control_node01_address: 10.167.8.21
+ opencontrail_control_node01_hostname: ntw01
+ opencontrail_control_node02_address: 10.167.8.22
+ opencontrail_control_node02_hostname: ntw02
+ opencontrail_control_node03_address: 10.167.8.23
+ opencontrail_control_node03_hostname: ntw03
+ opencontrail_enabled: 'True'
+ opencontrail_router01_address: 10.167.8.100
+ opencontrail_router01_hostname: rtr01
+ opencontrail_router02_address: 10.167.8.101
+ opencontrail_router02_hostname: rtr02
+ openssh_groups: ''
+ openstack_benchmark_node01_address: 10.167.8.95
+ openstack_benchmark_node01_hostname: bmk01
+ openstack_cluster_size: compact
+ openstack_compute_count: '2'
+ openstack_compute_rack01_hostname: cmp
+ openstack_compute_rack01_single_subnet: 10.167.8
+ openstack_compute_rack01_tenant_subnet: 192.168.0
+ openstack_control_address: 10.167.8.10
+ openstack_control_hostname: ctl
+ openstack_control_node01_address: 10.167.8.11
+ openstack_control_node01_hostname: ctl01
+ openstack_control_node02_address: 10.167.8.12
+ openstack_control_node02_hostname: ctl02
+ openstack_control_node03_address: 10.167.8.13
+ openstack_control_node03_hostname: ctl03
+ openstack_database_address: 10.167.8.50
+ openstack_database_hostname: dbs
+ openstack_database_node01_address: 10.167.8.51
+ openstack_database_node01_hostname: dbs01
+ openstack_database_node02_address: 10.167.8.52
+ openstack_database_node02_hostname: dbs02
+ openstack_database_node03_address: 10.167.8.53
+ openstack_database_node03_hostname: dbs03
+ openstack_enabled: 'True'
+ openstack_message_queue_address: 10.167.8.40
+ openstack_message_queue_hostname: msg
+ openstack_message_queue_node01_address: 10.167.8.41
+ openstack_message_queue_node01_hostname: msg01
+ openstack_message_queue_node02_address: 10.167.8.42
+ openstack_message_queue_node02_hostname: msg02
+ openstack_message_queue_node03_address: 10.167.8.43
+ openstack_message_queue_node03_hostname: msg03
+ openstack_network_engine: opencontrail
+ openstack_neutron_bgp_vpn: 'False'
+ openstack_neutron_bgp_vpn_driver: bagpipe
+ openstack_nfv_dpdk_enabled: 'False'
+ openstack_nfv_sriov_enabled: 'False'
+ openstack_nova_compute_nfv_req_enabled: 'False'
+ openstack_nova_compute_reserved_host_memory_mb: '900'
+ openstack_proxy_address: 10.167.8.80
+ openstack_proxy_hostname: prx
+ openstack_proxy_node01_address: 10.167.8.81
+ openstack_proxy_node01_hostname: prx01
+ openstack_proxy_node02_address: 10.167.8.82
+ openstack_proxy_node02_hostname: prx02
+ openstack_upgrade_node01_address: 10.167.8.19
+ openstack_version: ocata
+ oss_enabled: 'False'
+ oss_node03_address: ${_param:stacklight_monitor_node03_address}
+ oss_webhook_app_id: '24'
+ oss_webhook_login_id: '13'
+ platform: openstack_enabled
+ public_host: ${_param:openstack_proxy_address}
+ publication_method: email
+ reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+ salt_api_password: 981k0rv2KevPhpyyy3BgZK8cNZWUjifx
+ salt_api_password_hash: $6$kpjwqhVv$gUQV0XYxXNUu3ESKSmE1s.eDAaYunerIsF3DdzjvMqCRiH7DdOWuun/pdjSVp.jjKHYsb0GimXyUh6sX/77PM/
+ salt_master_address: 172.16.49.66
+ salt_master_hostname: cfg01
+ salt_master_management_address: 172.16.49.66
+ shared_reclass_branch: ''
+ shared_reclass_url: https://gerrit.mcp.mirantis.net/salt-models/reclass-system.git
+ stacklight_enabled: 'True'
+ stacklight_log_address: 10.167.8.60
+ stacklight_log_hostname: log
+ stacklight_log_node01_address: 10.167.8.61
+ stacklight_log_node01_hostname: log01
+ stacklight_log_node02_address: 10.167.8.62
+ stacklight_log_node02_hostname: log02
+ stacklight_log_node03_address: 10.167.8.63
+ stacklight_log_node03_hostname: log03
+ stacklight_long_term_storage_type: influxdb
+ stacklight_monitor_address: 10.167.8.70
+ stacklight_monitor_hostname: mon
+ stacklight_monitor_node01_address: 10.167.8.71
+ stacklight_monitor_node01_hostname: mon01
+ stacklight_monitor_node02_address: 10.167.8.72
+ stacklight_monitor_node02_hostname: mon02
+ stacklight_monitor_node03_address: 10.167.8.73
+ stacklight_monitor_node03_hostname: mon03
+ stacklight_telemetry_address: 10.167.8.85
+ stacklight_telemetry_hostname: mtr
+ stacklight_telemetry_node01_address: 10.167.8.86
+ stacklight_telemetry_node01_hostname: mtr01
+ stacklight_telemetry_node02_address: 10.167.8.87
+ stacklight_telemetry_node02_hostname: mtr02
+ stacklight_telemetry_node03_address: 10.167.8.88
+ stacklight_telemetry_node03_hostname: mtr03
+ stacklight_version: '2'
+ static_ips_on_deploy_network_enabled: 'False'
+ tenant_network_gateway: 192.168.0.1
+ tenant_network_netmask: 255.255.255.0
+ tenant_network_subnet: 192.168.0.0/24
+ tenant_vlan: '2423'
+ upstream_proxy_enabled: 'False'
+ use_default_network_scheme: 'True'
diff --git a/tcp_tests/templates/cookied-bm-contrail-maas/salt-context-environment.yaml b/tcp_tests/templates/cookied-bm-contrail-maas/salt-context-environment.yaml
new file mode 100644
index 0000000..1b560f4
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-contrail-maas/salt-context-environment.yaml
@@ -0,0 +1,245 @@
+nodes:
+ # Virtual Control Plane nodes
+
+ ctl01.cookied-bm-contrail-maas.local:
+ reclass_storage_name: openstack_control_node01
+ roles:
+ - openstack_control_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ ctl02.cookied-bm-contrail-maas.local:
+ reclass_storage_name: openstack_control_node02
+ roles:
+ - openstack_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ ctl03.cookied-bm-contrail-maas.local:
+ reclass_storage_name: openstack_control_node03
+ roles:
+ - openstack_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ dbs01.cookied-bm-contrail-maas.local:
+ reclass_storage_name: openstack_database_node01
+ roles:
+ - openstack_database_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ dbs02.cookied-bm-contrail-maas.local:
+ reclass_storage_name: openstack_database_node02
+ roles:
+ - openstack_database
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ dbs03.cookied-bm-contrail-maas.local:
+ reclass_storage_name: openstack_database_node03
+ roles:
+ - openstack_database
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ msg01.cookied-bm-contrail-maas.local:
+ reclass_storage_name: openstack_message_queue_node01
+ roles:
+ - openstack_message_queue
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ msg02.cookied-bm-contrail-maas.local:
+ reclass_storage_name: openstack_message_queue_node02
+ roles:
+ - openstack_message_queue
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ msg03.cookied-bm-contrail-maas.local:
+ reclass_storage_name: openstack_message_queue_node03
+ roles:
+ - openstack_message_queue
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ prx01.cookied-bm-contrail-maas.local:
+ reclass_storage_name: openstack_proxy_node01
+ roles:
+ - openstack_proxy
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ prx02.cookied-bm-contrail-maas.local:
+ reclass_storage_name: openstack_proxy_node02
+ roles:
+ - openstack_proxy
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ mon01.cookied-bm-contrail-maas.local:
+ reclass_storage_name: stacklight_server_node01
+ roles:
+ - stacklightv2_server_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ mon02.cookied-bm-contrail-maas.local:
+ reclass_storage_name: stacklight_server_node02
+ roles:
+ - stacklightv2_server
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ mon03.cookied-bm-contrail-maas.local:
+ reclass_storage_name: stacklight_server_node03
+ roles:
+ - stacklightv2_server
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ nal01.cookied-bm-contrail-maas.local:
+ reclass_storage_name: opencontrail_analytics_node01
+ roles:
+ - opencontrail_analytics
+ - linux_system_codename_trusty
+ interfaces:
+ eth1:
+ role: single_ctl
+
+ nal02.cookied-bm-contrail-maas.local:
+ reclass_storage_name: opencontrail_analytics_node02
+ roles:
+ - opencontrail_analytics
+ - linux_system_codename_trusty
+ interfaces:
+ eth1:
+ role: single_ctl
+
+ nal03.cookied-bm-contrail-maas.local:
+ reclass_storage_name: opencontrail_analytics_node03
+ roles:
+ - opencontrail_analytics
+ - linux_system_codename_trusty
+ interfaces:
+ eth1:
+ role: single_ctl
+
+ ntw01.cookied-bm-contrail-maas.local:
+ reclass_storage_name: opencontrail_control_node01
+ roles:
+ - opencontrail_control
+ - linux_system_codename_trusty
+ interfaces:
+ eth1:
+ role: single_ctl
+
+ ntw02.cookied-bm-contrail-maas.local:
+ reclass_storage_name: opencontrail_control_node02
+ roles:
+ - opencontrail_control
+ - linux_system_codename_trusty
+ interfaces:
+ eth1:
+ role: single_ctl
+
+ ntw03.cookied-bm-contrail-maas.local:
+ reclass_storage_name: opencontrail_control_node03
+ roles:
+ - opencontrail_control
+ - linux_system_codename_trusty
+ interfaces:
+ eth1:
+ role: single_ctl
+
+ mtr01.cookied-bm-contrail-maas.local:
+ reclass_storage_name: stacklight_telemetry_node01
+ roles:
+ - stacklight_telemetry
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ mtr02.cookied-bm-contrail-maas.local:
+ reclass_storage_name: stacklight_telemetry_node02
+ roles:
+ - stacklight_telemetry
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ mtr03.cookied-bm-contrail-maas.local:
+ reclass_storage_name: stacklight_telemetry_node03
+ roles:
+ - stacklight_telemetry
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ log01.cookied-bm-contrail-maas.local:
+ reclass_storage_name: stacklight_log_node01
+ roles:
+ - stacklight_log_leader_v2
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ log02.cookied-bm-contrail-maas.local:
+ reclass_storage_name: stacklight_log_node02
+ roles:
+ - stacklight_log
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ log03.cookied-bm-contrail-maas.local:
+ reclass_storage_name: stacklight_log_node03
+ roles:
+ - stacklight_log
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+# bmk01.cookied-bm-contrail-maas.local:
+# reclass_storage_name: openstack_benchmark_node01
+# roles:
+# - openstack_benchmark
+# - linux_system_codename_xenial
+# interfaces:
+# ens3:
+# role: single_ctl
diff --git a/tcp_tests/templates/cookied-bm-contrail-maas/salt.yaml b/tcp_tests/templates/cookied-bm-contrail-maas/salt.yaml
new file mode 100644
index 0000000..2888a65
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-contrail-maas/salt.yaml
@@ -0,0 +1,231 @@
+{% from 'cookied-bm-contrail-maas/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-bm-contrail-maas/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookied-bm-contrail-maas/underlay.yaml' import DOMAIN_NAME with context %}
+{% from 'cookied-bm-contrail-maas/underlay.yaml' import REPOSITORY_SUITE with context %}
+{% from 'cookied-bm-contrail-maas/underlay.yaml' import ETH1_IP_ADDRESS_CFG01 with context %}
+{% from 'cookied-bm-contrail-maas/underlay.yaml' import MAAS_DHCP_POOL_START with context %}
+{% from 'cookied-bm-contrail-maas/underlay.yaml' import MAAS_DHCP_POOL_END with context %}
+
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+# See shared-salt.yaml for other salt model repository parameters
+
+{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','physical-cookied-bm-contrail-maas') %}
+# Path to the context files used to render Cluster and Environment models
+{%- set CLUSTER_CONTEXT_NAME = 'salt-context-cookiecutter-contrail.yaml' %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-environment.yaml','lab04-physical-inventory.yaml'] %}
+{%- set CONTROL_VLAN = os_env('CONTROL_VLAN', '2422') %}
+{%- set TENANT_VLAN = os_env('TENANT_VLAN', '2423') %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
+
+{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL(CONTROL_VLAN=CONTROL_VLAN, TENANT_VLAN=TENANT_VLAN) }}
+
+{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
+
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "glusterfs" "xtrabackup" "maas" "backupninja" "jenkins" "fluentd"') }}
+
+{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
+
+{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
+
+- description: Upload maas template
+ upload:
+ local_path: {{ config.salt_deploy.templates_dir }}{{ LAB_CONFIG_NAME }}/
+ local_filename: maas.yml
+ remote_path: /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/
+ node_name: {{ HOSTNAME_CFG01 }}
+ skip_fail: False
+
+- description: "Workaround for rack01 compute generator"
+ cmd: |
+ set -e;
+ # Remove rack01 key
+ . /root/venv-reclass-tools/bin/activate;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
+ # Add openstack_compute_node definition from system
+ reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml --merge;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: "WR for changing image to proposed"
+ cmd: |
+ set -e;
+ # Add message_queu host for opencontrail
+ . /root/venv-reclass-tools/bin/activate;
+ reclass-tools add-key parameters._param.salt_control_xenial_image 'http://ci.mcp.mirantis.net:8085/images/ubuntu-16-04-x64-mcp{{ REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster//{{ LAB_CONFIG_NAME }}infra/init.yml;
+ reclass-tools add-key parameters._param.salt_control_trusty_image 'http://ci.mcp.mirantis.net:8085/images/ubuntu-14-04-x64-mcp{{ REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster//{{ LAB_CONFIG_NAME }}infra/init.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+{{ SHARED.MACRO_GENERATE_INVENTORY() }}
+
+- description: Temporary workaround for removing cinder-volume from CTL nodes
+ cmd: |
+ sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml;
+ sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
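+# Note: removing these classes keeps cinder-volume off the CTL nodes; the
+# volume role is still applied by the "Install cinder volume" step in
+# openstack.yaml, which targets 'I@cinder:volume'.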
+
+- description: Temporary workaround for correct bridge names according to environment templates
+ cmd: |
+ sed -i 's/br\-ctl/br\_ctl/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+ sed -i 's/br\-mgm/br\_mgm/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Update minion information
+ cmd: |
+ salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_grains &&
+ salt --hard-crash --state-output=mixed --state-verbose=False '*' mine.update &&
+ salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar && sleep 10
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Rerun openssh after env model is generated
+ cmd: |
+ salt-call state.sls openssh
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Execute linux.network.host one more time after salt.minion to apply dynamically registered hosts on the cluster nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@linux:system' state.sls linux
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Configure rsyslog on nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@linux:system' state.sls rsyslog
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Configure maas.cluster
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:master' state.sls maas.cluster
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Configure maas.region
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:master' state.sls maas.region
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 10}
+ skip_fail: false
+
+- description: Configure dhcp for fabric
+ cmd: |
+ touch /root/API_KEY_FILE;
+ export PROFILE=mirantis;
+ export API_KEY_FILE=/root/API_KEY_FILE;
+ export MAAS_URL=http://{{ ETH1_IP_ADDRESS_CFG01 }}:5240/MAAS;
+ maas-region apikey --username=$PROFILE > $API_KEY_FILE;
+ maas login $PROFILE $MAAS_URL - < $API_KEY_FILE;
+ maas $PROFILE ipranges create type=dynamic start_ip={{ MAAS_DHCP_POOL_START }} end_ip={{ MAAS_DHCP_POOL_END }} comment='Reserved dynamic range for HW and VCP nodes'
+ maas $PROFILE ipranges create type=reserved start_ip={{ ETH1_IP_ADDRESS_CFG01 }} end_ip={{ ETH1_IP_ADDRESS_CFG01 }} comment='This is a reserved IP for cfg with maas node';
+ maas $PROFILE vlan update 51 0 dhcp_on=True primary_rack=cfg01;
+ maas $PROFILE nodes read |grep status -A 1 -B 1;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
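+# Note: the step above fetches the region API key, logs the "mirantis" profile
+# into the MAAS CLI, reserves the dynamic commissioning range plus the cfg01
+# address, and enables DHCP on the untagged VLAN (vid 0) of fabric id 51
+# (assumed to match the fabric-51 definition in maas.yml).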
+
+- description: Commissioning nodes
+ cmd: |
+ salt-call state.apply maas.machines;
+ salt-call state.apply maas.machines.wait_for_ready;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Assign IPs
+ cmd: |
+ salt-call state.sls maas.machines.assign_ip;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Deploying BM nodes
+ cmd: |
+ salt-call maas.deploy_machines || true
+ salt-call state.apply maas.machines.wait_for_deployed;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
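+# Note: bare-metal provisioning above is a three-stage flow: maas.machines
+# registers and commissions the nodes, assign_ip pins their deploy addresses,
+# and deploy_machines installs the OS; "|| true" tolerates transient CLI
+# errors while wait_for_deployed polls until nodes reach the Deployed status.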
+
+- description: Configure ntp on all nodes
+ cmd: |
+ salt '*' state.sls ntp;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
+
+{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
+
+########################################
+# Spin up Control Plane VMs on KVM nodes
+########################################
+
+- description: Execute 'libvirt' states to create necessary libvirt networks
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'kvm*' state.sls libvirt
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 10}
+ skip_fail: false
+
+- description: Syncing before salt control state
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Create VMs for control plane
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'kvm*' state.sls salt.control
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 10}
+ skip_fail: false
+
+- description: '*Workaround* to wait for the control-plane VMs to appear in salt-key (instead of sleep)'
+ cmd: |
+ salt-key -l acc| sort > /tmp/current_keys.txt &&
+ salt 'kvm*' cmd.run 'virsh list --name' | grep -v 'kvm'|sort|xargs -I {} fgrep {} /tmp/current_keys.txt
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 20, delay: 30}
+ skip_fail: false
+
+#########################################
+# Configure all running salt minion nodes
+#########################################
+
+- description: Hack resolv.conf on VCP nodes for internal services access
+ cmd: |
+ salt --hard-crash --state-output=mixed --state-verbose=False -C '*' cmd.run "echo 'nameserver 172.18.208.44' > /etc/resolv.conf;"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Refresh pillars on all minions
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Sync all salt resources
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all && sleep 5
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Show reclass-salt --top for generated nodes
+ cmd: reclass-salt --top -u /srv/salt/reclass/nodes/_generated/
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
diff --git a/tcp_tests/templates/cookied-bm-contrail-maas/sl.yaml b/tcp_tests/templates/cookied-bm-contrail-maas/sl.yaml
new file mode 100644
index 0000000..4748f8a
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-contrail-maas/sl.yaml
@@ -0,0 +1,243 @@
+{% from 'cookied-bm-contrail-maas/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
+
+# Install docker swarm
+- description: Configure docker service
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install docker swarm on master node
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Send grains to the swarm slave nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls salt.minion.grains
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Update mine
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' mine.update
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Refresh modules
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Rerun swarm on slaves for proper token population
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Configure slave nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: List registered Docker swarm nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install keepalived on mon nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'mon*' state.sls keepalived
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Check the VIP on mon nodes
+ cmd: |
+ SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
+ echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+# Install SLv2 infra
+# Launch containers
+- description: Launch prometheus containers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master and I@prometheus:server' state.sls docker.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 10}
+ skip_fail: false
+
+- description: Check docker ps
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' cmd.run "docker ps"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 10}
+ skip_fail: false
+
+- description: Install telegraf
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 10}
+ skip_fail: false
+
+- description: Configure Prometheus exporters, if pillar 'prometheus:exporters' exists on any server
+ cmd: |
+ if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus
+ fi
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install elasticsearch server
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install kibana server
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install elasticsearch client
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 30}
+ skip_fail: false
+
+- description: Install kibana client
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:client' state.sls kibana.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Check InfluxDB
+ cmd: |
+ INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
+ echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
+ if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
+ fi
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+# Install Prometheus LTS (optional, if set in the model)
+- description: Prometheus LTS (optional, if set in the model)
+ cmd: |
+ PROMETHEUS_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' test.ping 1>/dev/null 2>&1 && echo true`;
+ echo "Prometheus relay service presence: ${PROMETHEUS_SERVICE}";
+ if [[ "$PROMETHEUS_SERVICE" == "true" ]]; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' state.sls prometheus
+ fi
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+# Install service for the log collection
+- description: Configure fluentd
+ cmd: |
+ FLUENTD_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' test.ping 1>/dev/null 2>&1 && echo true`;
+ echo "Fluentd service presence: ${FLUENTD_SERVICE}";
+ if [[ "$FLUENTD_SERVICE" == "true" ]]; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' state.sls fluentd
+ else
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:log_collector' state.sls heka.log_collector
+ fi
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+# Install heka ceilometer collector
+- description: Install heka ceilometer collector if it exists
+ cmd: |
+ CEILO=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' test.ping 1>/dev/null 2>&1 && echo true`;
+ echo "Ceilometer service presence: ${CEILO}";
+ if [[ "$CEILO" == "true" ]]; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' state.sls heka.ceilometer_collector;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' service.restart ceilometer_collector
+ fi
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+# Collect grains needed to configure the services
+
+- description: Get grains
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Sync modules
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Update mine
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 5, delay: 15}
+ skip_fail: false
+
+# Configure the services running in Docker Swarm
+- description: Configure prometheus in docker swarm
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' state.sls prometheus
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Configure Remote Collector in Docker Swarm for OpenStack deployments
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' state.sls heka.remote_collector
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install sphinx
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@sphinx:server' state.sls sphinx
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+#- description: Install prometheus alertmanager
+# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 1, delay: 10}
+# skip_fail: false
+
+#- description: run docker state
+# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 1, delay: 10}
+# skip_fail: false
+#
+#- description: docker ps
+# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 1, delay: 10}
+# skip_fail: false
+
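+# Give Grafana time to come up before provisioning dashboards and
+# datasources; the retry covers slow service starts.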
+- description: Configure Grafana dashboards and datasources
+ cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 10}
+ skip_fail: false
+
+- description: Run salt minion to create cert files
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+{{ SHARED_SL_TESTS.MACRO_CLONE_SL_TESTS() }}
+{{ SHARED_SL_TESTS.MACRO_CONFIGURE_TESTS() }}
diff --git a/tcp_tests/templates/cookied-bm-contrail-maas/underlay--meta-data.yaml b/tcp_tests/templates/cookied-bm-contrail-maas/underlay--meta-data.yaml
new file mode 100644
index 0000000..3699401
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-contrail-maas/underlay--meta-data.yaml
@@ -0,0 +1,4 @@
+| # All the data below will be stored as a string object
+ instance-id: iid-local1
+ hostname: {hostname}
+ local-hostname: {hostname}
diff --git a/tcp_tests/templates/cookied-bm-contrail-maas/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-bm-contrail-maas/underlay--user-data-cfg01.yaml
new file mode 100644
index 0000000..1e318a5
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-contrail-maas/underlay--user-data-cfg01.yaml
@@ -0,0 +1,69 @@
+| # All the data below will be stored as a string object
+ #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ ssh_pwauth: True
+ users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ {% for key in config.underlay.ssh_keys %}
+ - ssh-rsa {{ key['public'] }}
+ {% endfor %}
+
+ disable_root: false
+ chpasswd:
+ list: |
+ root:r00tme
+ expire: False
+
+ bootcmd:
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+ output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+ runcmd:
+ # Configure dhclient
+ - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+ - sudo resolvconf -u
+
+ # Prepare network connection
+ - sudo ifdown ens3
+ - sudo ip r d default || true # remove existing default route to get it from dhcp
+ - sudo ifup ens3
+ - sudo route add default gw {gateway} ens3
+
+ # Create swap
+ - fallocate -l 4G /swapfile
+ - chmod 600 /swapfile
+ - mkswap /swapfile
+ - swapon /swapfile
+ - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+
+ - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
+
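+   # Bring up the MAAS PXE interface. The defaults below match the
+   # provisioning-pool01 network in underlay.yaml and can be overridden
+   # via the corresponding MAAS_* environment variables.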
+ - export MAAS_PXE_INTERFACE_NAME=ens4
+ - export MAAS_PXE_INTERFACE_ADDRESS={{ os_env('MAAS_PXE_INTERFACE_ADDRESS', '172.16.49.66') }}
+ - export MAAS_DHCP_POOL_NETMASK={{ os_env('MAAS_DHCP_POOL_NETMASK', '255.255.255.192') }}
+ - export MAAS_DHCP_POOL_NETMASK_PREFIX={{ os_env('MAAS_DHCP_POOL_NETMASK_PREFIX', '26') }}
+ - export MAAS_DHCP_POOL_START={{ os_env('MAAS_DHCP_POOL_START', '172.16.49.77') }}
+ - export MAAS_DHCP_POOL_END={{ os_env('MAAS_DHCP_POOL_END', '172.16.49.119') }}
+ - ifconfig $MAAS_PXE_INTERFACE_NAME $MAAS_PXE_INTERFACE_ADDRESS/$MAAS_DHCP_POOL_NETMASK_PREFIX
+
+ write_files:
+ - path: /etc/network/interfaces
+ content: |
+ auto ens3
+ iface ens3 inet dhcp
+
+ - path: /root/.ssh/config
+ owner: root:root
+ permissions: '0600'
+ content: |
+ Host *
+ ServerAliveInterval 300
+ ServerAliveCountMax 10
+ StrictHostKeyChecking no
+ UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/cookied-bm-contrail-maas/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-bm-contrail-maas/underlay--user-data1604.yaml
new file mode 100644
index 0000000..3fbb777
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-contrail-maas/underlay--user-data1604.yaml
@@ -0,0 +1,50 @@
+| # All the data below will be stored as a string object
+ #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ ssh_pwauth: True
+ users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ {% for key in config.underlay.ssh_keys %}
+ - ssh-rsa {{ key['public'] }}
+ {% endfor %}
+
+ disable_root: false
+ chpasswd:
+ list: |
+ root:r00tme
+ expire: False
+
+ bootcmd:
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+ output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+ runcmd:
+ - export TERM=linux
+ - export LANG=C
+ # Configure dhclient
+ - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+ - sudo resolvconf -u
+
+ # Prepare network connection
+ - sudo ifup ens3
+ #- sudo route add default gw {gateway} {interface_name}
+
+ # Create swap
+ - fallocate -l 4G /swapfile
+ - chmod 600 /swapfile
+ - mkswap /swapfile
+ - swapon /swapfile
+ - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+
+ write_files:
+ - path: /etc/network/interfaces
+ content: |
+ auto ens3
+ iface ens3 inet dhcp
+
diff --git a/tcp_tests/templates/cookied-bm-contrail-maas/underlay.yaml b/tcp_tests/templates/cookied-bm-contrail-maas/underlay.yaml
new file mode 100644
index 0000000..cdab801
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-contrail-maas/underlay.yaml
@@ -0,0 +1,132 @@
+# Set the repository suite, one of the: 'nightly', 'testing', 'stable', or any other required
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
+
+{% import 'cookied-bm-contrail-maas/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
+{% import 'cookied-bm-contrail-maas/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
+{% import 'cookied-bm-contrail-maas/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
+
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-bm-contrail-maas') %}
+{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
+{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
+
+{% set MAAS_DHCP_POOL_START = os_env('MAAS_DHCP_POOL_START', '172.16.49.77') %}
+{% set MAAS_DHCP_POOL_END = os_env('MAAS_DHCP_POOL_END', '172.16.49.119') %}
+
+{% set ETH1_IP_ADDRESS_CFG01 = os_env('ETH1_IP_ADDRESS_CFG01', '172.16.49.66') %}
+
+---
+aliases:
+ - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
+ - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
+ - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
+ - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
+
+template:
+ devops_settings:
+ env_name: {{ os_env('ENV_NAME', 'cookied-bm-contrail-maas_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+
+ address_pools:
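+    # Note: ip_reserved/ip_ranges values are offsets relative to the pool
+    # network: '+N' counts from the network address, '-N' back from the end.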
+ admin-pool01:
+ net: {{ os_env('MAAS_ADMIN_ADDRESS_POOL01', '10.50.0.0/16:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ default_{{ HOSTNAME_CFG01 }}: +2
+ ip_ranges:
+ dhcp: [+90, -10]
+
+ provisioning-pool01:
+ net: {{ os_env('ADMIN_ADDRESS_POOL01', '172.16.49.64/26:26') }}
+ params:
+ ip_reserved:
+ gateway: +61
+ l2_network_device: +61
+ default_{{ HOSTNAME_CFG01 }}: +2
+ virtual_{{ HOSTNAME_CFG01 }}: +2
+ ip_ranges:
+ dhcp: [+12, +55]
+
+ groups:
+ - name: default
+ driver:
+ name: devops.driver.libvirt
+ params:
+ connection_string: !os_env CONNECTION_STRING, qemu:///system
+ storage_pool_name: !os_env STORAGE_POOL_NAME, default
+ stp: False
+ hpet: False
+ enable_acpi: true
+ use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
+ use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
+
+ network_pools:
+ admin: admin-pool01
+ provisioning: provisioning-pool01
+
+ l2_network_devices:
+ # Ironic management interface
+ admin:
+ address_pool: admin-pool01
+ dhcp: true
+ forward:
+ mode: nat
+
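+      # The provisioning network is bridged to the host interface given in
+      # MAAS_PXE_IFACE, so MAAS on cfg01 can serve DHCP/PXE to the bare-metal
+      # nodes; libvirt DHCP is intentionally disabled here.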
+ provisioning:
+ address_pool: provisioning-pool01
+ dhcp: false
+ forward:
+ mode: !os_env MAAS_PXE_IFACE_MODE, bridge
+ parent_iface:
+ phys_dev: !os_env MAAS_PXE_IFACE
+
+ group_volumes:
+ - name: cloudimage1604 # This name is used for 'backing_store' option for node volumes.
+ source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+ format: qcow2
+ - name: cfg01_day01_image # Pre-configured day01 image
+ source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
+ format: qcow2
+ - name: mcp_ubuntu_1604_image # Pre-configured image for control plane
+ source_image: !os_env MCP_IMAGE_PATH1604
+ format: qcow2
+
+ nodes:
+ - name: {{ HOSTNAME_CFG01 }}
+ role: salt_master
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 4
+ memory: !os_env SLAVE_NODE_MEMORY, 16384
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 180
+ backing_store: cfg01_day01_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_cfg01
+ interfaces:
+ - label: ens3
+ l2_network_device: admin
+ interface_model: *interface_model
+ mac_address: !os_env ETH1_MAC_ADDRESS_CFG01
+ - label: ens4
+ l2_network_device: provisioning
+ interface_model: *interface_model
+
+ network_config:
+ ens3:
+ networks:
+ - admin
+ ens4:
+ networks:
+ - provisioning
diff --git a/tcp_tests/tests/system/test_opencontrail.py b/tcp_tests/tests/system/test_opencontrail.py
index 39d3d59..91d7789 100644
--- a/tcp_tests/tests/system/test_opencontrail.py
+++ b/tcp_tests/tests/system/test_opencontrail.py
@@ -47,9 +47,61 @@
openstack_deployed.download_tempest_report(stored_node='ctl01')
LOG.info("*************** DONE **************")
- # opencontrail.prepare_tests(
- # config.opencontrail.opencontrail_prepare_tests_steps_path)
+ @pytest.mark.fail_snapshot
+ @pytest.mark.with_rally(rally_node="ctl01.")
+ def test_opencontrail_maas(self, config, underlay, salt_actions,
+ openstack_deployed, show_step, sl_deployed):
+        """Runner for the cookied-bm-contrail-maas deployment tests
- # opencontrail.run_tests(
- # tags=config.opencontrail.opencontrail_tags,
- # features=config.opencontrail.opencontrail_features)
+ Scenario:
+ 1. Prepare salt on hosts
+ 2. Setup controller nodes
+ 3. Setup compute nodes
+ 4. Run tempest
+            5. Export results
+ 6. Check docker services
+ 7. Run SL tests
+            8. Download SL tests report
+ """
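+        # Stop ntp, step the clock once with 'ntpd -gq', then restart ntp
+        # so that all nodes are time-synced before the test steps run.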
+ openstack_deployed._salt.local(
+ tgt='*', fun='cmd.run',
+ args='service ntp stop; ntpd -gq; service ntp start')
+
+ if settings.RUN_TEMPEST:
+ show_step(4)
+ openstack_deployed.run_tempest(target='ctl01',
+ pattern=settings.PATTERN)
+ openstack_deployed.download_tempest_report(stored_node='ctl01')
+
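+        # Docker Swarm services expected on the monitoring nodes.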
+ expected_service_list = ['monitoring_server',
+ 'monitoring_remote_agent',
+ 'dashboard_grafana',
+ 'monitoring_alertmanager',
+ 'monitoring_remote_collector',
+ 'monitoring_pushgateway']
+ mon_nodes = sl_deployed.get_monitoring_nodes()
+ LOG.debug('Mon nodes list {0}'.format(mon_nodes))
+
+ prometheus_relay_enabled = salt_actions.get_pillar(
+ tgt=mon_nodes[0],
+ pillar="prometheus:relay:enabled")[0]
+ if not prometheus_relay_enabled:
+ # InfluxDB is used if prometheus relay service is not installed
+ expected_service_list.append('monitoring_remote_storage_adapter')
+ show_step(6)
+ sl_deployed.check_docker_services(mon_nodes, expected_service_list)
+        # Run SL component tests
+ if settings.RUN_SL_TESTS:
+ show_step(7)
+ sl_deployed.run_sl_functional_tests(
+ 'cfg01',
+ '/root/stacklight-pytest/stacklight_tests/',
+ 'tests/prometheus',
+ 'test_alerts.py')
+ show_step(8)
+ # Download report
+ sl_deployed.download_sl_test_report(
+ 'cfg01',
+ '/root/stacklight-pytest/stacklight_tests/report.xml')
+
+ LOG.info("*************** DONE **************")