Remove old cookied-mcp-mitaka-dvr-ceph and cookied-mcp-mitaka-dvr templates
Change-Id: I29bb5f560f78c57de97835fcf1760937e52599e7
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/_context-cookiecutter-mcp-mitaka-dvr-ceph.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/_context-cookiecutter-mcp-mitaka-dvr-ceph.yaml
deleted file mode 100644
index 16b73bd..0000000
--- a/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/_context-cookiecutter-mcp-mitaka-dvr-ceph.yaml
+++ /dev/null
@@ -1,223 +0,0 @@
-default_context:
- bmk_enabled: 'False'
- designate_backend: bind
- ceph_cluster_network: 172.16.10.0/24
- ceph_enabled: 'True'
- ceph_hyper_converged: 'False'
- ceph_mon_node01_address: 172.16.10.66
- ceph_mon_node01_hostname: cmn01
- ceph_mon_node02_address: 172.16.10.67
- ceph_mon_node02_hostname: cmn02
- ceph_mon_node03_address: 172.16.10.68
- ceph_mon_node03_hostname: cmn03
- ceph_osd_backend: bluestore
- ceph_osd_block_db_size: '10'
- ceph_osd_bond_mode: active-backup
- ceph_osd_count: '2'
- ceph_osd_data_disks: /dev/vdb
- ceph_osd_journal_or_block_db_disks: /dev/vdc
- ceph_osd_node_count: '2'
- ceph_osd_journal_size: '10'
- ceph_osd_primary_first_nic: eth1
- ceph_osd_primary_second_nic: eth2
- ceph_osd_rack01_backend_subnet: 172.16.10
- ceph_osd_rack01_hostname: osd
- ceph_osd_rack01_single_subnet: 172.16.10
- ceph_osd_single_address_ranges: 172.16.10.94-172.16.10.95
- ceph_osd_deploy_address_ranges: 172.16.11.94-172.16.11.95
- ceph_osd_backend_address_ranges: 172.16.10.94-172.16.10.95
- ceph_public_network: 172.16.10.0/24
- ceph_rgw_address: 172.16.10.75
- ceph_rgw_hostname: rgw
- ceph_rgw_node01_address: 172.16.10.76
- ceph_rgw_node01_hostname: rgw01
- ceph_rgw_node02_address: 172.16.10.77
- ceph_rgw_node02_hostname: rgw02
- ceph_rgw_node03_address: 172.16.10.78
- ceph_rgw_node03_hostname: rgw03
- ceph_version: luminous
- cicd_enabled: 'False'
- cluster_domain: cookied-mcp-mitaka-dvr-ceph.local
- cluster_name: cookied-mcp-mitaka-dvr-ceph
- compute_bond_mode: active-backup
- compute_primary_first_nic: eth1
- compute_primary_second_nic: eth2
- context_seed: wUqrwKeBTCpRpVrhK1KwZQv4cjM9VhG7L2vQ0iQsTuMrXASklEBDmJEf6bnPEqcK
- control_network_netmask: 255.255.255.0
- control_network_subnet: 172.16.10.0/24
- control_vlan: '10'
- cookiecutter_template_branch: master
- cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/mk/cookiecutter-templates.git
- deploy_network_gateway: 192.168.10.1
- deploy_network_netmask: 255.255.255.0
- deploy_network_subnet: 192.168.10.0/24
- deployment_type: physical
- dns_server01: 8.8.8.8
- dns_server02: 8.8.4.4
- email_address: ddmitriev@mirantis.com
- gateway_primary_first_nic: eth1
- gateway_primary_second_nic: eth2
- gnocchi_aggregation_storage: ceph
- infra_bond_mode: active-backup
- infra_deploy_nic: eth0
- infra_kvm01_control_address: 172.16.10.101
- infra_kvm01_deploy_address: 192.168.10.101
- infra_kvm01_hostname: kvm01
- infra_kvm02_control_address: 172.16.10.102
- infra_kvm02_deploy_address: 192.168.10.102
- infra_kvm02_hostname: kvm02
- infra_kvm03_control_address: 172.16.10.103
- infra_kvm03_deploy_address: 192.168.10.103
- infra_kvm03_hostname: kvm03
- infra_kvm_vip_address: 172.16.10.100
- infra_primary_first_nic: eth1
- infra_primary_second_nic: eth2
- kubernetes_enabled: 'False'
- local_repositories: 'False'
- maas_deploy_address: 192.168.10.90
- maas_hostname: cfg01
- mcp_version: stable
- offline_deployment: 'False'
- opencontrail_enabled: 'False'
- openstack_benchmark_node01_address: 172.16.10.95
- openstack_benchmark_node01_hostname: bmk01
- openstack_cluster_size: compact
- openstack_compute_count: '2'
- openstack_compute_rack01_hostname: cmp
- openstack_compute_rack01_single_subnet: 172.16.10
- openstack_compute_rack01_tenant_subnet: 10.1.0
- openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
- openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
- openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
- openstack_control_address: 172.16.10.100
- openstack_control_hostname: ctl
- openstack_control_node01_address: 172.16.10.101
- openstack_control_node01_hostname: ctl01
- openstack_control_node02_address: 172.16.10.102
- openstack_control_node02_hostname: ctl02
- openstack_control_node03_address: 172.16.10.103
- openstack_control_node03_hostname: ctl03
- openstack_database_address: 172.16.10.100
- openstack_database_hostname: ctl
- openstack_database_node01_address: 172.16.10.101
- openstack_database_node01_hostname: ctl01
- openstack_database_node02_address: 172.16.10.102
- openstack_database_node02_hostname: ctl02
- openstack_database_node03_address: 172.16.10.103
- openstack_database_node03_hostname: ctl03
- openstack_enabled: 'True'
- openstack_gateway_node01_address: 172.16.10.110
- openstack_gateway_node01_hostname: gtw01
- openstack_gateway_node01_tenant_address: 10.1.0.6
- openstack_gateway_node02_address: 172.16.10.111
- openstack_gateway_node02_hostname: gtw02
- openstack_gateway_node02_tenant_address: 10.1.0.7
- openstack_gateway_node03_address: 172.16.10.112
- openstack_gateway_node03_hostname: gtw03
- openstack_gateway_node03_tenant_address: 10.1.0.8
- openstack_message_queue_address: 172.16.10.100
- openstack_message_queue_hostname: ctl
- openstack_message_queue_node01_address: 172.16.10.101
- openstack_message_queue_node01_hostname: ctl01
- openstack_message_queue_node02_address: 172.16.10.102
- openstack_message_queue_node02_hostname: ctl02
- openstack_message_queue_node03_address: 172.16.10.103
- openstack_message_queue_node03_hostname: ctl03
- openstack_network_engine: ovs
- openstack_neutron_qos: 'False'
- openstack_neutron_vlan_aware_vms: 'False'
- openstack_nfv_dpdk_enabled: 'False'
- openstack_nfv_sriov_enabled: 'False'
- openstack_nova_compute_nfv_req_enabled: 'False'
- openstack_ovs_dvr_enabled: 'True'
- openstack_ovs_encapsulation_type: vxlan
- openstack_proxy_address: 172.16.10.80
- openstack_proxy_hostname: prx
- openstack_proxy_node01_address: 172.16.10.121
- openstack_proxy_node01_hostname: prx01
- openstack_proxy_node02_address: 172.16.10.122
- openstack_proxy_node02_hostname: prx02
- openstack_upgrade_node01_address: 172.16.10.19
- openstack_version: mitaka
- oss_enabled: 'False'
- oss_node03_address: ${_param:stacklight_monitor_node03_address}
- oss_notification_app_id: '24'
- oss_notification_sender_password: password
- oss_notification_smtp_port: '587'
- oss_notification_webhook_login_id: '13'
- platform: openstack_enabled
- public_host: ${_param:openstack_proxy_address}
- publication_method: email
- reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
- backup_private_key: |
- -----BEGIN RSA PRIVATE KEY-----
- MIIEowIBAAKCAQEAxL6/rVgCetsETpZaUmXmkj8cZ1WN0eubH1FvMDOi/La9ZJyT
- k0C6AYpJnIyEm93pMj5cLm08qRqMW+2pdOhYjcH69yg5MrX5SkRk8jCmIHIYoIbh
- Qnwbnj3dd3I39ZdfU2FO7u2vlbglVou6ZoQxlJDItuLNtzq6EG+w9eF19e7+OsC6
- 6iUItp618zfw1l3J/8nKvCGe2RYDf7mJW6XwCl/DwryJmwwzvPgYJ3QMuDD8/HFj
- lrJ3xjFTXj4b4Ws1XIoy78fFbtiLr4OwqCYkho03u2E5rOOP1qZxZB63sivHMLMO
- MM5bOAQKbulFNoyALADGYfc7sf0bZ4u9XXDXxQIDAQABAoIBAQCfmc2MJRT97KW1
- yqpCpX9BrAiymuiNHf+cjEcSZxEUyHkjIRFmJt+9WB0W7ba1anM92vCUiPDojSzH
- dig9Oi578JxR20NrK8uqv4jUHzrknynzLveVI3CUEcOSnglfJQijbxDFKfOCFPvV
- FUyE1UATMNBh6+LNfMprgu+exuMWOPnDyUiYQ+WZ0JfuZY8fuaZte4woJJOb9LUu
- 5rsMG/smIzjpgZ0Z9ZVDMurfq565qhpaXRAqKeIuyht8pacTo31iMQdHB78AvY/3
- g0z21Gk8k3z0Kr/YFKr2r4FmXY5m/gAUvZly2ZrVQM5XsbTVCzq/JpI5fssNvSbU
- AKmXzf4RAoGBAOO3d4/cstxERzW6hyOTjZIN1ppR52CsnZTsVPbfd0pCtmzmVZce
- CtHKdcXSbTwZvvkK09QSWAp3MoSpd0gIOiLU8Wx/R/RIZsu9BlhTS3r3EQLnk72d
- H/1TTA+j4T/LIYLSojQ1RxvIrHetAD44j732aTwKAHj/SybEAVqNkOB/AoGBAN0u
- gLcrgqIHGrk4VjWSvlCGymfF40equcx+ud7XhfZDGETUOSahW4dPZ52cjPAkrCBQ
- MMfcDwSVGsOAjd+mNt11BHUKobnhXwFaWWuyqyn9NmWFbjMbICVh7E3Of5aVN38o
- lrmo/7LuKMVG7XRwphCv5NkaJmQG4njDyUQWlaW7AoGADCd8wDb9bPhP/LQqBmIX
- ylXmwHHisaxE9O/wUQT4bwREjGd25gv6c9wkkRx8LBsLsGs9hzI7dMOL9Ly+2x9l
- SvqmsC3S/1zl77X1Ir2/Z57MT6Vgo1xBmtnZU3Rhz2/eKAdqFPNLClaZrgGT475N
- HcyLLWMzR0IJFtabY+Puea0CgYA8Zb5wRkldxWLewSuJZZDinGwY+kieAVjLJq/K
- 0j+ah6fQ48LXcah0wpIgz+cMjHcUO9GWQdk3/x9X03rqX5EL2DBnZYfUIl63F9zj
- M97ZkHOSNWVqPzX//0Vv2butewG0j3jZKfTo/2/SrxOYgEpYtC9huWpSVi7xm0US
- erhSkQKBgFIf9JEsfgE57ANhvITZ3ZI0uZXNxZkXQaVg8jvScDi79IIhy9iPzhKC
- aIIQoDNIlWv1ftCRZ5AlBvVXgvQ/QNrwy48JiQTzWZlb9Ezg8w+olQmSbG6fq7Y+
- 7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
- -----END RSA PRIVATE KEY-----
- backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
- salt_api_password: H0rTPdmktZ8RI7T7y6fjqY0uEbbs7Kwi
- salt_api_password_hash: $6$lfbIFtMZ$.nTbTDMzs1iYv0WqkZHia8H8Fma963Nv3qyyz1x68jQh0YXK9i907B/hvoG4QHMvfolE7V7vQnFClJ1mVA3Yb.
- salt_master_address: 172.16.10.90
- salt_master_hostname: cfg01
- salt_master_management_address: 192.168.10.90
- shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/salt-models/reclass-system.git
- fluentd_enabled: 'True'
- stacklight_enabled: 'True'
- stacklight_log_address: 172.16.10.70
- stacklight_log_hostname: mon
- stacklight_log_node01_address: 172.16.10.107
- stacklight_log_node01_hostname: mon01
- stacklight_log_node02_address: 172.16.10.108
- stacklight_log_node02_hostname: mon02
- stacklight_log_node03_address: 172.16.10.109
- stacklight_log_node03_hostname: mon03
- stacklight_monitor_address: 172.16.10.70
- stacklight_monitor_hostname: mon
- stacklight_monitor_node01_address: 172.16.10.107
- stacklight_monitor_node01_hostname: mon01
- stacklight_monitor_node02_address: 172.16.10.108
- stacklight_monitor_node02_hostname: mon02
- stacklight_monitor_node03_address: 172.16.10.109
- stacklight_monitor_node03_hostname: mon03
- stacklight_notification_address: alerts@localhost
- stacklight_notification_smtp_host: 127.0.0.1
- stacklight_telemetry_address: 172.16.10.70
- stacklight_telemetry_hostname: mon
- stacklight_telemetry_node01_address: 172.16.10.107
- stacklight_telemetry_node01_hostname: mon01
- stacklight_telemetry_node02_address: 172.16.10.108
- stacklight_telemetry_node02_hostname: mon02
- stacklight_telemetry_node03_address: 172.16.10.109
- stacklight_telemetry_node03_hostname: mon03
- stacklight_version: '2'
- static_ips_on_deploy_network_enabled: 'False'
- tenant_network_gateway: 10.1.0.1
- tenant_network_netmask: 255.255.255.0
- tenant_network_subnet: 10.1.0.0/24
- tenant_vlan: '20'
- upstream_proxy_enabled: 'False'
- use_default_network_scheme: 'False'
\ No newline at end of file
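For reference, the *_address_ranges parameters above carry one address per generated node, consumed in order. A hypothetical expansion for the two OSD entries (illustrative only; the node names and layout are assumptions, the addresses come from the context above):

    # osd001 / osd002 are the generated hosts (ceph_osd_count: '2')
    osd001:
      single_address: 172.16.10.94    # from ceph_osd_single_address_ranges
      deploy_address: 172.16.11.94    # from ceph_osd_deploy_address_ranges
      backend_address: 172.16.10.94   # from ceph_osd_backend_address_ranges
    osd002:
      single_address: 172.16.10.95
      deploy_address: 172.16.11.95
      backend_address: 172.16.10.95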
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/_context-environment.yaml
deleted file mode 100644
index 89bf918..0000000
--- a/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/_context-environment.yaml
+++ /dev/null
@@ -1,225 +0,0 @@
-nodes:
- cfg01.mcp11-ovs-dpdk.local:
- reclass_storage_name: infra_config_node01
- roles:
- - infra_config
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl01.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_control_node01
- roles:
- - infra_kvm
- - openstack_control_leader
- - openstack_database_leader
- - openstack_message_queue
- - features_designate_bind9_database
- - features_designate_bind9_dns
- - features_designate_bind9
- - features_designate_bind9_keystone
- - features_lvm_backend_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl02.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_control_node02
- roles:
- - infra_kvm
- - openstack_control
- - openstack_database
- - openstack_message_queue
- - features_designate_bind9_database
- - features_designate_bind9_dns
- - features_designate_bind9
- - features_lvm_backend_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl03.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_control_node03
- roles:
- - infra_kvm
- - openstack_control
- - openstack_database
- - openstack_message_queue
- - features_designate_bind9_database
- - features_designate_bind9
- - features_lvm_backend_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- prx01.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_proxy_node01
- roles:
- - openstack_proxy
- - features_designate_bind9_proxy
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mon01.mcp11-ovs-dpdk.local:
- reclass_storage_name: stacklight_server_node01
- roles:
- - stacklightv2_server_leader
- - stacklight_telemetry_leader
- - stacklight_log_leader_v2
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mon02.mcp11-ovs-dpdk.local:
- reclass_storage_name: stacklight_server_node02
- roles:
- - stacklightv2_server
- - stacklight_telemetry
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mon03.mcp11-ovs-dpdk.local:
- reclass_storage_name: stacklight_server_node03
- roles:
- - stacklightv2_server
- - stacklight_telemetry
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- # Generator-based computes. For compatibility only
- cmp<<count>>.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_compute_rack01
- roles:
- - openstack_compute
- - features_lvm_backend_volume_vdb
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- ens5:
- role: bond0_ab_ovs_vxlan_mesh
- ens6:
- role: bond1_ab_ovs_floating
-
- gtw01.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_gateway_node01
- roles:
- - openstack_gateway
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- ens5:
- role: bond0_ab_ovs_vxlan_mesh
- ens6:
- role: bond1_ab_ovs_floating
-
- osd<<count>>.mcp11-ovs-dpdk.local:
- reclass_storage_name: ceph_osd_rack01
- roles:
- - ceph_osd
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- cmn01.mcp11-ovs-dpdk.local:
- reclass_storage_name: ceph_mon_node01
- roles:
- - ceph_mon
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- cmn02.mcp11-ovs-dpdk.local:
- reclass_storage_name: ceph_mon_node02
- roles:
- - ceph_mon
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- cmn03.mcp11-ovs-dpdk.local:
- reclass_storage_name: ceph_mon_node03
- roles:
- - ceph_mon
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- rgw01.mcp11-ovs-dpdk.local:
- reclass_storage_name: ceph_rgw_node01
- roles:
- - ceph_rgw
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- rgw02.mcp11-ovs-dpdk.local:
- reclass_storage_name: ceph_rgw_node02
- roles:
- - ceph_rgw
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- rgw03.mcp11-ovs-dpdk.local:
- reclass_storage_name: ceph_rgw_node03
- roles:
- - ceph_rgw
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
\ No newline at end of file
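The cmp<<count>> and osd<<count>> entries above are generator placeholders: the model builder stamps out one concrete node per unit of openstack_compute_count / ceph_osd_count from the cookiecutter context. A hypothetical rendering for the compute rack (the hostnames match HOSTNAME_CMP01/CMP02 in underlay.yaml; the exact rendered keys are an assumption):

    cmp001.mcp11-ovs-dpdk.local:
      roles:
      - openstack_compute
      ...
    cmp002.mcp11-ovs-dpdk.local:
      roles:
      - openstack_compute
      ...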
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/core.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/core.yaml
deleted file mode 100644
index 546cc34..0000000
--- a/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/core.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
-{% from 'cookied-mcp-mitaka-dvr-ceph/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-{% import 'shared-core.yaml' as SHARED_CORE with context %}
-
-{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
-{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
-{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
-{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
-{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
-{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
-{{ SHARED_CORE.MACRO_CHECK_VIP() }}
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/openstack.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/openstack.yaml
deleted file mode 100644
index 318a992..0000000
--- a/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/openstack.yaml
+++ /dev/null
@@ -1,174 +0,0 @@
-{% from 'cookied-mcp-mitaka-dvr-ceph/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-mcp-mitaka-dvr-ceph/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'cookied-mcp-mitaka-dvr-ceph/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'cookied-mcp-mitaka-dvr-ceph/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'cookied-mcp-mitaka-dvr-ceph/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-
-{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{% import 'shared-ceph.yaml' as SHARED_CEPH with context %}
-
-# Deploy nginx before openstack services (PROD-22740)
-- description: Deploy nginx proxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-{{ SHARED_CEPH.MACRO_INSTALL_CEPH_MONS() }}
-
-{{ SHARED_CEPH.MACRO_INSTALL_CEPH_MGR() }}
-
-{{ SHARED_CEPH.MACRO_INSTALL_CEPH_OSD_AND_RADOSGW() }}
-
-{{ SHARED_CEPH.CONNECT_CEPH_TO_SERVICES() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
-
-# install designate backend
-- description: Install bind
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@bind:server' state.sls bind
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_DESIGNATE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HORIZON() }}
-
-# Install compute node
-
-- description: Apply formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Re-apply (as in doc) formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check IP on computes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
- 'ip a'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 10, delay: 30}
- skip_fail: false
-
-
- # Upload cirros image
-
-- description: Upload cirros image on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- 'wget http://images.mirantis.com.s3.amazonaws.com/cirros-x64-20170828.qcow2'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Register image in glance
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; glance --timeout 120 image-create --name cirros --visibility public --disk-format qcow2 --container-format bare --progress < /root/cirros-x64-20170828.qcow2'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create net04_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create router
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-create net04_router01'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set gateway
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Add interface
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Allow all tcp
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; nova secgroup-add-rule default tcp 1 65535 0.0.0.0/0'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Allow all icmp
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: sync time
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
- 'service ntp stop; ntpd -gq; service ntp start'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-{{ SHARED.INSTALL_DOCKER_ON_GTW() }}
-
-- description: create rc file on cfg
- cmd: scp ctl01:/root/keystonercv3 /root
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Copy rc file
- cmd: scp /root/keystonercv3 gtw01:/root
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
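Collapsed into plain shell, the net04 bootstrap above amounts to the following commands on ctl01 (a sketch; 10.90.0 and 10.80.0 are stand-in values for IPV4_NET_EXTERNAL_PREFIX and IPV4_NET_TENANT_PREFIX, which are actually resolved in shared-salt.yaml):

    . /root/keystonercv3
    neutron net-create net04_ext --router:external True \
      --provider:physical_network physnet1 --provider:network_type flat
    neutron subnet-create net04_ext 10.90.0.0/24 --name net04_ext__subnet \
      --disable-dhcp --allocation-pool start=10.90.0.150,end=10.90.0.180 \
      --gateway 10.90.0.1
    neutron net-create net04
    neutron subnet-create net04 10.80.0.0/24 --name net04__subnet \
      --allocation-pool start=10.80.0.120,end=10.80.0.240
    neutron router-create net04_router01
    neutron router-gateway-set net04_router01 net04_ext
    neutron router-interface-add net04_router01 net04__subnet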
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/salt.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/salt.yaml
deleted file mode 100644
index ef50b6d..0000000
--- a/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/salt.yaml
+++ /dev/null
@@ -1,51 +0,0 @@
-{% from 'cookied-mcp-mitaka-dvr-ceph/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-mcp-mitaka-dvr-ceph/underlay.yaml' import HOSTNAME_CMP01 with context %}
-{% from 'cookied-mcp-mitaka-dvr-ceph/underlay.yaml' import HOSTNAME_CMP02 with context %}
-{% from 'cookied-mcp-mitaka-dvr-ceph/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'cookied-mcp-mitaka-dvr-ceph/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'cookied-mcp-mitaka-dvr-ceph/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# See shared-salt.yaml for the other salt model repository parameters
-{% set OVERRIDES = os_env('OVERRIDES', 'override_example: true') %}
-{% set OVERRIDES_FILENAME = os_env('OVERRIDES_FILENAME', '/srv/salt/reclass/classes/environment/cookied-mcp-mitaka-dvr-ceph/overrides.yml') %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-{%- if OVERRIDES != '' %}
-{%- for param in OVERRIDES.splitlines() %}
-{%- set key, value = param.replace(' ','').split(':', 1) %}
-- description: Override cluster parameters
- cmd: |
- salt-call reclass.cluster_meta_set name='{{ key }}' value='{{ value }}' file_name='{{OVERRIDES_FILENAME}}'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 1}
- skip_fail: false
-{%- endfor %}
-
-- description: Refresh pillar
- cmd: salt '*' saltutil.refresh_pillar
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 1}
- skip_fail: false
-{%- endif %}
-
-{{ SHARED.ADJUST_SL_OPTS(OVERRIDES_FILENAME='/srv/salt/reclass/classes/cluster/' + SHARED.CLUSTER_NAME + '/stacklight/server.yml') }}
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
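With the default OVERRIDES value above ('override_example: true'), the Jinja loop renders exactly one override step; the replace(' ','') / split(':', 1) pair strips spaces and splits on the first colon, so the commands generated for cfg01 come out as:

    salt-call reclass.cluster_meta_set name='override_example' value='true' \
      file_name='/srv/salt/reclass/classes/environment/cookied-mcp-mitaka-dvr-ceph/overrides.yml'
    salt '*' saltutil.refresh_pillar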
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/sl.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/sl.yaml
deleted file mode 100644
index cb93ac9..0000000
--- a/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/sl.yaml
+++ /dev/null
@@ -1,184 +0,0 @@
-{% from 'cookied-mcp-mitaka-dvr-ceph/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
-# Install docker swarm
-- description: Configure docker service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install docker swarm on master node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Send grains to the swarm slave nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' mine.update
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Refresh modules
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Rerun swarm on slaves for proper token population
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure slave nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: List registered Docker swarm nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install keepalived on mon nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'mon*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check the VIP on mon nodes
- cmd: |
- SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
- echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-# Install slv2 infra
-# Install MongoDB for alerta
-- description: Install MongoDB
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Install telegraf
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Configure Prometheus exporters, if pillar 'prometheus:exporters' exists on any server
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure collector
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:log_collector' state.sls heka.log_collector
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install elasticsearch server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install kibana server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install elasticsearch client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Install kibana client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:client' state.sls kibana.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check influxdb
- cmd: |
- INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
- if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-# Collect grains needed to configure the services
-
-- description: Get grains
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Sync modules
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 15}
- skip_fail: false
-
-# Configure the services running in Docker Swarm
-- description: Install prometheus alertmanager
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: run docker state
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: docker ps
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure Grafana dashboards and datasources
- cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Run salt minion to create cert files
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-{{ SHARED_SL_TESTS.MACRO_CLONE_SL_TESTS() }}
-{{ SHARED_SL_TESTS.MACRO_CONFIGURE_TESTS() }}
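The targeting in this file relies on Salt compound matchers: -C 'I@docker:swarm:role:master' selects minions whose pillar contains that key path, so the steps follow pillar data rather than hostnames. An illustrative equivalence for this lab, where the swarm runs on the mon nodes (the hostname mapping is an assumption about this topology):

    # pillar-based target used by the steps above
    salt -C 'I@docker:swarm:role:master' test.ping
    # roughly the same node by hostname glob in this lab
    salt 'mon01*' test.ping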
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/underlay--meta-data.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
- instance-id: iid-local1
- hostname: {hostname}
- local-hostname: {hostname}
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/underlay--user-data-cfg01.yaml
deleted file mode 100644
index d75dab1..0000000
--- a/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,77 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifdown ens3
- - sudo ip r d default || true # remove existing default route to get it from dhcp
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
-
- # Enable grub menu using updated config below
- - update-grub
-
- - mkdir -p /srv/salt/reclass/nodes
- - systemctl enable salt-master
- - systemctl enable salt-minion
- - systemctl start salt-master
- - systemctl start salt-minion
- - salt-call -l info --timeout=120 test.ping
-
- write_files:
- - path: /etc/default/grub.d/97-enable-grub-menu.cfg
- content: |
- GRUB_RECORDFAIL_TIMEOUT=30
- GRUB_TIMEOUT=3
- GRUB_TIMEOUT_STYLE=menu
-
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/underlay--user-data1604.yaml
deleted file mode 100644
index 3fbb777..0000000
--- a/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,50 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/underlay.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/underlay.yaml
deleted file mode 100644
index 248d63e..0000000
--- a/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/underlay.yaml
+++ /dev/null
@@ -1,771 +0,0 @@
-# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
-{% import 'cookied-mcp-mitaka-dvr-ceph/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'cookied-mcp-mitaka-dvr-ceph/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'cookied-mcp-mitaka-dvr-ceph/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
-
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-mcp-mitaka-dvr-ceph') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.') %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.') %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.') %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.') %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp001.') %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp002.') %}
-{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.') %}
-{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.') %}
-{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.') %}
-{% set HOSTNAME_CMN01 = os_env('HOSTNAME_CMN01', 'cmn01.') %}
-{% set HOSTNAME_CMN02 = os_env('HOSTNAME_CMN02', 'cmn02.') %}
-{% set HOSTNAME_CMN03 = os_env('HOSTNAME_CMN03', 'cmn03.') %}
-{% set HOSTNAME_RGW01 = os_env('HOSTNAME_RGW01', 'rgw01.') %}
-{% set HOSTNAME_RGW02 = os_env('HOSTNAME_RGW02', 'rgw02.') %}
-{% set HOSTNAME_RGW03 = os_env('HOSTNAME_RGW03', 'rgw03.') %}
-{% set HOSTNAME_OSD01 = os_env('HOSTNAME_OSD01', 'osd001.') %}
-{% set HOSTNAME_OSD02 = os_env('HOSTNAME_OSD02', 'osd002.') %}
-{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.') %}
-{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.') %}
-
-template:
- devops_settings:
- env_name: {{ os_env('ENV_NAME', 'cookied-mcp-mitaka-dvr-ceph_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
- address_pools:
- private-pool01:
- net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
- default_{{ HOSTNAME_OSD01 }}: +94
- default_{{ HOSTNAME_OSD02 }}: +95
- default_{{ HOSTNAME_CMN01 }}: +96
- default_{{ HOSTNAME_CMN02 }}: +97
- default_{{ HOSTNAME_CMN03 }}: +98
- default_{{ HOSTNAME_RGW01 }}: +76
- default_{{ HOSTNAME_RGW02 }}: +77
- default_{{ HOSTNAME_RGW03 }}: +78
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+90, -10]
-
- admin-pool01:
- net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +90
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
- default_{{ HOSTNAME_OSD01 }}: +94
- default_{{ HOSTNAME_OSD02 }}: +95
- default_{{ HOSTNAME_CMN01 }}: +96
- default_{{ HOSTNAME_CMN02 }}: +97
- default_{{ HOSTNAME_CMN03 }}: +98
- default_{{ HOSTNAME_RGW01 }}: +76
- default_{{ HOSTNAME_RGW02 }}: +77
- default_{{ HOSTNAME_RGW03 }}: +78
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+90, -10]
-
- tenant-pool01:
- net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
- default_{{ HOSTNAME_OSD01 }}: +94
- default_{{ HOSTNAME_OSD02 }}: +95
- default_{{ HOSTNAME_CMN01 }}: +96
- default_{{ HOSTNAME_CMN02 }}: +97
- default_{{ HOSTNAME_CMN03 }}: +98
- default_{{ HOSTNAME_RGW01 }}: +76
- default_{{ HOSTNAME_RGW02 }}: +77
- default_{{ HOSTNAME_RGW03 }}: +78
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+10, -10]
-
- external-pool01:
- net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
- default_{{ HOSTNAME_OSD01 }}: +94
- default_{{ HOSTNAME_OSD02 }}: +95
- default_{{ HOSTNAME_CMN01 }}: +96
- default_{{ HOSTNAME_CMN02 }}: +97
- default_{{ HOSTNAME_CMN03 }}: +98
- default_{{ HOSTNAME_RGW01 }}: +76
- default_{{ HOSTNAME_RGW02 }}: +77
- default_{{ HOSTNAME_RGW03 }}: +78
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+10, -10]
-
-
- groups:
- - name: default
- driver:
- name: devops.driver.libvirt
- params:
- connection_string: !os_env CONNECTION_STRING, qemu:///system
- storage_pool_name: !os_env STORAGE_POOL_NAME, default
- stp: False
- hpet: False
- enable_acpi: true
- use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
- use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
-
- network_pools:
- admin: admin-pool01
- private: private-pool01
- tenant: tenant-pool01
- external: external-pool01
-
- l2_network_devices:
- private:
- address_pool: private-pool01
- dhcp: false
- forward:
- mode: route
-
- admin:
- address_pool: admin-pool01
- dhcp: true
- forward:
- mode: nat
-
- tenant:
- address_pool: tenant-pool01
- dhcp: false
-
- external:
- address_pool: external-pool01
- dhcp: true
- forward:
- mode: route
-
-
- group_volumes:
- - name: cloudimage1604 # This name is used for 'backing_store' option for node volumes.
- source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- format: qcow2
- - name: cfg01_day01_image # Pre-configured day01 image
- source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
- format: qcow2
- - name: mcp_ubuntu_1604_image # Pre-configured image for VCP nodes initially based on kvm nodes.
- # http://images.mirantis.com/ubuntu-16-04-x64-latest.qcow2 (preferred)
- # or
- # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- source_image: {{ os_env('MCP_IMAGE_PATH1604', os_env('IMAGE_PATH1604')) }}
- format: qcow2
-
- nodes:
- - name: {{ HOSTNAME_CFG01 }}
- role: salt_master
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cfg01_day01_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_cfg01
-
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config:
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: &interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config: &network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CTL03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 6144
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 6144
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 6144
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_PRX01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
-
- - name: {{ HOSTNAME_CMP01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
-
- interfaces: &all_interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- - label: ens5
- l2_network_device: tenant
- interface_model: *interface_model
- - label: ens6
- l2_network_device: external
- interface_model: *interface_model
- network_config: &all_network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
- ens5:
- networks:
- - tenant
- ens6:
- networks:
- - external
-
- - name: {{ HOSTNAME_CMP02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_GTW01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_CMN01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CMN02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CMN03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_OSD01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: ceph_osd
- capacity: 50
- format: qcow2
- - name: ceph_journal
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_OSD02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: ceph_osd
- capacity: 50
- format: qcow2
- - name: ceph_journal
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_RGW01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_RGW02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_RGW03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
\ No newline at end of file
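A note on the '!os_env' tags used throughout these underlay templates: the tag reads the named environment variable and falls back to the literal after the comma. A minimal sketch of the behaviour (the exported value is an example, not taken from any job):

    # vcpu resolves to $SLAVE_NODE_CPU when that variable is exported,
    # otherwise to the default after the comma:
    vcpu: !os_env SLAVE_NODE_CPU, 2
    # e.g. running 'export SLAVE_NODE_CPU=4' before deployment gives 4 vCPUs;
    # with the variable unset, the node gets 2.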
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-dvr/_context-cookiecutter-mcp-mitaka-dvr.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr/_context-cookiecutter-mcp-mitaka-dvr.yaml
deleted file mode 100644
index 58281a4..0000000
--- a/tcp_tests/templates/cookied-mcp-mitaka-dvr/_context-cookiecutter-mcp-mitaka-dvr.yaml
+++ /dev/null
@@ -1,187 +0,0 @@
-default_context:
- bmk_enabled: 'False'
- ceph_enabled: 'False'
- cicd_enabled: 'False'
- cluster_domain: cookied-mcp-mitaka-dvr.local
- cluster_name: cookied-mcp-mitaka-dvr
- compute_bond_mode: active-backup
- compute_primary_first_nic: eth1
- compute_primary_second_nic: eth2
- context_seed: wUqrwKeBTCpRpVrhK1KwZQv4cjM9VhG7L2vQ0iQsTuMrXASklEBDmJEf6bnPEqcK
- control_network_netmask: 255.255.255.0
- control_network_subnet: 172.16.10.0/24
- control_vlan: '10'
- cookiecutter_template_branch: master
- cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/mk/cookiecutter-templates.git
- deploy_network_gateway: 192.168.10.1
- deploy_network_netmask: 255.255.255.0
- deploy_network_subnet: 192.168.10.0/24
- deployment_type: physical
- dns_server01: 8.8.8.8
- dns_server02: 8.8.4.4
- email_address: ddmitriev@mirantis.com
- gateway_primary_first_nic: eth1
- gateway_primary_second_nic: eth2
- infra_bond_mode: active-backup
- infra_deploy_nic: eth0
- infra_kvm01_control_address: 172.16.10.101
- infra_kvm01_deploy_address: 192.168.10.101
- infra_kvm01_hostname: kvm01
- infra_kvm02_control_address: 172.16.10.102
- infra_kvm02_deploy_address: 192.168.10.102
- infra_kvm02_hostname: kvm02
- infra_kvm03_control_address: 172.16.10.103
- infra_kvm03_deploy_address: 192.168.10.103
- infra_kvm03_hostname: kvm03
- infra_kvm_vip_address: 172.16.10.100
- infra_primary_first_nic: eth1
- infra_primary_second_nic: eth2
- kubernetes_enabled: 'False'
- local_repositories: 'False'
- maas_deploy_address: 192.168.10.90
- maas_hostname: cfg01
- mcp_version: stable
- offline_deployment: 'False'
- opencontrail_enabled: 'False'
- openstack_benchmark_node01_address: 172.16.10.95
- openstack_benchmark_node01_hostname: bmk01
- openstack_cluster_size: compact
- openstack_compute_count: '2'
- openstack_compute_rack01_hostname: cmp
- openstack_compute_rack01_single_subnet: 172.16.10
- openstack_compute_rack01_tenant_subnet: 10.1.0
- openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
- openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
- openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
- openstack_control_address: 172.16.10.100
- openstack_control_hostname: ctl
- openstack_control_node01_address: 172.16.10.101
- openstack_control_node01_hostname: ctl01
- openstack_control_node02_address: 172.16.10.102
- openstack_control_node02_hostname: ctl02
- openstack_control_node03_address: 172.16.10.103
- openstack_control_node03_hostname: ctl03
- openstack_database_address: 172.16.10.100
- openstack_database_hostname: ctl
- openstack_database_node01_address: 172.16.10.101
- openstack_database_node01_hostname: ctl01
- openstack_database_node02_address: 172.16.10.102
- openstack_database_node02_hostname: ctl02
- openstack_database_node03_address: 172.16.10.103
- openstack_database_node03_hostname: ctl03
- openstack_enabled: 'True'
- openstack_gateway_node01_address: 172.16.10.110
- openstack_gateway_node01_hostname: gtw01
- openstack_gateway_node01_tenant_address: 10.1.0.6
- openstack_gateway_node02_address: 172.16.10.111
- openstack_gateway_node02_hostname: gtw02
- openstack_gateway_node02_tenant_address: 10.1.0.7
- openstack_gateway_node03_address: 172.16.10.112
- openstack_gateway_node03_hostname: gtw03
- openstack_gateway_node03_tenant_address: 10.1.0.8
- openstack_message_queue_address: 172.16.10.100
- openstack_message_queue_hostname: ctl
- openstack_message_queue_node01_address: 172.16.10.101
- openstack_message_queue_node01_hostname: ctl01
- openstack_message_queue_node02_address: 172.16.10.102
- openstack_message_queue_node02_hostname: ctl02
- openstack_message_queue_node03_address: 172.16.10.103
- openstack_message_queue_node03_hostname: ctl03
- openstack_network_engine: ovs
- openstack_neutron_qos: 'False'
- openstack_neutron_vlan_aware_vms: 'False'
- openstack_nfv_dpdk_enabled: 'False'
- openstack_nfv_sriov_enabled: 'False'
- openstack_nova_compute_nfv_req_enabled: 'False'
- openstack_ovs_dvr_enabled: 'True'
- openstack_ovs_encapsulation_type: vxlan
- openstack_proxy_address: 172.16.10.80
- openstack_proxy_hostname: prx
- openstack_proxy_node01_address: 172.16.10.121
- openstack_proxy_node01_hostname: prx01
- openstack_proxy_node02_address: 172.16.10.122
- openstack_proxy_node02_hostname: prx02
- openstack_upgrade_node01_address: 172.16.10.19
- openstack_version: mitaka
- oss_enabled: 'False'
- oss_node03_address: ${_param:stacklight_monitor_node03_address}
- oss_notification_app_id: '24'
- oss_notification_sender_password: password
- oss_notification_smtp_port: '587'
- oss_notification_webhook_login_id: '13'
- platform: openstack_enabled
- public_host: ${_param:openstack_proxy_address}
- publication_method: email
- reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
- backup_private_key: |
- -----BEGIN RSA PRIVATE KEY-----
- MIIEowIBAAKCAQEAxL6/rVgCetsETpZaUmXmkj8cZ1WN0eubH1FvMDOi/La9ZJyT
- k0C6AYpJnIyEm93pMj5cLm08qRqMW+2pdOhYjcH69yg5MrX5SkRk8jCmIHIYoIbh
- Qnwbnj3dd3I39ZdfU2FO7u2vlbglVou6ZoQxlJDItuLNtzq6EG+w9eF19e7+OsC6
- 6iUItp618zfw1l3J/8nKvCGe2RYDf7mJW6XwCl/DwryJmwwzvPgYJ3QMuDD8/HFj
- lrJ3xjFTXj4b4Ws1XIoy78fFbtiLr4OwqCYkho03u2E5rOOP1qZxZB63sivHMLMO
- MM5bOAQKbulFNoyALADGYfc7sf0bZ4u9XXDXxQIDAQABAoIBAQCfmc2MJRT97KW1
- yqpCpX9BrAiymuiNHf+cjEcSZxEUyHkjIRFmJt+9WB0W7ba1anM92vCUiPDojSzH
- dig9Oi578JxR20NrK8uqv4jUHzrknynzLveVI3CUEcOSnglfJQijbxDFKfOCFPvV
- FUyE1UATMNBh6+LNfMprgu+exuMWOPnDyUiYQ+WZ0JfuZY8fuaZte4woJJOb9LUu
- 5rsMG/smIzjpgZ0Z9ZVDMurfq565qhpaXRAqKeIuyht8pacTo31iMQdHB78AvY/3
- g0z21Gk8k3z0Kr/YFKr2r4FmXY5m/gAUvZly2ZrVQM5XsbTVCzq/JpI5fssNvSbU
- AKmXzf4RAoGBAOO3d4/cstxERzW6hyOTjZIN1ppR52CsnZTsVPbfd0pCtmzmVZce
- CtHKdcXSbTwZvvkK09QSWAp3MoSpd0gIOiLU8Wx/R/RIZsu9BlhTS3r3EQLnk72d
- H/1TTA+j4T/LIYLSojQ1RxvIrHetAD44j732aTwKAHj/SybEAVqNkOB/AoGBAN0u
- gLcrgqIHGrk4VjWSvlCGymfF40equcx+ud7XhfZDGETUOSahW4dPZ52cjPAkrCBQ
- MMfcDwSVGsOAjd+mNt11BHUKobnhXwFaWWuyqyn9NmWFbjMbICVh7E3Of5aVN38o
- lrmo/7LuKMVG7XRwphCv5NkaJmQG4njDyUQWlaW7AoGADCd8wDb9bPhP/LQqBmIX
- ylXmwHHisaxE9O/wUQT4bwREjGd25gv6c9wkkRx8LBsLsGs9hzI7dMOL9Ly+2x9l
- SvqmsC3S/1zl77X1Ir2/Z57MT6Vgo1xBmtnZU3Rhz2/eKAdqFPNLClaZrgGT475N
- HcyLLWMzR0IJFtabY+Puea0CgYA8Zb5wRkldxWLewSuJZZDinGwY+kieAVjLJq/K
- 0j+ah6fQ48LXcah0wpIgz+cMjHcUO9GWQdk3/x9X03rqX5EL2DBnZYfUIl63F9zj
- M97ZkHOSNWVqPzX//0Vv2butewG0j3jZKfTo/2/SrxOYgEpYtC9huWpSVi7xm0US
- erhSkQKBgFIf9JEsfgE57ANhvITZ3ZI0uZXNxZkXQaVg8jvScDi79IIhy9iPzhKC
- aIIQoDNIlWv1ftCRZ5AlBvVXgvQ/QNrwy48JiQTzWZlb9Ezg8w+olQmSbG6fq7Y+
- 7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
- -----END RSA PRIVATE KEY-----
- backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
- salt_api_password: H0rTPdmktZ8RI7T7y6fjqY0uEbbs7Kwi
- salt_api_password_hash: $6$lfbIFtMZ$.nTbTDMzs1iYv0WqkZHia8H8Fma963Nv3qyyz1x68jQh0YXK9i907B/hvoG4QHMvfolE7V7vQnFClJ1mVA3Yb.
- salt_master_address: 172.16.10.90
- salt_master_hostname: cfg01
- salt_master_management_address: 192.168.10.90
- shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/salt-models/reclass-system.git
- fluentd_enabled: 'True'
- stacklight_enabled: 'True'
- stacklight_log_address: 172.16.10.70
- stacklight_log_hostname: mon
- stacklight_log_node01_address: 172.16.10.107
- stacklight_log_node01_hostname: mon01
- stacklight_log_node02_address: 172.16.10.108
- stacklight_log_node02_hostname: mon02
- stacklight_log_node03_address: 172.16.10.109
- stacklight_log_node03_hostname: mon03
- stacklight_monitor_address: 172.16.10.70
- stacklight_monitor_hostname: mon
- stacklight_monitor_node01_address: 172.16.10.107
- stacklight_monitor_node01_hostname: mon01
- stacklight_monitor_node02_address: 172.16.10.108
- stacklight_monitor_node02_hostname: mon02
- stacklight_monitor_node03_address: 172.16.10.109
- stacklight_monitor_node03_hostname: mon03
- stacklight_notification_address: alerts@localhost
- stacklight_notification_smtp_host: 127.0.0.1
- stacklight_telemetry_address: 172.16.10.70
- stacklight_telemetry_hostname: mon
- stacklight_telemetry_node01_address: 172.16.10.107
- stacklight_telemetry_node01_hostname: mon01
- stacklight_telemetry_node02_address: 172.16.10.108
- stacklight_telemetry_node02_hostname: mon02
- stacklight_telemetry_node03_address: 172.16.10.109
- stacklight_telemetry_node03_hostname: mon03
- stacklight_version: '2'
- static_ips_on_deploy_network_enabled: 'False'
- tenant_network_gateway: 10.1.0.1
- tenant_network_netmask: 255.255.255.0
- tenant_network_subnet: 10.1.0.0/24
- tenant_vlan: '20'
- upstream_proxy_enabled: 'False'
- use_default_network_scheme: 'False'
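For orientation, this cookiecutter context drives address assignment directly: the *_address_ranges keys pair with openstack_compute_count, so each generated compute is expected to receive one address per network. A sketch of the mapping (the cmp001/cmp002 names follow the rack hostname prefix; the exact pairing is an assumption about the templates):

    # openstack_compute_count: '2' plus the ranges above should yield:
    #   cmp001: 172.16.10.105 (single), 192.168.10.105 (deploy), 10.1.0.105 (tenant)
    #   cmp002: 172.16.10.106 (single), 192.168.10.106 (deploy), 10.1.0.106 (tenant)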
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-dvr/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr/_context-environment.yaml
deleted file mode 100644
index 931efcb..0000000
--- a/tcp_tests/templates/cookied-mcp-mitaka-dvr/_context-environment.yaml
+++ /dev/null
@@ -1,172 +0,0 @@
-nodes:
- cfg01.mcp11-ovs-dpdk.local:
- reclass_storage_name: infra_config_node01
- roles:
- - infra_config
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl01.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_control_node01
- roles:
- - infra_kvm
- - openstack_control_leader
- - openstack_database_leader
- - openstack_message_queue
- - features_designate_pool_manager_database
- - features_designate_pool_manager
- - features_designate_pool_manager_keystone
- - features_lvm_backend_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl02.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_control_node02
- roles:
- - infra_kvm
- - openstack_control
- - openstack_database
- - openstack_message_queue
- - features_designate_pool_manager_database
- - features_designate_pool_manager
- - features_lvm_backend_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl03.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_control_node03
- roles:
- - infra_kvm
- - openstack_control
- - openstack_database
- - openstack_message_queue
- - features_designate_pool_manager_database
- - features_designate_pool_manager
- - features_lvm_backend_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- prx01.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_proxy_node01
- roles:
- - openstack_proxy
- - features_designate_pool_manager_proxy
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mon01.mcp11-ovs-dpdk.local:
- reclass_storage_name: stacklight_server_node01
- roles:
- - stacklightv2_server_leader
- - stacklight_telemetry_leader
- - stacklight_log_leader_v2
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mon02.mcp11-ovs-dpdk.local:
- reclass_storage_name: stacklight_server_node02
- roles:
- - stacklightv2_server
- - stacklight_telemetry
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mon03.mcp11-ovs-dpdk.local:
- reclass_storage_name: stacklight_server_node03
- roles:
- - stacklightv2_server
- - stacklight_telemetry
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- # Generator-based computes. For compatibility only
- cmp<<count>>.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_compute_rack01
- roles:
- - openstack_compute
- - features_lvm_backend_volume_vdb
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- ens5:
- role: bond0_ab_ovs_vxlan_mesh
- ens6:
- role: bond1_ab_ovs_floating
-
- gtw01.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_gateway_node01
- roles:
- - openstack_gateway
- - linux_system_codename_xenial
- classes:
- - system.linux.system.repo.mcp.apt_mirantis.docker
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- ens5:
- role: bond0_ab_ovs_vxlan_mesh
- ens6:
- role: bond1_ab_ovs_floating
-
- dns01.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_dns_node01
- roles:
- - features_designate_pool_manager_dns
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- single_address: ${_param:openstack_dns_node01_address}
-
- dns02.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_dns_node02
- roles:
- - features_designate_pool_manager_dns
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- single_address: ${_param:openstack_dns_node02_address}
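The 'cmp<<count>>' entry above is a generator node: the framework expands it into numbered compute hosts according to the compute count from the cookiecutter context. A sketch of the expected expansion, assuming the usual cmp001/cmp002 naming:

    cmp001.mcp11-ovs-dpdk.local:
      reclass_storage_name: openstack_compute_rack01
      roles:
      - openstack_compute
      - features_lvm_backend_volume_vdb
      - linux_system_codename_xenial
    cmp002.mcp11-ovs-dpdk.local:
      reclass_storage_name: openstack_compute_rack01
      # same roles and interfaces as cmp001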
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-dvr/core.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr/core.yaml
deleted file mode 100644
index 8954160..0000000
--- a/tcp_tests/templates/cookied-mcp-mitaka-dvr/core.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
-{% from 'cookied-mcp-mitaka-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-{% import 'shared-core.yaml' as SHARED_CORE with context %}
-
-{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
-{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
-{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
-{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
-{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
-{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
-{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-dvr/openstack.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr/openstack.yaml
deleted file mode 100644
index b335251..0000000
--- a/tcp_tests/templates/cookied-mcp-mitaka-dvr/openstack.yaml
+++ /dev/null
@@ -1,188 +0,0 @@
-{% from 'cookied-mcp-mitaka-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-mcp-mitaka-dvr/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'cookied-mcp-mitaka-dvr/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'cookied-mcp-mitaka-dvr/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'cookied-mcp-mitaka-dvr/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME') %}
-{% set OVERRIDE_POLICY = os_env('OVERRIDE_POLICY', '') %}
-
-{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-# Install OpenStack control services
-
-{%- if OVERRIDE_POLICY != '' %}
-- description: Upload policy override
- upload:
- local_path: {{ config.salt_deploy.templates_dir }}{{ LAB_CONFIG_NAME }}/
- local_filename: overrides-policy.yml
- remote_path: /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/
- node_name: {{ HOSTNAME_CFG01 }}
-
-- description: Create custom cluster control class
- cmd: echo -e "classes:\n- cluster.{{ LAB_CONFIG_NAME }}.openstack.control_orig\n$(cat /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml)" > /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml
- node_name: {{ HOSTNAME_CFG01 }}
-
-- description: Rename control classes
- cmd: mv /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control_orig.yml &&
- ln -s /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml &&
- salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all &&
- salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
- node_name: {{ HOSTNAME_CFG01 }}
-{%- endif %}
-
-# Deploy nginx before openstack services (PROD-22740)
-- description: Deploy nginx proxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
-
-# install designate backend
-- description: Install powerdns
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@powerdns:server' state.sls powerdns.server
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_DESIGNATE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HORIZON() }}
-
-# Install compute node
-
-- description: Apply formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
- - description: Re-apply (as in the docs) formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check IP on computes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
- 'ip a'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 10, delay: 30}
- skip_fail: false
-
-
- # Upload cirros image
-
-- description: Upload cirros image on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- 'wget http://images.mirantis.com.s3.amazonaws.com/cirros-x64-20170828.qcow2'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Register image in glance
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; glance --timeout 120 image-create --name cirros --visibility public --disk-format qcow2 --container-format bare --progress < /root/cirros-x64-20170828.qcow2'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create net04_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create router
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-create net04_router01'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set gateway
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Add interface
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Allow all tcp
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; nova secgroup-add-rule default tcp 1 65535 0.0.0.0/0'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Allow all icmp
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
- - description: Sync time
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
- 'service ntp stop; ntpd -gq; service ntp start'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-{{ SHARED.INSTALL_DOCKER_ON_GTW() }}
-
- - description: Create rc file on cfg
- cmd: scp ctl01:/root/keystonercv3 /root
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Copy rc file
- cmd: scp /root/keystonercv3 gtw01:/root
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
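When OVERRIDE_POLICY is non-empty, the first three steps of this file upload overrides-policy.yml, prepend a classes: include of the renamed original control class, and symlink the result over control.yml. The rewritten overrides-policy.yml is therefore expected to look roughly like this (a sketch; LAB_CONFIG_NAME shown as a placeholder):

    classes:
    - cluster.<LAB_CONFIG_NAME>.openstack.control_orig
    parameters:
      nova:
        controller:
          policy:
            # ... the overrides from the uploaded overrides-policy.yml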
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-dvr/overrides-policy.yml b/tcp_tests/templates/cookied-mcp-mitaka-dvr/overrides-policy.yml
deleted file mode 100644
index 1f35a6b..0000000
--- a/tcp_tests/templates/cookied-mcp-mitaka-dvr/overrides-policy.yml
+++ /dev/null
@@ -1,40 +0,0 @@
-parameters:
- nova:
- controller:
- policy:
- context_is_admin: 'role:admin or role:administrator'
- 'compute:create': 'rule:admin_or_owner'
- 'compute:create:attach_network':
- cinder:
- controller:
- policy:
- 'volume:delete': 'rule:admin_or_owner'
- 'volume:extend':
- neutron:
- server:
- policy:
- create_subnet: 'rule:admin_or_network_owner'
- 'get_network:queue_id': 'rule:admin_only'
- 'create_network:shared':
- glance:
- server:
- policy:
- publicize_image: "role:admin"
- add_member:
- keystone:
- server:
- policy:
- admin_or_token_subject: 'rule:admin_required or rule:token_subject'
- heat:
- server:
- policy:
- context_is_admin: 'role:admin and is_admin_project:True'
- deny_stack_user: 'not role:heat_stack_user'
- deny_everybody: '!'
- 'cloudformation:ValidateTemplate': 'rule:deny_everybody'
- 'cloudformation:DescribeStackResources':
- ceilometer:
- server:
- policy:
- segregation: 'rule:context_is_admin'
- 'telemetry:get_resource':
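Note the keys deliberately left without a value in this file, such as 'volume:extend': and add_member:. The intent is to override the formula default for that rule with an empty value; whether the rendered policy file keeps the rule empty or drops it entirely depends on the formula, so the sketch below is an assumption:

    # expected effect on the rendered cinder policy:
    #   "volume:delete": "rule:admin_or_owner"   <- replaced by the override above
    #   "volume:extend": ""                      <- default cleared by the empty key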
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-dvr/salt.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr/salt.yaml
deleted file mode 100644
index 240f6e3..0000000
--- a/tcp_tests/templates/cookied-mcp-mitaka-dvr/salt.yaml
+++ /dev/null
@@ -1,59 +0,0 @@
-{% from 'cookied-mcp-mitaka-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-mcp-mitaka-dvr/underlay.yaml' import HOSTNAME_CMP01 with context %}
-{% from 'cookied-mcp-mitaka-dvr/underlay.yaml' import HOSTNAME_CMP02 with context %}
-{% from 'cookied-mcp-mitaka-dvr/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'cookied-mcp-mitaka-dvr/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'cookied-mcp-mitaka-dvr/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# See shared-salt.yaml for other salt model repository parameters
-{% set OVERRIDES = os_env('OVERRIDES', 'override_example: true') %}
-{% set OVERRIDES_FILENAME = os_env('OVERRIDES_FILENAME', '/srv/salt/reclass/classes/environment/cookied-mcp-mitaka-dvr/overrides.yml') %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-{%- if OVERRIDES != '' %}
-{%- for param in OVERRIDES.splitlines() %}
-{%- set key, value = param.replace(' ','').split(':', 1) %}
-- description: Override cluster parameters
- cmd: |
- salt-call reclass.cluster_meta_set name='{{ key }}' value='{{ value }}' file_name='{{OVERRIDES_FILENAME}}'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 1}
- skip_fail: false
-{%- endfor %}
-
-- description: Refresh pillar
- cmd: salt '*' saltutil.refresh_pillar
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 1}
- skip_fail: false
-{%- endif %}
-
-{{ SHARED.ADJUST_SL_OPTS(OVERRIDES_FILENAME='/srv/salt/reclass/classes/cluster/' + SHARED.CLUSTER_NAME + '/stacklight/server.yml') }}
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
-
-# WORKAROUND PROD-21071
-- description: Set correct pin for openstack repository
- cmd: |
- salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run "sed -i -e 's/Pin: release l=mitaka/Pin: release l=xenial\/openstack\/mitaka testing/g' /etc/apt/preferences.d/mirantis_openstack"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
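The OVERRIDES loop in this file splits each 'key: value' line and renders one deploy step per parameter. With the default OVERRIDES of 'override_example: true' and the default OVERRIDES_FILENAME, the rendered step is expected to be:

    - description: Override cluster parameters
      cmd: |
        salt-call reclass.cluster_meta_set name='override_example' value='true' file_name='/srv/salt/reclass/classes/environment/cookied-mcp-mitaka-dvr/overrides.yml'
      node_name: cfg01.cookied-mcp-mitaka-dvr.local
      retry: {count: 1, delay: 1}
      skip_fail: false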
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-dvr/sl.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr/sl.yaml
deleted file mode 100644
index f2a0907..0000000
--- a/tcp_tests/templates/cookied-mcp-mitaka-dvr/sl.yaml
+++ /dev/null
@@ -1,184 +0,0 @@
-{% from 'cookied-mcp-mitaka-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
-# Install docker swarm
-- description: Configure docker service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install docker swarm on master node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Send grains to the swarm slave nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' mine.update
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Refresh modules
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
- - description: Rerun swarm on slaves for proper token population
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure slave nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: List registered Docker swarm nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install keepalived on mon nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'mon*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check the VIP on mon nodes
- cmd: |
- SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
- echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-# Install StackLight v2 (slv2) infra
-# Install MongoDB for Alerta
-- description: Install MongoDB
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Install telegraf
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Configure Prometheus exporters, if pillar 'prometheus:exporters' exists on any server
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure collector
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:log_collector' state.sls heka.log_collector
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install elasticsearch server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install kibana server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install elasticsearch client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Install kibana client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:client' state.sls kibana.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
- - description: Check influxdb
- cmd: |
- INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
- if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-# Collect grains needed to configure the services
-
-- description: Get grains
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Sync modules
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 15}
- skip_fail: false
-
-# Configure the services running in Docker Swarm
-- description: Install prometheus alertmanager
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
- - description: Run docker state
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: docker ps
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure Grafana dashboards and datasources
- cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Run salt minion to create cert files
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-{{ SHARED_SL_TESTS.MACRO_CLONE_SL_TESTS() }}
-{{ SHARED_SL_TESTS.MACRO_CONFIGURE_TESTS() }}
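Several steps in this file pass '-b 1' to salt. That is salt's batch mode: the state runs on one matching minion at a time instead of on all of them at once, which lets clustered services such as elasticsearch and kibana come up node by node. For instance, the elasticsearch step above is equivalent to:

    - description: Install elasticsearch server
      cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
      # '-b 1' = batch size 1: apply the state to one minion, wait for it to
      # finish, then move on to the next matching minion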
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-dvr/underlay--meta-data.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/cookied-mcp-mitaka-dvr/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
- instance-id: iid-local1
- hostname: {hostname}
- local-hostname: {hostname}
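This meta-data template is rendered per node, with {hostname} substituted by the framework. For ctl01 in this lab the stored string would read roughly as follows (a sketch):

    instance-id: iid-local1
    hostname: ctl01.cookied-mcp-mitaka-dvr.local
    local-hostname: ctl01.cookied-mcp-mitaka-dvr.local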
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-dvr/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr/underlay--user-data-cfg01.yaml
deleted file mode 100644
index d75dab1..0000000
--- a/tcp_tests/templates/cookied-mcp-mitaka-dvr/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,77 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifdown ens3
- - sudo ip r d default || true # remove existing default route to get it from dhcp
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
-
- # Enable grub menu using updated config below
- - update-grub
-
- - mkdir -p /srv/salt/reclass/nodes
- - systemctl enable salt-master
- - systemctl enable salt-minion
- - systemctl start salt-master
- - systemctl start salt-minion
- - salt-call -l info --timeout=120 test.ping
-
- write_files:
- - path: /etc/default/grub.d/97-enable-grub-menu.cfg
- content: |
- GRUB_RECORDFAIL_TIMEOUT=30
- GRUB_TIMEOUT=3
- GRUB_TIMEOUT_STYLE=menu
-
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-dvr/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr/underlay--user-data1604.yaml
deleted file mode 100644
index 3fbb777..0000000
--- a/tcp_tests/templates/cookied-mcp-mitaka-dvr/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,50 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-dvr/underlay.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr/underlay.yaml
deleted file mode 100644
index 81afdb5..0000000
--- a/tcp_tests/templates/cookied-mcp-mitaka-dvr/underlay.yaml
+++ /dev/null
@@ -1,575 +0,0 @@
-# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
-{% import 'cookied-mcp-mitaka-dvr/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'cookied-mcp-mitaka-dvr/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'cookied-mcp-mitaka-dvr/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-mcp-mitaka-dvr') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp001.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp002.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_DNS01 = os_env('HOSTNAME_DNS01', 'dns01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_DNS02 = os_env('HOSTNAME_DNS02', 'dns02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
-
-template:
- devops_settings:
- env_name: {{ os_env('ENV_NAME', 'cookied-mcp-mitaka-dvr_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
- address_pools:
- private-pool01:
- net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_DNS01 }}: +111
- default_{{ HOSTNAME_DNS02 }}: +112
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+90, -10]
-
- admin-pool01:
- net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +90
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_DNS01 }}: +111
- default_{{ HOSTNAME_DNS02 }}: +112
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+90, -10]
-
- tenant-pool01:
- net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_DNS01 }}: +111
- default_{{ HOSTNAME_DNS02 }}: +112
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+10, -10]
-
- external-pool01:
- net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_DNS01 }}: +111
- default_{{ HOSTNAME_DNS02 }}: +112
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+10, -10]
-
-
- groups:
- - name: default
- driver:
- name: devops.driver.libvirt
- params:
- connection_string: !os_env CONNECTION_STRING, qemu:///system
- storage_pool_name: !os_env STORAGE_POOL_NAME, default
- stp: False
- hpet: False
- enable_acpi: true
- use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
- use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
-
- network_pools:
- admin: admin-pool01
- private: private-pool01
- tenant: tenant-pool01
- external: external-pool01
-
- l2_network_devices:
- private:
- address_pool: private-pool01
- dhcp: false
- forward:
- mode: route
-
- admin:
- address_pool: admin-pool01
- dhcp: true
- forward:
- mode: nat
-
- tenant:
- address_pool: tenant-pool01
- dhcp: false
-
- external:
- address_pool: external-pool01
- dhcp: true
- forward:
- mode: route
-
-
- group_volumes:
- - name: cloudimage1604 # This name is used in the 'backing_store' option of node volumes.
- source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- format: qcow2
- - name: cfg01_day01_image # Pre-configured day01 image
- source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
- format: qcow2
- - name: mcp_ubuntu_1604_image # Pre-configured image for VCP nodes, initially based on the kvm nodes.
- # http://images.mirantis.com/ubuntu-16-04-x64-latest.qcow2 (preferred)
- # or
- # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- source_image: {{ os_env('MCP_IMAGE_PATH1604', os_env('IMAGE_PATH1604')) }}
- format: qcow2
-
- nodes:
- - name: {{ HOSTNAME_CFG01 }}
- role: salt_master
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cfg01_day01_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_cfg01
-
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config:
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: &interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config: &network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CTL03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 6144
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 6144
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 6144
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_PRX01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
-
- - name: {{ HOSTNAME_CMP01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
-
- interfaces: &all_interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- - label: ens5
- l2_network_device: tenant
- interface_model: *interface_model
- - label: ens6
- l2_network_device: external
- interface_model: *interface_model
- network_config: &all_network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
- ens5:
- networks:
- - tenant
- ens6:
- networks:
- - external
-
- - name: {{ HOSTNAME_CMP02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_GTW01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_DNS01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_DNS02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-ovs/_context-cookiecutter-mcp-mitaka-ovs.yaml b/tcp_tests/templates/cookied-mcp-mitaka-ovs/_context-cookiecutter-mcp-mitaka-ovs.yaml
deleted file mode 100644
index ecc8054..0000000
--- a/tcp_tests/templates/cookied-mcp-mitaka-ovs/_context-cookiecutter-mcp-mitaka-ovs.yaml
+++ /dev/null
@@ -1,186 +0,0 @@
-default_context:
- bmk_enabled: 'False'
- ceph_enabled: 'False'
- cicd_enabled: 'False'
- cluster_domain: cookied-mcp-mitaka-ovs.local
- cluster_name: cookied-mcp-mitaka-ovs
- compute_bond_mode: active-backup
- compute_primary_first_nic: eth1
- compute_primary_second_nic: eth2
- context_seed: wUqrwKeBTCpRpVrhK1KwZQv4cjM9VhG7L2vQ0iQsTuMrXASklEBDmJEf6bnPEqcK
- control_network_netmask: 255.255.255.0
- control_network_subnet: 172.16.10.0/24
- control_vlan: '10'
- cookiecutter_template_branch: master
- cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/mk/cookiecutter-templates.git
- deploy_network_gateway: 192.168.10.1
- deploy_network_netmask: 255.255.255.0
- deploy_network_subnet: 192.168.10.0/24
- deployment_type: physical
- dns_server01: 172.18.176.6
- dns_server02: 172.18.208.44
- email_address: ddmitriev@mirantis.com
- gateway_primary_first_nic: eth1
- gateway_primary_second_nic: eth2
- infra_bond_mode: active-backup
- infra_deploy_nic: eth0
- infra_kvm01_control_address: 172.16.10.101
- infra_kvm01_deploy_address: 192.168.10.101
- infra_kvm01_hostname: kvm01
- infra_kvm02_control_address: 172.16.10.102
- infra_kvm02_deploy_address: 192.168.10.102
- infra_kvm02_hostname: kvm02
- infra_kvm03_control_address: 172.16.10.103
- infra_kvm03_deploy_address: 192.168.10.103
- infra_kvm03_hostname: kvm03
- infra_kvm_vip_address: 172.16.10.100
- infra_primary_first_nic: eth1
- infra_primary_second_nic: eth2
- kubernetes_enabled: 'False'
- local_repositories: 'False'
- maas_deploy_address: 192.168.10.90
- maas_hostname: cfg01
- mcp_version: stable
- offline_deployment: 'False'
- opencontrail_enabled: 'False'
- openstack_benchmark_node01_address: 172.16.10.95
- openstack_benchmark_node01_hostname: bmk01
- openstack_cluster_size: compact
- openstack_compute_count: '2'
- openstack_compute_rack01_hostname: cmp
- openstack_compute_rack01_single_subnet: 172.16.10
- openstack_compute_rack01_tenant_subnet: 10.1.0
- openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
- openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
- openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
- openstack_control_address: 172.16.10.100
- openstack_control_hostname: ctl
- openstack_control_node01_address: 172.16.10.101
- openstack_control_node01_hostname: ctl01
- openstack_control_node02_address: 172.16.10.102
- openstack_control_node02_hostname: ctl02
- openstack_control_node03_address: 172.16.10.103
- openstack_control_node03_hostname: ctl03
- openstack_database_address: 172.16.10.100
- openstack_database_hostname: ctl
- openstack_database_node01_address: 172.16.10.101
- openstack_database_node01_hostname: ctl01
- openstack_database_node02_address: 172.16.10.102
- openstack_database_node02_hostname: ctl02
- openstack_database_node03_address: 172.16.10.103
- openstack_database_node03_hostname: ctl03
- openstack_enabled: 'True'
- openstack_gateway_node01_address: 172.16.10.110
- openstack_gateway_node01_hostname: gtw01
- openstack_gateway_node01_tenant_address: 10.1.0.6
- openstack_gateway_node02_address: 172.16.10.111
- openstack_gateway_node02_hostname: gtw02
- openstack_gateway_node02_tenant_address: 10.1.0.7
- openstack_gateway_node03_address: 172.16.10.112
- openstack_gateway_node03_hostname: gtw03
- openstack_gateway_node03_tenant_address: 10.1.0.8
- openstack_message_queue_address: 172.16.10.100
- openstack_message_queue_hostname: ctl
- openstack_message_queue_node01_address: 172.16.10.101
- openstack_message_queue_node01_hostname: ctl01
- openstack_message_queue_node02_address: 172.16.10.102
- openstack_message_queue_node02_hostname: ctl02
- openstack_message_queue_node03_address: 172.16.10.103
- openstack_message_queue_node03_hostname: ctl03
- openstack_network_engine: ovs
- openstack_neutron_qos: 'False'
- openstack_neutron_vlan_aware_vms: 'False'
- openstack_nfv_dpdk_enabled: 'False'
- openstack_nfv_sriov_enabled: 'False'
- openstack_nova_compute_nfv_req_enabled: 'False'
- openstack_ovs_dvr_enabled: 'False'
- openstack_ovs_encapsulation_type: vxlan
- openstack_proxy_address: 172.16.10.80
- openstack_proxy_hostname: prx
- openstack_proxy_node01_address: 172.16.10.121
- openstack_proxy_node01_hostname: prx01
- openstack_proxy_node02_address: 172.16.10.122
- openstack_proxy_node02_hostname: prx02
- openstack_upgrade_node01_address: 172.16.10.19
- openstack_version: mitaka
- oss_enabled: 'False'
- oss_node03_address: ${_param:stacklight_monitor_node03_address}
- oss_webhook_app_id: '24'
- oss_pushkin_email_sender_password: password
- oss_pushkin_smtp_port: '587'
- oss_webhook_login_id: '13'
- platform: openstack_enabled
- public_host: ${_param:openstack_proxy_address}
- publication_method: email
- reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
- backup_private_key: |
- -----BEGIN RSA PRIVATE KEY-----
- MIIEowIBAAKCAQEAxL6/rVgCetsETpZaUmXmkj8cZ1WN0eubH1FvMDOi/La9ZJyT
- k0C6AYpJnIyEm93pMj5cLm08qRqMW+2pdOhYjcH69yg5MrX5SkRk8jCmIHIYoIbh
- Qnwbnj3dd3I39ZdfU2FO7u2vlbglVou6ZoQxlJDItuLNtzq6EG+w9eF19e7+OsC6
- 6iUItp618zfw1l3J/8nKvCGe2RYDf7mJW6XwCl/DwryJmwwzvPgYJ3QMuDD8/HFj
- lrJ3xjFTXj4b4Ws1XIoy78fFbtiLr4OwqCYkho03u2E5rOOP1qZxZB63sivHMLMO
- MM5bOAQKbulFNoyALADGYfc7sf0bZ4u9XXDXxQIDAQABAoIBAQCfmc2MJRT97KW1
- yqpCpX9BrAiymuiNHf+cjEcSZxEUyHkjIRFmJt+9WB0W7ba1anM92vCUiPDojSzH
- dig9Oi578JxR20NrK8uqv4jUHzrknynzLveVI3CUEcOSnglfJQijbxDFKfOCFPvV
- FUyE1UATMNBh6+LNfMprgu+exuMWOPnDyUiYQ+WZ0JfuZY8fuaZte4woJJOb9LUu
- 5rsMG/smIzjpgZ0Z9ZVDMurfq565qhpaXRAqKeIuyht8pacTo31iMQdHB78AvY/3
- g0z21Gk8k3z0Kr/YFKr2r4FmXY5m/gAUvZly2ZrVQM5XsbTVCzq/JpI5fssNvSbU
- AKmXzf4RAoGBAOO3d4/cstxERzW6hyOTjZIN1ppR52CsnZTsVPbfd0pCtmzmVZce
- CtHKdcXSbTwZvvkK09QSWAp3MoSpd0gIOiLU8Wx/R/RIZsu9BlhTS3r3EQLnk72d
- H/1TTA+j4T/LIYLSojQ1RxvIrHetAD44j732aTwKAHj/SybEAVqNkOB/AoGBAN0u
- gLcrgqIHGrk4VjWSvlCGymfF40equcx+ud7XhfZDGETUOSahW4dPZ52cjPAkrCBQ
- MMfcDwSVGsOAjd+mNt11BHUKobnhXwFaWWuyqyn9NmWFbjMbICVh7E3Of5aVN38o
- lrmo/7LuKMVG7XRwphCv5NkaJmQG4njDyUQWlaW7AoGADCd8wDb9bPhP/LQqBmIX
- ylXmwHHisaxE9O/wUQT4bwREjGd25gv6c9wkkRx8LBsLsGs9hzI7dMOL9Ly+2x9l
- SvqmsC3S/1zl77X1Ir2/Z57MT6Vgo1xBmtnZU3Rhz2/eKAdqFPNLClaZrgGT475N
- HcyLLWMzR0IJFtabY+Puea0CgYA8Zb5wRkldxWLewSuJZZDinGwY+kieAVjLJq/K
- 0j+ah6fQ48LXcah0wpIgz+cMjHcUO9GWQdk3/x9X03rqX5EL2DBnZYfUIl63F9zj
- M97ZkHOSNWVqPzX//0Vv2butewG0j3jZKfTo/2/SrxOYgEpYtC9huWpSVi7xm0US
- erhSkQKBgFIf9JEsfgE57ANhvITZ3ZI0uZXNxZkXQaVg8jvScDi79IIhy9iPzhKC
- aIIQoDNIlWv1ftCRZ5AlBvVXgvQ/QNrwy48JiQTzWZlb9Ezg8w+olQmSbG6fq7Y+
- 7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
- -----END RSA PRIVATE KEY-----
- backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
- salt_api_password: H0rTPdmktZ8RI7T7y6fjqY0uEbbs7Kwi
- salt_api_password_hash: $6$lfbIFtMZ$.nTbTDMzs1iYv0WqkZHia8H8Fma963Nv3qyyz1x68jQh0YXK9i907B/hvoG4QHMvfolE7V7vQnFClJ1mVA3Yb.
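- # NOTE: the value above is a SHA-512 crypt hash ('$6$' prefix) of
- # salt_api_password. Assuming the usual tooling is available, an
- # equivalent hash could be generated with, e.g.:
- #   mkpasswd --method=sha-512 'H0rTPdmktZ8RI7T7y6fjqY0uEbbs7Kwi'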
- salt_master_address: 172.16.10.90
- salt_master_hostname: cfg01
- salt_master_management_address: 192.168.10.90
- shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/salt-models/reclass-system.git
- fluentd_enabled: 'True'
- stacklight_enabled: 'True'
- stacklight_log_address: 172.16.10.60
- stacklight_log_hostname: log
- stacklight_log_node01_address: 172.16.10.61
- stacklight_log_node01_hostname: log01
- stacklight_log_node02_address: 172.16.10.62
- stacklight_log_node02_hostname: log02
- stacklight_log_node03_address: 172.16.10.63
- stacklight_log_node03_hostname: log03
- stacklight_monitor_address: 172.16.10.70
- stacklight_monitor_hostname: mon
- stacklight_monitor_node01_address: 172.16.10.71
- stacklight_monitor_node01_hostname: mon01
- stacklight_monitor_node02_address: 172.16.10.72
- stacklight_monitor_node02_hostname: mon02
- stacklight_monitor_node03_address: 172.16.10.73
- stacklight_monitor_node03_hostname: mon03
- stacklight_telemetry_address: 172.16.10.85
- stacklight_telemetry_hostname: mtr
- stacklight_telemetry_node01_address: 172.16.10.86
- stacklight_telemetry_node01_hostname: mtr01
- stacklight_telemetry_node02_address: 172.16.10.87
- stacklight_telemetry_node02_hostname: mtr02
- stacklight_telemetry_node03_address: 172.16.10.88
- stacklight_telemetry_node03_hostname: mtr03
- stacklight_version: '2'
- stacklight_long_term_storage_type: influxdb
- static_ips_on_deploy_network_enabled: 'False'
- tenant_network_gateway: 10.1.0.1
- tenant_network_netmask: 255.255.255.0
- tenant_network_subnet: 10.1.0.0/24
- tenant_vlan: '20'
- upstream_proxy_enabled: 'False'
- use_default_network_scheme: 'False'
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-ovs/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-mitaka-ovs/_context-environment.yaml
deleted file mode 100644
index 931efcb..0000000
--- a/tcp_tests/templates/cookied-mcp-mitaka-ovs/_context-environment.yaml
+++ /dev/null
@@ -1,172 +0,0 @@
-nodes:
- cfg01.mcp11-ovs-dpdk.local:
- reclass_storage_name: infra_config_node01
- roles:
- - infra_config
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl01.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_control_node01
- roles:
- - infra_kvm
- - openstack_control_leader
- - openstack_database_leader
- - openstack_message_queue
- - features_designate_pool_manager_database
- - features_designate_pool_manager
- - features_designate_pool_manager_keystone
- - features_lvm_backend_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl02.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_control_node02
- roles:
- - infra_kvm
- - openstack_control
- - openstack_database
- - openstack_message_queue
- - features_designate_pool_manager_database
- - features_designate_pool_manager
- - features_lvm_backend_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl03.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_control_node03
- roles:
- - infra_kvm
- - openstack_control
- - openstack_database
- - openstack_message_queue
- - features_designate_pool_manager_database
- - features_designate_pool_manager
- - features_lvm_backend_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- prx01.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_proxy_node01
- roles:
- - openstack_proxy
- - features_designate_pool_manager_proxy
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mon01.mcp11-ovs-dpdk.local:
- reclass_storage_name: stacklight_server_node01
- roles:
- - stacklightv2_server_leader
- - stacklight_telemetry_leader
- - stacklight_log_leader_v2
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mon02.mcp11-ovs-dpdk.local:
- reclass_storage_name: stacklight_server_node02
- roles:
- - stacklightv2_server
- - stacklight_telemetry
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mon03.mcp11-ovs-dpdk.local:
- reclass_storage_name: stacklight_server_node03
- roles:
- - stacklightv2_server
- - stacklight_telemetry
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- # Generator-based computes. For compatibility only
- cmp<<count>>.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_compute_rack01
- roles:
- - openstack_compute
- - features_lvm_backend_volume_vdb
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- ens5:
- role: bond0_ab_ovs_vxlan_mesh
- ens6:
- role: bond1_ab_ovs_floating
-
- gtw01.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_gateway_node01
- roles:
- - openstack_gateway
- - linux_system_codename_xenial
- classes:
- - system.linux.system.repo.mcp.apt_mirantis.docker
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- ens5:
- role: bond0_ab_ovs_vxlan_mesh
- ens6:
- role: bond1_ab_ovs_floating
-
- dns01.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_dns_node01
- roles:
- - features_designate_pool_manager_dns
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- single_address: ${_param:openstack_dns_node01_address}
-
- dns02.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_dns_node02
- roles:
- - features_designate_pool_manager_dns
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- single_address: ${_param:openstack_dns_node02_address}
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-ovs/core.yaml b/tcp_tests/templates/cookied-mcp-mitaka-ovs/core.yaml
deleted file mode 100644
index 6a1278e..0000000
--- a/tcp_tests/templates/cookied-mcp-mitaka-ovs/core.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
-{% from 'cookied-mcp-mitaka-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-{% import 'shared-core.yaml' as SHARED_CORE with context %}
-
-{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
-{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
-{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
-{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
-{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
-{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
-{{ SHARED_CORE.MACRO_CHECK_VIP() }}
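-
-# NOTE: each MACRO_INSTALL_* above is a Jinja macro imported from
-# shared-core.yaml; on rendering it expands into the same kind of
-# description/cmd/retry steps used throughout these templates, so the
-# core services are deployed in the order listed here.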
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-ovs/openstack.yaml b/tcp_tests/templates/cookied-mcp-mitaka-ovs/openstack.yaml
deleted file mode 100644
index 6672997..0000000
--- a/tcp_tests/templates/cookied-mcp-mitaka-ovs/openstack.yaml
+++ /dev/null
@@ -1,210 +0,0 @@
-{% from 'cookied-mcp-mitaka-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-mcp-mitaka-ovs/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'cookied-mcp-mitaka-ovs/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'cookied-mcp-mitaka-ovs/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'cookied-mcp-mitaka-ovs/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-
-{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-# Deploy nginx before openstack services (PROD-22740)
-- description: Deploy nginx proxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER() }}
-
-# SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON
-
-- description: Install neutron service on primary node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C "I@neutron:server and *01*" state.sls neutron.server
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
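-
-# NOTE: '-C' switches salt to compound matching: "I@neutron:server and *01*"
-# targets minions that carry the neutron:server pillar AND whose minion id
-# matches the *01* glob, so neutron.server is applied on the first
-# controller before the remaining ones in the next step.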
-
-- description: Install neutron service on other nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C "I@neutron:server" state.sls neutron.server
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install neutron on gtw node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@neutron:gateway' state.sls neutron
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-# WORKAROUND PROD-20976
-- description: WORKAROUND PROD-20976
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@neutron:server or I@neutron:gateway' cmd.run "sed -i
- 's/#min_l3_agents_per_router = 2/min_l3_agents_per_router = 1/'
- /etc/neutron/neutron.conf"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart Neutron services
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@neutron:server or I@neutron:gateway' cmd.run 'systemctl restart
- neutron*'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check neutron agent-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; neutron agent-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 20}
- skip_fail: false
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
-
-# Install designate backend
-- description: Install powerdns
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@powerdns:server' state.sls powerdns.server
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_DESIGNATE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HORIZON() }}
-
-# Install compute nodes
-
-- description: Apply formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
- description: Re-apply (as in the docs) formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check IP on computes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
- 'ip a'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 10, delay: 30}
- skip_fail: false
-
-
- # Upload cirros image
-
-- description: Upload cirros image on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- 'wget http://images.mirantis.com.s3.amazonaws.com/cirros-x64-20170828.qcow2'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Register image in glance
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; glance --timeout 120 image-create --name cirros --visibility public --disk-format qcow2 --container-format bare --progress < /root/cirros-x64-20170828.qcow2'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create net04_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create router
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-create net04_router01'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set gateway
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Add interface
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
- description: Allow tcp port 22 (ssh) in the default security group
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; openstack security group rule create --proto tcp --dst-port 22 default'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Allow all icmp
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; openstack security group rule create --proto icmp default'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
- description: Sync time
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
- 'service ntp stop; ntpd -gq; service ntp start'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-{{ SHARED.INSTALL_DOCKER_ON_GTW() }}
-
- description: Create rc file on cfg
- cmd: scp ctl01:/root/keystonercv3 /root
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Copy rc file
- cmd: scp /root/keystonercv3 gtw01:/root
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-ovs/salt.yaml b/tcp_tests/templates/cookied-mcp-mitaka-ovs/salt.yaml
deleted file mode 100644
index 9f3767b..0000000
--- a/tcp_tests/templates/cookied-mcp-mitaka-ovs/salt.yaml
+++ /dev/null
@@ -1,57 +0,0 @@
-{% from 'cookied-mcp-mitaka-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-mcp-mitaka-ovs/underlay.yaml' import HOSTNAME_CMP01 with context %}
-{% from 'cookied-mcp-mitaka-ovs/underlay.yaml' import HOSTNAME_CMP02 with context %}
-{% from 'cookied-mcp-mitaka-ovs/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'cookied-mcp-mitaka-ovs/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'cookied-mcp-mitaka-ovs/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# See shared-salt.yaml for the other salt model repository parameters
-{% set OVERRIDES = os_env('OVERRIDES', 'override_example: true') %}
-{% set OVERRIDES_FILENAME = os_env('OVERRIDES_FILENAME', '/srv/salt/reclass/classes/environment/cookied-mcp-mitaka-ovs/overrides.yml') %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-{%- if OVERRIDES != '' %}
-{%- for param in OVERRIDES.splitlines() %}
-{%- set key, value = param.replace(' ','').split(':', 1) %}
-- description: Override cluster parameters
- cmd: |
- salt-call reclass.cluster_meta_set name='{{ key }}' value='{{ value }}' file_name='{{OVERRIDES_FILENAME}}'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 1}
- skip_fail: false
-{%- endfor %}
-
-- description: Refresh pillar
- cmd: salt '*' saltutil.refresh_pillar
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 1}
- skip_fail: false
-{%- endif %}
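-
-# NOTE: OVERRIDES is expected to hold newline-separated 'key: value' pairs,
-# for example (values here are illustrative only):
-#   OVERRIDES='override_example: true
-#   another_param: some_value'
-# Each pair is written into OVERRIDES_FILENAME via reclass.cluster_meta_set,
-# after which the pillars are refreshed on all minions.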
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
-
-# WORKAROUND PROD-21071
-- description: Set correct pin for openstack repository
- cmd: |
- salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run "sed -i -e 's/Pin: release l=mitaka/Pin: release l=xenial\/openstack\/mitaka testing/g' /etc/apt/preferences.d/mirantis_openstack"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
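-
-# NOTE: the sed above rewrites the APT 'Pin:' stanza in
-# /etc/apt/preferences.d/mirantis_openstack so it matches the release label
-# actually published for the mitaka testing repo; presumably without this
-# rewrite the pin never matches and the preference is ignored.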
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-ovs/sl.yaml b/tcp_tests/templates/cookied-mcp-mitaka-ovs/sl.yaml
deleted file mode 100644
index 010324c..0000000
--- a/tcp_tests/templates/cookied-mcp-mitaka-ovs/sl.yaml
+++ /dev/null
@@ -1,258 +0,0 @@
-{% from 'cookied-mcp-mitaka-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
-
-# Install docker swarm
-- description: Configure docker service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install docker swarm on master node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Send grains to the swarm slave nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' mine.update
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Refresh modules
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
- description: Rerun swarm on slaves for proper token population
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure slave nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: List registered Docker swarm nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install keepalived on mon nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'mon*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check the VIP on mon nodes
- cmd: |
- SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
- echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
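-
-# NOTE: the check above resolves the StackLight VIP from pillar on cfg01 and
-# then greps the 'ip a' output on the mon nodes; the step only succeeds if
-# some mon node currently holds the stacklight_monitor_address VIP.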
-
-# Install StackLight v2 (slv2) infra
-# Install MongoDB for alerta
-- description: Install Mongo if target matches
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' match.pillar 'mongodb:server' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb.server
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-# Create MongoDB cluster
- description: Configure MongoDB cluster if target matches
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' match.pillar 'mongodb:server' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb.cluster
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 20}
- skip_fail: false
-
-- description: Install telegraf
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Configure Prometheus exporters, if pillar 'prometheus:exporters' exists on any server
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install elasticsearch server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install kibana server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install elasticsearch client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Install kibana client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:client' state.sls kibana.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
- description: Check influxdb
- cmd: |
- INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
- if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
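-
-# NOTE: the `test.ping ... && echo true` construct above is this template's
-# service-presence check: the influxdb state is applied only when at least
-# one minion matches the I@influxdb:server pillar target, and with
-# skip_fail: true the step is harmless when none does.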
-
-# Install Prometheus LTS (optional, if set in the model)
- description: Prometheus LTS (optional, if set in the model)
- cmd: |
- PROMETHEUS_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "PROMETHEUS rely service presence: ${PROMETHEUS_SERVICE}";
- if [[ "$PROMETHEUS_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' state.sls prometheus
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-# Install the service for log collection
-- description: Configure fluentd
- cmd: |
- FLUENTD_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "Fluentd service presence: ${FLUENTD_SERVICE}";
- if [[ "$FLUENTD_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' state.sls fluentd
- else
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:log_collector' state.sls heka.log_collector
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-# Install heka ceilometer collector
-- description: Install heka ceilometer collector if it exists
- cmd: |
- CEILO=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "Ceilometer service presence: ${CEILO}";
- if [[ "$CEILO" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' state.sls heka.ceilometer_collector;
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' service.restart ceilometer_collector
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-# Collect grains needed to configure the services
-
-- description: Get grains
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Sync modules
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 15}
- skip_fail: false
-
-# Configure the services running in Docker Swarm
-- description: Configure prometheus in docker swarm
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' state.sls prometheus,heka.remote_collector
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-# Launch containers
-- description: Launch prometheus containers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master and I@prometheus:server' state.sls docker.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Check docker ps
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' cmd.run "docker ps"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Install sphinx
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@sphinx:server' state.sls sphinx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-
-#- description: Install prometheus alertmanager
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-
-#- description: run docker state
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-#
-#- description: docker ps
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-
-- description: Configure Grafana dashboards and datasources
- cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Run salt minion to create cert files
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-{{ SHARED_SL_TESTS.MACRO_CLONE_SL_TESTS() }}
-{{ SHARED_SL_TESTS.MACRO_CONFIGURE_TESTS() }}
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-ovs/underlay--meta-data.yaml b/tcp_tests/templates/cookied-mcp-mitaka-ovs/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/cookied-mcp-mitaka-ovs/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
- instance-id: iid-local1
- hostname: {hostname}
- local-hostname: {hostname}
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-ovs/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-mitaka-ovs/underlay--user-data-cfg01.yaml
deleted file mode 100644
index 48562ad..0000000
--- a/tcp_tests/templates/cookied-mcp-mitaka-ovs/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,68 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifdown ens3
- - sudo ip r d default || true # remove existing default route to get it from dhcp
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
-
- - mkdir -p /srv/salt/reclass/nodes
- - systemctl enable salt-master
- - systemctl enable salt-minion
- - systemctl start salt-master
- - systemctl start salt-minion
- - salt-call -l info --timeout=120 test.ping
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-ovs/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-mcp-mitaka-ovs/underlay--user-data1604.yaml
deleted file mode 100644
index 3fbb777..0000000
--- a/tcp_tests/templates/cookied-mcp-mitaka-ovs/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,50 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-ovs/underlay.yaml b/tcp_tests/templates/cookied-mcp-mitaka-ovs/underlay.yaml
deleted file mode 100644
index 4fee5c5..0000000
--- a/tcp_tests/templates/cookied-mcp-mitaka-ovs/underlay.yaml
+++ /dev/null
@@ -1,572 +0,0 @@
-# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
-{% import 'cookied-mcp-mitaka-ovs/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'cookied-mcp-mitaka-ovs/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'cookied-mcp-mitaka-ovs/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-mcp-mitaka-ovs') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp001.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp002.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW02 = os_env('HOSTNAME_GTW02', 'gtw02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_DNS01 = os_env('HOSTNAME_DNS01', 'dns01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_DNS02 = os_env('HOSTNAME_DNS02', 'dns02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
-
-template:
- devops_settings:
- env_name: {{ os_env('ENV_NAME', 'cookied-mcp-mitaka-ovs_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
- address_pools:
- private-pool01:
- net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +71
- default_{{ HOSTNAME_MON02 }}: +72
- default_{{ HOSTNAME_MON03 }}: +73
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_DNS01 }}: +111
- default_{{ HOSTNAME_DNS02 }}: +112
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+90, -10]
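- # NOTE (assuming fuel-devops address-pool semantics): '+N' entries are
- # offsets from the start of the allocated subnet (with a /24, +101 is the
- # .101 address), and the dhcp range [+90, -10] runs from .90 up to the
- # tenth address from the end of the subnet.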
-
- admin-pool01:
- net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +90
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +71
- default_{{ HOSTNAME_MON02 }}: +72
- default_{{ HOSTNAME_MON03 }}: +73
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_DNS01 }}: +111
- default_{{ HOSTNAME_DNS02 }}: +112
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+90, -10]
-
- tenant-pool01:
- net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +71
- default_{{ HOSTNAME_MON02 }}: +72
- default_{{ HOSTNAME_MON03 }}: +73
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_DNS01 }}: +111
- default_{{ HOSTNAME_DNS02 }}: +112
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+10, -10]
-
- external-pool01:
- net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +71
- default_{{ HOSTNAME_MON02 }}: +72
- default_{{ HOSTNAME_MON03 }}: +73
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_DNS01 }}: +111
- default_{{ HOSTNAME_DNS02 }}: +112
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+10, -10]
-
-
- groups:
- - name: default
- driver:
- name: devops.driver.libvirt
- params:
- connection_string: !os_env CONNECTION_STRING, qemu:///system
- storage_pool_name: !os_env STORAGE_POOL_NAME, default
- stp: False
- hpet: False
- enable_acpi: true
- use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
- use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
-
- network_pools:
- admin: admin-pool01
- private: private-pool01
- tenant: tenant-pool01
- external: external-pool01
-
- l2_network_devices:
- private:
- address_pool: private-pool01
- dhcp: false
- forward:
- mode: route
-
- admin:
- address_pool: admin-pool01
- dhcp: true
- forward:
- mode: nat
-
- tenant:
- address_pool: tenant-pool01
- dhcp: false
-
- external:
- address_pool: external-pool01
- dhcp: false
- forward:
- mode: route
-
-
- group_volumes:
- - name: cloudimage1604 # This name is used for the 'backing_store' option of node volumes.
- source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- format: qcow2
- - name: cfg01_day01_image # Pre-configured day01 image
- source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
- format: qcow2
- - name: mcp_ubuntu_1604_image # Pre-configured image for control plane
- source_image: {{ os_env('MCP_IMAGE_PATH1604', os_env('IMAGE_PATH1604')) }}
- format: qcow2
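- # NOTE: the node 'system' volumes below reference these names via
- # 'backing_store', i.e. each VM disk is a qcow2 overlay on top of the
- # shared base image, so per-node writes never touch the source image.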
-
- nodes:
- - name: {{ HOSTNAME_CFG01 }}
- role: salt_master
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cfg01_day01_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_cfg01
-
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config:
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: &interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config: &network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CTL03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_PRX01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CMP01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
-
- interfaces: &all_interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- - label: ens5
- l2_network_device: tenant
- interface_model: *interface_model
- - label: ens6
- l2_network_device: external
- interface_model: *interface_model
- network_config: &all_network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
- ens5:
- networks:
- - tenant
- ens6:
- networks:
- - external
-
- - name: {{ HOSTNAME_CMP02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_GTW01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_DNS01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_DNS02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
diff --git a/tcp_tests/templates/cookied-mcp-newton-dvr/_context-cookiecutter-mcp-newton-dvr.yaml b/tcp_tests/templates/cookied-mcp-newton-dvr/_context-cookiecutter-mcp-newton-dvr.yaml
deleted file mode 100644
index 9cb3979..0000000
--- a/tcp_tests/templates/cookied-mcp-newton-dvr/_context-cookiecutter-mcp-newton-dvr.yaml
+++ /dev/null
@@ -1,187 +0,0 @@
-default_context:
- bmk_enabled: 'False'
- ceph_enabled: 'False'
- cicd_enabled: 'False'
- cluster_domain: cookied-mcp-newton-dvr.local
- cluster_name: cookied-mcp-newton-dvr
- compute_bond_mode: active-backup
- compute_primary_first_nic: eth1
- compute_primary_second_nic: eth2
- context_seed: wUqrwKeBTCpRpVrhK1KwZQv4cjM9VhG7L2vQ0iQsTuMrXASklEBDmJEf6bnPEqcK
- control_network_netmask: 255.255.255.0
- control_network_subnet: 172.16.10.0/24
- control_vlan: '10'
- cookiecutter_template_branch: master
- cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/mk/cookiecutter-templates.git
- deploy_network_gateway: 192.168.10.1
- deploy_network_netmask: 255.255.255.0
- deploy_network_subnet: 192.168.10.0/24
- deployment_type: physical
- dns_server01: 172.18.176.6
- dns_server02: 172.18.208.44
- email_address: ddmitriev@mirantis.com
- gateway_primary_first_nic: eth1
- gateway_primary_second_nic: eth2
- infra_bond_mode: active-backup
- infra_deploy_nic: eth0
- infra_kvm01_control_address: 172.16.10.101
- infra_kvm01_deploy_address: 192.168.10.101
- infra_kvm01_hostname: kvm01
- infra_kvm02_control_address: 172.16.10.102
- infra_kvm02_deploy_address: 192.168.10.102
- infra_kvm02_hostname: kvm02
- infra_kvm03_control_address: 172.16.10.103
- infra_kvm03_deploy_address: 192.168.10.103
- infra_kvm03_hostname: kvm03
- infra_kvm_vip_address: 172.16.10.100
- infra_primary_first_nic: eth1
- infra_primary_second_nic: eth2
- kubernetes_enabled: 'False'
- local_repositories: 'False'
- maas_deploy_address: 192.168.10.90
- maas_hostname: cfg01
- mcp_version: stable
- offline_deployment: 'False'
- opencontrail_enabled: 'False'
- openstack_benchmark_node01_address: 172.16.10.95
- openstack_benchmark_node01_hostname: bmk01
- openstack_cluster_size: compact
- openstack_compute_count: '2'
- openstack_compute_rack01_hostname: cmp
- openstack_compute_rack01_single_subnet: 172.16.10
- openstack_compute_rack01_tenant_subnet: 10.1.0
- openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
- openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
- openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
- openstack_control_address: 172.16.10.100
- openstack_control_hostname: ctl
- openstack_control_node01_address: 172.16.10.101
- openstack_control_node01_hostname: ctl01
- openstack_control_node02_address: 172.16.10.102
- openstack_control_node02_hostname: ctl02
- openstack_control_node03_address: 172.16.10.103
- openstack_control_node03_hostname: ctl03
- openstack_database_address: 172.16.10.100
- openstack_database_hostname: ctl
- openstack_database_node01_address: 172.16.10.101
- openstack_database_node01_hostname: ctl01
- openstack_database_node02_address: 172.16.10.102
- openstack_database_node02_hostname: ctl02
- openstack_database_node03_address: 172.16.10.103
- openstack_database_node03_hostname: ctl03
- openstack_enabled: 'True'
- openstack_gateway_node01_address: 172.16.10.110
- openstack_gateway_node01_hostname: gtw01
- openstack_gateway_node01_tenant_address: 10.1.0.6
- openstack_gateway_node02_address: 172.16.10.111
- openstack_gateway_node02_hostname: gtw02
- openstack_gateway_node02_tenant_address: 10.1.0.7
- openstack_gateway_node03_address: 172.16.10.112
- openstack_gateway_node03_hostname: gtw03
- openstack_gateway_node03_tenant_address: 10.1.0.8
- openstack_message_queue_address: 172.16.10.100
- openstack_message_queue_hostname: ctl
- openstack_message_queue_node01_address: 172.16.10.101
- openstack_message_queue_node01_hostname: ctl01
- openstack_message_queue_node02_address: 172.16.10.102
- openstack_message_queue_node02_hostname: ctl02
- openstack_message_queue_node03_address: 172.16.10.103
- openstack_message_queue_node03_hostname: ctl03
- openstack_network_engine: ovs
- openstack_neutron_qos: 'False'
- openstack_neutron_vlan_aware_vms: 'False'
- openstack_nfv_dpdk_enabled: 'False'
- openstack_nfv_sriov_enabled: 'False'
- openstack_nova_compute_nfv_req_enabled: 'False'
- openstack_ovs_dvr_enabled: 'True'
- openstack_ovs_encapsulation_type: vxlan
- openstack_proxy_address: 172.16.10.80
- openstack_proxy_hostname: prx
- openstack_proxy_node01_address: 172.16.10.121
- openstack_proxy_node01_hostname: prx01
- openstack_proxy_node02_address: 172.16.10.122
- openstack_proxy_node02_hostname: prx02
- openstack_upgrade_node01_address: 172.16.10.19
- openstack_version: newton
- oss_enabled: 'False'
- oss_node03_address: ${_param:stacklight_monitor_node03_address}
- oss_webhook_app_id: '24'
- oss_pushkin_email_sender_password: password
- oss_pushkin_smtp_port: '587'
- oss_webhook_login_id: '13'
- platform: openstack_enabled
- public_host: ${_param:openstack_proxy_address}
- publication_method: email
- reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
- backup_private_key: |
- -----BEGIN RSA PRIVATE KEY-----
- MIIEowIBAAKCAQEAxL6/rVgCetsETpZaUmXmkj8cZ1WN0eubH1FvMDOi/La9ZJyT
- k0C6AYpJnIyEm93pMj5cLm08qRqMW+2pdOhYjcH69yg5MrX5SkRk8jCmIHIYoIbh
- Qnwbnj3dd3I39ZdfU2FO7u2vlbglVou6ZoQxlJDItuLNtzq6EG+w9eF19e7+OsC6
- 6iUItp618zfw1l3J/8nKvCGe2RYDf7mJW6XwCl/DwryJmwwzvPgYJ3QMuDD8/HFj
- lrJ3xjFTXj4b4Ws1XIoy78fFbtiLr4OwqCYkho03u2E5rOOP1qZxZB63sivHMLMO
- MM5bOAQKbulFNoyALADGYfc7sf0bZ4u9XXDXxQIDAQABAoIBAQCfmc2MJRT97KW1
- yqpCpX9BrAiymuiNHf+cjEcSZxEUyHkjIRFmJt+9WB0W7ba1anM92vCUiPDojSzH
- dig9Oi578JxR20NrK8uqv4jUHzrknynzLveVI3CUEcOSnglfJQijbxDFKfOCFPvV
- FUyE1UATMNBh6+LNfMprgu+exuMWOPnDyUiYQ+WZ0JfuZY8fuaZte4woJJOb9LUu
- 5rsMG/smIzjpgZ0Z9ZVDMurfq565qhpaXRAqKeIuyht8pacTo31iMQdHB78AvY/3
- g0z21Gk8k3z0Kr/YFKr2r4FmXY5m/gAUvZly2ZrVQM5XsbTVCzq/JpI5fssNvSbU
- AKmXzf4RAoGBAOO3d4/cstxERzW6hyOTjZIN1ppR52CsnZTsVPbfd0pCtmzmVZce
- CtHKdcXSbTwZvvkK09QSWAp3MoSpd0gIOiLU8Wx/R/RIZsu9BlhTS3r3EQLnk72d
- H/1TTA+j4T/LIYLSojQ1RxvIrHetAD44j732aTwKAHj/SybEAVqNkOB/AoGBAN0u
- gLcrgqIHGrk4VjWSvlCGymfF40equcx+ud7XhfZDGETUOSahW4dPZ52cjPAkrCBQ
- MMfcDwSVGsOAjd+mNt11BHUKobnhXwFaWWuyqyn9NmWFbjMbICVh7E3Of5aVN38o
- lrmo/7LuKMVG7XRwphCv5NkaJmQG4njDyUQWlaW7AoGADCd8wDb9bPhP/LQqBmIX
- ylXmwHHisaxE9O/wUQT4bwREjGd25gv6c9wkkRx8LBsLsGs9hzI7dMOL9Ly+2x9l
- SvqmsC3S/1zl77X1Ir2/Z57MT6Vgo1xBmtnZU3Rhz2/eKAdqFPNLClaZrgGT475N
- HcyLLWMzR0IJFtabY+Puea0CgYA8Zb5wRkldxWLewSuJZZDinGwY+kieAVjLJq/K
- 0j+ah6fQ48LXcah0wpIgz+cMjHcUO9GWQdk3/x9X03rqX5EL2DBnZYfUIl63F9zj
- M97ZkHOSNWVqPzX//0Vv2butewG0j3jZKfTo/2/SrxOYgEpYtC9huWpSVi7xm0US
- erhSkQKBgFIf9JEsfgE57ANhvITZ3ZI0uZXNxZkXQaVg8jvScDi79IIhy9iPzhKC
- aIIQoDNIlWv1ftCRZ5AlBvVXgvQ/QNrwy48JiQTzWZlb9Ezg8w+olQmSbG6fq7Y+
- 7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
- -----END RSA PRIVATE KEY-----
- backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
- salt_api_password: H0rTPdmktZ8RI7T7y6fjqY0uEbbs7Kwi
- salt_api_password_hash: $6$lfbIFtMZ$.nTbTDMzs1iYv0WqkZHia8H8Fma963Nv3qyyz1x68jQh0YXK9i907B/hvoG4QHMvfolE7V7vQnFClJ1mVA3Yb.
- salt_master_address: 172.16.10.90
- salt_master_hostname: cfg01
- salt_master_management_address: 192.168.10.90
- shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/salt-models/reclass-system.git
- fluentd_enabled: 'True'
- stacklight_enabled: 'True'
- stacklight_log_address: 172.16.10.70
- stacklight_log_hostname: mon
- stacklight_log_node01_address: 172.16.10.107
- stacklight_log_node01_hostname: mon01
- stacklight_log_node02_address: 172.16.10.108
- stacklight_log_node02_hostname: mon02
- stacklight_log_node03_address: 172.16.10.109
- stacklight_log_node03_hostname: mon03
- stacklight_monitor_address: 172.16.10.70
- stacklight_monitor_hostname: mon
- stacklight_monitor_node01_address: 172.16.10.107
- stacklight_monitor_node01_hostname: mon01
- stacklight_monitor_node02_address: 172.16.10.108
- stacklight_monitor_node02_hostname: mon02
- stacklight_monitor_node03_address: 172.16.10.109
- stacklight_monitor_node03_hostname: mon03
- stacklight_notification_address: alerts@localhost
- stacklight_notification_smtp_host: 127.0.0.1
- stacklight_telemetry_address: 172.16.10.70
- stacklight_telemetry_hostname: mon
- stacklight_telemetry_node01_address: 172.16.10.107
- stacklight_telemetry_node01_hostname: mon01
- stacklight_telemetry_node02_address: 172.16.10.108
- stacklight_telemetry_node02_hostname: mon02
- stacklight_telemetry_node03_address: 172.16.10.109
- stacklight_telemetry_node03_hostname: mon03
- stacklight_version: '2'
- static_ips_on_deploy_network_enabled: 'False'
- tenant_network_gateway: 10.1.0.1
- tenant_network_netmask: 255.255.255.0
- tenant_network_subnet: 10.1.0.0/24
- tenant_vlan: '20'
- upstream_proxy_enabled: 'False'
- use_default_network_scheme: 'False'
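Values of the form ${_param:...} in this context (oss_node03_address and public_host above) are reclass parameter interpolations: cookiecutter passes them through as literal strings, and they are only resolved when the pillar is rendered against the model's _param dictionary. A minimal sketch, assuming standard reclass behaviour:

    parameters:
      _param:
        stacklight_monitor_node03_address: 172.16.10.109
      oss:
        node03_address: ${_param:stacklight_monitor_node03_address}  # resolves to 172.16.10.109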
diff --git a/tcp_tests/templates/cookied-mcp-newton-dvr/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-newton-dvr/_context-environment.yaml
deleted file mode 100644
index 6d958a6..0000000
--- a/tcp_tests/templates/cookied-mcp-newton-dvr/_context-environment.yaml
+++ /dev/null
@@ -1,182 +0,0 @@
-nodes:
- cfg01.mcp11-ovs-dpdk.local:
- reclass_storage_name: infra_config_node01
- roles:
- - infra_config
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl01.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_control_node01
- roles:
- - infra_kvm
- - openstack_control_leader
- - openstack_database_leader
- - openstack_message_queue
- - features_designate_pool_manager_database
- - features_designate_pool_manager
- - features_designate_pool_manager_keystone
- - features_lvm_backend_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl02.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_control_node02
- roles:
- - infra_kvm
- - openstack_control
- - openstack_database
- - openstack_message_queue
- - features_designate_pool_manager_database
- - features_designate_pool_manager
- - features_lvm_backend_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl03.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_control_node03
- roles:
- - infra_kvm
- - openstack_control
- - openstack_database
- - openstack_message_queue
- - features_designate_pool_manager_database
- - features_designate_pool_manager
- - features_lvm_backend_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- prx01.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_proxy_node01
- roles:
- - openstack_proxy
- - features_designate_pool_manager_proxy
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mon01.mcp11-ovs-dpdk.local:
- reclass_storage_name: stacklight_server_node01
- roles:
- - stacklightv2_server_leader
- - stacklight_telemetry_leader
- - stacklight_log_leader_v2
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mon02.mcp11-ovs-dpdk.local:
- reclass_storage_name: stacklight_server_node02
- roles:
- - stacklightv2_server
- - stacklight_telemetry
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mon03.mcp11-ovs-dpdk.local:
- reclass_storage_name: stacklight_server_node03
- roles:
- - stacklightv2_server
- - stacklight_telemetry
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
-  # Generator-based compute nodes, for compatibility only
- cmp<<count>>.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_compute_rack01
- roles:
- - openstack_compute
- - features_lvm_backend_volume_vdb
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- ens5:
- role: bond0_ab_ovs_vxlan_mesh
- ens6:
- role: bond1_ab_ovs_floating
-
- gtw01.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_gateway_node01
- roles:
- - openstack_gateway
- - linux_system_codename_xenial
- classes:
- - system.linux.system.repo.mcp.apt_mirantis.docker
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- ens5:
- role: bond0_ab_ovs_vxlan_mesh
- ens6:
- role: bond1_ab_ovs_floating
-
- dns01.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_dns_node01
- roles:
- - features_designate_pool_manager_dns
- - linux_system_codename_xenial
- classes:
- - system.linux.system.repo.mcp.extra
- - system.linux.system.repo.mcp.apt_mirantis.openstack
- - system.linux.system.repo.mcp.apt_mirantis.ubuntu
- - system.linux.system.repo.mcp.apt_mirantis.saltstack
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- single_address: ${_param:openstack_dns_node01_address}
-
- dns02.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_dns_node02
- roles:
- - features_designate_pool_manager_dns
- - linux_system_codename_xenial
- classes:
- - system.linux.system.repo.mcp.extra
- - system.linux.system.repo.mcp.apt_mirantis.openstack
- - system.linux.system.repo.mcp.apt_mirantis.ubuntu
- - system.linux.system.repo.mcp.apt_mirantis.saltstack
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- single_address: ${_param:openstack_dns_node02_address}
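Each entry in this environment context ties a hostname to a reclass_storage_name, a list of roles that select the reclass classes mixed into the node, and per-interface roles consumed by the networking states. A minimal sketch with a hypothetical node:

    nodes:
      example01.mcp11-ovs-dpdk.local:
        reclass_storage_name: openstack_example_node01
        roles:
          - linux_system_codename_xenial   # each role maps to a set of reclass classes
        interfaces:
          ens3:
            role: single_dhcp              # interface roles drive the network configuration states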
diff --git a/tcp_tests/templates/cookied-mcp-newton-dvr/core.yaml b/tcp_tests/templates/cookied-mcp-newton-dvr/core.yaml
deleted file mode 100644
index edb5059..0000000
--- a/tcp_tests/templates/cookied-mcp-newton-dvr/core.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
-{% from 'cookied-mcp-newton-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-{% import 'shared-core.yaml' as SHARED_CORE with context %}
-
-{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
-{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
-{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
-{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
-{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
-{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
-{{ SHARED_CORE.MACRO_CHECK_VIP() }}
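core.yaml is pure orchestration: it imports shared-core.yaml as a Jinja namespace and expands one macro per core service, each macro emitting the deployment steps for that service. The pattern, reduced to a sketch with a hypothetical macro:

    {% import 'shared-core.yaml' as SHARED_CORE with context %}
    {# in shared-core.yaml, a macro would look like:
       {% macro MACRO_INSTALL_EXAMPLE() %}
       - description: Install example service
         cmd: salt -C 'I@example:server' state.sls example
         node_name: {{ HOSTNAME_CFG01 }}
         retry: {count: 1, delay: 5}
         skip_fail: false
       {% endmacro %}
    #}
    {{ SHARED_CORE.MACRO_INSTALL_EXAMPLE() }}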
diff --git a/tcp_tests/templates/cookied-mcp-newton-dvr/openstack.yaml b/tcp_tests/templates/cookied-mcp-newton-dvr/openstack.yaml
deleted file mode 100644
index df28c5a..0000000
--- a/tcp_tests/templates/cookied-mcp-newton-dvr/openstack.yaml
+++ /dev/null
@@ -1,188 +0,0 @@
-{% from 'cookied-mcp-newton-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-mcp-newton-dvr/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'cookied-mcp-newton-dvr/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'cookied-mcp-newton-dvr/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'cookied-mcp-newton-dvr/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME') %}
-{% set OVERRIDE_POLICY = os_env('OVERRIDE_POLICY', '') %}
-
-{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-# Install OpenStack control services
-
-{%- if OVERRIDE_POLICY != '' %}
-- description: Upload policy override
- upload:
- local_path: {{ config.salt_deploy.templates_dir }}{{ LAB_CONFIG_NAME }}/
- local_filename: overrides-policy.yml
- remote_path: /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/
- node_name: {{ HOSTNAME_CFG01 }}
-
-- description: Create custom cluster control class
- cmd: echo -e "classes:\n- cluster.{{ LAB_CONFIG_NAME }}.openstack.control_orig\n$(cat /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml)" > /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml
- node_name: {{ HOSTNAME_CFG01 }}
-
-- description: Rename control classes
- cmd: mv /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control_orig.yml &&
- ln -s /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml &&
- salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all &&
- salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
- node_name: {{ HOSTNAME_CFG01 }}
-{%- endif %}
-
-# Deploy nginx before openstack services (PROD-22740)
-- description: Deploy nginx proxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
-
-# Install Designate backend
-- description: Install powerdns
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@powerdns:server' state.sls powerdns.server
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_DESIGNATE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HORIZON() }}
-
-# Install compute node
-
-- description: Apply formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Re-apply (as in the docs) formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check IP on computes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
- 'ip a'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 10, delay: 30}
- skip_fail: false
-
-
-# Upload cirros image
-
-- description: Upload cirros image on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- 'wget http://images.mirantis.com.s3.amazonaws.com/cirros-x64-20170828.qcow2'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Register image in glance
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; glance --timeout 120 image-create --name cirros --visibility public --disk-format qcow2 --container-format bare --progress < /root/cirros-x64-20170828.qcow2'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create net04_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create router
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-create net04_router01'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set gateway
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Add interface
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Allow all tcp
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; nova secgroup-add-rule default tcp 1 65535 0.0.0.0/0'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Allow all icmp
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Sync time on all nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
- 'service ntp stop; ntpd -gq; service ntp start'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-{{ SHARED.INSTALL_DOCKER_ON_GTW() }}
-
-- description: Create rc file on cfg01
- cmd: scp ctl01:/root/keystonercv3 /root
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Copy rc file
- cmd: scp /root/keystonercv3 gtw01:/root
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
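Every entry in openstack.yaml (and in the other scenario files) follows the same step schema: a description, a cmd executed from node_name, a retry policy, and a skip_fail flag for best-effort steps such as the nginx deployment above. A minimal step with hypothetical values:

    - description: Example step
      cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' test.ping
      node_name: {{ HOSTNAME_CFG01 }}
      retry: {count: 3, delay: 10}   # re-run up to 3 times, 10 seconds apart
      skip_fail: false               # abort the scenario if all retries fail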
diff --git a/tcp_tests/templates/cookied-mcp-newton-dvr/overrides-policy.yml b/tcp_tests/templates/cookied-mcp-newton-dvr/overrides-policy.yml
deleted file mode 100644
index 1f35a6b..0000000
--- a/tcp_tests/templates/cookied-mcp-newton-dvr/overrides-policy.yml
+++ /dev/null
@@ -1,40 +0,0 @@
-parameters:
- nova:
- controller:
- policy:
- context_is_admin: 'role:admin or role:administrator'
- 'compute:create': 'rule:admin_or_owner'
- 'compute:create:attach_network':
- cinder:
- controller:
- policy:
- 'volume:delete': 'rule:admin_or_owner'
- 'volume:extend':
- neutron:
- server:
- policy:
- create_subnet: 'rule:admin_or_network_owner'
- 'get_network:queue_id': 'rule:admin_only'
- 'create_network:shared':
- glance:
- server:
- policy:
- publicize_image: "role:admin"
- add_member:
- keystone:
- server:
- policy:
- admin_or_token_subject: 'rule:admin_required or rule:token_subject'
- heat:
- server:
- policy:
- context_is_admin: 'role:admin and is_admin_project:True'
- deny_stack_user: 'not role:heat_stack_user'
- deny_everybody: '!'
- 'cloudformation:ValidateTemplate': 'rule:deny_everybody'
- 'cloudformation:DescribeStackResources':
- ceilometer:
- server:
- policy:
- segregation: 'rule:context_is_admin'
- 'telemetry:get_resource':
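Each service block in overrides-policy.yml is merged over the formula's default policy: keys with a value replace the corresponding rule, and keys deliberately left empty ('volume:extend': above) appear intended to blank the rule out; that reading of the merge semantics is an assumption. A sketch of one override pair:

    parameters:
      cinder:
        controller:
          policy:
            'volume:delete': 'rule:admin_or_owner'   # rule replaced
            'volume:extend':                         # rule emptied (assumed semantics)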
diff --git a/tcp_tests/templates/cookied-mcp-newton-dvr/salt.yaml b/tcp_tests/templates/cookied-mcp-newton-dvr/salt.yaml
deleted file mode 100644
index 52ec2f4..0000000
--- a/tcp_tests/templates/cookied-mcp-newton-dvr/salt.yaml
+++ /dev/null
@@ -1,59 +0,0 @@
-{% from 'cookied-mcp-newton-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-mcp-newton-dvr/underlay.yaml' import HOSTNAME_CMP01 with context %}
-{% from 'cookied-mcp-newton-dvr/underlay.yaml' import HOSTNAME_CMP02 with context %}
-{% from 'cookied-mcp-newton-dvr/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'cookied-mcp-newton-dvr/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'cookied-mcp-newton-dvr/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-cookied-lab') %}
-# See shared-salt.yaml for other salt model repository parameters
-{% set OVERRIDES = os_env('OVERRIDES', 'override_example: true') %}
-{% set OVERRIDES_FILENAME = os_env('OVERRIDES_FILENAME', '/srv/salt/reclass/classes/environment/cookied-mcp-newton-dvr/overrides.yml') %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-{%- if OVERRIDES != '' %}
-{%- for param in OVERRIDES.splitlines() %}
-{%- set key, value = param.replace(' ','').split(':', 1) %}
-- description: Override cluster parameters
- cmd: |
- salt-call reclass.cluster_meta_set name='{{ key }}' value='{{ value }}' file_name='{{OVERRIDES_FILENAME}}'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 1}
- skip_fail: false
-{%- endfor %}
-
-- description: Refresh pillar
- cmd: salt '*' saltutil.refresh_pillar
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 1}
- skip_fail: false
-{%- endif %}
-
-{{ SHARED.ADJUST_SL_OPTS(OVERRIDES_FILENAME='/srv/salt/reclass/classes/cluster/' + SHARED.CLUSTER_NAME + '/stacklight/server.yml') }}
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
-
-# WORKAROUND PROD-21071
-- description: Set correct pin for openstack repository
- cmd: |
- salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run "sed -i -e 's/Pin: release l=newton/Pin: release l=xenial\/openstack\/newton testing/g' /etc/apt/preferences.d/mirantis_openstack"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
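The OVERRIDES loop above splits each 'key: value' line of the environment variable and renders one reclass.cluster_meta_set step per pair. With the default OVERRIDES='override_example: true', the rendered step would come out roughly as:

    - description: Override cluster parameters
      cmd: |
        salt-call reclass.cluster_meta_set name='override_example' value='true' file_name='/srv/salt/reclass/classes/environment/cookied-mcp-newton-dvr/overrides.yml'
      node_name: cfg01.cookied-mcp-newton-dvr.local
      retry: {count: 1, delay: 1}
      skip_fail: false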
diff --git a/tcp_tests/templates/cookied-mcp-newton-dvr/sl.yaml b/tcp_tests/templates/cookied-mcp-newton-dvr/sl.yaml
deleted file mode 100644
index 807e362..0000000
--- a/tcp_tests/templates/cookied-mcp-newton-dvr/sl.yaml
+++ /dev/null
@@ -1,177 +0,0 @@
-{% from 'cookied-mcp-newton-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
-# Install docker swarm
-- description: Configure docker service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install docker swarm on master node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Send grains to the swarm slave nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' mine.update
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Refresh modules
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Re-run swarm on slave nodes for proper token population
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure slave nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: List registered Docker swarm nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install keepalived on mon nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'mon*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check the VIP on mon nodes
- cmd: |
- SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
- echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-# Install StackLight v2 (slv2) infra
-- description: Install telegraf
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Configure Prometheus exporters, if pillar 'prometheus:exporters' exists on any server
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure collector
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:log_collector' state.sls heka.log_collector
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install elasticsearch server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install kibana server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install elasticsearch client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Install kibana client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:client' state.sls kibana.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check InfluxDB
- cmd: |
- INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
- if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-# Collect grains needed to configure the services
-
-- description: Get grains
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Sync modules
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 15}
- skip_fail: false
-
-# Configure the services running in Docker Swarm
-- description: Install Prometheus stack and remote collector
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Run docker state
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: List Docker containers (docker ps)
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure Grafana dashboards and datasources
- cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Run salt minion to create cert files
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-{{ SHARED_SL_TESTS.MACRO_CLONE_SL_TESTS() }}
-{{ SHARED_SL_TESTS.MACRO_CONFIGURE_TESTS() }}
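The -C 'I@docker:swarm:role:master' targets used throughout sl.yaml are Salt compound matchers: I@ selects minions whose pillar contains the given key path, so each step runs only on the nodes the model actually assigns that role. The same matcher in a standalone sketch step:

    - description: Ping only swarm master nodes (compound pillar match)
      cmd: salt -C 'I@docker:swarm:role:master' test.ping
      node_name: {{ HOSTNAME_CFG01 }}
      retry: {count: 1, delay: 1}
      skip_fail: true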
diff --git a/tcp_tests/templates/cookied-mcp-newton-dvr/underlay--meta-data.yaml b/tcp_tests/templates/cookied-mcp-newton-dvr/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/cookied-mcp-newton-dvr/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
- instance-id: iid-local1
- hostname: {hostname}
- local-hostname: {hostname}
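The {hostname} placeholders in the meta-data template are filled in per node before the cloud-init ISO is built; the single-brace syntax suggests plain string formatting rather than Jinja, which is an assumption. Rendered for the cfg01 node it would read roughly:

    | # All the data below will be stored as a string object
      instance-id: iid-local1
      hostname: cfg01.cookied-mcp-newton-dvr.local
      local-hostname: cfg01.cookied-mcp-newton-dvr.local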
diff --git a/tcp_tests/templates/cookied-mcp-newton-dvr/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-newton-dvr/underlay--user-data-cfg01.yaml
deleted file mode 100644
index d75dab1..0000000
--- a/tcp_tests/templates/cookied-mcp-newton-dvr/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,77 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifdown ens3
- - sudo ip r d default || true # remove existing default route to get it from dhcp
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
-
- # Enable grub menu using updated config below
- - update-grub
-
- - mkdir -p /srv/salt/reclass/nodes
- - systemctl enable salt-master
- - systemctl enable salt-minion
- - systemctl start salt-master
- - systemctl start salt-minion
- - salt-call -l info --timeout=120 test.ping
-
- write_files:
- - path: /etc/default/grub.d/97-enable-grub-menu.cfg
- content: |
- GRUB_RECORDFAIL_TIMEOUT=30
- GRUB_TIMEOUT=3
- GRUB_TIMEOUT_STYLE=menu
-
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
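Note the bootcmd/runcmd split in the user-data above: cloud-init executes bootcmd early on every boot (here, re-enabling root SSH before anything connects), while runcmd runs once, on first boot only (swap creation, salt bootstrap). A minimal cloud-config showing the same split:

    #cloud-config
    bootcmd:
      # runs on every boot, early in the boot sequence
      - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
    runcmd:
      # runs once, on first boot only
      - fallocate -l 4G /swapfile && chmod 600 /swapfile && mkswap /swapfile && swapon /swapfile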
diff --git a/tcp_tests/templates/cookied-mcp-newton-dvr/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-mcp-newton-dvr/underlay--user-data1604.yaml
deleted file mode 100644
index 3fbb777..0000000
--- a/tcp_tests/templates/cookied-mcp-newton-dvr/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,50 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
diff --git a/tcp_tests/templates/cookied-mcp-newton-dvr/underlay.yaml b/tcp_tests/templates/cookied-mcp-newton-dvr/underlay.yaml
deleted file mode 100644
index 7d6147d..0000000
--- a/tcp_tests/templates/cookied-mcp-newton-dvr/underlay.yaml
+++ /dev/null
@@ -1,575 +0,0 @@
-# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
-{% import 'cookied-mcp-newton-dvr/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'cookied-mcp-newton-dvr/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'cookied-mcp-newton-dvr/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-mcp-newton-dvr') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp001.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp002.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_DNS01 = os_env('HOSTNAME_DNS01', 'dns01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_DNS02 = os_env('HOSTNAME_DNS02', 'dns02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
-
-template:
- devops_settings:
- env_name: {{ os_env('ENV_NAME', 'cookied-mcp-newton-dvr_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
- address_pools:
- private-pool01:
- net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_DNS01 }}: +111
- default_{{ HOSTNAME_DNS02 }}: +112
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+90, -10]
-
- admin-pool01:
- net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +90
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_DNS01 }}: +111
- default_{{ HOSTNAME_DNS02 }}: +112
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+90, -10]
-
- tenant-pool01:
- net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_DNS01 }}: +111
- default_{{ HOSTNAME_DNS02 }}: +112
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+10, -10]
-
- external-pool01:
- net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_DNS01 }}: +111
- default_{{ HOSTNAME_DNS02 }}: +112
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+10, -10]
-
-
- groups:
- - name: default
- driver:
- name: devops.driver.libvirt
- params:
- connection_string: !os_env CONNECTION_STRING, qemu:///system
- storage_pool_name: !os_env STORAGE_POOL_NAME, default
- stp: False
- hpet: False
- enable_acpi: true
- use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
- use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
-
- network_pools:
- admin: admin-pool01
- private: private-pool01
- tenant: tenant-pool01
- external: external-pool01
-
- l2_network_devices:
- private:
- address_pool: private-pool01
- dhcp: false
- forward:
- mode: route
-
- admin:
- address_pool: admin-pool01
- dhcp: true
- forward:
- mode: nat
-
- tenant:
- address_pool: tenant-pool01
- dhcp: false
-
- external:
- address_pool: external-pool01
- dhcp: true
- forward:
- mode: route
-
-
- group_volumes:
-    - name: cloudimage1604  # This name is used as the 'backing_store' option for node volumes.
- source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- format: qcow2
- - name: cfg01_day01_image # Pre-configured day01 image
- source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
- format: qcow2
-    - name: mcp_ubuntu_1604_image  # Pre-configured image for VCP nodes, initially based on the kvm nodes.
-      # http://images.mirantis.com/ubuntu-16-04-x64-latest.qcow2 (preferred)
- # or
- # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- source_image: {{ os_env('MCP_IMAGE_PATH1604', os_env('IMAGE_PATH1604')) }}
- format: qcow2
-
- nodes:
- - name: {{ HOSTNAME_CFG01 }}
- role: salt_master
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cfg01_day01_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                   # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_cfg01
-
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config:
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                   # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: &interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config: &network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                   # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CTL03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                   # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 6144
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                   # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 6144
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                   # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 6144
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                   # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_PRX01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                   # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
-
- - name: {{ HOSTNAME_CMP01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                   # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
-
- interfaces: &all_interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- - label: ens5
- l2_network_device: tenant
- interface_model: *interface_model
- - label: ens6
- l2_network_device: external
- interface_model: *interface_model
- network_config: &all_network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
- ens5:
- networks:
- - tenant
- ens6:
- networks:
- - external
-
- - name: {{ HOSTNAME_CMP02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                   # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_GTW01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                   # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_DNS01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                   # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_DNS02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                   # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
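In the address_pools above, the ip_reserved offsets are relative to the base of the allocated network: +1 is the first host address (gateway and l2_network_device), +101 pins ctl01, and ip_ranges dhcp: [+90, -10] spans from base+90 up to ten addresses below the end of the network; the exact range semantics are an assumption about the devops driver. How the private pool would resolve if the default 10.60.0.0/16 yields the slice 10.60.0.0/24:

    # private-pool01 carved as 10.60.0.0/24
    # gateway / l2_network_device: 10.60.0.1                 (+1)
    # default ctl01 address:       10.60.0.101               (+101)
    # dhcp range:                  10.60.0.90 - 10.60.0.245  ([+90, -10])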
diff --git a/tcp_tests/templates/cookied-mcp-newton-ovs/_context-cookiecutter-mcp-newton-ovs.yaml b/tcp_tests/templates/cookied-mcp-newton-ovs/_context-cookiecutter-mcp-newton-ovs.yaml
deleted file mode 100644
index 8049430..0000000
--- a/tcp_tests/templates/cookied-mcp-newton-ovs/_context-cookiecutter-mcp-newton-ovs.yaml
+++ /dev/null
@@ -1,188 +0,0 @@
-default_context:
- bmk_enabled: 'False'
- ceph_enabled: 'False'
- cicd_enabled: 'False'
- cluster_domain: cookied-mcp-newton-ovs.local
- cluster_name: cookied-mcp-newton-ovs
- compute_bond_mode: active-backup
- compute_primary_first_nic: eth1
- compute_primary_second_nic: eth2
- context_seed: U1hx5V31VJfFFBu8fCsk9ebDN2TwuBABTIcptYQ8tmFSlhSxHIkKnJnDsnckgKnH
- control_network_netmask: 255.255.255.0
- control_network_subnet: 172.16.10.0/24
- control_vlan: '10'
- cookiecutter_template_branch: master
- cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/mk/cookiecutter-templates.git
- deploy_network_gateway: 192.168.10.1
- deploy_network_netmask: 255.255.255.0
- deploy_network_subnet: 192.168.10.0/24
- deployment_type: physical
- dns_server01: 172.18.176.6
- dns_server02: 172.18.208.44
- email_address: ddmitriev@mirantis.com
- gateway_primary_first_nic: eth1
- gateway_primary_second_nic: eth2
- infra_bond_mode: active-backup
- infra_deploy_nic: eth0
- infra_kvm01_control_address: 172.16.10.101
- infra_kvm01_deploy_address: 192.168.10.101
- infra_kvm01_hostname: kvm01
- infra_kvm02_control_address: 172.16.10.102
- infra_kvm02_deploy_address: 192.168.10.102
- infra_kvm02_hostname: kvm02
- infra_kvm03_control_address: 172.16.10.103
- infra_kvm03_deploy_address: 192.168.10.103
- infra_kvm03_hostname: kvm03
- infra_kvm_vip_address: 172.16.10.100
- infra_primary_first_nic: eth1
- infra_primary_second_nic: eth2
- kubernetes_enabled: 'False'
- local_repositories: 'False'
- maas_deploy_address: 192.168.10.90
- maas_hostname: cfg01
- mcp_version: stable
- offline_deployment: 'False'
- opencontrail_enabled: 'False'
- openstack_benchmark_node01_address: 172.16.10.95
- openstack_benchmark_node01_hostname: bmk01
- openstack_cluster_size: compact
- openstack_compute_count: '2'
- openstack_compute_rack01_hostname: cmp
- openstack_compute_rack01_single_subnet: 172.16.10
- openstack_compute_rack01_tenant_subnet: 10.1.0
- openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
- openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
- openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
- openstack_control_address: 172.16.10.100
- openstack_control_hostname: ctl
- openstack_control_node01_address: 172.16.10.101
- openstack_control_node01_hostname: ctl01
- openstack_control_node02_address: 172.16.10.102
- openstack_control_node02_hostname: ctl02
- openstack_control_node03_address: 172.16.10.103
- openstack_control_node03_hostname: ctl03
- openstack_database_address: 172.16.10.100
- openstack_database_hostname: ctl
- openstack_database_node01_address: 172.16.10.101
- openstack_database_node01_hostname: ctl01
- openstack_database_node02_address: 172.16.10.102
- openstack_database_node02_hostname: ctl02
- openstack_database_node03_address: 172.16.10.103
- openstack_database_node03_hostname: ctl03
- openstack_enabled: 'True'
- openstack_gateway_node01_address: 172.16.10.110
- openstack_gateway_node01_hostname: gtw01
- openstack_gateway_node01_tenant_address: 10.1.0.6
- openstack_gateway_node02_address: 172.16.10.111
- openstack_gateway_node02_hostname: gtw02
- openstack_gateway_node02_tenant_address: 10.1.0.7
- openstack_gateway_node03_address: 172.16.10.112
- openstack_gateway_node03_hostname: gtw03
- openstack_gateway_node03_tenant_address: 10.1.0.8
- openstack_message_queue_address: 172.16.10.100
- openstack_message_queue_hostname: ctl
- openstack_message_queue_node01_address: 172.16.10.101
- openstack_message_queue_node01_hostname: ctl01
- openstack_message_queue_node02_address: 172.16.10.102
- openstack_message_queue_node02_hostname: ctl02
- openstack_message_queue_node03_address: 172.16.10.103
- openstack_message_queue_node03_hostname: ctl03
- openstack_network_engine: ovs
- openstack_neutron_qos: 'False'
- openstack_neutron_vlan_aware_vms: 'False'
- openstack_nfv_dpdk_enabled: 'False'
- openstack_nfv_sriov_enabled: 'False'
- openstack_nova_compute_nfv_req_enabled: 'False'
- openstack_ovs_dvr_enabled: 'False'
- openstack_ovs_encapsulation_type: vxlan
- openstack_proxy_address: 172.16.10.80
- openstack_proxy_hostname: prx
- openstack_proxy_node01_address: 172.16.10.121
- openstack_proxy_node01_hostname: prx01
- openstack_proxy_node02_address: 172.16.10.122
- openstack_proxy_node02_hostname: prx02
- openstack_upgrade_node01_address: 172.16.10.19
- openstack_version: newton
- oss_enabled: 'False'
- oss_node03_address: ${_param:stacklight_monitor_node03_address}
- oss_webhook_app_id: '24'
- oss_pushkin_email_sender_password: password
- oss_pushkin_smtp_port: '587'
- oss_webhook_login_id: '13'
- platform: openstack_enabled
- public_host: ${_param:openstack_proxy_address}
- publication_method: email
- reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
- backup_private_key: |
- -----BEGIN RSA PRIVATE KEY-----
- MIIEowIBAAKCAQEAxL6/rVgCetsETpZaUmXmkj8cZ1WN0eubH1FvMDOi/La9ZJyT
- k0C6AYpJnIyEm93pMj5cLm08qRqMW+2pdOhYjcH69yg5MrX5SkRk8jCmIHIYoIbh
- Qnwbnj3dd3I39ZdfU2FO7u2vlbglVou6ZoQxlJDItuLNtzq6EG+w9eF19e7+OsC6
- 6iUItp618zfw1l3J/8nKvCGe2RYDf7mJW6XwCl/DwryJmwwzvPgYJ3QMuDD8/HFj
- lrJ3xjFTXj4b4Ws1XIoy78fFbtiLr4OwqCYkho03u2E5rOOP1qZxZB63sivHMLMO
- MM5bOAQKbulFNoyALADGYfc7sf0bZ4u9XXDXxQIDAQABAoIBAQCfmc2MJRT97KW1
- yqpCpX9BrAiymuiNHf+cjEcSZxEUyHkjIRFmJt+9WB0W7ba1anM92vCUiPDojSzH
- dig9Oi578JxR20NrK8uqv4jUHzrknynzLveVI3CUEcOSnglfJQijbxDFKfOCFPvV
- FUyE1UATMNBh6+LNfMprgu+exuMWOPnDyUiYQ+WZ0JfuZY8fuaZte4woJJOb9LUu
- 5rsMG/smIzjpgZ0Z9ZVDMurfq565qhpaXRAqKeIuyht8pacTo31iMQdHB78AvY/3
- g0z21Gk8k3z0Kr/YFKr2r4FmXY5m/gAUvZly2ZrVQM5XsbTVCzq/JpI5fssNvSbU
- AKmXzf4RAoGBAOO3d4/cstxERzW6hyOTjZIN1ppR52CsnZTsVPbfd0pCtmzmVZce
- CtHKdcXSbTwZvvkK09QSWAp3MoSpd0gIOiLU8Wx/R/RIZsu9BlhTS3r3EQLnk72d
- H/1TTA+j4T/LIYLSojQ1RxvIrHetAD44j732aTwKAHj/SybEAVqNkOB/AoGBAN0u
- gLcrgqIHGrk4VjWSvlCGymfF40equcx+ud7XhfZDGETUOSahW4dPZ52cjPAkrCBQ
- MMfcDwSVGsOAjd+mNt11BHUKobnhXwFaWWuyqyn9NmWFbjMbICVh7E3Of5aVN38o
- lrmo/7LuKMVG7XRwphCv5NkaJmQG4njDyUQWlaW7AoGADCd8wDb9bPhP/LQqBmIX
- ylXmwHHisaxE9O/wUQT4bwREjGd25gv6c9wkkRx8LBsLsGs9hzI7dMOL9Ly+2x9l
- SvqmsC3S/1zl77X1Ir2/Z57MT6Vgo1xBmtnZU3Rhz2/eKAdqFPNLClaZrgGT475N
- HcyLLWMzR0IJFtabY+Puea0CgYA8Zb5wRkldxWLewSuJZZDinGwY+kieAVjLJq/K
- 0j+ah6fQ48LXcah0wpIgz+cMjHcUO9GWQdk3/x9X03rqX5EL2DBnZYfUIl63F9zj
- M97ZkHOSNWVqPzX//0Vv2butewG0j3jZKfTo/2/SrxOYgEpYtC9huWpSVi7xm0US
- erhSkQKBgFIf9JEsfgE57ANhvITZ3ZI0uZXNxZkXQaVg8jvScDi79IIhy9iPzhKC
- aIIQoDNIlWv1ftCRZ5AlBvVXgvQ/QNrwy48JiQTzWZlb9Ezg8w+olQmSbG6fq7Y+
- 7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
- -----END RSA PRIVATE KEY-----
- backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
- salt_api_password: PGah7Ph3IdWuMdAX3ZBLSf5BtlBG1Qhl
- salt_api_password_hash: $6$kgvztcjH$9B2950AyxRjE2Tj5QNVCnvdrgaFo/u6c59pMoQPqfxs2MTLLU7ywxPTQnDH3cNV.BBEK6FilF9SulWfIfENou0
- salt_master_address: 172.16.10.90
- salt_master_hostname: cfg01
- salt_master_management_address: 192.168.10.90
- shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/salt-models/reclass-system.git
- fluentd_enabled: 'True'
- stacklight_enabled: 'True'
- stacklight_log_address: 172.16.10.70
- stacklight_log_hostname: mon
- stacklight_log_node01_address: 172.16.10.107
- stacklight_log_node01_hostname: mon01
- stacklight_log_node02_address: 172.16.10.108
- stacklight_log_node02_hostname: mon02
- stacklight_log_node03_address: 172.16.10.109
- stacklight_log_node03_hostname: mon03
- stacklight_monitor_address: 172.16.10.70
- stacklight_monitor_hostname: mon
- stacklight_monitor_node01_address: 172.16.10.107
- stacklight_monitor_node01_hostname: mon01
- stacklight_monitor_node02_address: 172.16.10.108
- stacklight_monitor_node02_hostname: mon02
- stacklight_monitor_node03_address: 172.16.10.109
- stacklight_monitor_node03_hostname: mon03
- stacklight_notification_address: alerts@localhost
- stacklight_notification_smtp_host: 127.0.0.1
- stacklight_telemetry_address: 172.16.10.70
- stacklight_telemetry_hostname: mon
- stacklight_telemetry_node01_address: 172.16.10.107
- stacklight_telemetry_node01_hostname: mon01
- stacklight_telemetry_node02_address: 172.16.10.108
- stacklight_telemetry_node02_hostname: mon02
- stacklight_telemetry_node03_address: 172.16.10.109
- stacklight_telemetry_node03_hostname: mon03
- stacklight_version: '2'
- static_ips_on_deploy_network_enabled: 'False'
- tenant_network_gateway: 10.1.0.1
- tenant_network_netmask: 255.255.255.0
- tenant_network_subnet: 10.1.0.0/24
- tenant_vlan: '20'
- upstream_proxy_enabled: 'False'
- use_default_network_scheme: 'False'
- rsync_fernet_rotation: 'False'
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-mcp-newton-ovs/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-newton-ovs/_context-environment.yaml
deleted file mode 100644
index 4970aec..0000000
--- a/tcp_tests/templates/cookied-mcp-newton-ovs/_context-environment.yaml
+++ /dev/null
@@ -1,150 +0,0 @@
-nodes:
- cfg01.mcp11-ovs-dpdk.local:
- reclass_storage_name: infra_config_node01
- roles:
- - infra_config
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl01.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_control_node01
- roles:
- - infra_kvm
- - openstack_control_leader
- - openstack_database_leader
- - openstack_message_queue
- - features_designate_bind9_database
- - features_designate_bind9_dns
- - features_designate_bind9
- - features_designate_bind9_keystone
- - features_lvm_backend_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl02.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_control_node02
- roles:
- - infra_kvm
- - openstack_control
- - openstack_database
- - openstack_message_queue
- - features_designate_bind9_database
- - features_designate_bind9_dns
- - features_designate_bind9
- - features_lvm_backend_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl03.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_control_node03
- roles:
- - infra_kvm
- - openstack_control
- - openstack_database
- - openstack_message_queue
- - features_designate_bind9_database
- - features_designate_bind9
- - features_lvm_backend_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- prx01.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_proxy_node01
- roles:
- - openstack_proxy
- - features_designate_bind9_proxy
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mon01.mcp11-ovs-dpdk.local:
- reclass_storage_name: stacklight_server_node01
- roles:
- - stacklightv2_server_leader
- - stacklight_telemetry_leader
- - stacklight_log_leader_v2
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mon02.mcp11-ovs-dpdk.local:
- reclass_storage_name: stacklight_server_node02
- roles:
- - stacklightv2_server
- - stacklight_telemetry
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mon03.mcp11-ovs-dpdk.local:
- reclass_storage_name: stacklight_server_node03
- roles:
- - stacklightv2_server
- - stacklight_telemetry
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- # Generator-based computes. For compatibility only
- cmp<<count>>.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_compute_rack01
- roles:
- - openstack_compute
- - features_lvm_backend_volume_vdb
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- ens5:
- role: bond0_ab_ovs_vxlan_mesh
- ens6:
- role: bond1_ab_ovs_floating
-
- gtw01.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_gateway_node01
- roles:
- - openstack_gateway
- - linux_system_codename_xenial
- classes:
- - system.linux.system.repo.mcp.apt_mirantis.docker
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- ens5:
- role: bond0_ab_ovs_vxlan_mesh
- ens6:
- role: bond1_ab_ovs_floating
diff --git a/tcp_tests/templates/cookied-mcp-newton-ovs/core.yaml b/tcp_tests/templates/cookied-mcp-newton-ovs/core.yaml
deleted file mode 100644
index 4b79fcb..0000000
--- a/tcp_tests/templates/cookied-mcp-newton-ovs/core.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
-{% from 'cookied-mcp-newton-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-{% import 'shared-core.yaml' as SHARED_CORE with context %}
-
-{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
-{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
-{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
-{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
-{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
-{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
-{{ SHARED_CORE.MACRO_CHECK_VIP() }}
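Each SHARED_CORE macro above expands into one or more steps of the same schema used throughout these templates (description/cmd/node_name/retry/skip_fail). As a sketch, MACRO_INSTALL_MEMCACHED() would render to roughly the inline step that cookied-mcp-ocata-dop-sl2/core.yaml later in this change spells out by hand:

    - description: Install memcached on all controllers
      cmd: salt --hard-crash --state-output=mixed --state-verbose=False
        -C 'I@memcached:server' state.sls memcached
      node_name: {{ HOSTNAME_CFG01 }}
      retry: {count: 1, delay: 5}
      skip_fail: false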
diff --git a/tcp_tests/templates/cookied-mcp-newton-ovs/openstack.yaml b/tcp_tests/templates/cookied-mcp-newton-ovs/openstack.yaml
deleted file mode 100644
index c10aa28..0000000
--- a/tcp_tests/templates/cookied-mcp-newton-ovs/openstack.yaml
+++ /dev/null
@@ -1,210 +0,0 @@
-{% from 'cookied-mcp-newton-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-mcp-newton-ovs/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'cookied-mcp-newton-ovs/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'cookied-mcp-newton-ovs/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'cookied-mcp-newton-ovs/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-
-{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-# Deploy nginx before openstack services (PROD-22740)
-- description: Deploy nginx proxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER() }}
-
-# {{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON() }}
-
-- description: Install neutron service on primary node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C "I@neutron:server and *01*" state.sls neutron.server
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install neutron service on other nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C "I@neutron:server" state.sls neutron.server
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install neutron on gtw node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@neutron:gateway' state.sls neutron
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-# WORKAROUND PROD-20976
-- description: WORKAROUND PROD-20976
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@neutron:server or I@neutron:gateway' cmd.run "sed -i
- 's/#min_l3_agents_per_router = 2/min_l3_agents_per_router = 1/'
- /etc/neutron/neutron.conf"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart Neutron services
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@neutron:server or I@neutron:gateway' cmd.run 'systemctl restart
- neutron*'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check neutron agent-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; neutron agent-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 20}
- skip_fail: false
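The -C flag used throughout these steps is Salt's compound matcher; I@neutron:server selects minions whose pillar defines neutron:server. A quick way to preview which minions a compound target will hit before running states against it (sketch, run from cfg01):

    salt -C 'I@neutron:server or I@neutron:gateway' test.ping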
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
-
-# install designate backend
-- description: Install bind
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@bind:server' state.sls bind
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_DESIGNATE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HORIZON() }}
-
-# Install compute node
-
-- description: Apply formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
- description: Re-apply (as in the docs) formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check IP on computes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
- 'ip a'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 10, delay: 30}
- skip_fail: false
-
-
- # Upload cirros image
-
-- description: Upload cirros image on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- 'wget http://images.mirantis.com.s3.amazonaws.com/cirros-x64-20170828.qcow2'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Register image in glance
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; glance --timeout 120 image-create --name cirros --visibility public --disk-format qcow2 --container-format bare --progress < /root/cirros-x64-20170828.qcow2'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create net04_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create router
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-create net04_router01'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set gateway
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Add interface
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
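The neutron CLI used in the topology steps above was already deprecated in favor of the unified openstack client. For reference, the same net04 topology sketched with python-openstackclient (assuming an OSC version recent enough to cover these subcommands):

    . /root/keystonercv3
    openstack network create net04_ext --external \
      --provider-network-type flat --provider-physical-network physnet1
    openstack subnet create net04_ext__subnet --network net04_ext \
      --subnet-range {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --no-dhcp \
      --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 \
      --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1
    openstack network create net04
    openstack subnet create net04__subnet --network net04 \
      --subnet-range {{ IPV4_NET_TENANT_PREFIX }}.0/24 \
      --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240
    openstack router create net04_router01
    openstack router set net04_router01 --external-gateway net04_ext
    openstack router add subnet net04_router01 net04__subnet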
-
-- description: Allow all tcp
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; nova secgroup-add-rule default tcp 1 65535 0.0.0.0/0'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Allow all icmp
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: sync time
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
- 'service ntp stop; ntpd -gq; service ntp start'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-{{ SHARED.INSTALL_DOCKER_ON_GTW() }}
-
- description: Copy the rc file from ctl01 to cfg01
- cmd: scp ctl01:/root/keystonercv3 /root
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
- description: Copy the rc file to gtw01
- cmd: scp /root/keystonercv3 gtw01:/root
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
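A natural follow-up to the two scp steps above, if one wanted to fail fast on broken credentials, would be a hypothetical smoke check in the same step schema (openstack token issue simply requests a token with the sourced credentials):

    - description: Verify the copied credentials on gtw01 (hypothetical step)
      cmd: salt 'gtw01*' cmd.run '. /root/keystonercv3; openstack token issue'
      node_name: {{ HOSTNAME_CFG01 }}
      retry: {count: 1, delay: 5}
      skip_fail: true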
diff --git a/tcp_tests/templates/cookied-mcp-newton-ovs/salt.yaml b/tcp_tests/templates/cookied-mcp-newton-ovs/salt.yaml
deleted file mode 100644
index 89b705e..0000000
--- a/tcp_tests/templates/cookied-mcp-newton-ovs/salt.yaml
+++ /dev/null
@@ -1,60 +0,0 @@
-{% from 'cookied-mcp-newton-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-mcp-newton-ovs/underlay.yaml' import HOSTNAME_CMP01 with context %}
-{% from 'cookied-mcp-newton-ovs/underlay.yaml' import HOSTNAME_CMP02 with context %}
-{% from 'cookied-mcp-newton-ovs/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'cookied-mcp-newton-ovs/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'cookied-mcp-newton-ovs/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# See shared-salt.yaml for other salt model repository parameters
-{% set OVERRIDES = os_env('OVERRIDES', 'override_example: true') %}
-{% set OVERRIDES_FILENAME = os_env('OVERRIDES_FILENAME', '/srv/salt/reclass/classes/environment/cookied-mcp-newton-ovs/overrides.yml') %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-{%- if OVERRIDES != '' %}
-{%- for param in OVERRIDES.splitlines() %}
-{%- set key, value = param.replace(' ','').split(':', 1) %}
-- description: Override cluster parameters
- cmd: |
- salt-call reclass.cluster_meta_set name='{{ key }}' value='{{ value }}' file_name='{{OVERRIDES_FILENAME}}'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 1}
- skip_fail: false
-{%- endfor %}
-
-- description: Refresh pillar
- cmd: salt '*' saltutil.refresh_pillar
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 1}
- skip_fail: false
-{%- endif %}
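With the default OVERRIDES value of 'override_example: true', the Jinja loop above renders exactly one step; after the replace(' ','')/split(':', 1) key-value split it comes out roughly as:

    - description: Override cluster parameters
      cmd: |
        salt-call reclass.cluster_meta_set name='override_example' value='true' file_name='/srv/salt/reclass/classes/environment/cookied-mcp-newton-ovs/overrides.yml'
      node_name: cfg01.cookied-mcp-newton-ovs.local
      retry: {count: 1, delay: 1}
      skip_fail: false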
-
-{{ SHARED.ADJUST_SL_OPTS(OVERRIDES_FILENAME='/srv/salt/reclass/classes/cluster/' + SHARED.CLUSTER_NAME + '/stacklight/server.yml') }}
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
-
-
-# WORKAROUND PROD-21071
-- description: Set correct pin for openstack repository
- cmd: |
- salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run "sed -i -e 's/Pin: release l=newton/Pin: release l=xenial\/openstack\/newton testing/g' /etc/apt/preferences.d/mirantis_openstack"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
\ No newline at end of file
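The PROD-21071 sed rewrites the Pin line of the MOS apt preferences in place. Assuming a stock stanza (the Package and Pin-Priority lines here are illustrative), the effect is:

    # /etc/apt/preferences.d/mirantis_openstack, before:
    Package: *
    Pin: release l=newton
    Pin-Priority: 1100

    # after:
    Package: *
    Pin: release l=xenial/openstack/newton testing
    Pin-Priority: 1100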
diff --git a/tcp_tests/templates/cookied-mcp-newton-ovs/sl.yaml b/tcp_tests/templates/cookied-mcp-newton-ovs/sl.yaml
deleted file mode 100644
index 8ce7ea8..0000000
--- a/tcp_tests/templates/cookied-mcp-newton-ovs/sl.yaml
+++ /dev/null
@@ -1,176 +0,0 @@
-{% from 'cookied-mcp-newton-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
-
-# Install docker swarm
-- description: Configure docker service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install docker swarm on master node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Send grains to the swarm slave nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' mine.update
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Refresh modules
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
- description: Rerun swarm on slaves to ensure proper token population
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure slave nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: List registered Docker swarm nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install keepalived on mon nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'mon*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check the VIP on mon nodes
- cmd: |
- SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
- echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-# Install slv2 infra
-- description: Install telegraf
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Configure Prometheus exporters
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure collector
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:log_collector' state.sls heka.log_collector
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install elasticsearch server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install kibana server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install elasticsearch client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Install kibana client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:client' state.sls kibana.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
- description: Check influxdb
- cmd: |
- INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
- if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
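The influxdb step above is the guard pattern these templates use for optional services: it relies on salt exiting non-zero when no minion matches the target, so the state is applied only when an influxdb server actually exists. The same guard works for any optional role (hypothetical 'foo' service):

    FOO_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@foo:server' test.ping 1>/dev/null 2>&1 && echo true`;
    if [[ "$FOO_SERVICE" == "true" ]]; then
      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@foo:server' state.sls foo
    fi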
-
-# Collect grains needed to configure the services
-
-- description: Get grains
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Sync modules
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-# Configure the services running in Docker Swarm
-- description: Install prometheus alertmanager
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
- description: Run docker state
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: docker ps
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure Grafana dashboards and datasources
- cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Run salt minion to create cert files
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-{{ SHARED_SL_TESTS.MACRO_CLONE_SL_TESTS() }}
-{{ SHARED_SL_TESTS.MACRO_CONFIGURE_TESTS() }}
diff --git a/tcp_tests/templates/cookied-mcp-newton-ovs/underlay--meta-data.yaml b/tcp_tests/templates/cookied-mcp-newton-ovs/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/cookied-mcp-newton-ovs/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
- instance-id: iid-local1
- hostname: {hostname}
- local-hostname: {hostname}
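The {hostname} placeholders are str.format-style fields filled in per node by the underlay tooling before the config-drive ISO is built; for cfg01 with the default domain, the rendered meta-data would look roughly like:

    instance-id: iid-local1
    hostname: cfg01.cookied-mcp-newton-ovs.local
    local-hostname: cfg01.cookied-mcp-newton-ovs.local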
diff --git a/tcp_tests/templates/cookied-mcp-newton-ovs/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-newton-ovs/underlay--user-data-cfg01.yaml
deleted file mode 100644
index 48562ad..0000000
--- a/tcp_tests/templates/cookied-mcp-newton-ovs/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,68 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifdown ens3
- - sudo ip r d default || true # remove existing default route to get it from dhcp
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
-
- - mkdir -p /srv/salt/reclass/nodes
- - systemctl enable salt-master
- - systemctl enable salt-minion
- - systemctl start salt-master
- - systemctl start salt-minion
- - salt-call -l info --timeout=120 test.ping
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
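The runcmd block above allocates a 4G swap file and persists it via fstab. A quick post-boot sanity check (not part of the template) would be:

    swapon --show   # should list /swapfile with a 4G size
    free -h         # the Swap row should show ~4.0G total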
diff --git a/tcp_tests/templates/cookied-mcp-newton-ovs/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-mcp-newton-ovs/underlay--user-data1604.yaml
deleted file mode 100644
index 3fbb777..0000000
--- a/tcp_tests/templates/cookied-mcp-newton-ovs/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,50 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
diff --git a/tcp_tests/templates/cookied-mcp-newton-ovs/underlay.yaml b/tcp_tests/templates/cookied-mcp-newton-ovs/underlay.yaml
deleted file mode 100644
index 883c30f..0000000
--- a/tcp_tests/templates/cookied-mcp-newton-ovs/underlay.yaml
+++ /dev/null
@@ -1,512 +0,0 @@
-# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
-{% import 'cookied-mcp-newton-ovs/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'cookied-mcp-newton-ovs/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'cookied-mcp-newton-ovs/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-mcp-newton-ovs') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp001.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp002.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
-
-template:
- devops_settings:
- env_name: {{ os_env('ENV_NAME', 'cookied-mcp-newton-ovs_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
- address_pools:
- private-pool01:
- net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+90, -10]
-
- admin-pool01:
- net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +90
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+90, -10]
-
- tenant-pool01:
- net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+10, -10]
-
- external-pool01:
- net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+10, -10]
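The pool syntax above ('10.60.0.0/16:24') asks the devops driver to carve a /24 subnet out of the /16, and the +N/-N entries are offsets from the network address (negative offsets count back from the end of the subnet). Assuming the first /24 is allocated, private-pool01 resolves roughly to:

    network:                10.60.0.0/24
    gateway (+1):           10.60.0.1
    cfg01 (+100):           10.60.0.100
    ctl01..ctl03 (+101..):  10.60.0.101 - 10.60.0.103
    dhcp [+90, -10]:        10.60.0.90 up to ten addresses below the top of the subnet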
-
-
- groups:
- - name: default
- driver:
- name: devops.driver.libvirt
- params:
- connection_string: !os_env CONNECTION_STRING, qemu:///system
- storage_pool_name: !os_env STORAGE_POOL_NAME, default
- stp: False
- hpet: False
- enable_acpi: true
- use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
- use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
-
- network_pools:
- admin: admin-pool01
- private: private-pool01
- tenant: tenant-pool01
- external: external-pool01
-
- l2_network_devices:
- private:
- address_pool: private-pool01
- dhcp: false
- forward:
- mode: route
-
- admin:
- address_pool: admin-pool01
- dhcp: true
- forward:
- mode: nat
-
- tenant:
- address_pool: tenant-pool01
- dhcp: false
-
- external:
- address_pool: external-pool01
- dhcp: true
- forward:
- mode: route
-
-
- group_volumes:
- - name: cloudimage1604 # This name is used for 'backing_store' option for node volumes.
- source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- format: qcow2
- - name: cfg01_day01_image # Pre-configured day01 image
- source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
- format: qcow2
- - name: mcp_ubuntu_1604_image # Pre-configured image for VCP nodes initially based on kvm nodes.
- # http://images.mirantis.com/ubuntu-16-04-x64-latest.qcow2 (preferred)
- # or
- # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- source_image: {{ os_env('MCP_IMAGE_PATH1604', os_env('IMAGE_PATH1604')) }}
- format: qcow2
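The 'backing_store' option on node volumes below points at one of these group volumes: each node gets a thin qcow2 overlay on top of the shared base image, so the base is downloaded once and never modified. Conceptually the driver does something equivalent to (illustrative paths; newer qemu-img additionally requires -F for the backing format):

    qemu-img create -f qcow2 \
      -b /var/lib/libvirt/images/mcp_ubuntu_1604_image.qcow2 \
      ctl01-system.qcow2 150G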
-
- nodes:
- - name: {{ HOSTNAME_CFG01 }}
- role: salt_master
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cfg01_day01_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_cfg01
-
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config:
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: &interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config: &network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
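ctl01 defines the &interfaces and &network_config YAML anchors that ctl02/ctl03 and the other two-NIC nodes below reuse via *interfaces and *network_config, so the wiring is declared once. A minimal illustration of the mechanism:

    wiring: &wiring
      - label: ens3
      - label: ens4
    node_a:
      interfaces: *wiring   # alias expands to the same list
    node_b:
      interfaces: *wiring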
-
- - name: {{ HOSTNAME_CTL02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CTL03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 6144
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 6144
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 6144
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_PRX01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CMP01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
-
- interfaces: &all_interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- - label: ens5
- l2_network_device: tenant
- interface_model: *interface_model
- - label: ens6
- l2_network_device: external
- interface_model: *interface_model
- network_config: &all_network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
- ens5:
- networks:
- - tenant
- ens6:
- networks:
- - external
-
- - name: {{ HOSTNAME_CMP02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_GTW01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/core.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/core.yaml
deleted file mode 100644
index a3508a6..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/core.yaml
+++ /dev/null
@@ -1,130 +0,0 @@
-{% from 'cookied-mcp-ocata-dop-sl2/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-- description: Refresh grains
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 15}
- skip_fail: false
-
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install glusterfs
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Setup glusterfs on primary controller
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check the gluster status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
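The '-b 1' on the slave step is Salt's batch mode: the state runs on one matching minion at a time, so galera slaves join the cluster sequentially instead of racing each other while the master bootstraps. Generic form:

    # apply a state to matching minions one at a time
    salt -b 1 -C 'I@galera:slave' state.sls galera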
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install nginx on prx nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the OpenStack control VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=$(salt --out=newline_values_only "ctl01*" pillar.get _param:cluster_vip_address);
- echo "_param:cluster_vip_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/openstack.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/openstack.yaml
deleted file mode 100644
index 43483ae..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/openstack.yaml
+++ /dev/null
@@ -1,338 +0,0 @@
-{% from 'cookied-mcp-ocata-dop-sl2/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-mcp-ocata-dop-sl2/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'cookied-mcp-ocata-dop-sl2/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'cookied-mcp-ocata-dop-sl2/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'cookied-mcp-ocata-dop-sl2/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-{% set PATTERN = os_env('PATTERN', 'smoke') %}
-
-# Install OpenStack control services
-
-- description: Install glance on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glance:server' state.sls glance -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install keystone service (note that different fernet keys are created on different nodes)
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' state.sls keystone.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 15}
- skip_fail: false
-
-- description: Restart apache due to PROD-10477
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl restart apache2"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 15}
- skip_fail: false
-
- description: Check apache status due to PROD-10477
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl status apache2"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 15}
- skip_fail: false
-
- description: Mount glusterfs.client volumes (requires created 'keystone' and 'glusterfs' system users)
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glance:server' state.sls glusterfs.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Update fernet keys for keystone server on the mounted glusterfs volume
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' state.sls keystone.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Populate keystone services/tenants/admins
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:client' state.sls keystone.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check keystone service-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack service list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check glance image-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; glance image-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Install nova on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nova:controller' state.sls nova -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check nova service-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; nova --debug service-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 5}
- skip_fail: false
-
-
-- description: Install cinder
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@cinder:controller' state.sls cinder -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check cinder list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; cinder list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Install neutron service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@neutron:server' state.sls neutron -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install neutron on gtw node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@neutron:gateway' state.sls neutron
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-# Install designate
-- description: Install powerdns
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'ctl*' state.sls powerdns
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install designate
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@designate:server' state.sls designate -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 10}
- skip_fail: false
-
-- description: Check neutron agent-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; neutron agent-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install heat service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@heat:server' state.sls heat -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check heat service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack orchestration resource type list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 10}
- skip_fail: false
-
-
-- description: Deploy horizon dashboard
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@horizon:server' state.sls horizon
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Deploy nginx proxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-# Install compute node
-
-- description: Apply formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Re-apply (as in the docs) formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check IP on computes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
- 'ip a'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 10, delay: 30}
- skip_fail: false
-
-
-# Upload cirros image
-
-- description: Upload cirros image on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- 'wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-i386-disk.img'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Register image in glance
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; glance --timeout 120 image-create --name cirros --visibility public --disk-format qcow2 --container-format bare --progress < /root/cirros-0.3.4-i386-disk.img'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create net04_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create router
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-create net04_router01'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set gateway
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Add interface
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-#- description: Allow all tcp
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-# '. /root/keystonercv3; nova secgroup-add-rule default tcp 1 65535 0.0.0.0/0'
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 30}
-# skip_fail: false
-
-#- description: Allow all icmp
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-# '. /root/keystonercv3; nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0'
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 30}
-# skip_fail: false
-
-- description: Sync time
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
- 'service ntp stop; ntpd -gq; service ntp start'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Temporary workaround for PROD-13167
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run
- 'apt-get install python-pymysql -y'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-# Configure cinder-volume salt-call PROD-13167
-- description: Create physical volumes on a second disk
- cmd: salt 'ctl*' cmd.run 'pvcreate -y /dev/vdb'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create volume group
- cmd: salt "ctl*" cmd.run 'vgcreate cinder-volumes /dev/vdb'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Install cinder-volume
- cmd: salt 'ctl*' cmd.run 'apt-get install cinder-volume -y'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Install crudini
- cmd: salt "ctl*" cmd.run 'apt-get install crudini -y'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: "Temporary WR set enabled_backends = lvm for cinder"
- cmd: salt 'ctl*' cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Install docker.io on gtw
- cmd: salt 'gtw01*' cmd.run 'apt-get install docker.io -y'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Enable forward policy
- cmd: salt "gtw01*" cmd.run 'iptables --policy FORWARD ACCEPT'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create rc file on cfg
- cmd: scp ctl01:/root/keystonercv3 /root
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Copy rc file
- cmd: scp /root/keystonercv3 gtw01:/root
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/oss.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/oss.yaml
deleted file mode 100644
index f64b373..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/oss.yaml
+++ /dev/null
@@ -1,326 +0,0 @@
-{% from 'cookied-mcp-ocata-dop-sl2/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-# Install OSS: Operational Support System Tools
-
-# Glusterfs
-#-----------
-
-- description: Prepare glusterfs service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@glusterfs:server:enabled:True' state.sls glusterfs.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Setup glusterfs server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@glusterfs:server:enabled:True' state.sls glusterfs.server.setup -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Setup glusterfs client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@glusterfs:client:enabled:True' state.sls glusterfs.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the gluster status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server:enabled:True' cmd.run 'gluster peer status; gluster volume status' -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-# Elasticsearch (system service)
-#-------------------------------
-- description: Setup Elasticsearch server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
-
-- description: Setup Elasticsearch client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
-
-# Setup Docker Swarm
-#-------------------
-
-- description: "Workaround: create /var/lib/jenkins to get Jenkins slaves working"
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@jenkins:client' cmd.run 'mkdir -p /var/lib/jenkins'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Prepare Docker host
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:host:enabled:True' state.sls docker.host
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Docker Swarm master
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Collect grains
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls salt.minion.grains &&
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' mine.flush &&
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' mine.update &&
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' saltutil.refresh_modules &&
- sleep 10
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 15}
- skip_fail: false
-
-- description: Install Docker Swarm on other nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Show Docker Swarm nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-# Keepalived
-#-----------
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster:enabled:True' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@haproxy:proxy:enabled:True' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check the CICD VIP
- cmd: |
- CICD_CONTROL_ADDRESS=`salt --out=newline_values_only -C 'I@haproxy:proxy and I@jenkins:client' pillar.get _param:cluster_vip_address`;
- echo "_param:cluster_vip_address (vip): ${CICD_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C "I@keepalived:cluster:instance:*:address:${CICD_CONTROL_ADDRESS}" cmd.run "ip a | grep ${CICD_CONTROL_ADDRESS}" | grep -B1 ${CICD_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-# Configure OSS services
-#-----------------------
-
-- description: Setup devops portal
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@devops_portal:config:enabled' state.sls devops_portal.config
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Setup Rundeck server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@rundeck:server' state.sls rundeck.server
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-# Deploy Docker services
-#-----------------------
-
-# Original comment from pipeline: XXX: for some weird unknown reason, refresh_pillar is required to execute here
-
-- description: "Workaround from the pipeline: XXX: for some weird unknown reason, refresh_pillar is required to execute here"
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@aptly:publisher' saltutil.refresh_pillar
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: "Workaround from the pipeline: We need /etc/aptly-publisher.yaml to be present before services are deployed. [dd: there were issues when /etc/aptly-publisher.yaml becomes a directory, so this step should be considered]"
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@aptly:publisher' state.sls aptly.publisher
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Install Docker client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 5}
- skip_fail: false
-
-- description: "Workaround from the pipeline: sync all salt objects"
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all && sleep 5
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-# Aptly
-#------
-
-#### Steps are commented due to PROD-17598
-
-#- description: "Wait for Aptly to come up in container..."
-# cmd: timeout 300 salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@aptly:server' cmd.run
-# 'export CICD_CONTROL_ADDRESS=$(salt-call --out=newline_values_only pillar.get _param:cluster_vip_address);
-# while true; do curl -sf http://${CICD_CONTROL_ADDRESS}:8084/api/version && break; sleep 2; done'
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 3, delay: 15}
-# skip_fail: false
-
-#- description: "Setup Aptly"
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@aptly:server' state.sls aptly
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 3, delay: 10}
-# skip_fail: false
-
-# OpenLDAP
-#---------
-
-- description: "Waiting for OpenLDAP to come up in container..."
- cmd: timeout 60 salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@openldap:client' cmd.run
- 'export CICD_CONTROL_ADDRESS=$(salt-call --out=newline_values_only pillar.get _param:cluster_vip_address);
- while true; do curl -sf ldap://${CICD_CONTROL_ADDRESS} && break; sleep 2; done'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 10, delay: 30}
- skip_fail: false
-
-- description: "Setup OpenLDAP"
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@openldap:client' state.sls openldap &&
- sleep 20
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
-
-# Gerrit
-#-------
-
-- description: "Waiting for Gerrit to come up in container..."
- cmd: timeout 60 salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@gerrit:client' cmd.run
- 'export CICD_CONTROL_ADDRESS=$(salt-call --out=newline_values_only pillar.get _param:cluster_vip_address);
- while true; do curl -sf http://${CICD_CONTROL_ADDRESS}:8080/config/server/version && break; sleep 2; done'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 10, delay: 30}
- skip_fail: false
-
-- description: "Setup Gerrit"
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@gerrit:client' state.sls gerrit
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
-
-# Jenkins
-#--------
-
-- description: "Waiting for Jenkins to come up in container..."
- cmd: timeout 60 salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:client:stack:jenkins' cmd.run
- 'export CICD_CONTROL_ADDRESS=$(salt-call --out=newline_values_only pillar.get _param:cluster_vip_address);
- export JENKINS_CLIENT_USER=$(salt-call --out=newline_values_only pillar.get _param:jenkins_client_user);
- export JENKINS_CLIENT_PASSWORD=$(salt-call --out=newline_values_only pillar.get _param:jenkins_client_password);
- while true; do
- curl -f -u ${JENKINS_CLIENT_USER}:${JENKINS_CLIENT_PASSWORD} http://${CICD_CONTROL_ADDRESS}:8081/api/json?pretty=true && break;
- sleep 2;
- done'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 10, delay: 30}
- skip_fail: false
-
-- description: "Setup Jenkins"
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@jenkins:client' state.sls jenkins
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 10, delay: 30}
- skip_fail: false
-
-# Postgres && Pushkin
-#--------------------
-
-- description: "Waiting for postgresql database to come up in container..."
-# cmd: timeout 300 salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@postgresql:client' cmd.run
-# 'while true; do if docker service logs postgresql_db | grep -q "ready to accept"; then break; else sleep 5; fi; done'
- cmd: timeout 300 salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:client:stack:postgresql' cmd.run
- 'while true; do if docker service logs postgresql_postgresql-db | grep -q "ready to accept"; then break; else sleep 5; fi; done'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
-
-- description: ("Create PostgreSQL databases, it fails at first run because of known deadlock:\n"
- "1. State postgresql.client cannot insert values into 'pushkin' database because it is created empty,\n"
- "2. Container with Pushkin cannot start and fill the database scheme until state postgresql.client created users.")
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@postgresql:client' state.sls postgresql.client -b 1 &&
- timeout 300 salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:client:stack:postgresql' cmd.run
- 'export CICD_CONTROL_ADDRESS=$(salt-call --out=newline_values_only pillar.get _param:cluster_vip_address);
- while true; do curl -sf http://${CICD_CONTROL_ADDRESS}:8887/apps && break; sleep 2; done'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 100}
- skip_fail: false
-
-# Rundeck
-#--------
-
-- description: Waiting for Rundeck to come up in container...
- cmd: timeout 30 salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:client:stack:rundeck' cmd.run
- 'export CICD_CONTROL_ADDRESS=$(salt-call --out=newline_values_only pillar.get _param:cluster_vip_address);
- while true; do curl -sf http://${CICD_CONTROL_ADDRESS}:4440 && break; sleep 2; done'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
-
-- description: Setup Rundeck
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@rundeck:client' state.sls rundeck.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
-
-# Elasticsearch (in container, disabled until https://mirantis.jira.com/browse/PROD-15297 is fixed)
-#--------------
-#- description: 'Waiting for Elasticsearch to come up in container...'
-# cmd: timeout 30 salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' cmd.run
-# 'export CICD_CONTROL_ADDRESS=$(salt-call --out=newline_values_only pillar.get _param:cluster_vip_address);
-# while true; do curl -sf http://${CICD_CONTROL_ADDRESS}:9200/?pretty && break; sleep 2; done'
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 3, delay: 10}
-# skip_fail: false
-#
-#- description: Setup Elasticsearch
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 3, delay: 10}
-# skip_fail: false
-
-
-# Generate docs
-#--------------
-
-- description: Install sphinx (may fail depending on the model)
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@sphinx:server' state.sls sphinx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Generate docs (may fail depending on the model)
- cmd: salt-run state.orchestrate sphinx.orch.generate_doc
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Run salt minion to create cert files for nginx
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls salt.minion
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install nginx
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
-
-# Final checks
-#-------------
-
-- description: Check for system services in failed state
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run "systemctl --failed | grep -E 'loaded[ \t]+failed' && echo 'Command execution failed' || true"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt-context-cookiecutter-cicd_oss.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt-context-cookiecutter-cicd_oss.yaml
deleted file mode 100644
index 91c0506..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt-context-cookiecutter-cicd_oss.yaml
+++ /dev/null
@@ -1,288 +0,0 @@
-default_context:
- mcp_version: testing
- cicd_control_node01_address: 10.167.4.91
- cicd_control_node01_hostname: cid01
- cicd_control_node02_address: 10.167.4.92
- cicd_control_node02_hostname: cid02
- cicd_control_node03_address: 10.167.4.93
- cicd_control_node03_hostname: cid03
- cicd_control_vip_address: 10.167.4.90
- cicd_control_vip_hostname: cid
- cicd_enabled: 'True'
- cicd_private_key: |-
- -----BEGIN RSA PRIVATE KEY-----
- MIIEowIBAAKCAQEAxgROyM+RnJiDhS/qfXayxDbGmMqIGWsWPnc1RdMcJ9xlBM4a
- bj4iNB7wmj19oMRBXKvrvMsbnhOJ1Z1tWH1jwm3rZ7ziJlDUo1Ms/wAPXV67+ayu
- LCSp6JGTzaxo/4YTfzUvvnCJvPKuIf+BjxE6/Rzdzrp6b4FYuvOfkxN/pK4HfrrR
- wJjyQCCeXGrDcq3vKvBaZ/19MN5JtrrCRx4L42UFy1kAkNCCRir+YsK+tiDM3Tfo
- r95PNXdMyeKzMEc858D9XxK+UyNFjGrO2hZm6fmRjgWMuNnaGnVotmT1z1pB91d3
- 5q7n60d3Q7KRn6p+xStrwB7rB9+Jsi3L6q+VEQIDAQABAoIBAExCJnExdvtexO/K
- 9zxgNRJZofv/q5BWbFecIxkD50j2mLTUBtvD8/osnC5aVgJO8nkVAJFRiw5Cqgdp
- PE4i5ANhv5HQ7SsiX/GSO7bst/4WWMAbn2wCpqiZP9mqdzlI0kNgIUXvIyxwLV2M
- f8GwOg52Snmt2S8OGjTNU/wZO0QYzzi44tf2/q0QWy0EV4g2oLq66T/kKpx5FmZQ
- 0cD9GiESfmzWiq2Aivy4if7VmW4fCxTIvmUypSQf+M4J7ZR6QYUbkr19wNEiYAUq
- k9aitJNIVW0johbZwexTTF1YiIVuvSwOI/lHGz1e7iVu/hZxx35JtkzLzF9Dd01q
- M0IMXz0CgYEA95aOjqJTp2KQT++Q4uPl/K1FLNquqZ02SyUNVglkuVn6THHsTC8Q
- MfO+l39bh1QGTK/bh4dyXub2jEYfTSn1K8YMOYp57tgHTJ0Y8AZbtYaEP0g3BeO5
- Myd1/YUY+vM6h58wyoqhDLwRql5u5GM8HAibK32d+Fnrf3VSM0i4jT8CgYEAzL6Y
- c8Fu4ezRiKR1x7jSgbePADRZa7xvLKenuMMYmtg+AixEp5nmm9/vBtmrhE+RQNXw
- mQvt8EId/XGcJhv83Y+QeYg3AhsdGMIYmlGhFGJ3FtcA72wt3FTGOa2KMtmI6khL
- WqYohvESfLtCumW0XPRRUVNKF73UKjMa8VnsOa8CgYBto/CRXXUqJM2/eFlzAHUy
- hhCiIl1Co2oNsOTM+u/t3NiozbJUsmq7lDMMp8uCjEUV5LKUu/h76k+4Ir1t0GzP
- 664yNQ52JJhm5xLKCCbIpj8ePv6Ozx+OdaUclbpQNzHuKSLULrvPBeHUzmjRHtjZ
- mT4N7lzsQ/WzxeKW71c6xQKBgDGrj1qNs7O1ewO2OiiQqujzOgrnqEXdue7QYX0O
- P3rZOPnX+XPbfzmTcu5rghOgJfHftPW8EiY2NAZXOHV6Vrb9bCQ/qnClWUK3W7ac
- VQKX/KIa2Mw8p0eLfWditWMuqOuFTFqacryB4WVHHKIRqFbgopWjKhdmYwE10rR4
- hzlbAoGBAMpZ+D08Us5wrsbVlYfOobuHgq2ENPvQnZqJfTobAPGtrMk/M7M4Ga1U
- +zeO8VA0Tj5jK2qI+MIB2hZmgjp49FbejKFAD+q3srkyqwkGerNXkWOiDGmvYhKR
- UbC4GcycVQsIZK4bw0K7Pl40/u9artsAFmWOoUunyO4QH8J8EDXJ
- -----END RSA PRIVATE KEY-----
- cicd_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDGBE7Iz5GcmIOFL+p9drLENsaYyogZaxY+dzVF0xwn3GUEzhpuPiI0HvCaPX2gxEFcq+u8yxueE4nVnW1YfWPCbetnvOImUNSjUyz/AA9dXrv5rK4sJKnokZPNrGj/hhN/NS++cIm88q4h/4GPETr9HN3OunpvgVi685+TE3+krgd+utHAmPJAIJ5casNyre8q8Fpn/X0w3km2usJHHgvjZQXLWQCQ0IJGKv5iwr62IMzdN+iv3k81d0zJ4rMwRzznwP1fEr5TI0WMas7aFmbp+ZGOBYy42doadWi2ZPXPWkH3V3fmrufrR3dDspGfqn7FK2vAHusH34myLcvqr5UR
- cluster_domain: cicd-sl2.local
- cluster_name: integration-dop-sl2
- deployment_type: physical
- compute_primary_first_nic: eth1
- compute_primary_second_nic: eth2
- control_network_netmask: 255.255.255.0
- control_vlan: '10'
- deploy_network_gateway: ''
- deploy_network_netmask: 255.255.255.0
- dns_server01: 172.18.176.6
- dns_server02: 172.18.176.6
- gateway_primary_first_nic: eth1
- gateway_primary_second_nic: eth2
- infra_deploy_nic: eth0
- maas_deploy_address: 10.167.4.91
- maas_hostname: mas01
- infra_kvm01_control_address: ${_param:cicd_control_node01_address}
- infra_kvm01_deploy_address: 10.167.5.91
- infra_kvm01_hostname: ${_param:cicd_control_node01_hostname}
- infra_kvm02_control_address: ${_param:cicd_control_node02_address}
- infra_kvm02_deploy_address: 10.167.5.92
- infra_kvm02_hostname: ${_param:cicd_control_node02_hostname}
- infra_kvm03_control_address: ${_param:cicd_control_node03_address}
- infra_kvm03_deploy_address: 10.167.5.93
- infra_kvm03_hostname: ${_param:cicd_control_node03_hostname}
- infra_kvm_vip_address: ${_param:cicd_control_address}
- infra_primary_first_nic: eth1
- infra_primary_second_nic: eth2
- kubernetes_enabled: 'False'
- opencontrail_enabled: 'False'
- openldap_domain: cicd-sl2.local # Must be plain text because cookiecutter-templates splits it by dots
- openldap_enabled: 'True'
- openldap_organisation: ${_param:cluster_name}
- openstack_compute_count: '100'
- openstack_compute_rack01_hostname: cmp
- openstack_compute_rack01_single_subnet: 10.167.4
- openstack_compute_rack01_tenant_subnet: 10.167.6
- openstack_control_address: 10.167.4.10
- openstack_control_hostname: ctl
- openstack_control_node01_address: 10.167.4.11
- openstack_control_node01_hostname: ctl01
- openstack_control_node02_address: 10.167.4.12
- openstack_control_node02_hostname: ctl02
- openstack_control_node03_address: 10.167.4.13
- openstack_control_node03_hostname: ctl03
- openstack_database_address: ${_param:openstack_control_address}
- openstack_database_hostname: ${_param:openstack_control_hostname}
- openstack_database_node01_address: ${_param:openstack_control_node01_address}
- openstack_database_node01_hostname: ${_param:openstack_control_node01_hostname}
- openstack_database_node02_address: ${_param:openstack_control_node02_address}
- openstack_database_node02_hostname: ${_param:openstack_control_node02_hostname}
- openstack_database_node03_address: ${_param:openstack_control_node03_address}
- openstack_database_node03_hostname: ${_param:openstack_control_node03_hostname}
- openstack_enabled: 'True'
- openstack_gateway_node01_address: 10.167.4.224
- openstack_gateway_node01_hostname: gtw01
- openstack_gateway_node01_tenant_address: 10.167.6.6
- openstack_gateway_node02_address: 10.167.4.225
- openstack_gateway_node02_hostname: gtw02
- openstack_gateway_node02_tenant_address: 10.167.6.7
- openstack_gateway_node03_address: 10.167.4.226
- openstack_gateway_node03_hostname: gtw03
- openstack_gateway_node03_tenant_address: 10.167.6.8
- openstack_message_queue_address: ${_param:openstack_control_address}
- openstack_message_queue_hostname: ${_param:openstack_control_hostname}
- openstack_message_queue_node01_address: ${_param:openstack_control_node01_address}
- openstack_message_queue_node01_hostname: ${_param:openstack_control_node01_hostname}
- openstack_message_queue_node02_address: ${_param:openstack_control_node02_address}
- openstack_message_queue_node02_hostname: ${_param:openstack_control_node02_hostname}
- openstack_message_queue_node03_address: ${_param:openstack_control_node03_address}
- openstack_message_queue_node03_hostname: ${_param:openstack_control_node03_hostname}
- openstack_network_engine: ovs
- openstack_nfv_dpdk_enabled: 'False'
- openstack_nfv_sriov_enabled: 'False'
- openstack_ovs_dvr_enabled: 'True'
- openstack_neutron_qos: 'False'
- openstack_ovs_encapsulation_type: vlan
- openstack_ovs_encapsulation_vlan_range: 2418:2420
- openstack_proxy_address: 10.167.4.80
- openstack_proxy_hostname: prx
- openstack_proxy_node01_address: 10.167.4.81
- openstack_proxy_node01_hostname: prx01
- openstack_proxy_node02_address: 10.167.4.82
- openstack_proxy_node02_hostname: prx02
- openstack_benchmark_node01_hostname: bmk01
- openstack_benchmark_node01_address: 10.167.4.85
- openstack_version: ocata
- public_host: ${_param:openstack_proxy_address}
- reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
- backup_private_key: |
- -----BEGIN RSA PRIVATE KEY-----
- MIIEowIBAAKCAQEAxL6/rVgCetsETpZaUmXmkj8cZ1WN0eubH1FvMDOi/La9ZJyT
- k0C6AYpJnIyEm93pMj5cLm08qRqMW+2pdOhYjcH69yg5MrX5SkRk8jCmIHIYoIbh
- Qnwbnj3dd3I39ZdfU2FO7u2vlbglVou6ZoQxlJDItuLNtzq6EG+w9eF19e7+OsC6
- 6iUItp618zfw1l3J/8nKvCGe2RYDf7mJW6XwCl/DwryJmwwzvPgYJ3QMuDD8/HFj
- lrJ3xjFTXj4b4Ws1XIoy78fFbtiLr4OwqCYkho03u2E5rOOP1qZxZB63sivHMLMO
- MM5bOAQKbulFNoyALADGYfc7sf0bZ4u9XXDXxQIDAQABAoIBAQCfmc2MJRT97KW1
- yqpCpX9BrAiymuiNHf+cjEcSZxEUyHkjIRFmJt+9WB0W7ba1anM92vCUiPDojSzH
- dig9Oi578JxR20NrK8uqv4jUHzrknynzLveVI3CUEcOSnglfJQijbxDFKfOCFPvV
- FUyE1UATMNBh6+LNfMprgu+exuMWOPnDyUiYQ+WZ0JfuZY8fuaZte4woJJOb9LUu
- 5rsMG/smIzjpgZ0Z9ZVDMurfq565qhpaXRAqKeIuyht8pacTo31iMQdHB78AvY/3
- g0z21Gk8k3z0Kr/YFKr2r4FmXY5m/gAUvZly2ZrVQM5XsbTVCzq/JpI5fssNvSbU
- AKmXzf4RAoGBAOO3d4/cstxERzW6hyOTjZIN1ppR52CsnZTsVPbfd0pCtmzmVZce
- CtHKdcXSbTwZvvkK09QSWAp3MoSpd0gIOiLU8Wx/R/RIZsu9BlhTS3r3EQLnk72d
- H/1TTA+j4T/LIYLSojQ1RxvIrHetAD44j732aTwKAHj/SybEAVqNkOB/AoGBAN0u
- gLcrgqIHGrk4VjWSvlCGymfF40equcx+ud7XhfZDGETUOSahW4dPZ52cjPAkrCBQ
- MMfcDwSVGsOAjd+mNt11BHUKobnhXwFaWWuyqyn9NmWFbjMbICVh7E3Of5aVN38o
- lrmo/7LuKMVG7XRwphCv5NkaJmQG4njDyUQWlaW7AoGADCd8wDb9bPhP/LQqBmIX
- ylXmwHHisaxE9O/wUQT4bwREjGd25gv6c9wkkRx8LBsLsGs9hzI7dMOL9Ly+2x9l
- SvqmsC3S/1zl77X1Ir2/Z57MT6Vgo1xBmtnZU3Rhz2/eKAdqFPNLClaZrgGT475N
- HcyLLWMzR0IJFtabY+Puea0CgYA8Zb5wRkldxWLewSuJZZDinGwY+kieAVjLJq/K
- 0j+ah6fQ48LXcah0wpIgz+cMjHcUO9GWQdk3/x9X03rqX5EL2DBnZYfUIl63F9zj
- M97ZkHOSNWVqPzX//0Vv2butewG0j3jZKfTo/2/SrxOYgEpYtC9huWpSVi7xm0US
- erhSkQKBgFIf9JEsfgE57ANhvITZ3ZI0uZXNxZkXQaVg8jvScDi79IIhy9iPzhKC
- aIIQoDNIlWv1ftCRZ5AlBvVXgvQ/QNrwy48JiQTzWZlb9Ezg8w+olQmSbG6fq7Y+
- 7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
- -----END RSA PRIVATE KEY-----
- backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
- salt_master_address: 10.167.4.15
- salt_master_hostname: cfg01
- salt_master_management_address: 10.167.5.15
- stacklight_enabled: 'True'
- stacklight_version: '2'
- fluentd_enabled: 'True'
- stacklight_monitor_address: 10.167.4.70
- stacklight_monitor_hostname: mon
- stacklight_monitor_node01_address: 10.167.4.71
- stacklight_monitor_node01_hostname: mon01
- stacklight_monitor_node02_address: 10.167.4.72
- stacklight_monitor_node02_hostname: mon02
- stacklight_monitor_node03_address: 10.167.4.73
- stacklight_monitor_node03_hostname: mon03
- stacklight_log_address: ${_param:stacklight_monitor_address}
- stacklight_log_hostname: ${_param:stacklight_monitor_hostname}
- stacklight_log_node01_address: ${_param:stacklight_monitor_node01_address}
- stacklight_log_node01_hostname: ${_param:stacklight_monitor_node01_hostname}
- stacklight_log_node02_address: ${_param:stacklight_monitor_node02_address}
- stacklight_log_node02_hostname: ${_param:stacklight_monitor_node02_hostname}
- stacklight_log_node03_address: ${_param:stacklight_monitor_node03_address}
- stacklight_log_node03_hostname: ${_param:stacklight_monitor_node03_hostname}
- stacklight_telemetry_address: ${_param:stacklight_monitor_address}
- stacklight_telemetry_hostname: ${_param:stacklight_monitor_hostname}
- stacklight_telemetry_node01_address: ${_param:stacklight_monitor_node01_address}
- stacklight_telemetry_node01_hostname: ${_param:stacklight_monitor_node01_hostname}
- stacklight_telemetry_node02_address: ${_param:stacklight_monitor_node02_address}
- stacklight_telemetry_node02_hostname: ${_param:stacklight_monitor_node02_hostname}
- stacklight_telemetry_node03_address: ${_param:stacklight_monitor_node03_address}
- stacklight_telemetry_node03_hostname: ${_param:stacklight_monitor_node03_hostname}
- stacklight_long_term_storage_type: influxdb
- tenant_network_gateway: ''
- tenant_network_netmask: 255.255.255.0
- tenant_vlan: '20'
- oss_enabled: 'True'
- oss_openstack_auth_url: http://${_param:openstack_control_address}:5000/v3
- oss_openstack_username: admin
- oss_openstack_password: password
- oss_openstack_project: admin
- oss_openstack_domain_id: default
- oss_openstack_cert: |-
- -----BEGIN CERTIFICATE-----
- MIIF0TCCA7mgAwIBAgIJAJgb8XpikoRNMA0GCSqGSIb3DQEBCwUAMEoxCzAJBgNV
- BAYTAmN6MRcwFQYDVQQDDA5TYWx0IE1hc3RlciBDQTEPMA0GA1UEBwwGUHJhZ3Vl
- MREwDwYDVQQKDAhNaXJhbnRpczAeFw0xNzEwMTkxNDU1MTJaFw0yNzEwMTcxNDU1
- MTJaMEoxCzAJBgNVBAYTAmN6MRcwFQYDVQQDDA5TYWx0IE1hc3RlciBDQTEPMA0G
- A1UEBwwGUHJhZ3VlMREwDwYDVQQKDAhNaXJhbnRpczCCAiIwDQYJKoZIhvcNAQEB
- BQADggIPADCCAgoCggIBALMl9l2+98lUSwhRdud2pUvUdEYwXM/fZb4BeHX43Qsr
- hKzId922V3nbliT2VDk4OHck9msKDrDQfYpcXhblt8n077/brvg3c1jt/g9N7OwA
- zk6HFy7Vw0tICJyl4yExYVrpt2Ge0uLf5vkl+f82H2qUXUzlsl1sJ4tz57g448L1
- 26VCRlO5hGuF1Y7w0ZHL5bDhREnsmsWS4gFqfCOR3x+5ortdAEXn1KTON213BkGa
- e13WKWuOcJSMtEvMtTT/6z1MuklGUAZim8++0poauEQwb/RjF9gQuFHNVQbyylW8
- 9/u7EHAEd94VWWGzWlOh9BG/UjNA+JWGmBVS3a3Fij1tp4MbkkiN8s4DdtA0PPDs
- SPUjKQDOdb/sQif6rfVtb292Vn2InTGsQ7+kg4yMo7aoduyBAdp3UbysmWJrsifE
- ZZQBvXCEoyDCbydAsZni+kVxYfdVAx2Y4rUw5B4WJ0C5gIJHjyCuv+WSYio72tW5
- RK0x018dGPVn5oqFRqQbwjDCE2wlzEeINGXk/xD1ytnjxy1r/tA0XadXaHp/R1hI
- 84gQbBiLnAShRnOhCnadL/a9YjMKkYlKqQMYF02xbEG5S6Nnxqv46NkpUQCPI74a
- 7kPIKnPr0+emNp+Rnduzferfb2b+BrRlif5gjHw1+HgJ7XIDmDI4mS++YJ3GO9z/
- AgMBAAGjgbkwgbYwDAYDVR0TBAUwAwEB/zALBgNVHQ8EBAMCAQYwHQYDVR0OBBYE
- FMFJ2EIq4p6WJFyp3I7PN6sPyzoTMHoGA1UdIwRzMHGAFMFJ2EIq4p6WJFyp3I7P
- N6sPyzoToU6kTDBKMQswCQYDVQQGEwJjejEXMBUGA1UEAwwOU2FsdCBNYXN0ZXIg
- Q0ExDzANBgNVBAcMBlByYWd1ZTERMA8GA1UECgwITWlyYW50aXOCCQCYG/F6YpKE
- TTANBgkqhkiG9w0BAQsFAAOCAgEAi4fghF/Ink777HT1qy0MiVw61Z+ZVOhXf4OW
- +VuSVDoc6NWEpekLbtCd8VzFytjrlwVNOywueey1ZMAAYQHplvr+hYkkc1q4WFky
- qn3tFdKZzcF1jX3+fAOtl73XqvB6NknAp+PcyF32kJBnlg9bBzSyvqyu5HrzTGwg
- F2aBH4J3jcb5qUkg31PJQIcCsprz40PbBP/j6XyXw9s//Wji33a43+jmhud4LB9r
- +2ln2lleoKU7Nuu0/hdcmvXQ4qz2V+01p3/Mie0H12bEStECcyCpWxYI0GatsaOz
- mWfnw9+ZZeV+yVcNpkFDF2X7tvK8peTYeyWQRagJF49Z5HGdFn4S+98ddlIhUp16
- 5S2SMEh/nshpBLfZNTV0BQZd3GUOWgpVsTT3bsX7b8bvlidmzXRpfE5tR0ZE6d02
- jGFuYRJwLA038Bk49nznQ/CtGi9qylqR2qPsL4JkJQvQE57Bdt2obKn0aIgt3YLh
- kBFxLx930x9WzETyMPqDnrnsZPkUilXLnszBXB5W+V6u4vnZAV3yZI0/3YKoAEYp
- Lyb7L+8/YEYEWYLm7qgxX2TbTle53EJx4ze//efHOBOIS1Dmyh3JlRRHZEEPv7Mt
- FooWxfsRp7jUhWin99LTlbbp6KdmVz1K9LmrzVPgpz+ZNGXCDM5xklEnrFJy8gX5
- ptYlHCU=
- -----END CERTIFICATE-----
- oss_runbook_private_key: |-
- -----BEGIN RSA PRIVATE KEY-----
- MIIEowIBAAKCAQEAxgROyM+RnJiDhS/qfXayxDbGmMqIGWsWPnc1RdMcJ9xlBM4a
- bj4iNB7wmj19oMRBXKvrvMsbnhOJ1Z1tWH1jwm3rZ7ziJlDUo1Ms/wAPXV67+ayu
- LCSp6JGTzaxo/4YTfzUvvnCJvPKuIf+BjxE6/Rzdzrp6b4FYuvOfkxN/pK4HfrrR
- wJjyQCCeXGrDcq3vKvBaZ/19MN5JtrrCRx4L42UFy1kAkNCCRir+YsK+tiDM3Tfo
- r95PNXdMyeKzMEc858D9XxK+UyNFjGrO2hZm6fmRjgWMuNnaGnVotmT1z1pB91d3
- 5q7n60d3Q7KRn6p+xStrwB7rB9+Jsi3L6q+VEQIDAQABAoIBAExCJnExdvtexO/K
- 9zxgNRJZofv/q5BWbFecIxkD50j2mLTUBtvD8/osnC5aVgJO8nkVAJFRiw5Cqgdp
- PE4i5ANhv5HQ7SsiX/GSO7bst/4WWMAbn2wCpqiZP9mqdzlI0kNgIUXvIyxwLV2M
- f8GwOg52Snmt2S8OGjTNU/wZO0QYzzi44tf2/q0QWy0EV4g2oLq66T/kKpx5FmZQ
- 0cD9GiESfmzWiq2Aivy4if7VmW4fCxTIvmUypSQf+M4J7ZR6QYUbkr19wNEiYAUq
- k9aitJNIVW0johbZwexTTF1YiIVuvSwOI/lHGz1e7iVu/hZxx35JtkzLzF9Dd01q
- M0IMXz0CgYEA95aOjqJTp2KQT++Q4uPl/K1FLNquqZ02SyUNVglkuVn6THHsTC8Q
- MfO+l39bh1QGTK/bh4dyXub2jEYfTSn1K8YMOYp57tgHTJ0Y8AZbtYaEP0g3BeO5
- Myd1/YUY+vM6h58wyoqhDLwRql5u5GM8HAibK32d+Fnrf3VSM0i4jT8CgYEAzL6Y
- c8Fu4ezRiKR1x7jSgbePADRZa7xvLKenuMMYmtg+AixEp5nmm9/vBtmrhE+RQNXw
- mQvt8EId/XGcJhv83Y+QeYg3AhsdGMIYmlGhFGJ3FtcA72wt3FTGOa2KMtmI6khL
- WqYohvESfLtCumW0XPRRUVNKF73UKjMa8VnsOa8CgYBto/CRXXUqJM2/eFlzAHUy
- hhCiIl1Co2oNsOTM+u/t3NiozbJUsmq7lDMMp8uCjEUV5LKUu/h76k+4Ir1t0GzP
- 664yNQ52JJhm5xLKCCbIpj8ePv6Ozx+OdaUclbpQNzHuKSLULrvPBeHUzmjRHtjZ
- mT4N7lzsQ/WzxeKW71c6xQKBgDGrj1qNs7O1ewO2OiiQqujzOgrnqEXdue7QYX0O
- P3rZOPnX+XPbfzmTcu5rghOgJfHftPW8EiY2NAZXOHV6Vrb9bCQ/qnClWUK3W7ac
- VQKX/KIa2Mw8p0eLfWditWMuqOuFTFqacryB4WVHHKIRqFbgopWjKhdmYwE10rR4
- hzlbAoGBAMpZ+D08Us5wrsbVlYfOobuHgq2ENPvQnZqJfTobAPGtrMk/M7M4Ga1U
- +zeO8VA0Tj5jK2qI+MIB2hZmgjp49FbejKFAD+q3srkyqwkGerNXkWOiDGmvYhKR
- UbC4GcycVQsIZK4bw0K7Pl40/u9artsAFmWOoUunyO4QH8J8EDXJ
- -----END RSA PRIVATE KEY-----
- oss_runbook_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDGBE7Iz5GcmIOFL+p9drLENsaYyogZaxY+dzVF0xwn3GUEzhpuPiI0HvCaPX2gxEFcq+u8yxueE4nVnW1YfWPCbetnvOImUNSjUyz/AA9dXrv5rK4sJKnokZPNrGj/hhN/NS++cIm88q4h/4GPETr9HN3OunpvgVi685+TE3+krgd+utHAmPJAIJ5casNyre8q8Fpn/X0w3km2usJHHgvjZQXLWQCQ0IJGKv5iwr62IMzdN+iv3k81d0zJ4rMwRzznwP1fEr5TI0WMas7aFmbp+ZGOBYy42doadWi2ZPXPWkH3V3fmrufrR3dDspGfqn7FK2vAHusH34myLcvqr5UR
-
- # Experimental notification parameters
- oss_pushkin_smtp_host: '127.0.0.1'
- oss_pushkin_smtp_port: '25'
- oss_pushkin_email_sender_password: 'integration-password'
- oss_webhook_from: 'integration-ci@mirantis.com'
- oss_webhook_recipients: 'ddmitriev@mirantis.com'
- oss_webhook_app_id: '24'
- oss_webhook_login_id: '13'
-
- oss_cis_enabled: 'True'
- oss_cis_jobs_repository: https://github.com/Mirantis/rundeck-cis-jobs.git
- oss_cis_jobs_repository_branch: master
- oss_security_audit_enabled: 'True'
- oss_security_audit_os_ssl_verify: 'True'
- oss_security_audit_os_cacert_path: '/srv/volumes/rundeck/storage/content/keys/cis/openstack/cert.pem'
- oss_cleanup_service_enabled: 'True'
-
- # SFDC configuration, to be overridden from the test with actual values
- sfdc_sandbox_enabled: True
- sfdc_auth_url: ''
- sfdc_username: ''
- sfdc_password: ''
- sfdc_consumer_key: ''
- sfdc_consumer_secret: ''
- sfdc_organization_id: ''
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt-context-environment.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt-context-environment.yaml
deleted file mode 100644
index 5b1e465..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt-context-environment.yaml
+++ /dev/null
@@ -1,204 +0,0 @@
-nodes:
- cfg01.mcp11-ovs-dpdk.local:
- reclass_storage_name: infra_config_node01
- roles:
- - infra_config
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- cid01.mcp11-ovs-dpdk.local:
- reclass_storage_name: cicd_control_node01
- roles:
- - infra_kvm
- - cicd_control_leader
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- cid02.mcp11-ovs-dpdk.local:
- reclass_storage_name: cicd_control_node02
- roles:
- - infra_kvm
- - cicd_control_manager
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- cid03.mcp11-ovs-dpdk.local:
- reclass_storage_name: cicd_control_node03
- roles:
- - infra_kvm
- - cicd_control_manager
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- ctl01.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_control_node01
- roles:
- - openstack_control_leader
- - openstack_database_leader
- - openstack_message_queue
- - features_designate_database
- - features_designate
- - features_designate_keystone
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- ctl02.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_control_node02
- roles:
- - openstack_control
- - openstack_database
- - openstack_message_queue
- - features_designate_database
- - features_designate
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- ctl03.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_control_node03
- roles:
- - openstack_control
- - openstack_database
- - openstack_message_queue
- - features_designate_database
- - features_designate
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- prx01.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_proxy_node01
- roles:
- - openstack_proxy
- - features_designate_proxy
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- prx02.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_proxy_node02
- roles:
- - openstack_proxy
- - features_designate_proxy
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- mon01.mcp11-ovs-dpdk.local:
- reclass_storage_name: stacklight_server_node01
- roles:
- - stacklightv2_server_leader
- - stacklight_telemetry_leader
- - stacklight_log_leader_v2
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- mon02.mcp11-ovs-dpdk.local:
- reclass_storage_name: stacklight_server_node02
- roles:
- - stacklightv2_server
- - stacklight_telemetry
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- mon03.mcp11-ovs-dpdk.local:
- reclass_storage_name: stacklight_server_node03
- roles:
- - stacklightv2_server
- - stacklight_telemetry
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- # Generator-based computes. For compatibility only
- cmp<<count>>.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_compute_rack01
- roles:
- - openstack_compute
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: bond0_ab_ovs_vxlan_ctl_mesh
- ens5:
- role: bond0_ab_ovs_vxlan_ctl_mesh
- ens6:
- role: bond1_ab_ovs_floating
-
- gtw01.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_gateway_node01
- roles:
- - openstack_gateway
- - linux_system_codename_xenial
- classes:
- - system.linux.system.repo.mcp.apt_mirantis.docker
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: bond0_ab_ovs_vxlan_ctl_mesh
- ens5:
- role: bond0_ab_ovs_vxlan_ctl_mesh
- ens6:
- role: bond1_ab_ovs_floating
-
- gtw02.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_gateway_node02
- roles:
- - openstack_gateway
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: bond0_ab_ovs_vxlan_ctl_mesh
- ens5:
- role: bond0_ab_ovs_vxlan_ctl_mesh
- ens6:
- role: bond1_ab_ovs_floating
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt.yaml
deleted file mode 100644
index f5b4f73..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt.yaml
+++ /dev/null
@@ -1,69 +0,0 @@
-{% from 'cookied-mcp-ocata-dop-sl2/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-mcp-ocata-dop-sl2/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'cookied-mcp-ocata-dop-sl2/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# See shared-salt.yaml for other salt model repository parameters
-
-# Name of the context file (without the extension, which is always .yaml) used to render the Environment model
-{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','virtual-mcp-ocata-dop-sl2') %}
-# Path to the context files used to render Cluster and Environment models
-{%- set CLUSTER_CONTEXT_NAME = 'salt-context-cookiecutter-cicd_oss.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-environment.yaml'] %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
-
-{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "glusterfs" "xtrabackup" "maas" "backupninja" "jenkins" "fluentd" "logrotate"') }}
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-- description: "Workaround for combined roles: remove unnecessary classes"
- cmd: |
- set -e;
- sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
- sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
- sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
- sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
- sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
-
- . /root/venv-reclass-tools/bin/activate;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
-
-- description: Temporary workaround for PROD-15890 to downgrade packages
- cmd: |
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@linux:system' state.sls linux.system.repo;
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@linux:system' cmd.run "apt-get update"
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@linux:system' cmd.run "apt-get install -y --allow-downgrades vlan";
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/sl.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/sl.yaml
deleted file mode 100644
index 7bc48a4..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/sl.yaml
+++ /dev/null
@@ -1,241 +0,0 @@
-{% from 'cookied-mcp-ocata-dop-sl2/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-mcp-ocata-dop-sl2/salt.yaml' import ENVIRONMENT_MODEL_INVENTORY_NAME with context %}
-
-# Install docker swarm
-- description: Configure docker service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install docker swarm on master node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Send grains to the swarm slave nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' mine.update
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Refresh modules
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Rerun swarm on slaves to ensure proper token population
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure slave nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: List registered Docker swarm nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install keepalived on mon nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'mon*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check the VIP on mon nodes
- cmd: |
- SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
- echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-# Install SLv2 infra
-# Launch containers
-- description: Launch prometheus containers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master and I@prometheus:server' state.sls docker.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Check docker ps
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' cmd.run "docker ps"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Install telegraf
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Configure Prometheus exporters, if pillar 'prometheus:exporters' exists on any server
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install elasticsearch server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install kibana server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install elasticsearch client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Install kibana client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:client' state.sls kibana.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check influxdb
- cmd: |
- INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
- if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-# Install Prometheus LTS (optional, if set in the model)
-- description: Prometheus LTS (optional, if set in the model)
- cmd: |
- PROMETHEUS_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "PROMETHEUS rely service presence: ${PROMETHEUS_SERVICE}";
- if [[ "$PROMETHEUS_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' state.sls prometheus
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-# Install service for the log collection
-- description: Configure fluentd
- cmd: |
- FLUENTD_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "Fluentd service presence: ${FLUENTD_SERVICE}";
- if [[ "$FLUENTD_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' state.sls fluentd
- else
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:log_collector' state.sls heka.log_collector
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-# Install heka ceilometer collector
-- description: Install heka ceilometer collector if it exists
- cmd: |
- CEILO=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "Ceilometer service presence: ${CEILO}";
- if [[ "$CEILO" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' state.sls heka.ceilometer_collector;
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' service.restart ceilometer_collector
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-# Collect grains needed to configure the services
-
-- description: Get grains
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Sync modules
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 15}
- skip_fail: false
-
-# Configure the services running in Docker Swarm
-- description: Configure prometheus in docker swarm
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' state.sls prometheus
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure Remote Collector in Docker Swarm for Openstack deployments
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' state.sls heka.remote_collector
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install sphinx
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@sphinx:server' state.sls sphinx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-
-#- description: Install prometheus alertmanager
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-
-#- description: run docker state
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-#
-#- description: docker ps
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-
-- description: Configure Grafana dashboards and datasources
- cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Run salt minion to create cert files
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay--meta-data.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
- instance-id: iid-local1
- hostname: {hostname}
- local-hostname: {hostname}
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay--user-data-cfg01.yaml
deleted file mode 100644
index 3e70fd8..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,70 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Enable grub menu using updated config below
- - update-grub
-
- # Prepare network connection
- - sudo ifdown ens3
- - sudo ip r d default || true # remove existing default route to get it from dhcp
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 16G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
-
- write_files:
- - path: /etc/default/grub.d/97-enable-grub-menu.cfg
- content: |
- GRUB_RECORDFAIL_TIMEOUT=30
- GRUB_TIMEOUT=3
- GRUB_TIMEOUT_STYLE=menu
-
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay--user-data1604-hwe.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay--user-data1604-hwe.yaml
deleted file mode 100644
index 319c007..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay--user-data1604-hwe.yaml
+++ /dev/null
@@ -1,59 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Enable grub menu using updated config below
- - update-grub
-
- # Prepare network connection
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 16G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- write_files:
- - path: /etc/default/grub.d/97-enable-grub-menu.cfg
- content: |
- GRUB_RECORDFAIL_TIMEOUT=30
- GRUB_TIMEOUT=3
- GRUB_TIMEOUT_STYLE=menu
-
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay--user-data1604.yaml
deleted file mode 100644
index 319c007..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,59 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Enable grub menu using updated config below
- - update-grub
-
- # Prepare network connection
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 16G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- write_files:
- - path: /etc/default/grub.d/97-enable-grub-menu.cfg
- content: |
- GRUB_RECORDFAIL_TIMEOUT=30
- GRUB_TIMEOUT=3
- GRUB_TIMEOUT_STYLE=menu
-
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay.yaml
deleted file mode 100644
index 77b2573..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay.yaml
+++ /dev/null
@@ -1,665 +0,0 @@
-# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-mcp-ocata-dop-sl2') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CID01 = os_env('HOSTNAME_CID01', 'cid01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CID02 = os_env('HOSTNAME_CID02', 'cid02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CID03 = os_env('HOSTNAME_CID03', 'cid03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp001.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp002.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW02 = os_env('HOSTNAME_GTW02', 'gtw02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_PRX02 = os_env('HOSTNAME_PRX02', 'prx02.' + DOMAIN_NAME) %}
-{# Aggregate/VIP hostnames referenced by the address pools below #}
-{% set HOSTNAME_CID = os_env('HOSTNAME_CID', 'cid.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON = os_env('HOSTNAME_MON', 'mon.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL = os_env('HOSTNAME_CTL', 'ctl.' + DOMAIN_NAME) %}
-{% set HOSTNAME_PRX = os_env('HOSTNAME_PRX', 'prx.' + DOMAIN_NAME) %}
-
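
The os_env helper used throughout this file reads an environment variable and falls back to the given default, so any of the values above can be overridden per run without editing the template (usage sketch):

    export REPOSITORY_SUITE=stable   # overrides the 'testing' default set above
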
-{% import 'cookied-mcp-ocata-dop-sl2/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'cookied-mcp-ocata-dop-sl2/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'cookied-mcp-ocata-dop-sl2/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
-{% import 'cookied-mcp-ocata-dop-sl2/underlay--user-data1604-hwe.yaml' as CLOUDINIT_USER_DATA_1604_HWE with context %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
- - &cloudinit_user_data_1604_hwe {{ CLOUDINIT_USER_DATA_1604_HWE }}
-
-template:
- devops_settings:
- env_name: {{ os_env('ENV_NAME', LAB_CONFIG_NAME + '_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
- address_pools:
- private-pool01:
- net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +15
-
- default_{{ HOSTNAME_CID }}: +90
- default_{{ HOSTNAME_CID01 }}: +91
- default_{{ HOSTNAME_CID02 }}: +92
- default_{{ HOSTNAME_CID03 }}: +93
-
- default_{{ HOSTNAME_MON }}: +70
- default_{{ HOSTNAME_MON01 }}: +71
- default_{{ HOSTNAME_MON02 }}: +72
- default_{{ HOSTNAME_MON03 }}: +73
-
- default_{{ HOSTNAME_CTL }}: +10
- default_{{ HOSTNAME_CTL01 }}: +11
- default_{{ HOSTNAME_CTL02 }}: +12
- default_{{ HOSTNAME_CTL03 }}: +13
- default_{{ HOSTNAME_GTW01 }}: +224
- default_{{ HOSTNAME_GTW02 }}: +225
- default_{{ HOSTNAME_PRX }}: +80
- default_{{ HOSTNAME_PRX01 }}: +81
- default_{{ HOSTNAME_PRX02 }}: +82
-
- default_{{ HOSTNAME_CMP01 }}: +101
- default_{{ HOSTNAME_CMP02 }}: +102
- ip_ranges:
- dhcp: [+90, -10]
-
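
The +N / -N entries above are offsets into whatever subnet is allocated from the pool, not literal addresses. A worked example, assuming the default PRIVATE_ADDRESS_POOL01 of '10.60.0.0/16:24' hands out 10.60.0.0/24:

    # gateway: +1            -> 10.60.0.1
    # default_cfg01: +15     -> 10.60.0.15
    # default_gtw01: +224    -> 10.60.0.224
    # dhcp: [+90, -10]       -> 10.60.0.90 up to ten addresses below the top of the /24
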
- admin-pool01:
- net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +15
-
- default_{{ HOSTNAME_CID }}: +90
- default_{{ HOSTNAME_CID01 }}: +91
- default_{{ HOSTNAME_CID02 }}: +92
- default_{{ HOSTNAME_CID03 }}: +93
-
- default_{{ HOSTNAME_MON }}: +70
- default_{{ HOSTNAME_MON01 }}: +71
- default_{{ HOSTNAME_MON02 }}: +72
- default_{{ HOSTNAME_MON03 }}: +73
-
- default_{{ HOSTNAME_CTL }}: +10
- default_{{ HOSTNAME_CTL01 }}: +11
- default_{{ HOSTNAME_CTL02 }}: +12
- default_{{ HOSTNAME_CTL03 }}: +13
- default_{{ HOSTNAME_GTW01 }}: +224
- default_{{ HOSTNAME_GTW02 }}: +225
- default_{{ HOSTNAME_PRX }}: +80
- default_{{ HOSTNAME_PRX01 }}: +81
- default_{{ HOSTNAME_PRX02 }}: +82
-
- default_{{ HOSTNAME_CMP01 }}: +101
- default_{{ HOSTNAME_CMP02 }}: +102
- ip_ranges:
- dhcp: [+90, -10]
-
- tenant-pool01:
- net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +15
-
- default_{{ HOSTNAME_CID }}: +90
- default_{{ HOSTNAME_CID01 }}: +91
- default_{{ HOSTNAME_CID02 }}: +92
- default_{{ HOSTNAME_CID03 }}: +93
-
- default_{{ HOSTNAME_MON }}: +70
- default_{{ HOSTNAME_MON01 }}: +71
- default_{{ HOSTNAME_MON02 }}: +72
- default_{{ HOSTNAME_MON03 }}: +73
-
- default_{{ HOSTNAME_CTL }}: +10
- default_{{ HOSTNAME_CTL01 }}: +11
- default_{{ HOSTNAME_CTL02 }}: +12
- default_{{ HOSTNAME_CTL03 }}: +13
- default_{{ HOSTNAME_GTW01 }}: +6
- default_{{ HOSTNAME_GTW02 }}: +7
- default_{{ HOSTNAME_PRX }}: +80
- default_{{ HOSTNAME_PRX01 }}: +81
- default_{{ HOSTNAME_PRX02 }}: +82
-
- default_{{ HOSTNAME_CMP01 }}: +101
- default_{{ HOSTNAME_CMP02 }}: +102
- ip_ranges:
- dhcp: [+10, -10]
-
- external-pool01:
- net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +15
-
- default_{{ HOSTNAME_CID }}: +90
- default_{{ HOSTNAME_CID01 }}: +91
- default_{{ HOSTNAME_CID02 }}: +92
- default_{{ HOSTNAME_CID03 }}: +93
-
- default_{{ HOSTNAME_MON }}: +70
- default_{{ HOSTNAME_MON01 }}: +71
- default_{{ HOSTNAME_MON02 }}: +72
- default_{{ HOSTNAME_MON03 }}: +73
-
- default_{{ HOSTNAME_CTL }}: +10
- default_{{ HOSTNAME_CTL01 }}: +11
- default_{{ HOSTNAME_CTL02 }}: +12
- default_{{ HOSTNAME_CTL03 }}: +13
- default_{{ HOSTNAME_GTW01 }}: +224
- default_{{ HOSTNAME_GTW02 }}: +225
- default_{{ HOSTNAME_PRX }}: +80
- default_{{ HOSTNAME_PRX01 }}: +81
- default_{{ HOSTNAME_PRX02 }}: +82
-
- default_{{ HOSTNAME_CMP01 }}: +101
- default_{{ HOSTNAME_CMP02 }}: +102
- ip_ranges:
- dhcp: [+10, -10]
-
-
- groups:
- - name: default
- driver:
- name: devops.driver.libvirt
- params:
- connection_string: {{ os_env('CONNECTION_STRING', 'qemu:///system') }}
- storage_pool_name: {{ os_env('STORAGE_POOL_NAME', 'default') }}
- stp: False
- hpet: False
- enable_acpi: true
- use_host_cpu: {{ os_env('DRIVER_USE_HOST_CPU', true) }}
- use_hugepages: {{ os_env('DRIVER_USE_HUGEPAGES', false) }}
-
- network_pools:
- admin: admin-pool01
- private: private-pool01
- tenant: tenant-pool01
- external: external-pool01
-
- l2_network_devices:
- admin:
- address_pool: admin-pool01
- dhcp: true
- forward:
- mode: nat
-
- private_br:
- vlan_ifaces:
- - 10
-
- private:
- address_pool: private-pool01
- dhcp: false
- forward:
- mode: route
- parent_iface:
- l2_net_dev: private_br
- tag: 10
-
- external:
- address_pool: external-pool01
- dhcp: false
- forward:
- mode: route
-
-
- group_volumes:
- - name: cloudimage1604 # This name is used as the 'backing_store' option for node volumes.
- source_image: {{ os_env('IMAGE_PATH1604') }} # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- format: qcow2
- - name: cfg01_day01_image # Pre-configured day01 image
- source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
- format: qcow2
- - name: mcp_ubuntu_1604_image # Pre-configured image for VCP nodes initially based on kvm nodes.
- # http://images.mirantis.com/ubuntu-16-04-x64-latest.qcow2 (preferred)
- # or
- # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- source_image: {{ os_env('MCP_IMAGE_PATH1604', os_env('IMAGE_PATH1604')) }}
- format: qcow2
-
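
Naming one of these images as a volume's backing_store gives the node a copy-on-write overlay on top of the shared image rather than a full copy. Under libvirt that corresponds roughly to the following (a sketch; paths and size are illustrative):

    # System disk for one node, backed by the shared cloud image:
    qemu-img create -f qcow2 \
        -b /var/lib/libvirt/images/cloudimage1604.qcow2 -F qcow2 \
        /var/lib/libvirt/images/cfg01.system.qcow2 150G
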
- nodes:
- - name: {{ HOSTNAME_CFG01 }}
- role: salt_master
- params:
- vcpu: {{ os_env('CFG_NODE_CPU', 2) }}
- memory: {{ os_env('CFG_NODE_MEMORY', 8192) }}
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: {{ os_env('CFG_NODE_VOLUME_SIZE', 150) }}
- backing_store: cfg01_day01_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_cfg01
-
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private_br
- interface_model: *interface_model
- network_config:
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CID01 }}
- role: salt_minion
- params:
- vcpu: {{ os_env('CID_NODE_CPU', 3) }}
- memory: {{ os_env('CID_NODE_MEMORY', 6144) }}
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: &interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private_br
- interface_model: *interface_model
- network_config: &network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CID02 }}
- role: salt_minion
- params:
- vcpu: {{ os_env('CID_NODE_CPU', 3) }}
- memory: {{ os_env('CID_NODE_MEMORY', 6144) }}
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CID03 }}
- role: salt_minion
- params:
- vcpu: {{ os_env('CID_NODE_CPU', 3) }}
- memory: {{ os_env('CID_NODE_MEMORY', 6144) }}
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CTL01 }}
- role: salt_minion
- params:
- vcpu: {{ os_env('CTL_NODE_CPU', 3) }}
- memory: {{ os_env('CTL_NODE_MEMORY', 14000) }}
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: {{ os_env('CTL_NODE_VOLUME_SIZE', 150) }}
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CTL02 }}
- role: salt_minion
- params:
- vcpu: {{ os_env('CTL_NODE_CPU', 3) }}
- memory: {{ os_env('CTL_NODE_MEMORY', 14000) }}
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: {{ os_env('CTL_NODE_VOLUME_SIZE', 150) }}
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CTL03 }}
- role: salt_minion
- params:
- vcpu: {{ os_env('CTL_NODE_CPU', 3) }}
- memory: {{ os_env('CTL_NODE_MEMORY', 14000) }}
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: {{ os_env('CTL_NODE_VOLUME_SIZE', 150) }}
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON01 }}
- role: salt_minion
- params:
- vcpu: {{ os_env('MON_NODE_CPU', 2) }}
- memory: {{ os_env('MON_NODE_MEMORY', 14000) }}
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: {{ os_env('MON_NODE_VOLUME_SIZE', 150) }}
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON02 }}
- role: salt_minion
- params:
- vcpu: {{ os_env('MON_NODE_CPU', 2) }}
- memory: {{ os_env('MON_NODE_MEMORY', 14000) }}
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: {{ os_env('MON_NODE_VOLUME_SIZE', 150) }}
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON03 }}
- role: salt_minion
- params:
- vcpu: {{ os_env('MON_NODE_CPU', 2) }}
- memory: {{ os_env('MON_NODE_MEMORY', 14000) }}
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: {{ os_env('MON_NODE_VOLUME_SIZE', 150) }}
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_PRX01 }}
- role: salt_minion
- params:
- vcpu: {{ os_env('PRX_NODE_CPU', 1) }}
- memory: {{ os_env('PRX_NODE_MEMORY', 2048) }}
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: {{ os_env('PRX_NODE_VOLUME_SIZE', 150) }}
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CMP01 }}
- role: salt_minion
- params:
- vcpu: {{ os_env('CMP_NODE_CPU', 2) }}
- memory: {{ os_env('CMP_NODE_MEMORY', 3072) }}
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: {{ os_env('CMP_NODE_VOLUME_SIZE', 150) }}
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_hwe
-
-
- interfaces: &all_interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private_br
- interface_model: *interface_model
- - label: ens5
- l2_network_device: private_br
- interface_model: *interface_model
- - label: ens6
- l2_network_device: external
- interface_model: *interface_model
- network_config: &all_network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
- ens5:
- networks:
- - private
- ens6:
- networks:
- - external
-
- - name: {{ HOSTNAME_CMP02 }}
- role: salt_minion
- params:
- vcpu: {{ os_env('CMP_NODE_CPU', 2) }}
- memory: {{ os_env('CMP_NODE_MEMORY', 3072) }}
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: {{ os_env('CMP_NODE_VOLUME_SIZE', 150) }}
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_hwe
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_GTW01 }}
- role: salt_minion
- params:
- vcpu: {{ os_env('GTW_NODE_CPU', 4) }}
- memory: {{ os_env('GTW_NODE_MEMORY', 4096) }}
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: {{ os_env('GTW_NODE_VOLUME_SIZE', 150) }}
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_hwe
-
- interfaces: *all_interfaces
- network_config: *all_network_config
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/core.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/core.yaml
deleted file mode 100644
index af81722..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/core.yaml
+++ /dev/null
@@ -1,137 +0,0 @@
-{% from 'cookied-mcp-ocata-dvr-vxlan/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-- description: Refresh grains
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 15}
- skip_fail: false
-
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install glusterfs
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Setup glusterfs on primary controller
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check the gluster status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
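
On a healthy three-node cluster the mysql.status check above should report a cluster size of 3 and all three database addresses, roughly (addresses are illustrative):

    # wsrep_cluster_size:
    #     3
    # wsrep_incoming_addresses:
    #     10.167.4.51:3306,10.167.4.52:3306,10.167.4.53:3306
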
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Run salt minion to create cert files for nginx
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls salt.minion
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install nginx on prx nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
- echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
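
Unpacked, the VIP check above does the following (sketch):

    # 1. Read the expected VIP from pillar on the salt master:
    VIP=$(salt-call --out=newline_values_only pillar.get _param:openstack_control_address)
    # 2. Ask every keepalived cluster member whether the address is configured locally:
    salt -C 'I@keepalived:cluster' cmd.run "ip a | grep ${VIP}" | grep -B1 ${VIP}
    # The trailing grep -B1 keeps the minion-id line above the match and makes the
    # whole step exit non-zero, and therefore be retried, when no node holds the VIP.
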
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/openstack.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/openstack.yaml
deleted file mode 100644
index a4b52a5..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/openstack.yaml
+++ /dev/null
@@ -1,380 +0,0 @@
-{% from 'cookied-mcp-ocata-dvr-vxlan/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-mcp-ocata-dvr-vxlan/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'cookied-mcp-ocata-dvr-vxlan/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'cookied-mcp-ocata-dvr-vxlan/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'cookied-mcp-ocata-dvr-vxlan/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-{% set PATTERN = os_env('PATTERN', 'smoke') %}
-
-# Install OpenStack control services
-
-- description: Install glance on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glance:server' state.sls glance -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install keystone service (note that different fernet keys are created on different nodes)
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' state.sls keystone.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 15}
- skip_fail: false
-
-- description: Restart apache due to PROD-10477
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl restart apache2"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 15}
- skip_fail: false
-
-- description: Check apache status due to PROD-10477
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl status apache2"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 15}
- skip_fail: false
-
-- description: Mount glusterfs.client volumes (requires the 'keystone' and 'glusterfs' system users to be created)
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glance:server' state.sls glusterfs.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Update fernet keys for keystone server on the mounted glusterfs volume
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' state.sls keystone.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Populate keystone services/tenants/admins
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:client' state.sls keystone.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check keystone service-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack service list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check glance image-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; glance image-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Install nova on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nova:controller' state.sls nova -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check nova service-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; nova --debug service-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 5}
- skip_fail: false
-
-
-- description: Install cinder
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@cinder:controller' state.sls cinder -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check cinder list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; cinder list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Install neutron service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@neutron:server' state.sls neutron -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install neutron on gtw node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@neutron:gateway' state.sls neutron
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-# Install designate
-- description: Install powerdns
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'ctl*' state.sls powerdns
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install designate
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@designate:server' state.sls designate -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 10}
- skip_fail: false
-
-- description: Check neutron agent-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; neutron agent-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install heat service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@heat:server' state.sls heat -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check heat service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack orchestration resource type list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 10}
- skip_fail: false
-
-
-- description: Deploy horizon dashboard
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@horizon:server' state.sls horizon
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Deploy nginx proxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-# Install compute node
-
-- description: Apply formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Re-apply (as in the docs) formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check IP on computes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
- 'ip a'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 10, delay: 30}
- skip_fail: false
-
-
- # Upload cirros image
-
-- description: Upload cirros image on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- 'wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-i386-disk.img'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Register image in glance
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; glance --timeout 120 image-create --name cirros --visibility public --disk-format qcow2 --container-format bare --progress < /root/cirros-0.3.4-i386-disk.img'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create net04_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create router
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-create net04_router01'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set gateway
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Add interface
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
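
The neutron CLI used in these steps has since been deprecated in favour of the unified openstack client; for reference, the same external-network wiring would look roughly like this (a sketch, not part of the template):

    openstack network create net04_ext --external \
        --provider-network-type flat --provider-physical-network physnet1
    openstack router create net04_router01
    openstack router set net04_router01 --external-gateway net04_ext
    openstack router add subnet net04_router01 net04__subnet
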
-#- description: Allow all tcp
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-# '. /root/keystonercv3; nova secgroup-add-rule default tcp 1 65535 0.0.0.0/0'
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 30}
-# skip_fail: false
-
-#- description: Allow all icmp
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-# '. /root/keystonercv3; nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0'
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 30}
-# skip_fail: false
-
-- description: Sync time
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
- 'service ntp stop; ntpd -gq; service ntp start'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Temp workaround of PROD-13167
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run
- 'apt-get install python-pymysql -y'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-# Configure cinder-volume via salt-call (PROD-13167)
-- description: Set disks 01
- cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set disks 02
- cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
- node_name: {{ HOSTNAME_CTL02 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set disks 03
- cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
- node_name: {{ HOSTNAME_CTL03 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create partitions 01
- cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create partitions 02
- cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL02 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create partitions 03
- cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL03 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create volume group
- cmd: salt "ctl*" cmd.run 'vgcreate cinder-volumes /dev/vdb1'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
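
Condensed, the disk preparation above amounts to the following on each ctl node (a sketch using the standard fdisk answer sequence):

    # Create one primary partition spanning /dev/vdb, then put LVM on it:
    echo -e "n\np\n1\n\n\nw" | fdisk /dev/vdb
    pvcreate /dev/vdb1                  # register the partition as an LVM physical volume
    vgcreate cinder-volumes /dev/vdb1   # the volume group name the cinder LVM driver expects
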
-- description: Install cinder-volume
- cmd: salt 'ctl*' cmd.run 'apt-get install cinder-volume -y'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Install crudini
- cmd: salt "ctl*" cmd.run 'apt-get install crudini -y'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Temporary workaround, set enabled_backends value 01
- cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Temporary workaround, set enabled_backends value 02
- cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
- node_name: {{ HOSTNAME_CTL02 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Temporary workaround, set enabled_backends value 03
- cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
- node_name: {{ HOSTNAME_CTL03 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
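
crudini edits INI files in place, which is why it is installed above; each of the three workaround steps amounts to (sketch):

    crudini --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm
    crudini --get /etc/cinder/cinder.conf DEFAULT enabled_backends   # prints: lvm
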
-- description: Install docker.io on gtw
- cmd: salt-call cmd.run 'apt-get install docker.io -y'
- node_name: {{ HOSTNAME_GTW01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Enable forward policy
- cmd: iptables --policy FORWARD ACCEPT
- node_name: {{ HOSTNAME_GTW01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create rc file on cfg
- cmd: scp ctl01:/root/keystonercv3 /root
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Copy rc file
- cmd: scp /root/keystonercv3 gtw01:/root
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml
deleted file mode 100644
index fd84b59..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml
+++ /dev/null
@@ -1,166 +0,0 @@
-default_context:
- mcp_version: testing
- cicd_control_node01_address: 10.167.4.91
- cicd_control_node01_hostname: cid01
- cicd_control_node02_address: 10.167.4.92
- cicd_control_node02_hostname: cid02
- cicd_control_node03_address: 10.167.4.93
- cicd_control_node03_hostname: cid03
- cicd_control_vip_address: 10.167.4.90
- cicd_control_vip_hostname: cid
- cicd_enabled: 'True'
- cicd_private_key: <<WILL_BE_GENERATED>>
- cicd_public_key: <<WILL_BE_GENERATED>>
- cluster_domain: deploy-name.local
- cluster_name: deployment_name
- deployment_type: physical
- compute_primary_first_nic: eth1
- compute_primary_second_nic: eth2
- control_network_netmask: 255.255.255.0
- control_vlan: '10'
- deploy_network_gateway: ''
- deploy_network_netmask: 255.255.255.0
- dns_server01: 172.18.176.6
- dns_server02: 172.18.208.44
- gateway_primary_first_nic: eth1
- gateway_primary_second_nic: eth2
- infra_deploy_nic: eth0
- maas_deploy_address: 10.167.4.91
- maas_hostname: mas01
- upstream_proxy_enabled: True
- upstream_proxy_address: 10.167.5.1
- upstream_proxy_port: 8080
- infra_kvm01_control_address: 10.167.4.241
- infra_kvm01_deploy_address: 10.167.5.241
- infra_kvm01_hostname: kvm01
- infra_kvm02_control_address: 10.167.4.242
- infra_kvm02_deploy_address: 10.167.5.242
- infra_kvm02_hostname: kvm02
- infra_kvm03_control_address: 10.167.4.243
- infra_kvm03_deploy_address: 10.167.5.243
- infra_kvm03_hostname: kvm03
- infra_kvm_vip_address: 10.167.4.240
- infra_primary_first_nic: eth1
- infra_primary_second_nic: eth2
- kubernetes_enabled: 'False'
- opencontrail_enabled: 'False'
- openldap_enabled: 'False'
- openstack_compute_count: '100'
- openstack_compute_rack01_hostname: cmp
- openstack_compute_rack01_single_subnet: 10.167.4
- openstack_compute_rack01_tenant_subnet: 10.167.6
- openstack_control_address: 10.167.4.10
- openstack_control_hostname: ctl
- openstack_control_node01_address: 10.167.4.11
- openstack_control_node01_hostname: ctl01
- openstack_control_node02_address: 10.167.4.12
- openstack_control_node02_hostname: ctl02
- openstack_control_node03_address: 10.167.4.13
- openstack_control_node03_hostname: ctl03
- openstack_database_address: 10.167.4.50
- openstack_database_hostname: dbs
- openstack_database_node01_address: 10.167.4.51
- openstack_database_node01_hostname: dbs01
- openstack_database_node02_address: 10.167.4.52
- openstack_database_node02_hostname: dbs02
- openstack_database_node03_address: 10.167.4.53
- openstack_database_node03_hostname: dbs03
- openstack_enabled: 'True'
- openstack_gateway_node01_address: 10.167.4.224
- openstack_gateway_node01_hostname: gtw01
- openstack_gateway_node01_tenant_address: 10.167.6.6
- openstack_gateway_node02_address: 10.167.4.225
- openstack_gateway_node02_hostname: gtw02
- openstack_gateway_node02_tenant_address: 10.167.6.7
- openstack_gateway_node03_address: 10.167.4.226
- openstack_gateway_node03_hostname: gtw03
- openstack_gateway_node03_tenant_address: 10.167.6.8
- openstack_message_queue_address: 10.167.4.40
- openstack_message_queue_hostname: msg
- openstack_message_queue_node01_address: 10.167.4.41
- openstack_message_queue_node01_hostname: msg01
- openstack_message_queue_node02_address: 10.167.4.42
- openstack_message_queue_node02_hostname: msg02
- openstack_message_queue_node03_address: 10.167.4.43
- openstack_message_queue_node03_hostname: msg03
- openstack_network_engine: ovs
- openstack_nfv_dpdk_enabled: 'False'
- openstack_nfv_sriov_enabled: 'False'
- openstack_ovs_dvr_enabled: 'True'
- openstack_neutron_qos: 'False'
- openstack_neutron_vlan_aware_vms: 'False'
- openstack_ovs_encapsulation_type: vxlan
- openstack_proxy_address: 10.167.4.80
- openstack_proxy_hostname: prx
- openstack_proxy_node01_address: 10.167.4.81
- openstack_proxy_node01_hostname: prx01
- openstack_proxy_node02_address: 10.167.4.82
- openstack_proxy_node02_hostname: prx02
- openstack_benchmark_node01_hostname: bmk01
- openstack_benchmark_node01_address: 10.167.4.85
- openstack_version: ocata
- public_host: ${_param:openstack_proxy_address}
- reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
- backup_private_key: |
- -----BEGIN RSA PRIVATE KEY-----
- MIIEowIBAAKCAQEAxL6/rVgCetsETpZaUmXmkj8cZ1WN0eubH1FvMDOi/La9ZJyT
- k0C6AYpJnIyEm93pMj5cLm08qRqMW+2pdOhYjcH69yg5MrX5SkRk8jCmIHIYoIbh
- Qnwbnj3dd3I39ZdfU2FO7u2vlbglVou6ZoQxlJDItuLNtzq6EG+w9eF19e7+OsC6
- 6iUItp618zfw1l3J/8nKvCGe2RYDf7mJW6XwCl/DwryJmwwzvPgYJ3QMuDD8/HFj
- lrJ3xjFTXj4b4Ws1XIoy78fFbtiLr4OwqCYkho03u2E5rOOP1qZxZB63sivHMLMO
- MM5bOAQKbulFNoyALADGYfc7sf0bZ4u9XXDXxQIDAQABAoIBAQCfmc2MJRT97KW1
- yqpCpX9BrAiymuiNHf+cjEcSZxEUyHkjIRFmJt+9WB0W7ba1anM92vCUiPDojSzH
- dig9Oi578JxR20NrK8uqv4jUHzrknynzLveVI3CUEcOSnglfJQijbxDFKfOCFPvV
- FUyE1UATMNBh6+LNfMprgu+exuMWOPnDyUiYQ+WZ0JfuZY8fuaZte4woJJOb9LUu
- 5rsMG/smIzjpgZ0Z9ZVDMurfq565qhpaXRAqKeIuyht8pacTo31iMQdHB78AvY/3
- g0z21Gk8k3z0Kr/YFKr2r4FmXY5m/gAUvZly2ZrVQM5XsbTVCzq/JpI5fssNvSbU
- AKmXzf4RAoGBAOO3d4/cstxERzW6hyOTjZIN1ppR52CsnZTsVPbfd0pCtmzmVZce
- CtHKdcXSbTwZvvkK09QSWAp3MoSpd0gIOiLU8Wx/R/RIZsu9BlhTS3r3EQLnk72d
- H/1TTA+j4T/LIYLSojQ1RxvIrHetAD44j732aTwKAHj/SybEAVqNkOB/AoGBAN0u
- gLcrgqIHGrk4VjWSvlCGymfF40equcx+ud7XhfZDGETUOSahW4dPZ52cjPAkrCBQ
- MMfcDwSVGsOAjd+mNt11BHUKobnhXwFaWWuyqyn9NmWFbjMbICVh7E3Of5aVN38o
- lrmo/7LuKMVG7XRwphCv5NkaJmQG4njDyUQWlaW7AoGADCd8wDb9bPhP/LQqBmIX
- ylXmwHHisaxE9O/wUQT4bwREjGd25gv6c9wkkRx8LBsLsGs9hzI7dMOL9Ly+2x9l
- SvqmsC3S/1zl77X1Ir2/Z57MT6Vgo1xBmtnZU3Rhz2/eKAdqFPNLClaZrgGT475N
- HcyLLWMzR0IJFtabY+Puea0CgYA8Zb5wRkldxWLewSuJZZDinGwY+kieAVjLJq/K
- 0j+ah6fQ48LXcah0wpIgz+cMjHcUO9GWQdk3/x9X03rqX5EL2DBnZYfUIl63F9zj
- M97ZkHOSNWVqPzX//0Vv2butewG0j3jZKfTo/2/SrxOYgEpYtC9huWpSVi7xm0US
- erhSkQKBgFIf9JEsfgE57ANhvITZ3ZI0uZXNxZkXQaVg8jvScDi79IIhy9iPzhKC
- aIIQoDNIlWv1ftCRZ5AlBvVXgvQ/QNrwy48JiQTzWZlb9Ezg8w+olQmSbG6fq7Y+
- 7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
- -----END RSA PRIVATE KEY-----
- backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
- salt_master_address: 10.167.4.15
- salt_master_hostname: cfg01
- salt_master_management_address: 10.167.5.15
- stacklight_enabled: 'True'
- fluentd_enabled: 'True'
- stacklight_version: '2'
- stacklight_long_term_storage_type: influxdb
- stacklight_log_address: 10.167.4.60
- stacklight_log_hostname: log
- stacklight_log_node01_address: 10.167.4.61
- stacklight_log_node01_hostname: log01
- stacklight_log_node02_address: 10.167.4.62
- stacklight_log_node02_hostname: log02
- stacklight_log_node03_address: 10.167.4.63
- stacklight_log_node03_hostname: log03
- stacklight_monitor_address: 10.167.4.70
- stacklight_monitor_hostname: mon
- stacklight_monitor_node01_address: 10.167.4.71
- stacklight_monitor_node01_hostname: mon01
- stacklight_monitor_node02_address: 10.167.4.72
- stacklight_monitor_node02_hostname: mon02
- stacklight_monitor_node03_address: 10.167.4.73
- stacklight_monitor_node03_hostname: mon03
- stacklight_telemetry_address: 10.167.4.85
- stacklight_telemetry_hostname: mtr
- stacklight_telemetry_node01_address: 10.167.4.86
- stacklight_telemetry_node01_hostname: mtr01
- stacklight_telemetry_node02_address: 10.167.4.87
- stacklight_telemetry_node02_hostname: mtr02
- stacklight_telemetry_node03_address: 10.167.4.88
- stacklight_telemetry_node03_hostname: mtr03
- tenant_network_gateway: ''
- tenant_network_netmask: 255.255.255.0
- tenant_vlan: '20'
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/salt-context-environment.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/salt-context-environment.yaml
deleted file mode 100644
index 2141209..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/salt-context-environment.yaml
+++ /dev/null
@@ -1,354 +0,0 @@
-nodes:
- cfg01.mcp-ocata-dvr-vxlan.local:
- reclass_storage_name: infra_config_node01
- roles:
- - infra_config
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- kvm01.mcp-ocata-dvr-vxlan.local:
- reclass_storage_name: infra_kvm_node01
- roles:
- - infra_kvm
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- kvm02.mcp-ocata-dvr-vxlan.local:
- reclass_storage_name: infra_kvm_node02
- roles:
- - infra_kvm
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- kvm03.mcp-ocata-dvr-vxlan.local:
- reclass_storage_name: infra_kvm_node03
- roles:
- - infra_kvm
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- cid01.mcp-ocata-dvr-vxlan.local:
- reclass_storage_name: cicd_control_node01
- roles:
- - cicd_control_leader
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- cid02.mcp-ocata-dvr-vxlan.local:
- reclass_storage_name: cicd_control_node02
- roles:
- - cicd_control_manager
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- cid03.mcp-ocata-dvr-vxlan.local:
- reclass_storage_name: cicd_control_node03
- roles:
- - cicd_control_manager
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- ctl01.mcp-ocata-dvr-vxlan.local:
- reclass_storage_name: openstack_control_node01
- roles:
- - openstack_control_leader
- - features_designate
- - features_designate_keystone
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- ctl02.mcp-ocata-dvr-vxlan.local:
- reclass_storage_name: openstack_control_node02
- roles:
- - openstack_control
- - features_designate
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- ctl03.mcp-ocata-dvr-vxlan.local:
- reclass_storage_name: openstack_control_node03
- roles:
- - openstack_control
- - features_designate
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- dbs01.mcp-ocata-dvr-vxlan.local:
- reclass_storage_name: openstack_database_node01
- roles:
- - openstack_database_leader
- - features_designate_database
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- dbs02.mcp-ocata-dvr-vxlan.local:
- reclass_storage_name: openstack_database_node02
- roles:
- - openstack_database
- - features_designate_database
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- dbs03.mcp-ocata-dvr-vxlan.local:
- reclass_storage_name: openstack_database_node03
- roles:
- - openstack_database
- - features_designate_database
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- msg01.mcp-ocata-dvr-vxlan.local:
- reclass_storage_name: openstack_message_queue_node01
- roles:
- - openstack_message_queue
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- msg02.mcp-ocata-dvr-vxlan.local:
- reclass_storage_name: openstack_message_queue_node02
- roles:
- - openstack_message_queue
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- msg03.mcp-ocata-dvr-vxlan.local:
- reclass_storage_name: openstack_message_queue_node03
- roles:
- - openstack_message_queue
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- prx01.mcp-ocata-dvr-vxlan.local:
- reclass_storage_name: openstack_proxy_node01
- roles:
- - openstack_proxy
- - features_designate_proxy
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- prx02.mcp-ocata-dvr-vxlan.local:
- reclass_storage_name: openstack_proxy_node02
- roles:
- - openstack_proxy
- - features_designate_proxy
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- mon01.mcp-ocata-dvr-vxlan.local:
- reclass_storage_name: stacklight_server_node01
- roles:
- - stacklightv2_server_leader
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- mon02.mcp-ocata-dvr-vxlan.local:
- reclass_storage_name: stacklight_server_node02
- roles:
- - stacklightv2_server
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- mon03.mcp-ocata-dvr-vxlan.local:
- reclass_storage_name: stacklight_server_node03
- roles:
- - stacklightv2_server
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- mtr01.mcp-ocata-dvr-vxlan.local:
- reclass_storage_name: stacklight_telemetry_node01
- roles:
- - stacklight_telemetry_leader
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- mtr02.mcp-ocata-dvr-vxlan.local:
- reclass_storage_name: stacklight_telemetry_node02
- roles:
- - stacklight_telemetry
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- mtr03.mcp-ocata-dvr-vxlan.local:
- reclass_storage_name: stacklight_telemetry_node03
- roles:
- - stacklight_telemetry
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- log01.mcp-ocata-dvr-vxlan.local:
- reclass_storage_name: stacklight_log_node01
- roles:
- - stacklight_log_leader_v2
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- log02.mcp-ocata-dvr-vxlan.local:
- reclass_storage_name: stacklight_log_node02
- roles:
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- log03.mcp-ocata-dvr-vxlan.local:
- reclass_storage_name: stacklight_log_node03
- roles:
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
-  # Generator-based compute nodes. For compatibility only.
- cmp<<count>>.mcp-ocata-dvr-vxlan.local:
- reclass_storage_name: openstack_compute_rack01
- roles:
- - openstack_compute
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: bond0_ab_ovs_vxlan_ctl_mesh
- ens5:
- role: bond0_ab_ovs_vxlan_ctl_mesh
- ens6:
- role: bond1_ab_ovs_floating
-
- gtw01.mcp-ocata-dvr-vxlan.local:
- reclass_storage_name: openstack_gateway_node01
- roles:
- - openstack_gateway
- - linux_system_codename_xenial
- classes:
- - system.linux.system.repo.mcp.apt_mirantis.docker
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: bond0_ab_ovs_vxlan_ctl_mesh
- ens5:
- role: bond0_ab_ovs_vxlan_ctl_mesh
- ens6:
- role: bond1_ab_ovs_floating
-
- gtw02.mcp-ocata-dvr-vxlan.local:
- reclass_storage_name: openstack_gateway_node02
- roles:
- - openstack_gateway
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: bond0_ab_ovs_vxlan_ctl_mesh
- ens5:
- role: bond0_ab_ovs_vxlan_ctl_mesh
- ens6:
- role: bond1_ab_ovs_floating
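
Every node entry in the environment context above follows the same shape: an FQDN key, a reclass storage name, a list of reclass role classes, and a per-interface network role. A minimal sketch of the pattern, with a purely hypothetical node name and role set (not part of the removed template):

    foo01.mcp-ocata-dvr-vxlan.local:        # hypothetical example node
      reclass_storage_name: example_node01
      roles:
        - linux_system_codename_xenial      # any reclass role classes
      interfaces:
        ens3:
          role: single_dhcp                 # deploy/admin network via DHCP
        ens4:
          role: single_vlan_ctl             # control-plane VLAN
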
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/salt.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/salt.yaml
deleted file mode 100644
index 829d515..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/salt.yaml
+++ /dev/null
@@ -1,39 +0,0 @@
-{% from 'cookied-mcp-ocata-dvr-vxlan/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-mcp-ocata-dvr-vxlan/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'cookied-mcp-ocata-dvr-vxlan/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# See shared-salt.yaml for other salt model repository parameters
-
-# Name of the context file (without the fixed .yaml extension) used to render the Environment model
-{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','virtual-devops-mcp-ocata-sl2') %}
-# Path to the context files used to render Cluster and Environment models
-{%- set CLUSTER_CONTEXT_NAME = 'salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-environment.yaml'] %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
-
-{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "glusterfs" "xtrabackup" "maas" "backupninja" "jenkins" "fluentd" "logrotate"') }}
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-{{ SHARED.ADJUST_SL_OPTS(OVERRIDES_FILENAME='/srv/salt/reclass/classes/environment/' + ENVIRONMENT_MODEL_INVENTORY_NAME + '/overrides.yml') }}
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
-
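
All of the {% set ... %} lines above use the os_env() helper: the value comes from the shell environment when the variable is set there, and falls back to the given default otherwise. A minimal sketch of the pattern (the variable name here is illustrative only, not from the removed file):

    {% set SOME_SETTING = os_env('SOME_SETTING', 'default-value') %}
    # overridden at render time from the shell:
    #   export SOME_SETTING=custom-value
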
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/sl.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/sl.yaml
deleted file mode 100644
index c71f82d..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/sl.yaml
+++ /dev/null
@@ -1,239 +0,0 @@
-{% from 'cookied-mcp-ocata-dvr-vxlan/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-# Install docker swarm
-- description: Configure docker service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install docker swarm on master node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Send grains to the swarm slave nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' mine.update
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Refresh modules
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Rerun swarm on slaves for proper token population
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure slave nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: List registered Docker swarm nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install keepalived on mon nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'mon*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check the VIP on mon nodes
- cmd: |
- SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
- echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-# Install StackLight v2 infra
-# Launch containers
-- description: Launch prometheus containers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master and I@prometheus:server' state.sls docker.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Check docker ps
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' cmd.run "docker ps"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Install telegraf
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Configure Prometheus exporters, if pillar 'prometheus:exporters' exists on any server
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install elasticsearch server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install kibana server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install elasticsearch client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Install kibana client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:client' state.sls kibana.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check influxdb
- cmd: |
- INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
- if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-# Install Prometheus LTS (optional, if set in the model)
-- description: Prometheus LTS (optional, if set in the model)
- cmd: |
- PROMETHEUS_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' test.ping 1>/dev/null 2>&1 && echo true`;
-    echo "Prometheus relay service presence: ${PROMETHEUS_SERVICE}";
- if [[ "$PROMETHEUS_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' state.sls prometheus
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-# Install service for the log collection
-- description: Configure fluentd
- cmd: |
- FLUENTD_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "Fluentd service presence: ${FLUENTD_SERVICE}";
- if [[ "$FLUENTD_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' state.sls fluentd
- else
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:log_collector' state.sls heka.log_collector
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-# Install heka ceilometer collector
-- description: Install heka ceilometer collector if it exists
- cmd: |
- CEILO=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "Ceilometer service presence: ${CEILO}";
- if [[ "$CEILO" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' state.sls heka.ceilometer_collector;
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' service.restart ceilometer_collector
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-# Collect grains needed to configure the services
-
-- description: Get grains
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Sync modules
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 15}
- skip_fail: false
-
-# Configure the services running in Docker Swarm
-- description: Configure prometheus in docker swarm
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' state.sls prometheus
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure Remote Collector in Docker Swarm for OpenStack deployments
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' state.sls heka.remote_collector
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install sphinx
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@sphinx:server' state.sls sphinx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-
-#- description: Install prometheus alertmanager
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-
-#- description: run docker state
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-#
-#- description: docker ps
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-
-- description: Configure Grafana dashboards and datasources
- cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Run salt minion to create cert files
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
\ No newline at end of file
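
The optional steps in the removed sl.yaml all follow one pattern: probe for a pillar with test.ping against an I@<pillar> compound target, then apply the corresponding state only when minions matched. A minimal sketch of that step pattern (the 'example' pillar and state names are placeholders, not from the removed file):

    - description: Apply example state only where the pillar exists
      cmd: |
        MATCHED=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@example:enabled' test.ping 1>/dev/null 2>&1 && echo true`;
        if [[ "$MATCHED" == "true" ]]; then
          salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@example:enabled' state.sls example
        fi
      node_name: {{ HOSTNAME_CFG01 }}
      retry: {count: 1, delay: 5}
      skip_fail: true
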
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay--meta-data.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
- instance-id: iid-local1
- hostname: {hostname}
- local-hostname: {hostname}
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay--user-data-cfg01.yaml
deleted file mode 100644
index 19ae10b..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,70 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Enable grub menu using updated config below
- - update-grub
-
- # Prepare network connection
- - sudo ifdown ens3
- - sudo ip r d default || true # remove existing default route to get it from dhcp
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
-
- write_files:
- - path: /etc/default/grub.d/97-enable-grub-menu.cfg
- content: |
- GRUB_RECORDFAIL_TIMEOUT=30
- GRUB_TIMEOUT=3
- GRUB_TIMEOUT_STYLE=menu
-
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
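
All three removed user-data templates share the same cloud-config skeleton; only the swap size and the cfg01-specific bits (extra nameserver, SSH client config) differ. A stripped-down sketch of that shared skeleton (illustrative values only):

    #cloud-config
    ssh_pwauth: True
    disable_root: false
    bootcmd:
      # enable root SSH login for the test environment
      - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
      - service sshd restart
    runcmd:
      # create a swap file sized for the node role (4G on cfg01, 16G elsewhere)
      - fallocate -l 4G /swapfile
      - chmod 600 /swapfile
      - mkswap /swapfile
      - swapon /swapfile
      - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
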
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay--user-data1604-hwe.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay--user-data1604-hwe.yaml
deleted file mode 100644
index 319c007..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay--user-data1604-hwe.yaml
+++ /dev/null
@@ -1,59 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Enable grub menu using updated config below
- - update-grub
-
- # Prepare network connection
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 16G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- write_files:
- - path: /etc/default/grub.d/97-enable-grub-menu.cfg
- content: |
- GRUB_RECORDFAIL_TIMEOUT=30
- GRUB_TIMEOUT=3
- GRUB_TIMEOUT_STYLE=menu
-
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay--user-data1604.yaml
deleted file mode 100644
index 319c007..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,59 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Enable grub menu using updated config below
- - update-grub
-
- # Prepare network connection
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 16G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- write_files:
- - path: /etc/default/grub.d/97-enable-grub-menu.cfg
- content: |
- GRUB_RECORDFAIL_TIMEOUT=30
- GRUB_TIMEOUT=3
- GRUB_TIMEOUT_STYLE=menu
-
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay.yaml
deleted file mode 100644
index 502997f..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay.yaml
+++ /dev/null
@@ -1,1163 +0,0 @@
-# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-mcp-ocata-dvr-vxlan') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CID = os_env('HOSTNAME_CID', 'cid.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CID01 = os_env('HOSTNAME_CID01', 'cid01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CID02 = os_env('HOSTNAME_CID02', 'cid02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CID03 = os_env('HOSTNAME_CID03', 'cid03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KVM = os_env('HOSTNAME_KVM', 'kvm.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KVM01 = os_env('HOSTNAME_KVM01', 'kvm01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KVM02 = os_env('HOSTNAME_KVM02', 'kvm02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM03', 'kvm03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL = os_env('HOSTNAME_CTL', 'ctl.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_DBS = os_env('HOSTNAME_DBS', 'dbs.' + DOMAIN_NAME) %}
-{% set HOSTNAME_DBS01 = os_env('HOSTNAME_DBS01', 'dbs01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_DBS02 = os_env('HOSTNAME_DBS02', 'dbs02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_DBS03 = os_env('HOSTNAME_DBS03', 'dbs03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MSG = os_env('HOSTNAME_MSG', 'msg.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MSG01 = os_env('HOSTNAME_MSG01', 'msg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MSG02 = os_env('HOSTNAME_MSG02', 'msg02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MSG03 = os_env('HOSTNAME_MSG03', 'msg03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON = os_env('HOSTNAME_MON', 'mon.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_LOG = os_env('HOSTNAME_LOG', 'log.' + DOMAIN_NAME) %}
-{% set HOSTNAME_LOG01 = os_env('HOSTNAME_LOG01', 'log01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_LOG02 = os_env('HOSTNAME_LOG02', 'log02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_LOG03 = os_env('HOSTNAME_LOG03', 'log03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MTR = os_env('HOSTNAME_MTR', 'mtr.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MTR01 = os_env('HOSTNAME_MTR01', 'mtr01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MTR02 = os_env('HOSTNAME_MTR02', 'mtr02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MTR03 = os_env('HOSTNAME_MTR03', 'mtr03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp001.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp002.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW02 = os_env('HOSTNAME_GTW02', 'gtw02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW03 = os_env('HOSTNAME_GTW03', 'gtw03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_PRX = os_env('HOSTNAME_PRX', 'prx.' + DOMAIN_NAME) %}
-{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_PRX02 = os_env('HOSTNAME_PRX02', 'prx02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_BMK01 = os_env('HOSTNAME_BMK01', 'bmk01.' + DOMAIN_NAME) %}
-
-{% import 'cookied-mcp-ocata-dvr-vxlan/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'cookied-mcp-ocata-dvr-vxlan/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'cookied-mcp-ocata-dvr-vxlan/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
-{% import 'cookied-mcp-ocata-dvr-vxlan/underlay--user-data1604-hwe.yaml' as CLOUDINIT_USER_DATA_1604_HWE with context %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
- - &cloudinit_user_data_1604_hwe {{ CLOUDINIT_USER_DATA_1604_HWE }}
-
-template:
- devops_settings:
- env_name: {{ os_env('ENV_NAME', LAB_CONFIG_NAME + '_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
- address_pools:
- private-pool01:
- net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +15
-
- default_{{ HOSTNAME_CID }}: +90
- default_{{ HOSTNAME_CID01 }}: +91
- default_{{ HOSTNAME_CID02 }}: +92
- default_{{ HOSTNAME_CID03 }}: +93
-
- default_{{ HOSTNAME_KVM }}: +240
- default_{{ HOSTNAME_KVM01 }}: +241
- default_{{ HOSTNAME_KVM02 }}: +242
- default_{{ HOSTNAME_KVM03 }}: +243
-
- default_{{ HOSTNAME_CTL }}: +10
- default_{{ HOSTNAME_CTL01 }}: +11
- default_{{ HOSTNAME_CTL02 }}: +12
- default_{{ HOSTNAME_CTL03 }}: +13
- default_{{ HOSTNAME_DBS }}: +50
- default_{{ HOSTNAME_DBS01 }}: +51
- default_{{ HOSTNAME_DBS02 }}: +52
- default_{{ HOSTNAME_DBS03 }}: +53
- default_{{ HOSTNAME_GTW01 }}: +224
- default_{{ HOSTNAME_GTW02 }}: +225
- default_{{ HOSTNAME_GTW03 }}: +226
- default_{{ HOSTNAME_MSG }}: +40
- default_{{ HOSTNAME_MSG01 }}: +41
- default_{{ HOSTNAME_MSG02 }}: +42
- default_{{ HOSTNAME_MSG03 }}: +43
- default_{{ HOSTNAME_PRX }}: +80
- default_{{ HOSTNAME_PRX01 }}: +81
- default_{{ HOSTNAME_PRX02 }}: +82
- default_{{ HOSTNAME_BMK01 }}: +85
-
- default_{{ HOSTNAME_MON }}: +70
- default_{{ HOSTNAME_MON01 }}: +71
- default_{{ HOSTNAME_MON02 }}: +72
- default_{{ HOSTNAME_MON03 }}: +73
- default_{{ HOSTNAME_LOG }}: +60
- default_{{ HOSTNAME_LOG01 }}: +61
- default_{{ HOSTNAME_LOG02 }}: +62
- default_{{ HOSTNAME_LOG03 }}: +63
- default_{{ HOSTNAME_MTR }}: +85
- default_{{ HOSTNAME_MTR01 }}: +86
- default_{{ HOSTNAME_MTR02 }}: +87
- default_{{ HOSTNAME_MTR03 }}: +88
-
- default_{{ HOSTNAME_CMP01 }}: +101
- default_{{ HOSTNAME_CMP02 }}: +102
- ip_ranges:
- dhcp: [+90, -10]
-
- admin-pool01:
- net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +15
-
- default_{{ HOSTNAME_CID }}: +90
- default_{{ HOSTNAME_CID01 }}: +91
- default_{{ HOSTNAME_CID02 }}: +92
- default_{{ HOSTNAME_CID03 }}: +93
-
- default_{{ HOSTNAME_KVM }}: +240
- default_{{ HOSTNAME_KVM01 }}: +241
- default_{{ HOSTNAME_KVM02 }}: +242
- default_{{ HOSTNAME_KVM03 }}: +243
-
- default_{{ HOSTNAME_CTL }}: +10
- default_{{ HOSTNAME_CTL01 }}: +11
- default_{{ HOSTNAME_CTL02 }}: +12
- default_{{ HOSTNAME_CTL03 }}: +13
- default_{{ HOSTNAME_DBS }}: +50
- default_{{ HOSTNAME_DBS01 }}: +51
- default_{{ HOSTNAME_DBS02 }}: +52
- default_{{ HOSTNAME_DBS03 }}: +53
- default_{{ HOSTNAME_GTW01 }}: +224
- default_{{ HOSTNAME_GTW02 }}: +225
- default_{{ HOSTNAME_GTW03 }}: +226
- default_{{ HOSTNAME_MSG }}: +40
- default_{{ HOSTNAME_MSG01 }}: +41
- default_{{ HOSTNAME_MSG02 }}: +42
- default_{{ HOSTNAME_MSG03 }}: +43
- default_{{ HOSTNAME_PRX }}: +80
- default_{{ HOSTNAME_PRX01 }}: +81
- default_{{ HOSTNAME_PRX02 }}: +82
- default_{{ HOSTNAME_BMK01 }}: +85
-
- default_{{ HOSTNAME_MON }}: +70
- default_{{ HOSTNAME_MON01 }}: +71
- default_{{ HOSTNAME_MON02 }}: +72
- default_{{ HOSTNAME_MON03 }}: +73
- default_{{ HOSTNAME_LOG }}: +60
- default_{{ HOSTNAME_LOG01 }}: +61
- default_{{ HOSTNAME_LOG02 }}: +62
- default_{{ HOSTNAME_LOG03 }}: +63
-
- default_{{ HOSTNAME_MTR }}: +85
- default_{{ HOSTNAME_MTR01 }}: +86
- default_{{ HOSTNAME_MTR02 }}: +87
- default_{{ HOSTNAME_MTR03 }}: +88
-
- default_{{ HOSTNAME_CMP01 }}: +101
- default_{{ HOSTNAME_CMP02 }}: +102
- ip_ranges:
- dhcp: [+90, -10]
-
- tenant-pool01:
- net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +15
-
- default_{{ HOSTNAME_CID }}: +90
- default_{{ HOSTNAME_CID01 }}: +91
- default_{{ HOSTNAME_CID02 }}: +92
- default_{{ HOSTNAME_CID03 }}: +93
-
- default_{{ HOSTNAME_KVM }}: +240
- default_{{ HOSTNAME_KVM01 }}: +241
- default_{{ HOSTNAME_KVM02 }}: +242
- default_{{ HOSTNAME_KVM03 }}: +243
-
- default_{{ HOSTNAME_CTL }}: +10
- default_{{ HOSTNAME_CTL01 }}: +11
- default_{{ HOSTNAME_CTL02 }}: +12
- default_{{ HOSTNAME_CTL03 }}: +13
- default_{{ HOSTNAME_DBS }}: +50
- default_{{ HOSTNAME_DBS01 }}: +51
- default_{{ HOSTNAME_DBS02 }}: +52
- default_{{ HOSTNAME_DBS03 }}: +53
- default_{{ HOSTNAME_GTW01 }}: +224
- default_{{ HOSTNAME_GTW02 }}: +225
- default_{{ HOSTNAME_GTW03 }}: +226
- default_{{ HOSTNAME_MSG }}: +40
- default_{{ HOSTNAME_MSG01 }}: +41
- default_{{ HOSTNAME_MSG02 }}: +42
- default_{{ HOSTNAME_MSG03 }}: +43
- default_{{ HOSTNAME_PRX }}: +80
- default_{{ HOSTNAME_PRX01 }}: +81
- default_{{ HOSTNAME_PRX02 }}: +82
- default_{{ HOSTNAME_BMK01 }}: +85
-
- default_{{ HOSTNAME_MON }}: +70
- default_{{ HOSTNAME_MON01 }}: +71
- default_{{ HOSTNAME_MON02 }}: +72
- default_{{ HOSTNAME_MON03 }}: +73
- default_{{ HOSTNAME_LOG }}: +60
- default_{{ HOSTNAME_LOG01 }}: +61
- default_{{ HOSTNAME_LOG02 }}: +62
- default_{{ HOSTNAME_LOG03 }}: +63
- default_{{ HOSTNAME_MTR }}: +85
- default_{{ HOSTNAME_MTR01 }}: +86
- default_{{ HOSTNAME_MTR02 }}: +87
- default_{{ HOSTNAME_MTR03 }}: +88
-
- default_{{ HOSTNAME_CMP01 }}: +101
- default_{{ HOSTNAME_CMP02 }}: +102
- ip_ranges:
- dhcp: [+10, -10]
-
- external-pool01:
- net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +15
-
- default_{{ HOSTNAME_CID }}: +90
- default_{{ HOSTNAME_CID01 }}: +91
- default_{{ HOSTNAME_CID02 }}: +92
- default_{{ HOSTNAME_CID03 }}: +93
-
- default_{{ HOSTNAME_KVM }}: +240
- default_{{ HOSTNAME_KVM01 }}: +241
- default_{{ HOSTNAME_KVM02 }}: +242
- default_{{ HOSTNAME_KVM03 }}: +243
-
- default_{{ HOSTNAME_CTL }}: +10
- default_{{ HOSTNAME_CTL01 }}: +11
- default_{{ HOSTNAME_CTL02 }}: +12
- default_{{ HOSTNAME_CTL03 }}: +13
- default_{{ HOSTNAME_DBS }}: +50
- default_{{ HOSTNAME_DBS01 }}: +51
- default_{{ HOSTNAME_DBS02 }}: +52
- default_{{ HOSTNAME_DBS03 }}: +53
- default_{{ HOSTNAME_GTW01 }}: +224
- default_{{ HOSTNAME_GTW02 }}: +225
- default_{{ HOSTNAME_GTW03 }}: +226
- default_{{ HOSTNAME_MSG }}: +40
- default_{{ HOSTNAME_MSG01 }}: +41
- default_{{ HOSTNAME_MSG02 }}: +42
- default_{{ HOSTNAME_MSG03 }}: +43
- default_{{ HOSTNAME_PRX }}: +80
- default_{{ HOSTNAME_PRX01 }}: +81
- default_{{ HOSTNAME_PRX02 }}: +82
- default_{{ HOSTNAME_BMK01 }}: +85
-
- default_{{ HOSTNAME_MON }}: +70
- default_{{ HOSTNAME_MON01 }}: +71
- default_{{ HOSTNAME_MON02 }}: +72
- default_{{ HOSTNAME_MON03 }}: +73
- default_{{ HOSTNAME_LOG }}: +60
- default_{{ HOSTNAME_LOG01 }}: +61
- default_{{ HOSTNAME_LOG02 }}: +62
- default_{{ HOSTNAME_LOG03 }}: +63
- default_{{ HOSTNAME_MTR }}: +85
- default_{{ HOSTNAME_MTR01 }}: +86
- default_{{ HOSTNAME_MTR02 }}: +87
- default_{{ HOSTNAME_MTR03 }}: +88
-
- default_{{ HOSTNAME_CMP01 }}: +101
- default_{{ HOSTNAME_CMP02 }}: +102
- ip_ranges:
- dhcp: [+10, -10]
-
-
- groups:
- - name: default
- driver:
- name: devops.driver.libvirt
- params:
- connection_string: !os_env CONNECTION_STRING, qemu:///system
- storage_pool_name: !os_env STORAGE_POOL_NAME, default
- stp: False
- hpet: False
- enable_acpi: true
- use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
- use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
-
- network_pools:
- admin: admin-pool01
- private: private-pool01
- tenant: tenant-pool01
- external: external-pool01
-
- l2_network_devices:
- admin:
- address_pool: admin-pool01
- dhcp: true
- forward:
- mode: nat
-
- private_br:
- vlan_ifaces:
- - 10
-
- private:
- address_pool: private-pool01
- dhcp: false
- forward:
- mode: route
- parent_iface:
- l2_net_dev: private_br
- tag: 10
-
- external:
- address_pool: external-pool01
- dhcp: false
- forward:
- mode: route
-
-
- group_volumes:
-    - name: cloudimage1604 # This name is used for the 'backing_store' option of node volumes.
- source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- format: qcow2
- - name: cfg01_day01_image # Pre-configured day01 image
- source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
- format: qcow2
- - name: mcp_ubuntu_1604_image # Pre-configured image for VCP nodes initially based on kvm nodes.
-      # http://images.mirantis.com/ubuntu-16-04-x64-latest.qcow2 (preferred)
- # or
- # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- source_image: {{ os_env('MCP_IMAGE_PATH1604', os_env('IMAGE_PATH1604')) }}
- format: qcow2
-
- nodes:
- - name: {{ HOSTNAME_CFG01 }}
- role: salt_master
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cfg01_day01_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                   # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_cfg01
-
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private_br
- interface_model: *interface_model
- network_config:
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CID01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                   # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: &interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private_br
- interface_model: *interface_model
- network_config: &network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CID02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                   # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CID03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                   # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- # KVM* nodes required for services like glusterfs.server
- - name: {{ HOSTNAME_KVM01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                   # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_KVM02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                   # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_KVM03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                   # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_DBS01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 6144
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                   # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_DBS02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 6144
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                   # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_DBS03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 6144
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                   # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MSG01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                   # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MSG02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                   # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MSG03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                   # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CTL01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 6144
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                   # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CTL02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 6144
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                   # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CTL03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 6144
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                   # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 6144
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                   # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 6144
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                   # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 6144
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                   # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_LOG01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                   # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_LOG02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                   # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_LOG03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                   # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MTR01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                   # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MTR02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                   # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MTR03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                   # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_PRX01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
-                   # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CMP01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 3072
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_hwe
-
-
- interfaces: &all_interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private_br
- interface_model: *interface_model
- - label: ens5
- l2_network_device: private_br
- interface_model: *interface_model
- - label: ens6
- l2_network_device: external
- interface_model: *interface_model
- network_config: &all_network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
- ens5:
- networks:
- - private
- ens6:
- networks:
- - external
-
- - name: {{ HOSTNAME_CMP02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 3072
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_hwe
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_GTW01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604_hwe
-
- interfaces: *all_interfaces
- network_config: *all_network_config
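Note on the pattern above: these underlay templates define a node's interface layout once with a YAML anchor (&interfaces, &all_interfaces) and let every similar node reuse it through an alias (*interfaces). A minimal sketch of the mechanism, with hypothetical node names:

    nodes:
      - name: ctl01
        interfaces: &ctl_interfaces   # anchor: define the list once
          - label: ens3
            l2_network_device: admin
      - name: ctl02
        interfaces: *ctl_interfaces   # alias: reuse it verbatim

Compute and gateway nodes get a separate anchor (&all_interfaces) because they carry extra NICs (private_br twice, plus external) that the control-plane nodes do not.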
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr/_context-cookiecutter-mcp-ocata-dvr.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr/_context-cookiecutter-mcp-ocata-dvr.yaml
deleted file mode 100644
index a74e3d7..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr/_context-cookiecutter-mcp-ocata-dvr.yaml
+++ /dev/null
@@ -1,188 +0,0 @@
-default_context:
- bmk_enabled: 'False'
- ceph_enabled: 'False'
- cicd_enabled: 'False'
- designate_backend: bind
- cluster_domain: cookied-mcp-ocata-dvr.local
- cluster_name: cookied-mcp-ocata-dvr
- compute_bond_mode: active-backup
- compute_primary_first_nic: eth1
- compute_primary_second_nic: eth2
- context_seed: wUqrwKeBTCpRpVrhK1KwZQv4cjM9VhG7L2vQ0iQsTuMrXASklEBDmJEf6bnPEqcK
- control_network_netmask: 255.255.255.0
- control_network_subnet: 172.16.10.0/24
- control_vlan: '10'
- cookiecutter_template_branch: master
- cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/mk/cookiecutter-templates.git
- deploy_network_gateway: 192.168.10.1
- deploy_network_netmask: 255.255.255.0
- deploy_network_subnet: 192.168.10.0/24
- deployment_type: physical
- dns_server01: 172.18.176.6
- dns_server02: 172.18.208.44
- email_address: ddmitriev@mirantis.com
- gateway_primary_first_nic: eth1
- gateway_primary_second_nic: eth2
- infra_bond_mode: active-backup
- infra_deploy_nic: eth0
- infra_kvm01_control_address: 172.16.10.101
- infra_kvm01_deploy_address: 192.168.10.101
- infra_kvm01_hostname: kvm01
- infra_kvm02_control_address: 172.16.10.102
- infra_kvm02_deploy_address: 192.168.10.102
- infra_kvm02_hostname: kvm02
- infra_kvm03_control_address: 172.16.10.103
- infra_kvm03_deploy_address: 192.168.10.103
- infra_kvm03_hostname: kvm03
- infra_kvm_vip_address: 172.16.10.100
- infra_primary_first_nic: eth1
- infra_primary_second_nic: eth2
- kubernetes_enabled: 'False'
- local_repositories: 'False'
- maas_deploy_address: 192.168.10.90
- maas_hostname: cfg01
- mcp_version: stable
- offline_deployment: 'False'
- opencontrail_enabled: 'False'
- openstack_benchmark_node01_address: 172.16.10.95
- openstack_benchmark_node01_hostname: bmk01
- openstack_cluster_size: compact
- openstack_compute_count: '2'
- openstack_compute_rack01_hostname: cmp
- openstack_compute_rack01_single_subnet: 172.16.10
- openstack_compute_rack01_tenant_subnet: 10.1.0
- openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
- openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
- openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
- openstack_control_address: 172.16.10.100
- openstack_control_hostname: ctl
- openstack_control_node01_address: 172.16.10.101
- openstack_control_node01_hostname: ctl01
- openstack_control_node02_address: 172.16.10.102
- openstack_control_node02_hostname: ctl02
- openstack_control_node03_address: 172.16.10.103
- openstack_control_node03_hostname: ctl03
- openstack_database_address: 172.16.10.100
- openstack_database_hostname: ctl
- openstack_database_node01_address: 172.16.10.101
- openstack_database_node01_hostname: ctl01
- openstack_database_node02_address: 172.16.10.102
- openstack_database_node02_hostname: ctl02
- openstack_database_node03_address: 172.16.10.103
- openstack_database_node03_hostname: ctl03
- openstack_enabled: 'True'
- openstack_gateway_node01_address: 172.16.10.110
- openstack_gateway_node01_hostname: gtw01
- openstack_gateway_node01_tenant_address: 10.1.0.6
- openstack_gateway_node02_address: 172.16.10.111
- openstack_gateway_node02_hostname: gtw02
- openstack_gateway_node02_tenant_address: 10.1.0.7
- openstack_gateway_node03_address: 172.16.10.112
- openstack_gateway_node03_hostname: gtw03
- openstack_gateway_node03_tenant_address: 10.1.0.8
- openstack_message_queue_address: 172.16.10.100
- openstack_message_queue_hostname: ctl
- openstack_message_queue_node01_address: 172.16.10.101
- openstack_message_queue_node01_hostname: ctl01
- openstack_message_queue_node02_address: 172.16.10.102
- openstack_message_queue_node02_hostname: ctl02
- openstack_message_queue_node03_address: 172.16.10.103
- openstack_message_queue_node03_hostname: ctl03
- openstack_network_engine: ovs
- openstack_neutron_qos: 'False'
- openstack_neutron_vlan_aware_vms: 'False'
- openstack_nfv_dpdk_enabled: 'False'
- openstack_nfv_sriov_enabled: 'False'
- openstack_nova_compute_nfv_req_enabled: 'False'
- openstack_ovs_dvr_enabled: 'True'
- openstack_ovs_encapsulation_type: vxlan
- openstack_proxy_address: 172.16.10.80
- openstack_proxy_hostname: prx
- openstack_proxy_node01_address: 172.16.10.121
- openstack_proxy_node01_hostname: prx01
- openstack_proxy_node02_address: 172.16.10.122
- openstack_proxy_node02_hostname: prx02
- openstack_upgrade_node01_address: 172.16.10.19
- openstack_version: ocata
- oss_enabled: 'False'
- oss_node03_address: ${_param:stacklight_monitor_node03_address}
- oss_webhook_app_id: '24'
- oss_pushkin_email_sender_password: password
- oss_pushkin_smtp_port: '587'
- oss_webhook_login_id: '13'
- platform: openstack_enabled
- public_host: ${_param:openstack_proxy_address}
- publication_method: email
- reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
- backup_private_key: |
- -----BEGIN RSA PRIVATE KEY-----
- MIIEowIBAAKCAQEAxL6/rVgCetsETpZaUmXmkj8cZ1WN0eubH1FvMDOi/La9ZJyT
- k0C6AYpJnIyEm93pMj5cLm08qRqMW+2pdOhYjcH69yg5MrX5SkRk8jCmIHIYoIbh
- Qnwbnj3dd3I39ZdfU2FO7u2vlbglVou6ZoQxlJDItuLNtzq6EG+w9eF19e7+OsC6
- 6iUItp618zfw1l3J/8nKvCGe2RYDf7mJW6XwCl/DwryJmwwzvPgYJ3QMuDD8/HFj
- lrJ3xjFTXj4b4Ws1XIoy78fFbtiLr4OwqCYkho03u2E5rOOP1qZxZB63sivHMLMO
- MM5bOAQKbulFNoyALADGYfc7sf0bZ4u9XXDXxQIDAQABAoIBAQCfmc2MJRT97KW1
- yqpCpX9BrAiymuiNHf+cjEcSZxEUyHkjIRFmJt+9WB0W7ba1anM92vCUiPDojSzH
- dig9Oi578JxR20NrK8uqv4jUHzrknynzLveVI3CUEcOSnglfJQijbxDFKfOCFPvV
- FUyE1UATMNBh6+LNfMprgu+exuMWOPnDyUiYQ+WZ0JfuZY8fuaZte4woJJOb9LUu
- 5rsMG/smIzjpgZ0Z9ZVDMurfq565qhpaXRAqKeIuyht8pacTo31iMQdHB78AvY/3
- g0z21Gk8k3z0Kr/YFKr2r4FmXY5m/gAUvZly2ZrVQM5XsbTVCzq/JpI5fssNvSbU
- AKmXzf4RAoGBAOO3d4/cstxERzW6hyOTjZIN1ppR52CsnZTsVPbfd0pCtmzmVZce
- CtHKdcXSbTwZvvkK09QSWAp3MoSpd0gIOiLU8Wx/R/RIZsu9BlhTS3r3EQLnk72d
- H/1TTA+j4T/LIYLSojQ1RxvIrHetAD44j732aTwKAHj/SybEAVqNkOB/AoGBAN0u
- gLcrgqIHGrk4VjWSvlCGymfF40equcx+ud7XhfZDGETUOSahW4dPZ52cjPAkrCBQ
- MMfcDwSVGsOAjd+mNt11BHUKobnhXwFaWWuyqyn9NmWFbjMbICVh7E3Of5aVN38o
- lrmo/7LuKMVG7XRwphCv5NkaJmQG4njDyUQWlaW7AoGADCd8wDb9bPhP/LQqBmIX
- ylXmwHHisaxE9O/wUQT4bwREjGd25gv6c9wkkRx8LBsLsGs9hzI7dMOL9Ly+2x9l
- SvqmsC3S/1zl77X1Ir2/Z57MT6Vgo1xBmtnZU3Rhz2/eKAdqFPNLClaZrgGT475N
- HcyLLWMzR0IJFtabY+Puea0CgYA8Zb5wRkldxWLewSuJZZDinGwY+kieAVjLJq/K
- 0j+ah6fQ48LXcah0wpIgz+cMjHcUO9GWQdk3/x9X03rqX5EL2DBnZYfUIl63F9zj
- M97ZkHOSNWVqPzX//0Vv2butewG0j3jZKfTo/2/SrxOYgEpYtC9huWpSVi7xm0US
- erhSkQKBgFIf9JEsfgE57ANhvITZ3ZI0uZXNxZkXQaVg8jvScDi79IIhy9iPzhKC
- aIIQoDNIlWv1ftCRZ5AlBvVXgvQ/QNrwy48JiQTzWZlb9Ezg8w+olQmSbG6fq7Y+
- 7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
- -----END RSA PRIVATE KEY-----
- backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
- salt_api_password: H0rTPdmktZ8RI7T7y6fjqY0uEbbs7Kwi
- salt_api_password_hash: $6$lfbIFtMZ$.nTbTDMzs1iYv0WqkZHia8H8Fma963Nv3qyyz1x68jQh0YXK9i907B/hvoG4QHMvfolE7V7vQnFClJ1mVA3Yb.
- salt_master_address: 172.16.10.90
- salt_master_hostname: cfg01
- salt_master_management_address: 192.168.10.90
- shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/salt-models/reclass-system.git
- fluentd_enabled: 'True'
- stacklight_enabled: 'True'
- stacklight_log_address: 172.16.10.70
- stacklight_log_hostname: mon
- stacklight_log_node01_address: 172.16.10.107
- stacklight_log_node01_hostname: mon01
- stacklight_log_node02_address: 172.16.10.108
- stacklight_log_node02_hostname: mon02
- stacklight_log_node03_address: 172.16.10.109
- stacklight_log_node03_hostname: mon03
- stacklight_monitor_address: 172.16.10.70
- stacklight_monitor_hostname: mon
- stacklight_monitor_node01_address: 172.16.10.107
- stacklight_monitor_node01_hostname: mon01
- stacklight_monitor_node02_address: 172.16.10.108
- stacklight_monitor_node02_hostname: mon02
- stacklight_monitor_node03_address: 172.16.10.109
- stacklight_monitor_node03_hostname: mon03
- stacklight_notification_address: alerts@localhost
- stacklight_notification_smtp_host: 127.0.0.1
- stacklight_telemetry_address: 172.16.10.70
- stacklight_telemetry_hostname: mon
- stacklight_telemetry_node01_address: 172.16.10.107
- stacklight_telemetry_node01_hostname: mon01
- stacklight_telemetry_node02_address: 172.16.10.108
- stacklight_telemetry_node02_hostname: mon02
- stacklight_telemetry_node03_address: 172.16.10.109
- stacklight_telemetry_node03_hostname: mon03
- stacklight_version: '2'
- static_ips_on_deploy_network_enabled: 'False'
- tenant_network_gateway: 10.1.0.1
- tenant_network_netmask: 255.255.255.0
- tenant_network_subnet: 10.1.0.0/24
- tenant_vlan: '20'
- upstream_proxy_enabled: 'False'
- use_default_network_scheme: 'False'
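Values such as ${_param:stacklight_monitor_node03_address} in the context above are not expanded by cookiecutter; they are written into the generated reclass model verbatim and interpolated by reclass when the model is rendered. A minimal sketch of that resolution, with a hypothetical consumer key (some_service):

    parameters:
      _param:
        stacklight_monitor_node03_address: 172.16.10.109
      some_service:
        server:
          bind_address: ${_param:stacklight_monitor_node03_address}   # -> 172.16.10.109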
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr/_context-environment.yaml
deleted file mode 100644
index 3a11834..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr/_context-environment.yaml
+++ /dev/null
@@ -1,182 +0,0 @@
-nodes:
- cfg01.mcp11-ovs-dpdk.local:
- reclass_storage_name: infra_config_node01
- roles:
- - infra_config
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl01.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_control_node01
- roles:
- - infra_kvm
- - openstack_control_leader
- - openstack_database_leader
- - openstack_message_queue
- - features_designate_bind9_database
- - features_designate_bind9
- - features_designate_bind9_keystone
- - features_lvm_backend_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl02.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_control_node02
- roles:
- - infra_kvm
- - openstack_control
- - openstack_database
- - openstack_message_queue
- - features_designate_bind9_database
- - features_designate_bind9
- - features_lvm_backend_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl03.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_control_node03
- roles:
- - infra_kvm
- - openstack_control
- - openstack_database
- - openstack_message_queue
- - features_designate_bind9_database
- - features_designate_bind9
- - features_lvm_backend_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- prx01.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_proxy_node01
- roles:
- - openstack_proxy
- - features_designate_bind9_proxy
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mon01.mcp11-ovs-dpdk.local:
- reclass_storage_name: stacklight_server_node01
- roles:
- - stacklightv2_server_leader
- - stacklight_telemetry_leader
- - stacklight_log_leader_v2
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mon02.mcp11-ovs-dpdk.local:
- reclass_storage_name: stacklight_server_node02
- roles:
- - stacklightv2_server
- - stacklight_telemetry
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mon03.mcp11-ovs-dpdk.local:
- reclass_storage_name: stacklight_server_node03
- roles:
- - stacklightv2_server
- - stacklight_telemetry
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- # Generator-based computes. For compatibility only
- cmp<<count>>.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_compute_rack01
- roles:
- - openstack_compute
- - features_lvm_backend_volume_vdb
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- ens5:
- role: bond0_ab_ovs_vxlan_mesh
- ens6:
- role: bond1_ab_ovs_floating
-
- gtw01.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_gateway_node01
- roles:
- - openstack_gateway
- - linux_system_codename_xenial
- classes:
- - system.linux.system.repo.mcp.apt_mirantis.docker
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- ens5:
- role: bond0_ab_ovs_vxlan_mesh
- ens6:
- role: bond1_ab_ovs_floating
-
- dns01.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_dns_node01
- roles:
- - features_designate_bind9_dns
- - linux_system_codename_xenial
- classes:
- - system.linux.system.repo.mcp.extra
- - system.linux.system.repo.mcp.apt_mirantis.openstack
- - system.linux.system.repo.mcp.apt_mirantis.ubuntu
- - system.linux.system.repo.mcp.apt_mirantis.saltstack
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- single_address: ${_param:openstack_dns_node01_address}
-
- dns02.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_dns_node02
- roles:
- - features_designate_bind9_dns
- - linux_system_codename_xenial
- classes:
- - system.linux.system.repo.mcp.extra
- - system.linux.system.repo.mcp.apt_mirantis.openstack
- - system.linux.system.repo.mcp.apt_mirantis.ubuntu
- - system.linux.system.repo.mcp.apt_mirantis.saltstack
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- single_address: ${_param:openstack_dns_node02_address}
\ No newline at end of file
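The cmp<<count>> entry in the environment file above is a generator pattern: the templating expands it into one node definition per compute (cmp001, cmp002, ...) according to the configured compute count, so identical per-node blocks do not have to be written out by hand. Expanded manually, it is roughly equivalent to:

    cmp001.mcp11-ovs-dpdk.local:
      reclass_storage_name: openstack_compute_rack01
      roles:
        - openstack_compute
        - features_lvm_backend_volume_vdb
        - linux_system_codename_xenial
      # interfaces as in the template entry
    cmp002.mcp11-ovs-dpdk.local:
      # same roles and interface mapping as cmp001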
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr/core.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr/core.yaml
deleted file mode 100644
index fc5d4f8..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr/core.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
-{% from 'cookied-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-{% import 'shared-core.yaml' as SHARED_CORE with context %}
-
-{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
-{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
-{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
-{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
-{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
-{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
-{{ SHARED_CORE.MACRO_CHECK_VIP() }}
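core.yaml contains no steps of its own: each line is a Jinja macro imported from shared-core.yaml, and each macro renders into one or more deployment steps in the usual description/cmd/node_name/retry/skip_fail shape. As a purely hypothetical sketch, a call such as MACRO_INSTALL_MEMCACHED() would expand to something like:

    - description: Install memcached on controller nodes
      cmd: salt --hard-crash --state-output=mixed --state-verbose=False
        -C 'I@memcached:server' state.sls memcached
      node_name: {{ HOSTNAME_CFG01 }}
      retry: {count: 2, delay: 5}
      skip_fail: false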
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr/openstack.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr/openstack.yaml
deleted file mode 100644
index dc9de1c..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr/openstack.yaml
+++ /dev/null
@@ -1,188 +0,0 @@
-{% from 'cookied-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'cookied-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'cookied-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'cookied-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME') %}
-{% set OVERRIDE_POLICY = os_env('OVERRIDE_POLICY', '') %}
-
-{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-# Install OpenStack control services
-
-{%- if OVERRIDE_POLICY != '' %}
-- description: Upload policy override
- upload:
- local_path: {{ config.salt_deploy.templates_dir }}{{ LAB_CONFIG_NAME }}/
- local_filename: overrides-policy.yml
- remote_path: /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/
- node_name: {{ HOSTNAME_CFG01 }}
-
-- description: Create custom cluster control class
- cmd: echo -e "classes:\n- cluster.{{ LAB_CONFIG_NAME }}.openstack.control_orig\n$(cat /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml)" > /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml
- node_name: {{ HOSTNAME_CFG01 }}
-
-- description: Rename control classes
- cmd: mv /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control_orig.yml &&
- ln -s /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml &&
- salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all &&
- salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
- node_name: {{ HOSTNAME_CFG01 }}
-{%- endif %}
-
-# Deploy nginx before openstack services (PROD-22740)
-- description: Deploy nginx proxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
-
-# Install the Designate backend (bind)
-- description: Install bind
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@bind:server' state.sls bind
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_DESIGNATE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HORIZON() }}
-
-# Install compute node
-
-- description: Apply formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Re-apply (as in the docs) formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check IP on computes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
- 'ip a'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 10, delay: 30}
- skip_fail: false
-
-
- # Upload cirros image
-
-- description: Upload cirros image on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- 'wget http://images.mirantis.com.s3.amazonaws.com/cirros-x64-20170828.qcow2'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Register image in glance
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; glance --timeout 120 image-create --name cirros --visibility public --disk-format qcow2 --container-format bare --progress < /root/cirros-x64-20170828.qcow2'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create net04_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create router
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-create net04_router01'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set gateway
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Add interface
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Allow all tcp
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; nova secgroup-add-rule default tcp 1 65535 0.0.0.0/0'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Allow all icmp
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: sync time
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
- 'service ntp stop; ntpd -gq; service ntp start'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-{{ SHARED.INSTALL_DOCKER_ON_GTW() }}
-
-- description: create rc file on cfg
- cmd: scp ctl01:/root/keystonercv3 /root
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Copy rc file
- cmd: scp /root/keystonercv3 gtw01:/root
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
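The network bootstrap steps above use the legacy neutron and nova CLIs (neutron net-create, nova secgroup-add-rule), which were still standard for Ocata but have since been superseded by the unified openstack client. For reference, the external-network step would look roughly like this with the modern CLI (same step schema, different command):

    - description: Create net04_external
      cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
        '. /root/keystonercv3; openstack network create net04_ext --external --provider-physical-network physnet1 --provider-network-type flat'
      node_name: {{ HOSTNAME_CFG01 }}
      retry: {count: 1, delay: 30}
      skip_fail: false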
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr/overrides-policy.yml b/tcp_tests/templates/cookied-mcp-ocata-dvr/overrides-policy.yml
deleted file mode 100644
index 1f35a6b..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr/overrides-policy.yml
+++ /dev/null
@@ -1,40 +0,0 @@
-parameters:
- nova:
- controller:
- policy:
- context_is_admin: 'role:admin or role:administrator'
- 'compute:create': 'rule:admin_or_owner'
- 'compute:create:attach_network':
- cinder:
- controller:
- policy:
- 'volume:delete': 'rule:admin_or_owner'
- 'volume:extend':
- neutron:
- server:
- policy:
- create_subnet: 'rule:admin_or_network_owner'
- 'get_network:queue_id': 'rule:admin_only'
- 'create_network:shared':
- glance:
- server:
- policy:
- publicize_image: "role:admin"
- add_member:
- keystone:
- server:
- policy:
- admin_or_token_subject: 'rule:admin_required or rule:token_subject'
- heat:
- server:
- policy:
- context_is_admin: 'role:admin and is_admin_project:True'
- deny_stack_user: 'not role:heat_stack_user'
- deny_everybody: '!'
- 'cloudformation:ValidateTemplate': 'rule:deny_everybody'
- 'cloudformation:DescribeStackResources':
- ceilometer:
- server:
- policy:
- segregation: 'rule:context_is_admin'
- 'telemetry:get_resource':
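Two rule forms are mixed in the override file above: keys with a value set an explicit policy rule, while keys deliberately left empty ('volume:extend', add_member, 'create_network:shared', and so on) appear intended to clear the corresponding rule — assuming the salt formulas treat an empty value as "remove this rule from policy.json". In sketch form:

    parameters:
      cinder:
        controller:
          policy:
            'volume:delete': 'rule:admin_or_owner'   # enforce this rule
            'volume:extend':                         # empty: drop/reset the rule (assumed semantics)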
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr/salt.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr/salt.yaml
deleted file mode 100644
index 9d3deb7..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr/salt.yaml
+++ /dev/null
@@ -1,59 +0,0 @@
-{% from 'cookied-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CMP01 with context %}
-{% from 'cookied-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CMP02 with context %}
-{% from 'cookied-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'cookied-mcp-ocata-dvr/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'cookied-mcp-ocata-dvr/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# For other salt model repository parameters, see shared-salt.yaml
-{% set OVERRIDES = os_env('OVERRIDES', 'override_example: true') %}
-{% set OVERRIDES_FILENAME = os_env('OVERRIDES_FILENAME', '/srv/salt/reclass/classes/environment/cookied-mcp-ocata-dvr/overrides.yml') %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-{%- if OVERRIDES != '' %}
-{%- for param in OVERRIDES.splitlines() %}
-{%- set key, value = param.replace(' ','').split(':', 1) %}
-- description: Override cluster parameters
- cmd: |
- salt-call reclass.cluster_meta_set name='{{ key }}' value='{{ value }}' file_name='{{OVERRIDES_FILENAME}}'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 1}
- skip_fail: false
-{%- endfor %}
-
-- description: Refresh pillar
- cmd: salt '*' saltutil.refresh_pillar
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 1}
- skip_fail: false
-{%- endif %}
-
-{{ SHARED.ADJUST_SL_OPTS(OVERRIDES_FILENAME='/srv/salt/reclass/classes/cluster/' + SHARED.CLUSTER_NAME + '/stacklight/server.yml') }}
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
-
-# WORKAROUND PROD-21071
-- description: Set correct pin for openstack repository
- cmd: |
- salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run "sed -i -e 's/Pin: release l=ocata/Pin: release l=xenial\/openstack\/ocata testing/g' /etc/apt/preferences.d/mirantis_openstack"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
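The OVERRIDES handling above splits an environment variable of newline-separated 'key: value' pairs and emits one salt-call step per pair (param.replace(' ','') strips the space, so key and value pass through unchanged). With the default value 'override_example: true', the loop renders a single step:

    - description: Override cluster parameters
      cmd: |
        salt-call reclass.cluster_meta_set name='override_example' value='true' file_name='/srv/salt/reclass/classes/environment/cookied-mcp-ocata-dvr/overrides.yml'
      node_name: {{ HOSTNAME_CFG01 }}
      retry: {count: 1, delay: 1}
      skip_fail: false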
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr/sl.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr/sl.yaml
deleted file mode 100644
index 405e647..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr/sl.yaml
+++ /dev/null
@@ -1,177 +0,0 @@
-{% from 'cookied-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
-# Install docker swarm
-- description: Configure docker service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install docker swarm on master node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Send grains to the swarm slave nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' mine.update
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Refresh modules
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Rerun swarm on slaves for proper token population
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure slave nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: List registered Docker swarm nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install keepalived on mon nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'mon*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check the VIP on mon nodes
- cmd: |
- SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
- echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-# Install StackLight v2 (slv2) infra
-- description: Install telegraf
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Configure Prometheus exporters, if pillar 'prometheus:exporters' exists on any server
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure collector
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:log_collector' state.sls heka.log_collector
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install elasticsearch server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install kibana server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install elasticsearch client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Install kibana client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:client' state.sls kibana.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check InfluxDB
- cmd: |
- INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
- if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-# Collect grains needed to configure the services
-
-- description: Get grains
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Sync modules
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 15}
- skip_fail: false
-
-# Configure the services running in Docker Swarm
-- description: Install prometheus alertmanager
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: run docker state
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: docker ps
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure Grafana dashboards and datasources
- cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Run salt minion to create cert files
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-{{ SHARED_SL_TESTS.MACRO_CLONE_SL_TESTS() }}
-{{ SHARED_SL_TESTS.MACRO_CONFIGURE_TESTS() }}
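Targeting throughout sl.yaml relies on Salt compound matchers: -C 'I@docker:swarm:role:master' selects only the minions whose pillar contains that nested key path, while 'I@docker:swarm' matches every swarm member. A minimal sketch of the pillar shapes these matchers key off:

    # pillar on the swarm leader (matches I@docker:swarm:role:master)
    docker:
      swarm:
        role: master

    # pillar on the remaining mon nodes (matches I@docker:swarm:role:manager)
    docker:
      swarm:
        role: manager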
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr/underlay--meta-data.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
- instance-id: iid-local1
- hostname: {hostname}
- local-hostname: {hostname}
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr/underlay--user-data-cfg01.yaml
deleted file mode 100644
index d75dab1..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,77 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifdown ens3
- - sudo ip r d default || true # remove existing default route to get it from dhcp
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
-
- # Enable grub menu using updated config below
- - update-grub
-
- - mkdir -p /srv/salt/reclass/nodes
- - systemctl enable salt-master
- - systemctl enable salt-minion
- - systemctl start salt-master
- - systemctl start salt-minion
- - salt-call -l info --timeout=120 test.ping
-
- write_files:
- - path: /etc/default/grub.d/97-enable-grub-menu.cfg
- content: |
- GRUB_RECORDFAIL_TIMEOUT=30
- GRUB_TIMEOUT=3
- GRUB_TIMEOUT_STYLE=menu
-
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr/underlay--user-data1604.yaml
deleted file mode 100644
index 3fbb777..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,50 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr/underlay.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr/underlay.yaml
deleted file mode 100644
index 4893e2c..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr/underlay.yaml
+++ /dev/null
@@ -1,575 +0,0 @@
-# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
-{% import 'cookied-mcp-ocata-dvr/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'cookied-mcp-ocata-dvr/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'cookied-mcp-ocata-dvr/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-mcp-ocata-dvr') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp001.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp002.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_DNS01 = os_env('HOSTNAME_DNS01', 'dns01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_DNS02 = os_env('HOSTNAME_DNS02', 'dns02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
-
-template:
- devops_settings:
- env_name: {{ os_env('ENV_NAME', 'cookied-mcp-ocata-dvr_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
- address_pools:
- private-pool01:
- net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_DNS01 }}: +111
- default_{{ HOSTNAME_DNS02 }}: +112
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+90, -10]
-
- admin-pool01:
- net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +90
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_DNS01 }}: +111
- default_{{ HOSTNAME_DNS02 }}: +112
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+90, -10]
-
- tenant-pool01:
- net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_DNS01 }}: +111
- default_{{ HOSTNAME_DNS02 }}: +112
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+10, -10]
-
- external-pool01:
- net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_DNS01 }}: +111
- default_{{ HOSTNAME_DNS02 }}: +112
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+10, -10]
-
-
- groups:
- - name: default
- driver:
- name: devops.driver.libvirt
- params:
- connection_string: !os_env CONNECTION_STRING, qemu:///system
- storage_pool_name: !os_env STORAGE_POOL_NAME, default
- stp: False
- hpet: False
- enable_acpi: true
- use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
- use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
-
- network_pools:
- admin: admin-pool01
- private: private-pool01
- tenant: tenant-pool01
- external: external-pool01
-
- l2_network_devices:
- private:
- address_pool: private-pool01
- dhcp: false
- forward:
- mode: route
-
- admin:
- address_pool: admin-pool01
- dhcp: true
- forward:
- mode: nat
-
- tenant:
- address_pool: tenant-pool01
- dhcp: false
-
- external:
- address_pool: external-pool01
- dhcp: true
- forward:
- mode: route
-
-
- group_volumes:
- - name: cloudimage1604 # This name is used as the 'backing_store' option for node volumes.
- source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- format: qcow2
- - name: cfg01_day01_image # Pre-configured day01 image
- source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
- format: qcow2
- - name: mcp_ubuntu_1604_image # Pre-configured image for VCP nodes initially based on kvm nodes.
- # http://images.mirantis.com/ubuntu-16-04-x64-latest.qcow2 (preferred)
- # or
- # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- source_image: {{ os_env('MCP_IMAGE_PATH1604', os_env('IMAGE_PATH1604')) }}
- format: qcow2
-
- nodes:
- - name: {{ HOSTNAME_CFG01 }}
- role: salt_master
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cfg01_day01_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_cfg01
-
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config:
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: &interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config: &network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CTL03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 6144
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 6144
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 6144
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_PRX01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
-
- - name: {{ HOSTNAME_CMP01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
-
- interfaces: &all_interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- - label: ens5
- l2_network_device: tenant
- interface_model: *interface_model
- - label: ens6
- l2_network_device: external
- interface_model: *interface_model
- network_config: &all_network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
- ens5:
- networks:
- - tenant
- ens6:
- networks:
- - external
-
- - name: {{ HOSTNAME_CMP02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_GTW01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_DNS01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_DNS02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
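The address_pools in the underlay above use the devops driver's relative notation: net '10.60.0.0/16:24' requests a /24 carved out of 10.60.0.0/16, and the ip_reserved entries are host offsets inside whichever /24 is allocated (+N from the start of the subnet, -N from its end). Assuming private-pool01 lands on 10.60.0.0/24, the entries resolve roughly as:

    gateway: +1                          # -> 10.60.0.1
    l2_network_device: +1                # -> 10.60.0.1 (the libvirt bridge doubles as the gateway)
    default_{{ HOSTNAME_CFG01 }}: +100   # -> 10.60.0.100
    dhcp: [+90, -10]                     # -> roughly 10.60.0.90 .. 10.60.0.245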
diff --git a/tcp_tests/templates/cookied-mcp-ocata-ovs/_context-cookiecutter-mcp-ocata-ovs.yaml b/tcp_tests/templates/cookied-mcp-ocata-ovs/_context-cookiecutter-mcp-ocata-ovs.yaml
deleted file mode 100644
index 2a6d8f9..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-ovs/_context-cookiecutter-mcp-ocata-ovs.yaml
+++ /dev/null
@@ -1,187 +0,0 @@
-default_context:
- bmk_enabled: 'False'
- ceph_enabled: 'False'
- cicd_enabled: 'False'
- cluster_domain: cookied-mcp-ocata-ovs.local
- cluster_name: cookied-mcp-ocata-ovs
- compute_bond_mode: active-backup
- compute_primary_first_nic: eth1
- compute_primary_second_nic: eth2
- context_seed: U1hx5V31VJfFFBu8fCsk9ebDN2TwuBABTIcptYQ8tmFSlhSxHIkKnJnDsnckgKnH
- control_network_netmask: 255.255.255.0
- control_network_subnet: 172.16.10.0/24
- control_vlan: '10'
- cookiecutter_template_branch: master
- cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/mk/cookiecutter-templates.git
- deploy_network_gateway: 192.168.10.1
- deploy_network_netmask: 255.255.255.0
- deploy_network_subnet: 192.168.10.0/24
- deployment_type: physical
- dns_server01: 172.18.176.6
- dns_server02: 172.18.208.44
- email_address: ddmitriev@mirantis.com
- gateway_primary_first_nic: eth1
- gateway_primary_second_nic: eth2
- infra_bond_mode: active-backup
- infra_deploy_nic: eth0
- infra_kvm01_control_address: 172.16.10.101
- infra_kvm01_deploy_address: 192.168.10.101
- infra_kvm01_hostname: kvm01
- infra_kvm02_control_address: 172.16.10.102
- infra_kvm02_deploy_address: 192.168.10.102
- infra_kvm02_hostname: kvm02
- infra_kvm03_control_address: 172.16.10.103
- infra_kvm03_deploy_address: 192.168.10.103
- infra_kvm03_hostname: kvm03
- infra_kvm_vip_address: 172.16.10.100
- infra_primary_first_nic: eth1
- infra_primary_second_nic: eth2
- kubernetes_enabled: 'False'
- local_repositories: 'False'
- maas_deploy_address: 192.168.10.90
- maas_hostname: cfg01
- mcp_version: stable
- offline_deployment: 'False'
- opencontrail_enabled: 'False'
- openstack_benchmark_node01_address: 172.16.10.95
- openstack_benchmark_node01_hostname: bmk01
- openstack_cluster_size: compact
- openstack_compute_count: '2'
- openstack_compute_rack01_hostname: cmp
- openstack_compute_rack01_single_subnet: 172.16.10
- openstack_compute_rack01_tenant_subnet: 10.1.0
- openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
- openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
- openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
- openstack_control_address: 172.16.10.100
- openstack_control_hostname: ctl
- openstack_control_node01_address: 172.16.10.101
- openstack_control_node01_hostname: ctl01
- openstack_control_node02_address: 172.16.10.102
- openstack_control_node02_hostname: ctl02
- openstack_control_node03_address: 172.16.10.103
- openstack_control_node03_hostname: ctl03
- openstack_database_address: 172.16.10.100
- openstack_database_hostname: ctl
- openstack_database_node01_address: 172.16.10.101
- openstack_database_node01_hostname: ctl01
- openstack_database_node02_address: 172.16.10.102
- openstack_database_node02_hostname: ctl02
- openstack_database_node03_address: 172.16.10.103
- openstack_database_node03_hostname: ctl03
- openstack_enabled: 'True'
- openstack_gateway_node01_address: 172.16.10.110
- openstack_gateway_node01_hostname: gtw01
- openstack_gateway_node01_tenant_address: 10.1.0.6
- openstack_gateway_node02_address: 172.16.10.111
- openstack_gateway_node02_hostname: gtw02
- openstack_gateway_node02_tenant_address: 10.1.0.7
- openstack_gateway_node03_address: 172.16.10.112
- openstack_gateway_node03_hostname: gtw03
- openstack_gateway_node03_tenant_address: 10.1.0.8
- openstack_message_queue_address: 172.16.10.100
- openstack_message_queue_hostname: ctl
- openstack_message_queue_node01_address: 172.16.10.101
- openstack_message_queue_node01_hostname: ctl01
- openstack_message_queue_node02_address: 172.16.10.102
- openstack_message_queue_node02_hostname: ctl02
- openstack_message_queue_node03_address: 172.16.10.103
- openstack_message_queue_node03_hostname: ctl03
- openstack_network_engine: ovs
- openstack_neutron_qos: 'False'
- openstack_neutron_vlan_aware_vms: 'False'
- openstack_nfv_dpdk_enabled: 'False'
- openstack_nfv_sriov_enabled: 'False'
- openstack_nova_compute_nfv_req_enabled: 'False'
- openstack_ovs_dvr_enabled: 'False'
- openstack_ovs_encapsulation_type: vxlan
- openstack_proxy_address: 172.16.10.80
- openstack_proxy_hostname: prx
- openstack_proxy_node01_address: 172.16.10.121
- openstack_proxy_node01_hostname: prx01
- openstack_proxy_node02_address: 172.16.10.122
- openstack_proxy_node02_hostname: prx02
- openstack_upgrade_node01_address: 172.16.10.19
- openstack_version: ocata
- oss_enabled: 'False'
- oss_node03_address: ${_param:stacklight_monitor_node03_address}
- oss_webhook_app_id: '24'
- oss_pushkin_email_sender_password: password
- oss_pushkin_smtp_port: '587'
- oss_webhook_login_id: '13'
- platform: openstack_enabled
- public_host: ${_param:openstack_proxy_address}
- publication_method: email
- reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
- backup_private_key: |
- -----BEGIN RSA PRIVATE KEY-----
- MIIEowIBAAKCAQEAxL6/rVgCetsETpZaUmXmkj8cZ1WN0eubH1FvMDOi/La9ZJyT
- k0C6AYpJnIyEm93pMj5cLm08qRqMW+2pdOhYjcH69yg5MrX5SkRk8jCmIHIYoIbh
- Qnwbnj3dd3I39ZdfU2FO7u2vlbglVou6ZoQxlJDItuLNtzq6EG+w9eF19e7+OsC6
- 6iUItp618zfw1l3J/8nKvCGe2RYDf7mJW6XwCl/DwryJmwwzvPgYJ3QMuDD8/HFj
- lrJ3xjFTXj4b4Ws1XIoy78fFbtiLr4OwqCYkho03u2E5rOOP1qZxZB63sivHMLMO
- MM5bOAQKbulFNoyALADGYfc7sf0bZ4u9XXDXxQIDAQABAoIBAQCfmc2MJRT97KW1
- yqpCpX9BrAiymuiNHf+cjEcSZxEUyHkjIRFmJt+9WB0W7ba1anM92vCUiPDojSzH
- dig9Oi578JxR20NrK8uqv4jUHzrknynzLveVI3CUEcOSnglfJQijbxDFKfOCFPvV
- FUyE1UATMNBh6+LNfMprgu+exuMWOPnDyUiYQ+WZ0JfuZY8fuaZte4woJJOb9LUu
- 5rsMG/smIzjpgZ0Z9ZVDMurfq565qhpaXRAqKeIuyht8pacTo31iMQdHB78AvY/3
- g0z21Gk8k3z0Kr/YFKr2r4FmXY5m/gAUvZly2ZrVQM5XsbTVCzq/JpI5fssNvSbU
- AKmXzf4RAoGBAOO3d4/cstxERzW6hyOTjZIN1ppR52CsnZTsVPbfd0pCtmzmVZce
- CtHKdcXSbTwZvvkK09QSWAp3MoSpd0gIOiLU8Wx/R/RIZsu9BlhTS3r3EQLnk72d
- H/1TTA+j4T/LIYLSojQ1RxvIrHetAD44j732aTwKAHj/SybEAVqNkOB/AoGBAN0u
- gLcrgqIHGrk4VjWSvlCGymfF40equcx+ud7XhfZDGETUOSahW4dPZ52cjPAkrCBQ
- MMfcDwSVGsOAjd+mNt11BHUKobnhXwFaWWuyqyn9NmWFbjMbICVh7E3Of5aVN38o
- lrmo/7LuKMVG7XRwphCv5NkaJmQG4njDyUQWlaW7AoGADCd8wDb9bPhP/LQqBmIX
- ylXmwHHisaxE9O/wUQT4bwREjGd25gv6c9wkkRx8LBsLsGs9hzI7dMOL9Ly+2x9l
- SvqmsC3S/1zl77X1Ir2/Z57MT6Vgo1xBmtnZU3Rhz2/eKAdqFPNLClaZrgGT475N
- HcyLLWMzR0IJFtabY+Puea0CgYA8Zb5wRkldxWLewSuJZZDinGwY+kieAVjLJq/K
- 0j+ah6fQ48LXcah0wpIgz+cMjHcUO9GWQdk3/x9X03rqX5EL2DBnZYfUIl63F9zj
- M97ZkHOSNWVqPzX//0Vv2butewG0j3jZKfTo/2/SrxOYgEpYtC9huWpSVi7xm0US
- erhSkQKBgFIf9JEsfgE57ANhvITZ3ZI0uZXNxZkXQaVg8jvScDi79IIhy9iPzhKC
- aIIQoDNIlWv1ftCRZ5AlBvVXgvQ/QNrwy48JiQTzWZlb9Ezg8w+olQmSbG6fq7Y+
- 7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
- -----END RSA PRIVATE KEY-----
- backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
- salt_api_password: PGah7Ph3IdWuMdAX3ZBLSf5BtlBG1Qhl
- salt_api_password_hash: $6$kgvztcjH$9B2950AyxRjE2Tj5QNVCnvdrgaFo/u6c59pMoQPqfxs2MTLLU7ywxPTQnDH3cNV.BBEK6FilF9SulWfIfENou0
- salt_master_address: 172.16.10.90
- salt_master_hostname: cfg01
- salt_master_management_address: 192.168.10.90
- shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/salt-models/reclass-system.git
- fluentd_enabled: 'True'
- stacklight_enabled: 'True'
- stacklight_log_address: 172.16.10.70
- stacklight_log_hostname: mon
- stacklight_log_node01_address: 172.16.10.107
- stacklight_log_node01_hostname: mon01
- stacklight_log_node02_address: 172.16.10.108
- stacklight_log_node02_hostname: mon02
- stacklight_log_node03_address: 172.16.10.109
- stacklight_log_node03_hostname: mon03
- stacklight_monitor_address: 172.16.10.70
- stacklight_monitor_hostname: mon
- stacklight_monitor_node01_address: 172.16.10.107
- stacklight_monitor_node01_hostname: mon01
- stacklight_monitor_node02_address: 172.16.10.108
- stacklight_monitor_node02_hostname: mon02
- stacklight_monitor_node03_address: 172.16.10.109
- stacklight_monitor_node03_hostname: mon03
- stacklight_notification_address: alerts@localhost
- stacklight_notification_smtp_host: 127.0.0.1
- stacklight_telemetry_address: 172.16.10.70
- stacklight_telemetry_hostname: mon
- stacklight_telemetry_node01_address: 172.16.10.107
- stacklight_telemetry_node01_hostname: mon01
- stacklight_telemetry_node02_address: 172.16.10.108
- stacklight_telemetry_node02_hostname: mon02
- stacklight_telemetry_node03_address: 172.16.10.109
- stacklight_telemetry_node03_hostname: mon03
- stacklight_version: '2'
- static_ips_on_deploy_network_enabled: 'False'
- tenant_network_gateway: 10.1.0.1
- tenant_network_netmask: 255.255.255.0
- tenant_network_subnet: 10.1.0.0/24
- tenant_vlan: '20'
- upstream_proxy_enabled: 'False'
- use_default_network_scheme: 'False'
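Note: the default_context mapping above is the input that cookiecutter renders the
cluster model from (see cookiecutter_template_url). A minimal sketch of how such a
key is typically referenced inside a template; the file name and surrounding YAML
here are illustrative assumptions, not taken from this repository:

    # cluster/{{ cookiecutter.cluster_name }}/init.yml  (hypothetical template file)
    parameters:
      _param:
        cluster_domain: {{ cookiecutter.cluster_domain }}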
diff --git a/tcp_tests/templates/cookied-mcp-ocata-ovs/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-ocata-ovs/_context-environment.yaml
deleted file mode 100644
index 4970aec..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-ovs/_context-environment.yaml
+++ /dev/null
@@ -1,150 +0,0 @@
-nodes:
- cfg01.mcp11-ovs-dpdk.local:
- reclass_storage_name: infra_config_node01
- roles:
- - infra_config
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl01.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_control_node01
- roles:
- - infra_kvm
- - openstack_control_leader
- - openstack_database_leader
- - openstack_message_queue
- - features_designate_bind9_database
- - features_designate_bind9_dns
- - features_designate_bind9
- - features_designate_bind9_keystone
- - features_lvm_backend_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl02.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_control_node02
- roles:
- - infra_kvm
- - openstack_control
- - openstack_database
- - openstack_message_queue
- - features_designate_bind9_database
- - features_designate_bind9_dns
- - features_designate_bind9
- - features_lvm_backend_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl03.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_control_node03
- roles:
- - infra_kvm
- - openstack_control
- - openstack_database
- - openstack_message_queue
- - features_designate_bind9_database
- - features_designate_bind9
- - features_lvm_backend_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- prx01.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_proxy_node01
- roles:
- - openstack_proxy
- - features_designate_bind9_proxy
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mon01.mcp11-ovs-dpdk.local:
- reclass_storage_name: stacklight_server_node01
- roles:
- - stacklightv2_server_leader
- - stacklight_telemetry_leader
- - stacklight_log_leader_v2
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mon02.mcp11-ovs-dpdk.local:
- reclass_storage_name: stacklight_server_node02
- roles:
- - stacklightv2_server
- - stacklight_telemetry
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mon03.mcp11-ovs-dpdk.local:
- reclass_storage_name: stacklight_server_node03
- roles:
- - stacklightv2_server
- - stacklight_telemetry
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- # Generator-based computes, kept for compatibility only
- cmp<<count>>.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_compute_rack01
- roles:
- - openstack_compute
- - features_lvm_backend_volume_vdb
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- ens5:
- role: bond0_ab_ovs_vxlan_mesh
- ens6:
- role: bond1_ab_ovs_floating
-
- gtw01.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_gateway_node01
- roles:
- - openstack_gateway
- - linux_system_codename_xenial
- classes:
- - system.linux.system.repo.mcp.apt_mirantis.docker
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- ens5:
- role: bond0_ab_ovs_vxlan_mesh
- ens6:
- role: bond1_ab_ovs_floating
diff --git a/tcp_tests/templates/cookied-mcp-ocata-ovs/core.yaml b/tcp_tests/templates/cookied-mcp-ocata-ovs/core.yaml
deleted file mode 100644
index 6fc2af4..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-ovs/core.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
-{% from 'cookied-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-{% import 'shared-core.yaml' as SHARED_CORE with context %}
-
-{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
-{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
-{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
-{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
-{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
-{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
-{{ SHARED_CORE.MACRO_CHECK_VIP() }}
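Note: each SHARED_CORE.MACRO_INSTALL_* call above expands into deployment steps of
the same shape used throughout these templates. A sketch of one expanded step; the
exact target expression and state name are assumptions for illustration:

    - description: Install keepalived
      cmd: salt --hard-crash --state-output=mixed --state-verbose=False
        -C 'I@keepalived:cluster' state.sls keepalived
      node_name: {{ HOSTNAME_CFG01 }}
      retry: {count: 1, delay: 10}
      skip_fail: false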
diff --git a/tcp_tests/templates/cookied-mcp-ocata-ovs/openstack.yaml b/tcp_tests/templates/cookied-mcp-ocata-ovs/openstack.yaml
deleted file mode 100644
index 4072632..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-ovs/openstack.yaml
+++ /dev/null
@@ -1,164 +0,0 @@
-{% from 'cookied-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'cookied-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'cookied-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'cookied-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-
-{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-# Deploy nginx before openstack services (PROD-22740)
-- description: Deploy nginx proxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
-
-# Install the Designate backend (bind)
-- description: Install bind
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@bind:server' state.sls bind
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_DESIGNATE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HORIZON() }}
-
-# Install compute node
-
-- description: Apply formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
- - description: Re-apply (as in the docs) formulas for compute nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check IP on computes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
- 'ip a'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 10, delay: 30}
- skip_fail: false
-
-
-# Upload cirros image
-
-- description: Upload cirros image on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- 'wget http://images.mirantis.com.s3.amazonaws.com/cirros-x64-20170828.qcow2'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Register image in glance
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; glance --timeout 120 image-create --name cirros --visibility public --disk-format qcow2 --container-format bare --progress < /root/cirros-x64-20170828.qcow2'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create net04_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create router
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-create net04_router01'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set gateway
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Add interface
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Allow all tcp
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; nova secgroup-add-rule default tcp 1 65535 0.0.0.0/0'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Allow all icmp
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
- - description: Sync time
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
- 'service ntp stop; ntpd -gq; service ntp start'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-{{ SHARED.INSTALL_DOCKER_ON_GTW() }}
-
- - description: Create rc file on cfg01
- cmd: scp ctl01:/root/keystonercv3 /root
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Copy rc file
- cmd: scp /root/keystonercv3 gtw01:/root
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
diff --git a/tcp_tests/templates/cookied-mcp-ocata-ovs/salt.yaml b/tcp_tests/templates/cookied-mcp-ocata-ovs/salt.yaml
deleted file mode 100644
index 41827c7..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-ovs/salt.yaml
+++ /dev/null
@@ -1,59 +0,0 @@
-{% from 'cookied-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CMP01 with context %}
-{% from 'cookied-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CMP02 with context %}
-{% from 'cookied-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'cookied-mcp-ocata-ovs/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'cookied-mcp-ocata-ovs/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# See shared-salt.yaml for other salt model repository parameters
-{% set OVERRIDES = os_env('OVERRIDES', 'override_example: true') %}
-{% set OVERRIDES_FILENAME = os_env('OVERRIDES_FILENAME', '/srv/salt/reclass/classes/environment/cookied-mcp-ocata-ovs/overrides.yml') %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-{%- if OVERRIDES != '' %}
-{%- for param in OVERRIDES.splitlines() %}
-{%- set key, value = param.replace(' ','').split(':', 1) %}
-- description: Override cluster parameters
- cmd: |
- salt-call reclass.cluster_meta_set name='{{ key }}' value='{{ value }}' file_name='{{OVERRIDES_FILENAME}}'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 1}
- skip_fail: false
-{%- endfor %}
-
-- description: Refresh pillar
- cmd: salt '*' saltutil.refresh_pillar
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 1}
- skip_fail: false
-{%- endif %}
-
-{{ SHARED.ADJUST_SL_OPTS(OVERRIDES_FILENAME='/srv/salt/reclass/classes/cluster/' + SHARED.CLUSTER_NAME + '/stacklight/server.yml') }}
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
-
-# WORKAROUND PROD-21071
-- description: Set correct pin for openstack repository
- cmd: |
- salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run "sed -i -e 's/Pin: release l=ocata/Pin: release l=xenial\/openstack\/ocata testing/g' /etc/apt/preferences.d/mirantis_openstack"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
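Note: the OVERRIDES block above turns each 'key: value' line of the OVERRIDES
environment variable into a reclass.cluster_meta_set call. For example, with an
invented override value such as

    export OVERRIDES='stacklight_enabled: False'

the template would render a step that runs on cfg01:

    salt-call reclass.cluster_meta_set name='stacklight_enabled' value='False' \
      file_name='/srv/salt/reclass/classes/environment/cookied-mcp-ocata-ovs/overrides.yml'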
diff --git a/tcp_tests/templates/cookied-mcp-ocata-ovs/sl.yaml b/tcp_tests/templates/cookied-mcp-ocata-ovs/sl.yaml
deleted file mode 100644
index 7cc598b..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-ovs/sl.yaml
+++ /dev/null
@@ -1,176 +0,0 @@
-{% from 'cookied-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
-
-# Install docker swarm
-- description: Configure docker service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install docker swarm on master node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Send grains to the swarm slave nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' mine.update
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Refresh modules
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
- - description: Rerun swarm on slaves for proper token population
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure slave nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: List registered Docker swarm nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install keepalived on mon nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'mon*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check the VIP on mon nodes
- cmd: |
- SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
- echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-# Install slv2 infra
-- description: Install telegraf
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Configure Prometheus exporters
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure collector
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:log_collector' state.sls heka.log_collector
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install elasticsearch server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install kibana server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install elasticsearch client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Install kibana client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:client' state.sls kibana.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
- - description: Check InfluxDB
- cmd: |
- INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
- if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-# Collect grains needed to configure the services
-
-- description: Get grains
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Sync modules
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-# Configure the services running in Docker Swarm
-- description: Install prometheus alertmanager
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
- - description: Run docker state
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
- - description: List Docker containers on swarm nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure Grafana dashboards and datasources
- cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Run salt minion to create cert files
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-{{ SHARED_SL_TESTS.MACRO_CLONE_SL_TESTS() }}
-{{ SHARED_SL_TESTS.MACRO_CONFIGURE_TESTS() }}
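Note: the steps above rely on Salt compound matchers: -C 'I@docker:swarm:role:master'
targets minions whose pillar data contains that key path, and -b 1 applies a state
one minion at a time. A quick sketch, with target expressions taken from the steps
above (test.ping is just a connectivity check):

    # Which minions carry the swarm master pillar?
    salt -C 'I@docker:swarm:role:master' test.ping
    # Apply a state serially, one minion per batch
    salt -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1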
diff --git a/tcp_tests/templates/cookied-mcp-ocata-ovs/underlay--meta-data.yaml b/tcp_tests/templates/cookied-mcp-ocata-ovs/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-ovs/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
- instance-id: iid-local1
- hostname: {hostname}
- local-hostname: {hostname}
diff --git a/tcp_tests/templates/cookied-mcp-ocata-ovs/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-ocata-ovs/underlay--user-data-cfg01.yaml
deleted file mode 100644
index 48562ad..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-ovs/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,68 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifdown ens3
- - sudo ip r d default || true # remove existing default route to get it from dhcp
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
-
- - mkdir -p /srv/salt/reclass/nodes
- - systemctl enable salt-master
- - systemctl enable salt-minion
- - systemctl start salt-master
- - systemctl start salt-minion
- - salt-call -l info --timeout=120 test.ping
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
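Note on the cloud-config above: bootcmd entries run early and on every boot (used
here to re-enable root SSH), while runcmd entries run once, on first boot, after
write_files have been written. A minimal sketch of that ordering; the path below
is hypothetical:

    #cloud-config
    bootcmd:
      - echo "every boot, early"
    write_files:
      - path: /etc/example.conf   # hypothetical path
        content: "written before runcmd"
    runcmd:
      - echo "first boot only, last"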
diff --git a/tcp_tests/templates/cookied-mcp-ocata-ovs/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-mcp-ocata-ovs/underlay--user-data1604.yaml
deleted file mode 100644
index 3fbb777..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-ovs/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,50 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
diff --git a/tcp_tests/templates/cookied-mcp-ocata-ovs/underlay.yaml b/tcp_tests/templates/cookied-mcp-ocata-ovs/underlay.yaml
deleted file mode 100644
index 2d31a5a..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-ovs/underlay.yaml
+++ /dev/null
@@ -1,512 +0,0 @@
-# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
-{% import 'cookied-mcp-ocata-ovs/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'cookied-mcp-ocata-ovs/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'cookied-mcp-ocata-ovs/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-mcp-ocata-ovs') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp001.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp002.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
-
-template:
- devops_settings:
- env_name: {{ os_env('ENV_NAME', 'cookied-mcp-ocata-ovs_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
- address_pools:
- private-pool01:
- net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+90, -10]
-
- admin-pool01:
- net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +90
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+90, -10]
-
- tenant-pool01:
- net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+10, -10]
-
- external-pool01:
- net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+10, -10]
-
-
- groups:
- - name: default
- driver:
- name: devops.driver.libvirt
- params:
- connection_string: !os_env CONNECTION_STRING, qemu:///system
- storage_pool_name: !os_env STORAGE_POOL_NAME, default
- stp: False
- hpet: False
- enable_acpi: true
- use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
- use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
-
- network_pools:
- admin: admin-pool01
- private: private-pool01
- tenant: tenant-pool01
- external: external-pool01
-
- l2_network_devices:
- private:
- address_pool: private-pool01
- dhcp: false
- forward:
- mode: route
-
- admin:
- address_pool: admin-pool01
- dhcp: true
- forward:
- mode: nat
-
- tenant:
- address_pool: tenant-pool01
- dhcp: false
-
- external:
- address_pool: external-pool01
- dhcp: true
- forward:
- mode: route
-
-
- group_volumes:
- - name: cloudimage1604 # This name is used for the 'backing_store' option of node volumes.
- source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- format: qcow2
- - name: cfg01_day01_image # Pre-configured day01 image
- source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
- format: qcow2
- - name: mcp_ubuntu_1604_image # Pre-configured image for VCP nodes, initially based on the kvm nodes.
- # http://images.mirantis.com/ubuntu-16-04-x64-latest.qcow2 (preferred)
- # or
- # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- source_image: {{ os_env('MCP_IMAGE_PATH1604', os_env('IMAGE_PATH1604')) }}
- format: qcow2
-
- nodes:
- - name: {{ HOSTNAME_CFG01 }}
- role: salt_master
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cfg01_day01_image
- format: qcow2
- - name: iso # The volume named 'iso' is used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_cfg01
-
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config:
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # The volume named 'iso' is used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: &interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config: &network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # The volume named 'iso' is used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CTL03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # The volume named 'iso' is used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 6144
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # The volume named 'iso' is used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 6144
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # The volume named 'iso' is used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 6144
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # The volume named 'iso' is used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_PRX01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # The volume named 'iso' is used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CMP01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # The volume named 'iso' is used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
-
- interfaces: &all_interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- - label: ens5
- l2_network_device: tenant
- interface_model: *interface_model
- - label: ens6
- l2_network_device: external
- interface_model: *interface_model
- network_config: &all_network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
- ens5:
- networks:
- - tenant
- ens6:
- networks:
- - external
-
- - name: {{ HOSTNAME_CMP02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # The volume named 'iso' is used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_GTW01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # The volume named 'iso' is used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
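Note: the underlay above leans on two mechanisms worth spelling out. YAML anchors
(&interfaces, &all_interfaces) define a block once on the first node, and aliases
(*interfaces, *all_interfaces) reuse it verbatim on later nodes; the !os_env tag
appears to read an environment variable, falling back to the default after the
comma (e.g. !os_env SLAVE_NODE_CPU, 2). A minimal sketch of the anchor/alias
pattern, with illustrative names:

    nodes:
      - name: node01
        interfaces: &ifaces
          - label: ens3
      - name: node02
        interfaces: *ifaces   # identical block, defined once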
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-mitaka-dvr-ceph.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-mitaka-dvr-ceph.yaml
deleted file mode 100644
index 00c55da..0000000
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-mitaka-dvr-ceph.yaml
+++ /dev/null
@@ -1,82 +0,0 @@
-{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
-
-# See shared-salt.yaml for other salt model repository parameters
-{% set LAB_CONFIG_NAME = 'cookied-mcp-mitaka-dvr-ceph' %}
-# Name of the context file (without the fixed '.yaml' extension) used to render the Environment model
-{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
-# Path to the context files used to render Cluster and Environment models
-{%- set CLUSTER_CONTEXT_NAME = '_context-cookiecutter-mcp-mitaka-dvr-ceph.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['_context-environment.yaml'] %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
-
-{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
-
-{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
-
-{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
-
-- description: "Workaround for combined roles: remove unnecessary classes"
- cmd: |
- set -e;
- sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.salt.control.placement.openstack.compact/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
-
-
- # Bind9 services are placed on the first two ctl nodes
- salt-call reclass.cluster_meta_set name='openstack_dns_node01_address' value='${_param:openstack_control_node01_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
- salt-call reclass.cluster_meta_set name='openstack_dns_node02_address' value='${_param:openstack_control_node02_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
-
- . /root/venv-reclass-tools/bin/activate;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
-
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: "Disable designate worker for Mitaka release"
- cmd: |
- set -e;
- salt-call reclass.cluster_meta_set name='designate_worker_enabled' value='false' file_name=/srv/salt/reclass/classes/environment/{{ SHARED.CLUSTER_NAME }}/features/designate_bind9/init.yml
- salt-call reclass.cluster_meta_set name='designate_worker_enabled' value='false' file_name=/srv/salt/reclass/classes/environment/{{ SHARED.CLUSTER_NAME }}/features/designate_pool_manager/init.yml
- salt-call reclass.cluster_meta_set name='designate_worker_enabled' value='false' file_name=/srv/salt/reclass/classes/environment/{{ SHARED.CLUSTER_NAME }}/features/designate/init.yml
-
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Temporary workaround for removing cinder-volume from CTL nodes
- cmd: |
- sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
- sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
-
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
- - description: "Temporary workaround, fix or debug: reduce Ceph pg_num/pgp_num"
- cmd: |
- sed -i 's/pg_num: 128/pg_num: 4/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/ceph/setup.yml;
- sed -i 's/pgp_num: 128/pgp_num: 4/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/ceph/setup.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
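Note: salt-call reclass.cluster_meta_set, used above, writes a key under
parameters._param in the given cluster file, so later ${_param:...} references
resolve to it. A sketch of the kind of entry the first call above produces in
openstack/init.yml (layout assumed; the value is taken from the command):

    parameters:
      _param:
        openstack_dns_node01_address: ${_param:openstack_control_node01_address}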
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-mitaka-dvr.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-mitaka-dvr.yaml
deleted file mode 100644
index 80d1f62..0000000
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-mitaka-dvr.yaml
+++ /dev/null
@@ -1,73 +0,0 @@
-{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
-
-# See shared-salt.yaml for other salt model repository parameters
-{% set LAB_CONFIG_NAME = 'cookied-mcp-mitaka-dvr' %}
-# Name of the context file (without the fixed '.yaml' extension) used to render the Environment model
-{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
-# Path to the context files used to render Cluster and Environment models
-{%- set CLUSTER_CONTEXT_NAME = '_context-cookiecutter-mcp-mitaka-dvr.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['_context-environment.yaml'] %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
-
-{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
-
-{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
-
-{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
-
-- description: "Workaround for combined roles: remove unnecessary classes"
- cmd: |
- set -e;
- sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.salt.control.placement.openstack.compact/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
-
- salt-call reclass.cluster_meta_set name='openstack_dns_node01_address' value='{{ SHARED.IPV4_NET_CONTROL_PREFIX }}.111' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
- salt-call reclass.cluster_meta_set name='openstack_dns_node02_address' value='{{ SHARED.IPV4_NET_CONTROL_PREFIX }}.112' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
- # Workaround for the missing reclass.system class for the dns role
- salt-call reclass.cluster_meta_set name='salt_master_host' value='${_param:infra_config_deploy_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
-
- . /root/venv-reclass-tools/bin/activate;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
-
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: "Disable designate worker for Mitaka release"
- cmd: |
- set -e;
- salt-call reclass.cluster_meta_set name='designate_worker_enabled' value='false' file_name=/srv/salt/reclass/classes/environment/{{ SHARED.CLUSTER_NAME }}/features/designate_bind9/init.yml
- salt-call reclass.cluster_meta_set name='designate_worker_enabled' value='false' file_name=/srv/salt/reclass/classes/environment/{{ SHARED.CLUSTER_NAME }}/features/designate_pool_manager/init.yml
- salt-call reclass.cluster_meta_set name='designate_worker_enabled' value='false' file_name=/srv/salt/reclass/classes/environment/{{ SHARED.CLUSTER_NAME }}/features/designate/init.yml
-
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Temporary workaround for removing cinder-volume from CTL nodes
- cmd: |
- sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
- sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
-
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
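These generator templates lean heavily on the reclass.cluster_meta_set execution module, which pins a single parameter value inside one model file. A short sketch of the call pattern used throughout this diff, with a hypothetical cluster name mylab and a grep to confirm the write:

    # Sketch only: the cluster name and value are illustrative.
    FILE=/srv/salt/reclass/classes/cluster/mylab/openstack/init.yml
    salt-call reclass.cluster_meta_set \
        name='openstack_dns_node01_address' \
        value='172.16.10.111' \
        file_name="${FILE}"
    grep -n 'openstack_dns_node01_address' "${FILE}"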
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-mitaka-ovs.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-mitaka-ovs.yaml
deleted file mode 100644
index d9a6489..0000000
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-mitaka-ovs.yaml
+++ /dev/null
@@ -1,73 +0,0 @@
-{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
-
-# For other salt model repository parameters, see shared-salt.yaml
-{% set LAB_CONFIG_NAME = 'cookied-mcp-mitaka-ovs' %}
-# Name of the context file (without the extension, which is fixed to .yaml) used to render the Environment model
-{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
-# Path to the context files used to render Cluster and Environment models
-{%- set CLUSTER_CONTEXT_NAME = '_context-cookiecutter-mcp-mitaka-ovs.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['_context-environment.yaml'] %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
-
-{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
-
-{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
-
-{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
-
-- description: "Workaround for combined roles: remove unnecessary classes"
- cmd: |
- set -e;
- sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.salt.control.placement.openstack.compact/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
-
- salt-call reclass.cluster_meta_set name='openstack_dns_node01_address' value='{{ SHARED.IPV4_NET_CONTROL_PREFIX }}.111' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
- salt-call reclass.cluster_meta_set name='openstack_dns_node02_address' value='{{ SHARED.IPV4_NET_CONTROL_PREFIX }}.112' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
- # Workaround for the missing reclass.system class for the dns role
- salt-call reclass.cluster_meta_set name='salt_master_host' value='${_param:infra_config_deploy_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
-
- . /root/venv-reclass-tools/bin/activate;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
-
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: "Disable designate worker for Mitaka release"
- cmd: |
- set -e;
- salt-call reclass.cluster_meta_set name='designate_worker_enabled' value='false' file_name=/srv/salt/reclass/classes/environment/{{ SHARED.CLUSTER_NAME }}/features/designate_bind9/init.yml
- salt-call reclass.cluster_meta_set name='designate_worker_enabled' value='false' file_name=/srv/salt/reclass/classes/environment/{{ SHARED.CLUSTER_NAME }}/features/designate_pool_manager/init.yml
- salt-call reclass.cluster_meta_set name='designate_worker_enabled' value='false' file_name=/srv/salt/reclass/classes/environment/{{ SHARED.CLUSTER_NAME }}/features/designate/init.yml
-
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Temporary workaround for removing cinder-volume from CTL nodes
- cmd: |
- sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
- sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
-
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
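The del-key steps remove the per-node definitions that the combined-roles layout no longer needs. A sketch of one such removal plus a verification, mirroring the commands above (the cluster name mylab is illustrative):

    . /root/venv-reclass-tools/bin/activate
    CFG=/srv/salt/reclass/classes/cluster/mylab/infra/config/init.yml
    reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 "${CFG}"
    grep -c 'stacklight_log_node01' "${CFG}" || true   # expect 0 after removal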
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-newton-dvr.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-newton-dvr.yaml
deleted file mode 100644
index dc8661a..0000000
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-newton-dvr.yaml
+++ /dev/null
@@ -1,74 +0,0 @@
-{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
-
-# For other salt model repository parameters, see shared-salt.yaml
-
-{% set LAB_CONFIG_NAME = 'cookied-mcp-newton-dvr' %}
-# Name of the context file (without the extension, which is fixed to .yaml) used to render the Environment model
-{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
-# Path to the context files used to render Cluster and Environment models
-{%- set CLUSTER_CONTEXT_NAME = '_context-cookiecutter-mcp-newton-dvr.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['_context-environment.yaml'] %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
-
-{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
-
-{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
-
-{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
-
-- description: "Workaround for combined roles: remove unnecessary classes"
- cmd: |
- set -e;
- sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.salt.control.placement.openstack.compact/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
-
- salt-call reclass.cluster_meta_set name='openstack_dns_node01_address' value='{{ SHARED.IPV4_NET_CONTROL_PREFIX }}.111' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
- salt-call reclass.cluster_meta_set name='openstack_dns_node02_address' value='{{ SHARED.IPV4_NET_CONTROL_PREFIX }}.112' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
- # Workaround for the missing reclass.system class for the dns role
- salt-call reclass.cluster_meta_set name='salt_master_host' value='${_param:infra_config_deploy_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
-
- . /root/venv-reclass-tools/bin/activate;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
-
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: "Disable designate worker for Newton release"
- cmd: |
- set -e;
- salt-call reclass.cluster_meta_set name='designate_worker_enabled' value='false' file_name=/srv/salt/reclass/classes/environment/{{ SHARED.CLUSTER_NAME }}/features/designate_bind9/init.yml
- salt-call reclass.cluster_meta_set name='designate_worker_enabled' value='false' file_name=/srv/salt/reclass/classes/environment/{{ SHARED.CLUSTER_NAME }}/features/designate_pool_manager/init.yml
- salt-call reclass.cluster_meta_set name='designate_worker_enabled' value='false' file_name=/srv/salt/reclass/classes/environment/{{ SHARED.CLUSTER_NAME }}/features/designate/init.yml
-
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Temporary workaround for removing cinder-volume from CTL nodes
- cmd: |
- sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
- sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
-
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
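Both the Mitaka and Newton templates pin designate_worker_enabled to false in all three designate feature contexts, presumably because the worker service is not usable on those releases in these labs. The three salt-calls collapse naturally into a loop; a sketch with an illustrative ENV_DIR:

    ENV_DIR=/srv/salt/reclass/classes/environment/mylab/features
    for feature in designate_bind9 designate_pool_manager designate; do
        salt-call reclass.cluster_meta_set \
            name='designate_worker_enabled' value='false' \
            file_name="${ENV_DIR}/${feature}/init.yml"
    done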
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-newton-ovs.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-newton-ovs.yaml
deleted file mode 100644
index 3dad09c..0000000
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-newton-ovs.yaml
+++ /dev/null
@@ -1,73 +0,0 @@
-{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
-
-# For other salt model repository parameters, see shared-salt.yaml
-
-{% set LAB_CONFIG_NAME = 'cookied-mcp-newton-ovs' %}
-# Name of the context file (without the extension, which is fixed to .yaml) used to render the Environment model
-{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
-# Path to the context files used to render Cluster and Environment models
-{%- set CLUSTER_CONTEXT_NAME = '_context-cookiecutter-mcp-newton-ovs.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['_context-environment.yaml'] %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
-
-{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
-
-{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
-
-{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
-
-- description: "Workaround for combined roles: remove unnecessary classes"
- cmd: |
- set -e;
- sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.salt.control.placement.openstack.compact/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
-
- # Bind9 services are placed on the first two ctl nodes
- salt-call reclass.cluster_meta_set name='openstack_dns_node01_address' value='${_param:openstack_control_node01_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
- salt-call reclass.cluster_meta_set name='openstack_dns_node02_address' value='${_param:openstack_control_node02_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
-
- . /root/venv-reclass-tools/bin/activate;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
-
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: "Disable designate worker for Newton release"
- cmd: |
- set -e;
- salt-call reclass.cluster_meta_set name='designate_worker_enabled' value='false' file_name=/srv/salt/reclass/classes/environment/{{ SHARED.CLUSTER_NAME }}/features/designate_bind9/init.yml
- salt-call reclass.cluster_meta_set name='designate_worker_enabled' value='false' file_name=/srv/salt/reclass/classes/environment/{{ SHARED.CLUSTER_NAME }}/features/designate_pool_manager/init.yml
- salt-call reclass.cluster_meta_set name='designate_worker_enabled' value='false' file_name=/srv/salt/reclass/classes/environment/{{ SHARED.CLUSTER_NAME }}/features/designate/init.yml
-
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Temporary workaround for removing cinder-volume from CTL nodes
- cmd: |
- sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
- sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
-
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
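Unlike the DVR variants, the OVS templates place Bind9 on the first two control nodes by pointing the dns addresses at reclass ${_param:...} references instead of literal IPs. Assuming cluster_meta_set writes under parameters._param, the resulting fragment of openstack/init.yml would look roughly like this:

    # Expected shape of the written parameters (sketch, not verified output).
    cat <<'EOF'
    parameters:
      _param:
        openstack_dns_node01_address: ${_param:openstack_control_node01_address}
        openstack_dns_node02_address: ${_param:openstack_control_node02_address}
    EOF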
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dop-sl2.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dop-sl2.yaml
deleted file mode 100644
index 1da953d..0000000
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dop-sl2.yaml
+++ /dev/null
@@ -1,47 +0,0 @@
-{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
-
-# For other salt model repository parameters, see shared-salt.yaml
-
-{% set LAB_CONFIG_NAME = 'cookied-mcp-ocata-dop-sl2' %}
-# Name of the context file (without the extension, which is fixed to .yaml) used to render the Environment model
-{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
-# Path to the context files used to render Cluster and Environment models
-{%- set CLUSTER_CONTEXT_NAME = 'salt-context-cookiecutter-cicd_oss.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-environment.yaml'] %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
-
-{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
-
-{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
-
-{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
-
-- description: "Workaround for combined roles: remove unnecessary classes"
- cmd: |
- set -e;
- sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-
- . /root/venv-reclass-tools/bin/activate;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
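The class-stripping sed block repeats in every generator because the combined-roles layout co-locates the database, message-queue, and StackLight roles on shared VMs, so the storage classes that would generate dedicated nodes must go. A loop-form sketch (cluster name illustrative; note this lab uses infra/config.yml rather than infra/config/init.yml):

    CFG=/srv/salt/reclass/classes/cluster/mylab/infra/config.yml
    for cls in physical_control_cluster openstack_database_cluster \
               openstack_message_queue_cluster stacklight_telemetry_cluster \
               stacklight_log_cluster; do
        sed -i "/system.reclass.storage.system.${cls}/d" "${CFG}"
    done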
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dvr-vxlan.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dvr-vxlan.yaml
deleted file mode 100644
index 38a1d10..0000000
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dvr-vxlan.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
-{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set LAB_CONFIG_NAME = 'cookied-mcp-ocata-dvr-vxlan' %}
-# Name of the context file (without the extension, which is fixed to .yaml) used to render the Environment model
-{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
-# Path to the context files used to render Cluster and Environment models
-{%- set CLUSTER_CONTEXT_NAME = 'salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-environment.yaml'] %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
-
-{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
-
-{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
-
-{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
-
-{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
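Every template resolves ENVIRONMENT_MODEL_INVENTORY_NAME through os_env(name, default), falling back to LAB_CONFIG_NAME when the environment variable is unset. A shell analogue of that lookup, for illustration only:

    LAB_CONFIG_NAME=cookied-mcp-ocata-dvr-vxlan
    INVENTORY_NAME="${ENVIRONMENT_MODEL_INVENTORY_NAME:-$LAB_CONFIG_NAME}"
    echo "rendering inventory: ${INVENTORY_NAME}"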
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dvr.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dvr.yaml
deleted file mode 100644
index 2700813..0000000
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dvr.yaml
+++ /dev/null
@@ -1,63 +0,0 @@
-{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
-
-# For other salt model repository parameters, see shared-salt.yaml
-
-{% set LAB_CONFIG_NAME = 'cookied-mcp-ocata-dvr' %}
-# Name of the context file (without the extension, which is fixed to .yaml) used to render the Environment model
-{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
-# Path to the context files used to render Cluster and Environment models
-{%- set CLUSTER_CONTEXT_NAME = '_context-cookiecutter-mcp-ocata-dvr.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['_context-environment.yaml'] %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
-
-{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
-
-{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
-
-{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
-
-- description: "Workaround for combined roles: remove unnecessary classes"
- cmd: |
- set -e;
- sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.salt.control.placement.openstack.compact/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
-
- salt-call reclass.cluster_meta_set name='openstack_dns_node01_address' value='{{ SHARED.IPV4_NET_CONTROL_PREFIX }}.111' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
- salt-call reclass.cluster_meta_set name='openstack_dns_node02_address' value='{{ SHARED.IPV4_NET_CONTROL_PREFIX }}.112' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
- # Workaround for the missing reclass.system class for the dns role
- salt-call reclass.cluster_meta_set name='salt_master_host' value='${_param:infra_config_deploy_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
-
- . /root/venv-reclass-tools/bin/activate;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
-
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Temporary workaround for removing cinder-volume from CTL nodes
- cmd: |
- sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
- sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
-
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
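The cinder-volume substitutions blank the class include but leave a whitespace-only line behind in control.yml. A sketch of a tidier equivalent that deletes the matching lines outright (cluster name illustrative):

    CTL=/srv/salt/reclass/classes/cluster/mylab/openstack/control.yml
    sed -i -e '/system\.cinder\.volume\.single/d' \
           -e '/system\.cinder\.volume\.notification\.messagingv2/d' "${CTL}"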
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-ovs.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-ovs.yaml
deleted file mode 100644
index 1cfc554..0000000
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-ovs.yaml
+++ /dev/null
@@ -1,62 +0,0 @@
-{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
-
-# For other salt model repository parameters, see shared-salt.yaml
-
-{% set LAB_CONFIG_NAME = 'cookied-mcp-ocata-ovs' %}
-# Name of the context file (without the extension, which is fixed to .yaml) used to render the Environment model
-{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
-# Path to the context files used to render Cluster and Environment models
-{%- set CLUSTER_CONTEXT_NAME = '_context-cookiecutter-mcp-ocata-ovs.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['_context-environment.yaml'] %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
-
-{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
-
-{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
-
-{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
-
-- description: "Workaround for combined roles: remove unnecessary classes"
- cmd: |
- set -e;
- sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- sed -i '/system.salt.control.placement.openstack.compact/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
-
- # Bind9 services are placed on the first two ctl nodes
- salt-call reclass.cluster_meta_set name='openstack_dns_node01_address' value='${_param:openstack_control_node01_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
- salt-call reclass.cluster_meta_set name='openstack_dns_node02_address' value='${_param:openstack_control_node02_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
-
- . /root/venv-reclass-tools/bin/activate;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
- reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
-
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Temporary workaround for removing cinder-volume from CTL nodes
- cmd: |
- sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
- sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
-
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
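Each step in these templates carries retry: {count, delay} and skip_fail fields. A minimal sketch of how a runner might interpret them, assuming count means total attempts and skip_fail=true swallows a final failure; run_step is hypothetical, not the actual tcp_tests runner:

    run_step() {
        # cmd, total attempt count, delay between attempts, skip_fail flag
        local cmd=$1 count=$2 delay=$3 skip_fail=$4
        local attempt=1
        until bash -c "${cmd}"; do
            if (( attempt >= count )); then
                [[ ${skip_fail} == true ]] && { echo "step failed, skipping"; return 0; }
                echo "step failed" >&2; return 1
            fi
            attempt=$(( attempt + 1 ))
            sleep "${delay}"
        done
    }
    run_step 'echo hello' 1 5 true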