Merge "Apply workaround for PROD-14756 in virt templates"
diff --git a/tcp_tests/environment/.keep b/tcp_tests/environment/.keep
new file mode 100644
index 0000000..89260ab
--- /dev/null
+++ b/tcp_tests/environment/.keep
@@ -0,0 +1,4 @@
+This 'environment' folder should be removed after
+refactoring the physical_mcp11_ovs_dpdk template to
+generate the 'environment' model instead of using
+the pre-defined 'lab03_ovs_dpdk'.
\ No newline at end of file
diff --git a/tcp_tests/environment/environment_inventory/lab03_physical_inventory.yaml b/tcp_tests/environment/environment_inventory/lab03_physical_inventory.yaml
deleted file mode 100644
index c87e92a..0000000
--- a/tcp_tests/environment/environment_inventory/lab03_physical_inventory.yaml
+++ /dev/null
@@ -1,95 +0,0 @@
-nodes:
- # Physical nodes
-
- kvm01.mcp11-ovs-dpdk.local:
- reclass_storage_name: infra_kvm_node01
- roles:
- - infra_kvm
- - linux_system_codename_xenial
- interfaces:
- enp3s0f0:
- role: single_mgm
- enp3s0f1:
- role: bond0_ab_ovs_vlan_ctl
-
- kvm02.mcp11-ovs-dpdk.local:
- reclass_storage_name: infra_kvm_node02
- roles:
- - infra_kvm
- - linux_system_codename_xenial
- interfaces:
- eno1:
- role: single_mgm
- eno2:
- role: bond0_ab_ovs_vlan_ctl
-
- kvm03.mcp11-ovs-dpdk.local:
- reclass_storage_name: infra_kvm_node03
- roles:
- - infra_kvm
- - linux_system_codename_xenial
- interfaces:
- eno1:
- role: single_mgm
- eno2:
- role: bond0_ab_ovs_vlan_ctl
-
- cmp001.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_compute_node01
- roles:
- - openstack_compute
- - linux_system_codename_xenial
- interfaces:
- enp3s0f0:
- role: single_mgm
- enp3s0f1:
- role: bond0_ab_ovs_vlan_ctl
- enp5s0f0:
- role: bond0_ab_ovs_vlan_ctl
- enp5s0f1:
- role: bond2_dpdk_prv
- dpdk_pci: '0000:05:00.1'
- enp5s0f2:
- role: bond2_dpdk_prv
- dpdk_pci: '0000:05:00.2'
-
- cmp002.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_compute_node02
- roles:
- - openstack_compute
- - linux_system_codename_xenial
- interfaces:
- eno1:
- role: single_mgm
- eth0:
- role: bond0_ab_ovs_vlan_ctl
- eth3:
- role: bond0_ab_ovs_vlan_ctl
- eth2:
- role: bond2_dpdk_prv
- dpdk_pci: '0000:05:00.1'
- eth4:
- role: bond2_dpdk_prv
- dpdk_pci: '0000:0b:00.0'
-
- gtw01.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_gateway_node01
- roles:
- - openstack_gateway
- - linux_system_codename_xenial
- interfaces:
- enp2s0f0:
- role: single_mgm
- enp2s0f1:
- role: bond0_ab_dvr_vlan_ctl_prv_floating
-
- gtw02.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_gateway_node02
- roles:
- - openstack_gateway
- - linux_system_codename_xenial
- interfaces:
- enp2s0f0:
- role: single_mgm
- enp2s0f1:
- role: bond0_ab_dvr_vlan_ctl_prv_floating
diff --git a/tcp_tests/environment/environment_inventory/vcp_os_sl_inventory.yaml b/tcp_tests/environment/environment_inventory/vcp_os_sl_inventory.yaml
deleted file mode 100644
index de95d4c..0000000
--- a/tcp_tests/environment/environment_inventory/vcp_os_sl_inventory.yaml
+++ /dev/null
@@ -1,209 +0,0 @@
-nodes:
- # Virtual Control Plane nodes
-
- ctl01.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_control_node01
- roles:
- - openstack_control_leader
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- ctl02.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_control_node02
- roles:
- - openstack_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- ctl03.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_control_node03
- roles:
- - openstack_control
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- dbs01.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_database_node01
- roles:
- - openstack_database_leader
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- dbs02.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_database_node01
- roles:
- - openstack_database
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- dbs03.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_database_node01
- roles:
- - openstack_database
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- msg01.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_message_queue_node01
- roles:
- - openstack_message_queue
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- msg02.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_message_queue_node02
- roles:
- - openstack_message_queue
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- msg03.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_message_queue_node03
- roles:
- - openstack_message_queue
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- mdb01.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_telemetry_node01
- roles:
- - openstack_telemetry
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- mdb02.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_telemetry_node02
- roles:
- - openstack_telemetry
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- mdb03.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_telemetry_node03
- roles:
- - openstack_telemetry
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- prx01.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_proxy_node01
- roles:
- - openstack_proxy
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- prx02.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_proxy_node02
- roles:
- - openstack_proxy
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- mon01.mcp11-ovs-dpdk.local:
- reclass_storage_name: stacklight_monitor_node01
- roles:
- - stacklight_monitor_leader
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- mon02.mcp11-ovs-dpdk.local:
- reclass_storage_name: stacklight_monitor_node02
- roles:
- - stacklight_monitor
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- mon03.mcp11-ovs-dpdk.local:
- reclass_storage_name: stacklight_monitor_node03
- roles:
- - stacklight_monitor
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- mtr01.mcp11-ovs-dpdk.local:
- reclass_storage_name: stacklight_telemetry_node01
- roles:
- - stacklight_telemetry
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- mtr02.mcp11-ovs-dpdk.local:
- reclass_storage_name: stacklight_telemetry_node02
- roles:
- - stacklight_telemetry
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- mtr03.mcp11-ovs-dpdk.local:
- reclass_storage_name: stacklight_telemetry_node03
- roles:
- - stacklight_telemetry
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- log01.mcp11-ovs-dpdk.local:
- reclass_storage_name: stacklight_log_node01
- roles:
- - stacklight_log_leader_v2
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- log02.mcp11-ovs-dpdk.local:
- reclass_storage_name: stacklight_log_node02
- roles:
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- log03.mcp11-ovs-dpdk.local:
- reclass_storage_name: stacklight_log_node03
- roles:
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
diff --git a/tcp_tests/environment/environment_inventory/virtual-devops-mcp-ocata-sl2.yaml b/tcp_tests/environment/environment_inventory/virtual-devops-mcp-ocata-sl2.yaml
deleted file mode 100644
index c520c5c..0000000
--- a/tcp_tests/environment/environment_inventory/virtual-devops-mcp-ocata-sl2.yaml
+++ /dev/null
@@ -1,350 +0,0 @@
-nodes:
- cfg01.mcp11-ovs-dpdk.local:
- reclass_storage_name: infra_config_node01
- roles:
- - infra_config
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- kvm01.mcp11-ovs-dpdk.local:
- reclass_storage_name: infra_kvm_node01
- roles:
- - infra_kvm
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- kvm02.mcp11-ovs-dpdk.local:
- reclass_storage_name: infra_kvm_node02
- roles:
- - infra_kvm
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- kvm03.mcp11-ovs-dpdk.local:
- reclass_storage_name: infra_kvm_node03
- roles:
- - infra_kvm
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- cid01.mcp11-ovs-dpdk.local:
- reclass_storage_name: cicd_control_node01
- roles:
- - cicd_control_leader
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- cid02.mcp11-ovs-dpdk.local:
- reclass_storage_name: cicd_control_node02
- roles:
- - cicd_control_manager
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- cid03.mcp11-ovs-dpdk.local:
- reclass_storage_name: cicd_control_node03
- roles:
- - cicd_control_manager
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- ctl01.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_control_node01
- roles:
- - openstack_control_leader
- - features_designate
- - features_designate_keystone
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- ctl02.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_control_node02
- roles:
- - openstack_control
- - features_designate
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- ctl03.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_control_node03
- roles:
- - openstack_control
- - features_designate
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- dbs01.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_database_node01
- roles:
- - openstack_database_leader
- - features_designate_database
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- dbs02.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_database_node02
- roles:
- - openstack_database
- - features_designate_database
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- dbs03.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_database_node03
- roles:
- - openstack_database
- - features_designate_database
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- msg01.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_message_queue_node01
- roles:
- - openstack_message_queue
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- msg02.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_message_queue_node02
- roles:
- - openstack_message_queue
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- msg03.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_message_queue_node03
- roles:
- - openstack_message_queue
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- mdb01.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_telemetry_node01
- roles:
- - openstack_telemetry
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- mdb02.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_telemetry_node02
- roles:
- - openstack_telemetry
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- mdb03.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_telemetry_node03
- roles:
- - openstack_telemetry
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- prx01.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_proxy_node01
- roles:
- - openstack_proxy
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- prx02.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_proxy_node02
- roles:
- - openstack_proxy
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- mtr01.mcp11-ovs-dpdk.local:
- reclass_storage_name: stacklight_telemetry_node01
- roles:
- - stacklight_telemetry_leader
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- mtr02.mcp11-ovs-dpdk.local:
- reclass_storage_name: stacklight_telemetry_node02
- roles:
- - stacklight_telemetry
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- mtr03.mcp11-ovs-dpdk.local:
- reclass_storage_name: stacklight_telemetry_node03
- roles:
- - stacklight_telemetry
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- log01.mcp11-ovs-dpdk.local:
- reclass_storage_name: stacklight_log_node01
- roles:
- - stacklight_log_leader_v2
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- log02.mcp11-ovs-dpdk.local:
- reclass_storage_name: stacklight_log_node02
- roles:
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- log03.mcp11-ovs-dpdk.local:
- reclass_storage_name: stacklight_log_node03
- roles:
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_vlan_ctl
-
- # Generator-based computes. For compatibility only
- cmp<<count>>.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_compute_rack01
- roles:
- - openstack_compute
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: bond0_ab_ovs_vxlan_ctl_mesh
- ens5:
- role: bond0_ab_ovs_vxlan_ctl_mesh
- ens6:
- role: bond1_ab_ovs_floating
-
- gtw01.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_gateway_node01
- roles:
- - openstack_gateway
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: bond0_ab_ovs_vxlan_ctl_mesh
- ens5:
- role: bond0_ab_ovs_vxlan_ctl_mesh
- ens6:
- role: bond1_ab_ovs_floating
-
- gtw02.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_gateway_node02
- roles:
- - openstack_gateway
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: bond0_ab_ovs_vxlan_ctl_mesh
- ens5:
- role: bond0_ab_ovs_vxlan_ctl_mesh
- ens6:
- role: bond1_ab_ovs_floating
diff --git a/tcp_tests/environment/environment_template/readme.txt b/tcp_tests/environment/environment_template/readme.txt
deleted file mode 100644
index e364476..0000000
--- a/tcp_tests/environment/environment_template/readme.txt
+++ /dev/null
@@ -1,111 +0,0 @@
-Render the template
--------------------
-
-Use reclass_tools from [1] to render the template.
-
-<env_name> : any name for newly created environment model
-<path_to_template> : path to the directory with the cookiecutter template, for example 'tcp-qa/tcp_tests/environment/environment_template'
-<destination_path> : path to directory where will be created the new environment model
-<inventory_fileN> : path to the YAML with the inventory data. Can be specified multiple times to use different parts of the inventory. VCP inventory must be specified for current workflow (when all linux.network.interface are deleted)
-
-
-```
-reclass-tools render -e <env_name> -t <path_to_template> -o <destination_path> -c <inventory_file1> -c <inventory_file2> [...] -c <inventory_fileN>
-```
-
-To attach the environment model to any cluster, use the instructions from [1]
-
-[1] https://github.com/dis-xcom/reclass_tools
-
-
-Template architecture
----------------------
-
-```
-└── environment_template
- ├── {{ cookiecutter._env_name }}
- │ ├── init.yml
- │ ├── linux_network_interface.yml
- │ ├── linux_system_codename_trusty.yml
- │ ├── linux_system_codename_xenial.yml
- │ ├── {# interfaces #} -> ../{# interfaces #}
- │ └── {# roles #} -> ../{# roles #}
- ├── {# interfaces #}
- │ └── ...
- └── {# roles #}
- └── ...
-```
-
-* {{ cookiecutter._env_name }} : folder that will be used to generate the new Environment model, contains:
-
- - init.yml : file that will be filled with reclass:storage:node pillars generated using inventory files. This is the main file that must be
- included to cfg01.* node before generating the reclass inventory with the salt state 'reclass.storage'.
-
- - linux_network_interface.yml : it is a workaround to make possible to pass the linux:network:interface configuration separatelly for each node.
- This is because reclass.storage state works only with parameters:_param:* pillars and cannot be used
- to add any other pillar data to parameters:* (for example, parameters:linux:network:interface).
- So, linux network interface configuration is stored in the intermediate variable parameters:_param:linux_network_interfaces
- for each node, and then included to the parameters:linux:network:interface using this file as a class attached
- to each node by default.
- - linux_system_codename_*.yml : A class with a single variable. Allows to specify the specific linux version using only node role.
-
- - {# interfaces #} : symlink to the {# interfaces #} folder outside of the cookiecutter template directory, to not pass it's content to the
- resulting model during the model rendering.
-
- - {# roles #} : symlink to the {# roles #} folder outside of the cookiecutter template directory, to not pass it's content to the
- resulting model during the model rendering.
-
-* {# interfaces #} : Interface role means the name of the file that will be included and rendered.
- Contains *text* patterns of YAML file that are included to the init.yml under linux_network_interface: parameters
- using Jinja.
- Each pattern provides the mapping of the physical interfaces which have the same role on some logical networking objects
- (OVS, bonds, bridges, ...). These networking objects provide the Underlay interfaces used for upcoming cluster architecture.
-
-* {# roles #} : Node roles mean the name of the files that will be included and rendered under the 'classes:' object.
- Contains *text* patterns of YAML file that are included to the init.yml and must provide only the
- list of the classes for the specific node.
-
-In the init.yml is defined a dict variable 'params' that is accessible from files in {# interfaces #} and {# roles #}.
-'params' may be used by Jinja expressions in these folders to generate some additional dynamic 'parameters:_param' pillars that cannot be specified
-as a fixed value in a class.
-
-If you need to specify a fixed values, please do the following:
-- add a new class file *.yml file next to the init.yml with the necessary *FIXED* parameters (example: linux_system_codename_xenial.yml)
-- add a node role to the {# roles #} directory that will include your environment.{{ cookiecutter._env_name }}.<class file from first step>
-- use the created node role in the inventory for required nodes
-
-
-Inventory examples
-------------------
-
-Inventory must include all the nodes, physical or virtual.
-'reclass_storage_name' is used for back compatibility until some parameters
-are still inherited from the cluster/system level of the reclass:storage pillars.
-
-Physical node example:
-```
-nodes:
- kvm01.mcp11-ovs-dpdk.local:
- reclass_storage_name: infra_kvm_node01
- roles:
- - infra_kvm
- - linux_system_codename_xenial
- interfaces:
- enp3s0f0:
- role: single_mgm
- enp3s0f1:
- role: bond0_ab_nondvr_vxlan_ctl_mesh
-```
-
-Virtual Control Plane node example:
-```
-nodes:
- ctl01.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_control_node01
- roles:
- - openstack_control_leader
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-```
diff --git "a/tcp_tests/environment/environment_template/\173\043 interfaces \043\175/bond0_ab_contrail" "b/tcp_tests/environment/environment_template/\173\043 interfaces \043\175/bond0_ab_contrail"
deleted file mode 100644
index 8de76d6..0000000
--- "a/tcp_tests/environment/environment_template/\173\043 interfaces \043\175/bond0_ab_contrail"
+++ /dev/null
@@ -1,63 +0,0 @@
-{#- Provides: #}
-{#- br_mesh (linux bridge) + tenant address #}
-{#- vhost0 , as a keepalived vip interface #}
-{#- Requirements: #}
-{#- _param:tenant_vlan #}
-{#- _param:tenant_address #}
-{#- _param:tenant_netmask #}
-{#- _param:tenant_network_gateway #}
-
- # {{ interfaces_role }}
- {%- for interface_name, interface in interfaces.items() %}
- {{ interface_name }}:
- mtu: 9000
- enabled: true
- name: {{ interface_name }}
- proto: manual
- type: eth
- ipflush_onchange: true
- {%- endfor %}
- bond0:
- mtu: 9000
- enabled: true
- mode: active-backup
- proto: manual
- slaves: {{ ' '.join(interfaces.keys()) }}
- type: bond
- use_interfaces:
- {%- for interface_name in interfaces.keys() %}
- - {{ interface_name }}
- {%- endfor %}
- require_interfaces:
- {%- for interface_name in interfaces.keys() %}
- - {{ interface_name }}
- {%- endfor %}
-
- {{- set_param('keepalived_vip_interface', 'vhost0') }}
- vhost0:
- mtu: 9000
- address: ${_param:_esc}{_param:tenant_address}
- netmask: ${_param:_esc}{_param:tenant_network_netmask}
- gateway: ${_param:_esc}{_param:tenant_network_gateway}
- enabled: true
- proto: static
- type: eth
- pre_up_cmds:
- - /usr/lib/contrail/if-vhost0
- name_servers:
- - ${_param:_esc}{_param:dns_server01}
- - ${_param:_esc}{_param:dns_server02}
- use_interfaces:
- - bond0.${_param:_esc}{_param:tenant_vlan}
- require_interfaces:
- - bond0.${_param:_esc}{_param:tenant_vlan}
- bond0.tenant_vlan:
- mtu: 9000
- name: bond0.${_param:_esc}{_param:tenant_vlan}
- enabled: true
- proto: manual
- type: vlan
- use_interfaces:
- - bond0
- require_interfaces:
- - bond0
diff --git "a/tcp_tests/environment/environment_template/\173\043 interfaces \043\175/bond0_ab_ctl_contrail" "b/tcp_tests/environment/environment_template/\173\043 interfaces \043\175/bond0_ab_ctl_contrail"
deleted file mode 100644
index dfee7cd..0000000
--- "a/tcp_tests/environment/environment_template/\173\043 interfaces \043\175/bond0_ab_ctl_contrail"
+++ /dev/null
@@ -1,88 +0,0 @@
-{#- Provides: #}
-{#- br_ctl (linux bridge) + control address #}
-{#- vhost0 #}
-{#- Requirements: #}
-{#- _param:single_address #}
-{#- _param:control_vlan #}
-{#- _param:control_network_netmask #}
-{#- _param:tenant_vlan #}
-{#- _param:tenant_address #}
-{#- _param:tenant_netmask #}
-{#- _param:tenant_network_gateway #}
-{#- _param:_param:dns_server01 #}
-{#- _param:_param:dns_server02 #}
-
- # {{ interfaces_role }}
- {%- for interface_name, interface in interfaces.items() %}
- {{ interface_name }}:
- mtu: 9000
- enabled: true
- name: {{ interface_name }}
- proto: manual
- type: eth
- ipflush_onchange: true
- {%- endfor %}
- bond0:
- mtu: 9000
- enabled: true
- mode: active-backup
- proto: manual
- slaves: {{ ' '.join(interfaces.keys()) }}
- type: bond
- use_interfaces:
- {%- for interface_name in interfaces.keys() %}
- - {{ interface_name }}
- {%- endfor %}
- require_interfaces:
- {%- for interface_name in interfaces.keys() %}
- - {{ interface_name }}
- {%- endfor %}
-
- br_ctl:
- address: ${_param:_esc}{_param:single_address}
- enabled: true
- netmask: ${_param:_esc}{_param:control_network_netmask}
- proto: static
- type: bridge
- use_interfaces:
- - bond0.${_param:_esc}{_param:control_vlan}
- require_interfaces:
- - bond0.${_param:_esc}{_param:control_vlan}
- bond0.control_vlan:
- mtu: 9000
- name: bond0.${_param:_esc}{_param:control_vlan}
- enabled: true
- proto: manual
- type: vlan
- use_interfaces:
- - bond0
- require_interfaces:
- - bond0
-
- vhost0:
- mtu: 9000
- address: ${_param:_esc}{_param:tenant_address}
- netmask: ${_param:_esc}{_param:tenant_network_netmask}
- gateway: ${_param:_esc}{_param:tenant_network_gateway}
- enabled: true
- proto: static
- type: eth
- pre_up_cmds:
- - /usr/lib/contrail/if-vhost0
- name_servers:
- - ${_param:_esc}{_param:dns_server01}
- - ${_param:_esc}{_param:dns_server02}
- use_interfaces:
- - bond0.${_param:_esc}{_param:tenant_vlan}
- require_interfaces:
- - bond0.${_param:_esc}{_param:tenant_vlan}
- bond0.tenant_vlan:
- mtu: 9000
- name: bond0.${_param:_esc}{_param:tenant_vlan}
- enabled: true
- proto: manual
- type: vlan
- use_interfaces:
- - bond0
- require_interfaces:
- - bond0
diff --git "a/tcp_tests/environment/environment_template/\173\043 interfaces \043\175/bond0_ab_dvr_vlan_ctl_prv_floating" "b/tcp_tests/environment/environment_template/\173\043 interfaces \043\175/bond0_ab_dvr_vlan_ctl_prv_floating"
deleted file mode 100644
index a4f891d..0000000
--- "a/tcp_tests/environment/environment_template/\173\043 interfaces \043\175/bond0_ab_dvr_vlan_ctl_prv_floating"
+++ /dev/null
@@ -1,65 +0,0 @@
-{#- Tenant over VLAN. For gateway nodes and computes with dvr #}
-{#- Provides: #}
-{#- br_ctl (OVS bridge) + control address #}
-{#- br_prv (OVS bridge) #}
-{#- br_floating (OVS bridge) #}
-{#- Requirements: #}
-{#- _param:single_address #}
-{#- _param:control_network_netmask #}
-{#- _param:control_vlan #}
-
- # {{ interfaces_role }}
- {%- for interface_name, interface in interfaces.items() %}
- {{ interface_name }}:
- mtu: 9000
- enabled: true
- master: bond0 # ?
- name: {{ interface_name }}
- proto: manual
- type: slave # ?
- ipflush_onchange: true
- {%- endfor %}
- bond0:
- enabled: true
- mode: active-backup
- ovs_bridge: br-floating
- ovs_type: OVSPort
- proto: manual
- slaves: {{ ' '.join(interfaces.keys()) }}
- type: bond
- use_interfaces:
- {%- for interface_name in interfaces.keys() %}
- - {{ interface_name }}
- {%- endfor %}
- require_interfaces:
- {%- for interface_name in interfaces.keys() %}
- - {{ interface_name }}
- {%- endfor %}
- br-floating:
- enabled: true
- type: ovs_bridge
- br_ctl:
- address: ${_param:_esc}{_param:single_address}
- bridge: br-floating
- enabled: true
- netmask: ${_param:_esc}{_param:control_network_netmask}
- ovs_options: tag=${_param:_esc}{_param:control_vlan}
- proto: static
- type: ovs_port
- require_interfaces:
- - br-floating
- br-prv:
- enabled: true
- type: ovs_bridge
- floating-to-prv:
- enabled: true
- type: ovs_port
- port_type: patch
- bridge: br-floating
- peer: prv-to-floating
- prv-to-floating:
- enabled: true
- type: ovs_port
- port_type: patch
- bridge: br-prv
- peer: floating-to-prv
diff --git "a/tcp_tests/environment/environment_template/\173\043 interfaces \043\175/bond0_ab_dvr_vxlan_ctl_mesh_floating" "b/tcp_tests/environment/environment_template/\173\043 interfaces \043\175/bond0_ab_dvr_vxlan_ctl_mesh_floating"
deleted file mode 100644
index 265aba8..0000000
--- "a/tcp_tests/environment/environment_template/\173\043 interfaces \043\175/bond0_ab_dvr_vxlan_ctl_mesh_floating"
+++ /dev/null
@@ -1,63 +0,0 @@
-{#- Tenant over VXLAN. For gateway nodes and computes with dvr #}
-{#- Provides: #}
-{#- br_ctl (OVS bridge) + control address #}
-{#- br_mesh (OVS bridge) + tenant address #}
-{#- br_floating (OVS bridge) #}
-{#- Requirements: #}
-{#- _param:single_address #}
-{#- _param:control_network_netmask #}
-{#- _param:control_vlan #}
-{#- _param:tenant_address #}
-{#- _param:tenant_netmask #}
-{#- _param:tenant_vlan #}
-
- # {{ interfaces_role }}
- {%- for interface_name, interface in interfaces.items() %}
- {{ interface_name }}:
- mtu: 9000
- enabled: true
- master: bond0 # ?
- name: {{ interface_name }}
- proto: manual
- type: slave # ?
- ipflush_onchange: true
- {%- endfor %}
- bond0:
- enabled: true
- mode: active-backup
- ovs_bridge: br-floating
- ovs_type: OVSPort
- proto: manual
- slaves: {{ ' '.join(interfaces.keys()) }}
- type: bond
- use_interfaces:
- {%- for interface_name in interfaces.keys() %}
- - {{ interface_name }}
- {%- endfor %}
- require_interfaces:
- {%- for interface_name in interfaces.keys() %}
- - {{ interface_name }}
- {%- endfor %}
- br-floating:
- enabled: true
- type: ovs_bridge
- br_ctl:
- address: ${_param:_esc}{_param:single_address}
- bridge: br-floating
- enabled: true
- netmask: ${_param:_esc}{_param:control_network_netmask}
- ovs_options: tag=${_param:_esc}{_param:control_vlan}
- proto: static
- type: ovs_port
- require_interfaces:
- - br-floating
- br-mesh:
- enabled: true
- type: ovs_port
- bridge: br-floating
- proto: static
- ovs_options: tag=${_param:_esc}{_param:tenant_vlan}
- address: ${_param:_esc}{_param:tenant_address}
- netmask: ${_param:_esc}{_param:tenant_network_netmask}
- require_interfaces:
- - br-floating
diff --git "a/tcp_tests/environment/environment_template/\173\043 interfaces \043\175/bond0_ab_nondvr_vlan_ctl_prv" "b/tcp_tests/environment/environment_template/\173\043 interfaces \043\175/bond0_ab_nondvr_vlan_ctl_prv"
deleted file mode 100644
index 53cdb09..0000000
--- "a/tcp_tests/environment/environment_template/\173\043 interfaces \043\175/bond0_ab_nondvr_vlan_ctl_prv"
+++ /dev/null
@@ -1,49 +0,0 @@
-{#- Tenant over VLAN. For computes without dvr #}
-{#- Provides: #}
-{#- br_ctl (OVS bridge) + control address #}
-{#- br_prv (OVS bridge) #}
-{#- Requirements: #}
-{#- _param:single_address #}
-{#- _param:control_network_netmask #}
-{#- _param:control_vlan #}
-
- # {{ interfaces_role }}
- {%- for interface_name, interface in interfaces.items() %}
- {{ interface_name }}:
- mtu: 9000
- enabled: true
- master: bond0 # ?
- name: {{ interface_name }}
- proto: manual
- type: slave # ?
- ipflush_onchange: true
- {%- endfor %}
- bond0:
- enabled: true
- mode: active-backup
- ovs_bridge: br-prv
- ovs_type: OVSPort
- proto: manual
- slaves: {{ ' '.join(interfaces.keys()) }}
- type: bond
- use_interfaces:
- {%- for interface_name in interfaces.keys() %}
- - {{ interface_name }}
- {%- endfor %}
- require_interfaces:
- {%- for interface_name in interfaces.keys() %}
- - {{ interface_name }}
- {%- endfor %}
- br-prv:
- enabled: true
- type: ovs_bridge
- br_ctl:
- address: ${_param:_esc}{_param:single_address}
- bridge: br-prv
- enabled: true
- netmask: ${_param:_esc}{_param:control_network_netmask}
- ovs_options: tag=${_param:_esc}{_param:control_vlan}
- proto: static
- type: ovs_port
- require_interfaces:
- - br-prv
diff --git "a/tcp_tests/environment/environment_template/\173\043 interfaces \043\175/bond0_ab_nondvr_vxlan_ctl_mesh" "b/tcp_tests/environment/environment_template/\173\043 interfaces \043\175/bond0_ab_nondvr_vxlan_ctl_mesh"
deleted file mode 100644
index 42dbfca..0000000
--- "a/tcp_tests/environment/environment_template/\173\043 interfaces \043\175/bond0_ab_nondvr_vxlan_ctl_mesh"
+++ /dev/null
@@ -1,76 +0,0 @@
-{#- Tenant over VXLAN. For computes without dvr #}
-{#- Provides: #}
-{#- br_ctl (linux bridge) + control address #}
-{#- br_mesh (linux bridge) + tenant address #}
-{#- Requirements: #}
-{#- _param:single_address #}
-{#- _param:control_network_netmask #}
-{#- _param:control_vlan #}
-{#- _param:tenant_address #}
-{#- _param:tenant_netmask #}
-{#- _param:tenant_vlan #}
-
- # {{ interfaces_role }}
- {%- for interface_name, interface in interfaces.items() %}
- {{ interface_name }}:
- mtu: 9000
- enabled: true
- name: {{ interface_name }}
- proto: manual
- type: eth
- ipflush_onchange: true
- {%- endfor %}
- bond0:
- enabled: true
- mode: active-backup
- proto: manual
- slaves: {{ ' '.join(interfaces.keys()) }}
- type: bond
- use_interfaces:
- {%- for interface_name in interfaces.keys() %}
- - {{ interface_name }}
- {%- endfor %}
- require_interfaces:
- {%- for interface_name in interfaces.keys() %}
- - {{ interface_name }}
- {%- endfor %}
-
- br_ctl:
- address: ${_param:_esc}{_param:single_address}
- enabled: true
- netmask: ${_param:_esc}{_param:control_network_netmask}
- proto: static
- type: bridge
- use_interfaces:
- - bond0.${_param:_esc}{_param:control_vlan}
- require_interfaces:
- - bond0.${_param:_esc}{_param:control_vlan}
- bond0.control_vlan:
- name: bond0.${_param:_esc}{_param:control_vlan}
- enabled: true
- proto: manual
- type: vlan
- use_interfaces:
- - bond0
- require_interfaces:
- - bond0
-
- br_mesh:
- address: ${_param:_esc}{_param:tenant_address}
- netmask: ${_param:_esc}{_param:tenant_network_netmask}
- enabled: true
- proto: static
- type: bridge
- use_interfaces:
- - bond0.${_param:_esc}{_param:tenant_vlan}
- require_interfaces:
- - bond0.${_param:_esc}{_param:tenant_vlan}
- bond0.tenant_vlan:
- name: bond0.${_param:_esc}{_param:tenant_vlan}
- enabled: true
- proto: manual
- type: vlan
- use_interfaces:
- - bond0
- require_interfaces:
- - bond0
diff --git "a/tcp_tests/environment/environment_template/\173\043 interfaces \043\175/bond0_ab_ovs_vlan_ctl" "b/tcp_tests/environment/environment_template/\173\043 interfaces \043\175/bond0_ab_ovs_vlan_ctl"
deleted file mode 100644
index d6bbd72..0000000
--- "a/tcp_tests/environment/environment_template/\173\043 interfaces \043\175/bond0_ab_ovs_vlan_ctl"
+++ /dev/null
@@ -1,53 +0,0 @@
-{#- Control network with bond. For control plane nodes #}
-{#- Provides: #}
-{#- br_ctl (linux bridge) + control address #}
-{#- _param:keepalived_vip_interface = 'br_ctl' #}
-{#- Requirements: #}
-{#- _param:single_address #}
-{#- _param:control_network_netmask #}
-{#- _param:control_vlan #}
-
- # {{ interfaces_role }}
- {%- for interface_name, interface in interfaces.items() %}
- {{ interface_name }}:
- mtu: 9000
- enabled: true
- name: {{ interface_name }}
- proto: manual
- type: eth
- ipflush_onchange: true
- {%- endfor %}
- bond0:
- enabled: true
- mode: active-backup
- proto: manual
- slaves: {{ ' '.join(interfaces.keys()) }}
- type: bond
- use_interfaces:
- {%- for interface_name in interfaces.keys() %}
- - {{ interface_name }}
- {%- endfor %}
- require_interfaces:
- {%- for interface_name in interfaces.keys() %}
- - {{ interface_name }}
- {%- endfor %}
-
- br_ctl:
- address: ${_param:_esc}{_param:single_address}
- enabled: true
- netmask: ${_param:_esc}{_param:control_network_netmask}
- proto: static
- type: bridge
- use_interfaces:
- - bond0.${_param:_esc}{_param:control_vlan}
- require_interfaces:
- - bond0.${_param:_esc}{_param:control_vlan}
- bond0.control_vlan:
- name: bond0.${_param:_esc}{_param:control_vlan}
- enabled: true
- proto: manual
- type: vlan
- use_interfaces:
- - bond0
- require_interfaces:
- - bond0
diff --git "a/tcp_tests/environment/environment_template/\173\043 interfaces \043\175/bond0_ab_ovs_vlan_ctl_prv" "b/tcp_tests/environment/environment_template/\173\043 interfaces \043\175/bond0_ab_ovs_vlan_ctl_prv"
deleted file mode 100644
index 3cba8de..0000000
--- "a/tcp_tests/environment/environment_template/\173\043 interfaces \043\175/bond0_ab_ovs_vlan_ctl_prv"
+++ /dev/null
@@ -1,49 +0,0 @@
-{#- Tenant over VLAN. For gateway and compute nodes #}
-{#- Provides: #}
-{#- br_ctl (OVS bridge) + control address #}
-{#- br_prv (OVS bridge) #}
-{#- Requirements: #}
-{#- _param:single_address #}
-{#- _param:control_network_netmask #}
-{#- _param:control_vlan #}
-
- # {{ interfaces_role }}
- {%- for interface_name, interface in interfaces.items() %}
- {{ interface_name }}:
- mtu: 9000
- enabled: true
- master: bond0 # ?
- name: {{ interface_name }}
- proto: manual
- type: slave # ?
- ipflush_onchange: true
- {%- endfor %}
- bond0:
- enabled: true
- mode: active-backup
- ovs_bridge: br-prv
- ovs_type: OVSPort
- proto: manual
- slaves: {{ ' '.join(interfaces.keys()) }}
- type: bond
- use_interfaces:
- {%- for interface_name in interfaces.keys() %}
- - {{ interface_name }}
- {%- endfor %}
- require_interfaces:
- {%- for interface_name in interfaces.keys() %}
- - {{ interface_name }}
- {%- endfor %}
- br-prv:
- enabled: true
- type: ovs_bridge
- br_ctl:
- address: ${_param:_esc}{_param:single_address}
- bridge: br-prv
- enabled: true
- netmask: ${_param:_esc}{_param:control_network_netmask}
- ovs_options: tag=${_param:_esc}{_param:control_vlan}
- proto: static
- type: ovs_port
- require_interfaces:
- - br-prv
diff --git "a/tcp_tests/environment/environment_template/\173\043 interfaces \043\175/bond0_ab_ovs_vxlan_ctl_mesh" "b/tcp_tests/environment/environment_template/\173\043 interfaces \043\175/bond0_ab_ovs_vxlan_ctl_mesh"
deleted file mode 100644
index 2aea73d..0000000
--- "a/tcp_tests/environment/environment_template/\173\043 interfaces \043\175/bond0_ab_ovs_vxlan_ctl_mesh"
+++ /dev/null
@@ -1,62 +0,0 @@
-{#- Tenant over VXLAN. For gateway and compute nodes #}
-{#- Provides: #}
-{#- br_ctl (OVS bridge) + control address #}
-{#- br_mesh (OVS bridge) + tenant address #}
-{#- Requirements: #}
-{#- _param:single_address #}
-{#- _param:control_network_netmask #}
-{#- _param:control_vlan #}
-{#- _param:tenant_address #}
-{#- _param:tenant_netmask #}
-{#- _param:tenant_vlan #}
-
- # {{ interfaces_role }}
- {%- for interface_name, interface in interfaces.items() %}
- {{ interface_name }}:
- mtu: 9000
- enabled: true
- master: bond0 # ?
- name: {{ interface_name }}
- proto: manual
- type: slave # ?
- ipflush_onchange: true
- {%- endfor %}
- bond0:
- enabled: true
- mode: active-backup
- ovs_bridge: br-ten
- ovs_type: OVSPort
- proto: manual
- slaves: {{ ' '.join(interfaces.keys()) }}
- type: bond
- use_interfaces:
- {%- for interface_name in interfaces.keys() %}
- - {{ interface_name }}
- {%- endfor %}
- require_interfaces:
- {%- for interface_name in interfaces.keys() %}
- - {{ interface_name }}
- {%- endfor %}
- br-ten:
- enabled: true
- type: ovs_bridge
- br_ctl:
- address: ${_param:_esc}{_param:single_address}
- bridge: br-ten
- enabled: true
- netmask: ${_param:_esc}{_param:control_network_netmask}
- ovs_options: tag=${_param:_esc}{_param:control_vlan}
- proto: static
- type: ovs_port
- require_interfaces:
- - br-ten
- br-mesh:
- enabled: true
- type: ovs_port
- bridge: br-ten
- proto: static
- ovs_options: tag=${_param:_esc}{_param:tenant_vlan}
- address: ${_param:_esc}{_param:tenant_address}
- netmask: ${_param:_esc}{_param:tenant_network_netmask}
- require_interfaces:
- - br-ten
diff --git "a/tcp_tests/environment/environment_template/\173\043 interfaces \043\175/bond1_ab_ovs_floating" "b/tcp_tests/environment/environment_template/\173\043 interfaces \043\175/bond1_ab_ovs_floating"
deleted file mode 100644
index 9df5f46..0000000
--- "a/tcp_tests/environment/environment_template/\173\043 interfaces \043\175/bond1_ab_ovs_floating"
+++ /dev/null
@@ -1,38 +0,0 @@
-{#- Floating on a separated bond interfaces. For gateway nodes and computes with dvr #}
-{#- Provides: #}
-{#- br_floating (OVS bridge) #}
-{#- Requirements: #}
-{#- _param:?addr #}
-{#- _param:?mask #}
-{#- _param:?vlan #}
-
- # {{ interfaces_role }}
- {%- for interface_name, interface in interfaces.items() %}
- {{ interface_name }}:
- mtu: 9000
- enabled: true
- master: bond1 # ?
- name: {{ interface_name }}
- proto: manual
- type: slave # ?
- ipflush_onchange: true
- {%- endfor %}
- bond1:
- enabled: true
- mode: active-backup
- ovs_bridge: br-floating
- ovs_type: OVSPort
- proto: manual
- slaves: {{ ' '.join(interfaces.keys()) }}
- type: bond
- use_interfaces:
- {%- for interface_name in interfaces.keys() %}
- - {{ interface_name }}
- {%- endfor %}
- require_interfaces:
- {%- for interface_name in interfaces.keys() %}
- - {{ interface_name }}
- {%- endfor %}
- br-floating:
- enabled: true
- type: ovs_bridge
diff --git "a/tcp_tests/environment/environment_template/\173\043 interfaces \043\175/bond2_dpdk_prv" "b/tcp_tests/environment/environment_template/\173\043 interfaces \043\175/bond2_dpdk_prv"
deleted file mode 100644
index 39bc013..0000000
--- "a/tcp_tests/environment/environment_template/\173\043 interfaces \043\175/bond2_dpdk_prv"
+++ /dev/null
@@ -1,27 +0,0 @@
-{#- Provides: #}
-{#- br-prv (OVS bridge) #}
-{#- Requirements: #}
-{#- _param:tenant_address #}
-{#- _param:tenant_netmask #}
-
- # {{ interfaces_role }}
- {%- for interface_name, interface in interfaces.items() %}
- {{ interface_name }}:
- bond: bonddpdk2
- driver: igb_uio
- enabled: true
- n_rxq: 2
- name: {{ interface_name }}
- pci: '{{ interface['dpdk_pci'] }}'
- type: dpdk_ovs_port
- {%- endfor %}
- bonddpdk2:
- bridge: br-prv
- enabled: true
- mode: active-backup
- type: dpdk_ovs_bond
- br-prv:
- address: ${_param:_esc}{_param:tenant_address}
- enabled: true
- netmask: ${_param:_esc}{_param:tenant_netmask}
- type: dpdk_ovs_bridge
diff --git "a/tcp_tests/environment/environment_template/\173\043 interfaces \043\175/readme.txt" "b/tcp_tests/environment/environment_template/\173\043 interfaces \043\175/readme.txt"
deleted file mode 100644
index cf80b5b..0000000
--- "a/tcp_tests/environment/environment_template/\173\043 interfaces \043\175/readme.txt"
+++ /dev/null
@@ -1,10 +0,0 @@
-Patterns from this folder are used for two purposes:
-1. Configure the real node interfaces as specified in the inventory.
-2. Provide the following list of interfaces for the underlay level:
- - br_mgm: Access from infrastructure management network / admin network / DHCP / PXE
- - br_ctl: OpenStack control network for internal services
- - br-prv: For tenant networks with VLAN segmentation
- - br-ten: For tenant networks with VXLAN segmentation
- - br-mesh: Endpoint for VXLAN tunnels that are used by br-ten
- - br-floating: Connection to the floating network
- - vhost0: for OpenContrail workloads
\ No newline at end of file
diff --git "a/tcp_tests/environment/environment_template/\173\043 interfaces \043\175/single_ctl" "b/tcp_tests/environment/environment_template/\173\043 interfaces \043\175/single_ctl"
deleted file mode 100644
index ce54ed7..0000000
--- "a/tcp_tests/environment/environment_template/\173\043 interfaces \043\175/single_ctl"
+++ /dev/null
@@ -1,31 +0,0 @@
-{#- Control network. For control plane nodes #}
-{#- Provides: #}
-{#- br_ctl (linux bridge) + address #}
-{#- _param:keepalived_vip_interface = 'br_ctl' #}
-{#- Requires: #}
-{#- _param:single_address #}
-{#- _param:control_network_netmask #}
-{#- _param:dns_server01 #}
-{#- _param:dns_server02 #}
-
- # {{ interfaces_role }}
- {%- set interface_name = interfaces.keys()[0] %}
- {{ interface_name }}:
- enabled: true
- name: {{ interface_name }}
- proto: manual
- type: eth
- ipflush_onchange: true
- br_ctl:
- address: ${_param:_esc}{_param:single_address}
- enabled: true
- netmask: ${_param:_esc}{_param:control_network_netmask}
- proto: static
- type: bridge
- name_servers:
- - ${_param:_esc}{_param:dns_server01}
- - ${_param:_esc}{_param:dns_server02}
- use_interfaces:
- - {{ interface_name }}
- require_interfaces:
- - {{ interface_name }}
diff --git "a/tcp_tests/environment/environment_template/\173\043 interfaces \043\175/single_dhcp" "b/tcp_tests/environment/environment_template/\173\043 interfaces \043\175/single_dhcp"
deleted file mode 100644
index 87315e0..0000000
--- "a/tcp_tests/environment/environment_template/\173\043 interfaces \043\175/single_dhcp"
+++ /dev/null
@@ -1,11 +0,0 @@
-{#- Management (admin) network with DHCP #}
-{#- Provides: #}
-{#- <interface_name> + DHCP address #}
-
- # {{ interfaces_role }}
- {%- set interface_name = interfaces.keys()[0] %}
- {{ interface_name }}:
- enabled: true
- type: eth
- proto: dhcp
- name: {{ interface_name }}
\ No newline at end of file
diff --git "a/tcp_tests/environment/environment_template/\173\043 interfaces \043\175/single_mgm" "b/tcp_tests/environment/environment_template/\173\043 interfaces \043\175/single_mgm"
deleted file mode 100644
index a0de958..0000000
--- "a/tcp_tests/environment/environment_template/\173\043 interfaces \043\175/single_mgm"
+++ /dev/null
@@ -1,30 +0,0 @@
-{#- Management (admin) network. For nodes where deploy address must be configured as static (at least cfg*) #}
-{#- Provides: #}
-{#- br_mgm (linux bridge) + address #}
-{#- Requires: #}
-{#- _param:deploy_address #}
-{#- _param:deploy_network_netmask #}
-{#- _param:deploy_network_gateway #}
-{#- _param:dns_server01 #}
-{#- _param:dns_server02 #}
-
- # {{ interfaces_role }}
- {%- set interface_name = interfaces.keys()[0] %}
- {{ interface_name }}:
- enabled: true
- name: {{ interface_name }}
- proto: manual
- type: eth
- ipflush_onchange: true
- br_mgm:
- address: ${_param:_esc}{_param:deploy_address}
- netmask: ${_param:_esc}{_param:deploy_network_netmask} #lab03: 255.255.255.192
- gateway: ${_param:_esc}{_param:deploy_network_gateway} #lab03: 172.16.49.1
- enabled: true
- name_servers:
- - ${_param:_esc}{_param:dns_server01}
- - ${_param:_esc}{_param:dns_server02}
- proto: static
- type: bridge
- use_interfaces:
- - {{ interface_name }}
diff --git "a/tcp_tests/environment/environment_template/\173\043 interfaces \043\175/single_mgm_dhcp" "b/tcp_tests/environment/environment_template/\173\043 interfaces \043\175/single_mgm_dhcp"
deleted file mode 100644
index fd2bf66..0000000
--- "a/tcp_tests/environment/environment_template/\173\043 interfaces \043\175/single_mgm_dhcp"
+++ /dev/null
@@ -1,18 +0,0 @@
-{#- Management (admin) network. For nodes where deploy address provided by DHCP server #}
-{#- Provides: #}
-{#- br_mgm (linux bridge) + DHCP address #}
-
- # {{ interfaces_role }}
- {%- set interface_name = interfaces.keys()[0] %}
- {{ interface_name }}:
- enabled: true
- type: eth
- proto: manual
- ipflush_onchange: true
- name: {{ interface_name }}
- br_mgm:
- enabled: true
- proto: dhcp
- type: bridge
- use_interfaces:
- - {{ interface_name }}
diff --git "a/tcp_tests/environment/environment_template/\173\043 interfaces \043\175/single_vlan_ctl" "b/tcp_tests/environment/environment_template/\173\043 interfaces \043\175/single_vlan_ctl"
deleted file mode 100644
index 2b899a2..0000000
--- "a/tcp_tests/environment/environment_template/\173\043 interfaces \043\175/single_vlan_ctl"
+++ /dev/null
@@ -1,41 +0,0 @@
-{#- Control network. For control plane nodes #}
-{#- Provides: #}
-{#- br_ctl (linux bridge) + address #}
-{#- _param:keepalived_vip_interface = 'br_ctl' #}
-{#- Requires: #}
-{#- _param:single_address #}
-{#- _param:control_network_netmask #}
-{#- _param:control_vlan #}
-{#- _param:dns_server01 #}
-{#- _param:dns_server02 #}
-
- # {{ interfaces_role }}
- {%- set interface_name = interfaces.keys()[0] %}
- {{ interface_name }}:
- enabled: true
- name: {{ interface_name }}
- proto: manual
- type: eth
- ipflush_onchange: true
- br_ctl:
- enabled: true
- address: ${_param:_esc}{_param:single_address}
- netmask: ${_param:_esc}{_param:control_network_netmask}
- proto: static
- type: bridge
- name_servers:
- - ${_param:_esc}{_param:dns_server01}
- - ${_param:_esc}{_param:dns_server02}
- use_interfaces:
- - {{ interface_name }}.${_param:_esc}{_param:control_vlan}
- require_interfaces:
- - {{ interface_name }}.${_param:_esc}{_param:control_vlan}
- {{ interface_name }}.control_vlan:
- name: {{ interface_name }}.${_param:_esc}{_param:control_vlan}
- enabled: true
- proto: manual
- type: vlan
- use_interfaces:
- - {{ interface_name }}
- require_interfaces:
- - {{ interface_name }}
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/_linux_network_interface" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/_linux_network_interface"
deleted file mode 100644
index 9ab7824..0000000
--- "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/_linux_network_interface"
+++ /dev/null
@@ -1,23 +0,0 @@
-{#- Collect interface roles and params into the following dict:
- # interface_role: # filename that will be included
- # interface_name: # interface (eth0)
- # interface_param1: value # optional parameters or empty dict
- # interface_param2: value
- # ...
- #}
-{%- set interface_roles = {} %}
-{%- for interface_name, interface in node['interfaces'].items() %}
- {%- if interface['role'] not in interface_roles %}
- {%- set _ = interface_roles.update({interface['role']: {}}) %}
- {%- endif %}
- {%- set _ = interface_roles[interface['role']].update({interface_name: {}}) %}
- {%- for param_name, param in interface.items() %}
- {%- set _ = interface_roles[interface['role']][interface_name].update({param_name: param}) %}
- {%- endfor %}
-{%- endfor %}
-{%- set _ = params.update({'linux_network_interfaces': "\n"}) %}
-{%- for interfaces_role, interfaces in interface_roles.items() %}
- {%- import ("{# interfaces #}/" + interfaces_role) as interface with context %}
- {%- set _ = params.update({'linux_network_interfaces': params['linux_network_interfaces'] + interface|string }) %}
-{%- endfor %}
- - environment.{{ cookiecutter._env_name }}.linux_network_interface
\ No newline at end of file
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/_metadata_process" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/_metadata_process"
deleted file mode 100644
index 5809398..0000000
--- "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/_metadata_process"
+++ /dev/null
@@ -1,124 +0,0 @@
-{#-
-1. Check if 'local_metadata' matches to something in 'global_metadata'.
- If yes, fetch and process the data.
- If no, initialize necessary data.
- If partially intersects (keepalived roles for different clusters): add a fail-state warning to the YAML to avoid using the wrong config
-2. Set necessary 'params' using the 'local_metadata'
-3. Store the 'local_metadata' to the 'global_metadata' if required.
-4. Increment/decrement the 'global_metadata' objects if necessary (next IP address, next vrouter ID, master->slave, etc)
-
-global_metadata keep states across the nodes
-local_metadata keep states for the current node only
-
-Example of local_metadata and global_metadata runtime content:
-
- local_metadata:
- keepalived_vip_priority:
- - openstack_control
- - openstack_database
- - openstack_message_queue
- rabbitmq_cluster_role:
- - openstack_message_queue
-
- global_metadata:
- keepalived_vip_priority: # Separate counters
- openstack_control|openstack_database|openstack_message_queue: 254
- cicd_control|infra_kvm: 254
- keepalived_vip_virtual_router_id: # Common counter
- __latest: 11
- openstack_control|openstack_database|openstack_message_queue: 10
- cicd_control|infra_kvm: 11
- mysql_cluster_role:
- openstack_database: master
-#}
-
-{%- macro stateful_roles_check(counter_name) %}
-{#- ####################################### -#}
-
- {#- 1. Check that there is no intersections between different groups of roles for the <counter_name> #}
- {%- for names, counter in global_metadata.get(counter_name, {}).items() %}
- {%- set global_roles = names.split('|') %}
- {%- for local_counter_role_name in local_metadata.get(counter_name, []) %}
- {%- if local_counter_role_name in global_roles %}
- {%- set adding_names = local_metadata.get(counter_name, [])|sort|join('|') %}
- {%- if names != adding_names %}
- {#- Found unexpected combination of roles, cause the template rendering exception #}
- {%- include("======> NODE ROLES MAPPING ERROR! Please check the roles for the node '" + inventory_node_name + "' , metaparam '" + counter_name + "':\n======> Existing roles: " + names + "\n======> Adding roles: " + adding_names) %}
- {%- endif %}
- {%- endif %}
- {%- endfor %}
- {%- endfor %}
-{%- endmacro %}
-
-{%- macro stateful_counter(counter_name, counter_start, counter_end, counter_step, uniq_per_node=True) %}
-{#- ############################################################################# -#}
-{%- if counter_name in local_metadata %}
- {{- stateful_roles_check(counter_name) }}
-
- {%- if counter_name not in global_metadata %}
- {%- set _ = global_metadata.update({counter_name: {}}) %}
- {%- endif %}
- {%- set counter_roles_name = local_metadata[counter_name]|sort|join('|') %}
-
- {%- if uniq_per_node == True %}
-
- {%- if counter_roles_name not in global_metadata[counter_name] %}
- {#- Set default value for <counter_roles_name> = <counter_start> #}
- {%- set _ = global_metadata[counter_name].update({counter_roles_name: counter_start}) %}
- {%- else %}
- {#- Increment or decrement value <counter_roles_name> #}
- {%- set _ = global_metadata[counter_name].update({counter_roles_name: global_metadata[counter_name][counter_roles_name] + counter_step}) %}
- {%- if global_metadata[counter_name][counter_roles_name] == counter_end %}
- {# Cause a jinja render exception and make visible the message with correct counter_name #}
- {%- include("======> VALUE_ERROR: " + counter_name + "=" + counter_end + " is out of bounds!" ) %}
- {%- endif %}
- {%- endif %}
-
- {%- else %}
-
- {%- if '__latest' not in global_metadata[counter_name] %}
- {#- Set the value for __latest = <counter_start> #}
- {%- set _ = global_metadata[counter_name].update({'__latest': counter_start}) %}
- {%- endif %}
- {%- if counter_roles_name not in global_metadata[counter_name] %}
- {%- set _ = global_metadata[counter_name].update({'__latest': global_metadata[counter_name]['__latest'] + counter_step}) %}
- {%- if global_metadata[counter_name]['__latest'] == counter_end %}
- {# Cause a jinja render exception and make visible the message with correct counter_name #}
- {%- include("======> VALUE_ERROR: " + counter_name + "=" + counter_end + " is out of bounds!" ) %}
- {%- endif %}
- {%- set _ = global_metadata[counter_name].update({counter_roles_name: global_metadata[counter_name]['__latest']}) %}
- {%- endif %}
-
- {%- endif %}
- {%- set _ = params.update({counter_name: global_metadata[counter_name][counter_roles_name]}) %}
-{%- endif %}
-{%- endmacro %}
-
-{%- macro stateful_masterslave(masterslave_name, master_name='master', slave_name='slave') %}
-{#- ##################################################################################### -#}
-{%- if masterslave_name in local_metadata %}
- {{- stateful_roles_check(masterslave_name) }}
-
- {%- if masterslave_name not in global_metadata %}
- {%- set _ = global_metadata.update({masterslave_name: {}}) %}
- {%- endif %}
- {%- set masterslave_roles_name = local_metadata[masterslave_name]|sort|join('|') %}
-
- {%- if masterslave_roles_name not in global_metadata[masterslave_name] %}
- {#- Set first value <masterslave_roles_name> = <master_name> #}
- {%- set _ = global_metadata[masterslave_name].update({masterslave_roles_name: master_name}) %}
- {%- else %}
- {#- Set value <masterslave_roles_name> = <slave_name> #}
- {%- set _ = global_metadata[masterslave_name].update({masterslave_roles_name: slave_name}) %}
- {%- endif %}
- {%- set _ = params.update({masterslave_name: global_metadata[masterslave_name][masterslave_roles_name]}) %}
-{%- endif %}
-{%- endmacro %}
-
-{{- stateful_counter('cicd_database_id', counter_start=1, counter_end=255, counter_step=1) }}
-{{- stateful_counter('opencontrail_database_id', counter_start=1, counter_end=255, counter_step=1) }}
-{{- stateful_counter('keepalived_vip_priority', counter_start=254, counter_end=1, counter_step=-1) }}
-{{- stateful_counter('keepalived_vip_virtual_router_id', counter_start=159, counter_end=250, counter_step=1, uniq_per_node=False) }}
-{{- stateful_masterslave('rabbitmq_cluster_role') }}
-{{- stateful_masterslave('mysql_cluster_role') }}
-{{- stateful_masterslave('redis_cluster_role') }}
\ No newline at end of file
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/_overrides" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/_overrides"
deleted file mode 100644
index ce3e4b1..0000000
--- "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/_overrides"
+++ /dev/null
@@ -1,9 +0,0 @@
-{#-
-parameters:
- reclass:
- storage:
- node:
- <reclass_storage_node_name>:
- classes:
-#}
- - environment.{{ cookiecutter._env_name }}.overrides
\ No newline at end of file
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/ceph_mon" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/ceph_mon"
deleted file mode 100644
index b07032f..0000000
--- "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/ceph_mon"
+++ /dev/null
@@ -1,12 +0,0 @@
-{#-
-parameters:
- reclass:
- storage:
- node:
- <reclass_storage_node_name>:
- classes:
-#}
- - cluster.${_param:cluster_name}.ceph.mon
-{{- set_param('keepalived_vip_interface', 'br_ctl') }}
-{{- register_metaparam('keepalived_vip_priority', 'ceph_mon') }}
-{{- register_metaparam('keepalived_vip_virtual_router_id', 'ceph_mon') }}
\ No newline at end of file
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/cicd_control_leader" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/cicd_control_leader"
deleted file mode 100644
index 8f92bdd..0000000
--- "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/cicd_control_leader"
+++ /dev/null
@@ -1,13 +0,0 @@
-{#-
-parameters:
- reclass:
- storage:
- node:
- <reclass_storage_node_name>:
- classes:
-#}
- - cluster.${_param:cluster_name}.cicd.control.leader
-{{- set_param('keepalived_vip_interface', 'br_ctl') }}
-{{- register_metaparam('keepalived_vip_priority', 'cicd_control') }}
-{{- register_metaparam('keepalived_vip_virtual_router_id', 'cicd_control') }}
-{{- register_metaparam('cicd_database_id', 'cicd_control') }}
\ No newline at end of file
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/cicd_control_manager" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/cicd_control_manager"
deleted file mode 100644
index 9028ddd..0000000
--- "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/cicd_control_manager"
+++ /dev/null
@@ -1,13 +0,0 @@
-{#-
-parameters:
- reclass:
- storage:
- node:
- <reclass_storage_node_name>:
- classes:
-#}
- - cluster.${_param:cluster_name}.cicd.control.manager
-{{- set_param('keepalived_vip_interface', 'br_ctl') }}
-{{- register_metaparam('keepalived_vip_priority', 'cicd_control') }}
-{{- register_metaparam('keepalived_vip_virtual_router_id', 'cicd_control') }}
-{{- register_metaparam('cicd_database_id', 'cicd_control') }}
\ No newline at end of file
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/features_designate" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/features_designate"
deleted file mode 100644
index e295f2b..0000000
--- "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/features_designate"
+++ /dev/null
@@ -1,9 +0,0 @@
-{#-
-parameters:
- reclass:
- storage:
- node:
- <reclass_storage_node_name>:
- classes:
-#}
- - environment.{{ cookiecutter._env_name }}.features.designate.system
\ No newline at end of file
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/features_designate_database" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/features_designate_database"
deleted file mode 100644
index e541620..0000000
--- "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/features_designate_database"
+++ /dev/null
@@ -1,9 +0,0 @@
-{#-
-parameters:
- reclass:
- storage:
- node:
- <reclass_storage_node_name>:
- classes:
-#}
- - environment.{{ cookiecutter._env_name }}.features.designate.database
\ No newline at end of file
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/features_designate_keystone" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/features_designate_keystone"
deleted file mode 100644
index 3fdc6e7..0000000
--- "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/features_designate_keystone"
+++ /dev/null
@@ -1,9 +0,0 @@
-{#-
-parameters:
- reclass:
- storage:
- node:
- <reclass_storage_node_name>:
- classes:
-#}
- - environment.{{ cookiecutter._env_name }}.features.designate.keystone
\ No newline at end of file
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/infra_config" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/infra_config"
deleted file mode 100644
index 0bddd76..0000000
--- "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/infra_config"
+++ /dev/null
@@ -1,11 +0,0 @@
-{#-
-parameters:
- reclass:
- storage:
- node:
- <reclass_storage_node_name>:
- classes:
-#}
- - cluster.${_param:cluster_name}.infra.config
- - environment.{{ cookiecutter._env_name }}.reclass_datasource_local
- - environment.{{ cookiecutter._env_name }}
\ No newline at end of file
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/infra_idm" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/infra_idm"
deleted file mode 100644
index b9842d1..0000000
--- "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/infra_idm"
+++ /dev/null
@@ -1,9 +0,0 @@
-{#-
-parameters:
- reclass:
- storage:
- node:
- <reclass_storage_node_name>:
- classes:
-#}
- - cluster.${_param:cluster_name}.infra.idm
\ No newline at end of file
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/infra_kvm" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/infra_kvm"
deleted file mode 100644
index d0f8666..0000000
--- "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/infra_kvm"
+++ /dev/null
@@ -1,12 +0,0 @@
-{#-
-parameters:
- reclass:
- storage:
- node:
- <reclass_storage_node_name>:
- classes:
-#}
- - cluster.${_param:cluster_name}.infra.kvm
-{{- set_param('keepalived_vip_interface', 'br_ctl') }}
-{{- register_metaparam('keepalived_vip_priority', 'infra_kvm') }}
-{{- register_metaparam('keepalived_vip_virtual_router_id', 'infra_kvm') }}
\ No newline at end of file
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/infra_maas" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/infra_maas"
deleted file mode 100644
index 7c925d2..0000000
--- "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/infra_maas"
+++ /dev/null
@@ -1,9 +0,0 @@
-{#-
-parameters:
- reclass:
- storage:
- node:
- <reclass_storage_node_name>:
- classes:
-#}
- - cluster.${_param:cluster_name}.infra.maas
\ No newline at end of file
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/infra_proxy" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/infra_proxy"
deleted file mode 100644
index 1bd8693..0000000
--- "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/infra_proxy"
+++ /dev/null
@@ -1,13 +0,0 @@
-{#-
-parameters:
- reclass:
- storage:
- node:
- <reclass_storage_node_name>:
- classes:
-#}
- - cluster.${_param:cluster_name}.openstack.proxy
- - cluster.${_param:cluster_name}.stacklight.proxy
-{{- set_param('keepalived_vip_interface', 'br_ctl') }}
-{{- register_metaparam('keepalived_vip_priority', 'infra_proxy') }}
-{{- register_metaparam('keepalived_vip_virtual_router_id', 'infra_proxy') }}
\ No newline at end of file
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/infra_rsyslog" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/infra_rsyslog"
deleted file mode 100644
index 3fcffa6..0000000
--- "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/infra_rsyslog"
+++ /dev/null
@@ -1,9 +0,0 @@
-{#-
-parameters:
- reclass:
- storage:
- node:
- <reclass_storage_node_name>:
- classes:
-#}
- - cluster.${_param:cluster_name}.infra.rsyslog
\ No newline at end of file
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/kubernetes_control" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/kubernetes_control"
deleted file mode 100644
index 0af2378..0000000
--- "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/kubernetes_control"
+++ /dev/null
@@ -1,12 +0,0 @@
-{#-
-parameters:
- reclass:
- storage:
- node:
- <reclass_storage_node_name>:
- classes:
-#}
- - cluster.${_param:cluster_name}.kubernetes.control
-{{- set_param('keepalived_vip_interface', 'ens3') }}
-{{- register_metaparam('keepalived_vip_priority', 'kubernetes_control') }}
-{{- register_metaparam('keepalived_vip_virtual_router_id', 'kubernetes_control') }}
\ No newline at end of file
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/linux_system_codename_centos" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/linux_system_codename_centos"
deleted file mode 100644
index 2f199df..0000000
--- "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/linux_system_codename_centos"
+++ /dev/null
@@ -1 +0,0 @@
-{{- set_param('linux_system_codename', 'centos') }}
\ No newline at end of file
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/linux_system_codename_trusty" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/linux_system_codename_trusty"
deleted file mode 100644
index 997e3f5..0000000
--- "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/linux_system_codename_trusty"
+++ /dev/null
@@ -1 +0,0 @@
-{{- set_param('linux_system_codename', 'trusty') }}
\ No newline at end of file
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/linux_system_codename_xenial" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/linux_system_codename_xenial"
deleted file mode 100644
index a473f56..0000000
--- "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/linux_system_codename_xenial"
+++ /dev/null
@@ -1 +0,0 @@
-{{- set_param('linux_system_codename', 'xenial') }}
\ No newline at end of file
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/monitoring_service" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/monitoring_service"
deleted file mode 100644
index ff7de21..0000000
--- "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/monitoring_service"
+++ /dev/null
@@ -1,9 +0,0 @@
-{#-
-parameters:
- reclass:
- storage:
- node:
- <reclass_storage_node_name>:
- classes:
-#}
- - cluster.${_param:cluster_name}.monitoring.server
\ No newline at end of file
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/opencontrail_analytics" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/opencontrail_analytics"
deleted file mode 100644
index c8a8921..0000000
--- "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/opencontrail_analytics"
+++ /dev/null
@@ -1,13 +0,0 @@
-{#-
-parameters:
- reclass:
- storage:
- node:
- <reclass_storage_node_name>:
- classes:
-#}
- - cluster.${_param:cluster_name}.opencontrail.analytics
-{{- set_param('keepalived_vip_interface', 'br_ctl') }}
-{{- register_metaparam('opencontrail_database_id', 'opencontrail_analytics') }}
-{{- register_metaparam('keepalived_vip_priority', 'opencontrail_analytics') }}
-{{- register_metaparam('keepalived_vip_virtual_router_id', 'opencontrail_analytics') }}
\ No newline at end of file
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/opencontrail_control" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/opencontrail_control"
deleted file mode 100644
index e74a9cf..0000000
--- "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/opencontrail_control"
+++ /dev/null
@@ -1,13 +0,0 @@
-{#-
-parameters:
- reclass:
- storage:
- node:
- <reclass_storage_node_name>:
- classes:
-#}
- - cluster.${_param:cluster_name}.opencontrail.control
-{{- set_param('keepalived_vip_interface', 'br_ctl') }}
-{{- register_metaparam('opencontrail_database_id', 'opencontrail_control') }}
-{{- register_metaparam('keepalived_vip_priority', 'opencontrail_control') }}
-{{- register_metaparam('keepalived_vip_virtual_router_id', 'opencontrail_control') }}
\ No newline at end of file
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/opencontrail_gateway" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/opencontrail_gateway"
deleted file mode 100644
index a75b537..0000000
--- "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/opencontrail_gateway"
+++ /dev/null
@@ -1,9 +0,0 @@
-{#-
-parameters:
- reclass:
- storage:
- node:
- <reclass_storage_node_name>:
- classes:
-#}
- - cluster.${_param:cluster_name}.opencontrail.gateway
\ No newline at end of file
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/opencontrail_tor" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/opencontrail_tor"
deleted file mode 100644
index d42a19a..0000000
--- "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/opencontrail_tor"
+++ /dev/null
@@ -1,12 +0,0 @@
-{#-
-parameters:
- reclass:
- storage:
- node:
- <reclass_storage_node_name>:
- classes:
-#}
- - cluster.${_param:cluster_name}.opencontrail.tor
-{{- set_param('keepalived_vip_interface', 'br_ctl') }}
-{{- register_metaparam('keepalived_vip_priority', 'opencontrail_tor') }}
-{{- register_metaparam('keepalived_vip_virtual_router_id', 'opencontrail_tor') }}
\ No newline at end of file
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_baremetal" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_baremetal"
deleted file mode 100644
index f6739e4..0000000
--- "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_baremetal"
+++ /dev/null
@@ -1,12 +0,0 @@
-{#-
-parameters:
- reclass:
- storage:
- node:
- <reclass_storage_node_name>:
- classes:
-#}
- - cluster.${_param:cluster_name}.openstack.baremetal
-{{- set_param('keepalived_vip_interface', 'br_ctl') }}
-{{- register_metaparam('keepalived_vip_priority', 'openstack_baremetal') }}
-{{- register_metaparam('keepalived_vip_virtual_router_id', 'openstack_baremetal') }}
\ No newline at end of file
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_benchmark" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_benchmark"
deleted file mode 100644
index 93b92aa..0000000
--- "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_benchmark"
+++ /dev/null
@@ -1,9 +0,0 @@
-{#-
-parameters:
- reclass:
- storage:
- node:
- <reclass_storage_node_name>:
- classes:
-#}
- - cluster.${_param:cluster_name}.openstack.benchmark
\ No newline at end of file
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_billing" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_billing"
deleted file mode 100644
index a78fae9..0000000
--- "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_billing"
+++ /dev/null
@@ -1,9 +0,0 @@
-{#-
-parameters:
- reclass:
- storage:
- node:
- <reclass_storage_node_name>:
- classes:
-#}
- - cluster.${_param:cluster_name}.openstack.billing
\ No newline at end of file
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_catalog" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_catalog"
deleted file mode 100644
index e4612af..0000000
--- "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_catalog"
+++ /dev/null
@@ -1,12 +0,0 @@
-{#-
-parameters:
- reclass:
- storage:
- node:
- <reclass_storage_node_name>:
- classes:
-#}
- - cluster.${_param:cluster_name}.openstack.catalog
-{{- set_param('keepalived_vip_interface', 'br_ctl') }}
-{{- register_metaparam('keepalived_vip_priority', 'openstack_catalog') }}
-{{- register_metaparam('keepalived_vip_virtual_router_id', 'openstack_catalog') }}
\ No newline at end of file
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_compute" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_compute"
deleted file mode 100644
index 6665b76..0000000
--- "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_compute"
+++ /dev/null
@@ -1,9 +0,0 @@
-{#-
-parameters:
- reclass:
- storage:
- node:
- <reclass_storage_node_name>:
- classes:
-#}
- - cluster.${_param:cluster_name}.openstack.compute
\ No newline at end of file
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_compute_dpdk" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_compute_dpdk"
deleted file mode 100644
index 1585758..0000000
--- "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_compute_dpdk"
+++ /dev/null
@@ -1,9 +0,0 @@
-{#-
-parameters:
- reclass:
- storage:
- node:
- <reclass_storage_node_name>:
- classes:
-#}
- - cluster.${_param:cluster_name}.openstack.compute.dpdk
\ No newline at end of file
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_compute_sriov" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_compute_sriov"
deleted file mode 100644
index 2e512c4..0000000
--- "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_compute_sriov"
+++ /dev/null
@@ -1,9 +0,0 @@
-{#-
-parameters:
- reclass:
- storage:
- node:
- <reclass_storage_node_name>:
- classes:
-#}
- - cluster.${_param:cluster_name}.openstack.compute.sriov
\ No newline at end of file
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_control" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_control"
deleted file mode 100644
index 00a8c57..0000000
--- "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_control"
+++ /dev/null
@@ -1,12 +0,0 @@
-{#-
-parameters:
- reclass:
- storage:
- node:
- <reclass_storage_node_name>:
- classes:
-#}
- - cluster.${_param:cluster_name}.openstack.control
-{{- set_param('keepalived_vip_interface', 'br_ctl') }}
-{{- register_metaparam('keepalived_vip_priority', 'openstack_control') }}
-{{- register_metaparam('keepalived_vip_virtual_router_id', 'openstack_control') }}
\ No newline at end of file
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_control_leader" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_control_leader"
deleted file mode 100644
index 3a49d2a..0000000
--- "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_control_leader"
+++ /dev/null
@@ -1,10 +0,0 @@
-{#-
-parameters:
- reclass:
- storage:
- node:
- <reclass_storage_node_name>:
- classes:
-#}
- {%- include ("{# roles #}/" + 'openstack_control') %}
- - cluster.${_param:cluster_name}.openstack.control_init
\ No newline at end of file
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_control_upgrade" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_control_upgrade"
deleted file mode 100644
index 24bbddc..0000000
--- "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_control_upgrade"
+++ /dev/null
@@ -1,9 +0,0 @@
-{#-
-parameters:
- reclass:
- storage:
- node:
- <reclass_storage_node_name>:
- classes:
-#}
- - cluster.${_param:cluster_name}.openstack.upgrade
\ No newline at end of file
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_dashboard" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_dashboard"
deleted file mode 100644
index 65751e9..0000000
--- "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_dashboard"
+++ /dev/null
@@ -1,9 +0,0 @@
-{#-
-parameters:
- reclass:
- storage:
- node:
- <reclass_storage_node_name>:
- classes:
-#}
- - cluster.${_param:cluster_name}.openstack.dashboard
\ No newline at end of file
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_database" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_database"
deleted file mode 100644
index 3c3c87f..0000000
--- "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_database"
+++ /dev/null
@@ -1,14 +0,0 @@
-{#-
-parameters:
- reclass:
- storage:
- node:
- <reclass_storage_node_name>:
- classes:
-#}
- - cluster.${_param:cluster_name}.openstack.database
- - service.galera.slave.cluster
-{{- set_param('keepalived_vip_interface', 'br_ctl') }}
-{{- register_metaparam('mysql_cluster_role', 'openstack_database') }}
-{{- register_metaparam('keepalived_vip_priority', 'openstack_database') }}
-{{- register_metaparam('keepalived_vip_virtual_router_id', 'openstack_database') }}
\ No newline at end of file
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_database_leader" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_database_leader"
deleted file mode 100644
index c8e2112..0000000
--- "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_database_leader"
+++ /dev/null
@@ -1,11 +0,0 @@
-{#-
-parameters:
- reclass:
- storage:
- node:
- <reclass_storage_node_name>:
- classes:
-#}
- {%- include ("{# roles #}/" + 'openstack_database') %}
- - cluster.${_param:cluster_name}.openstack.database_init
- - service.galera.master.cluster
\ No newline at end of file
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_dns" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_dns"
deleted file mode 100644
index 07ee063..0000000
--- "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_dns"
+++ /dev/null
@@ -1,12 +0,0 @@
-{#-
-parameters:
- reclass:
- storage:
- node:
- <reclass_storage_node_name>:
- classes:
-#}
- - cluster.${_param:cluster_name}.openstack.dns
-{{- set_param('keepalived_vip_interface', 'br_ctl') }}
-{{- register_metaparam('keepalived_vip_priority', 'openstack_dns') }}
-{{- register_metaparam('keepalived_vip_virtual_router_id', 'openstack_dns') }}
\ No newline at end of file
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_gateway" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_gateway"
deleted file mode 100644
index 9521c1d..0000000
--- "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_gateway"
+++ /dev/null
@@ -1,9 +0,0 @@
-{#-
-parameters:
- reclass:
- storage:
- node:
- <reclass_storage_node_name>:
- classes:
-#}
- - cluster.${_param:cluster_name}.openstack.gateway
\ No newline at end of file
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_gateway_octavia" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_gateway_octavia"
deleted file mode 100644
index 35edfae..0000000
--- "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_gateway_octavia"
+++ /dev/null
@@ -1,10 +0,0 @@
-{#-
-parameters:
- reclass:
- storage:
- node:
- <reclass_storage_node_name>:
- classes:
-#}
- - cluster.${_param:cluster_name}.openstack.gateway
- - cluster.${_param:cluster_name}.openstack.octavia_manager
\ No newline at end of file
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_message_queue" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_message_queue"
deleted file mode 100644
index d59d3f1..0000000
--- "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_message_queue"
+++ /dev/null
@@ -1,13 +0,0 @@
-{#-
-parameters:
- reclass:
- storage:
- node:
- <reclass_storage_node_name>:
- classes:
-#}
- - cluster.${_param:cluster_name}.openstack.message_queue
-{{- set_param('keepalived_vip_interface', 'br_ctl') }}
-{{- register_metaparam('rabbitmq_cluster_role', 'openstack_message_queue') }}
-{{- register_metaparam('keepalived_vip_priority', 'openstack_message_queue') }}
-{{- register_metaparam('keepalived_vip_virtual_router_id', 'openstack_message_queue') }}
\ No newline at end of file
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_proxy" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_proxy"
deleted file mode 100644
index f342287..0000000
--- "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_proxy"
+++ /dev/null
@@ -1,12 +0,0 @@
-{#-
-parameters:
- reclass:
- storage:
- node:
- <reclass_storage_node_name>:
- classes:
-#}
- - cluster.${_param:cluster_name}.openstack.proxy
-{{- set_param('keepalived_vip_interface', 'br_ctl') }}
-{{- register_metaparam('keepalived_vip_priority', 'openstack_proxy') }}
-{{- register_metaparam('keepalived_vip_virtual_router_id', 'openstack_proxy') }}
\ No newline at end of file
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_telemetry" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_telemetry"
deleted file mode 100644
index 02f8e2b..0000000
--- "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_telemetry"
+++ /dev/null
@@ -1,12 +0,0 @@
-{#-
-parameters:
- reclass:
- storage:
- node:
- <reclass_storage_node_name>:
- classes:
-#}
- - cluster.${_param:cluster_name}.openstack.telemetry
-{{- set_param('keepalived_vip_interface', 'br_ctl') }}
-{{- register_metaparam('keepalived_vip_priority', 'openstack_telemetry') }}
-{{- register_metaparam('keepalived_vip_virtual_router_id', 'openstack_telemetry') }}
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/stacklight_log" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/stacklight_log"
deleted file mode 100644
index 293effe..0000000
--- "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/stacklight_log"
+++ /dev/null
@@ -1,14 +0,0 @@
-{#- For StackLight v1 and v2 #}
-{#-
-parameters:
- reclass:
- storage:
- node:
- <reclass_storage_node_name>:
- classes:
-#}
- - cluster.${_param:cluster_name}.stacklight.log
- - cluster.${_param:cluster_name}.stacklight.log_curator
-{{- set_param('keepalived_vip_interface', 'br_ctl') }}
-{{- register_metaparam('keepalived_vip_priority', 'stacklight_log') }}
-{{- register_metaparam('keepalived_vip_virtual_router_id', 'stacklight_log') }}
\ No newline at end of file
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/stacklight_log_leader_v1" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/stacklight_log_leader_v1"
deleted file mode 100644
index da974b3..0000000
--- "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/stacklight_log_leader_v1"
+++ /dev/null
@@ -1,11 +0,0 @@
-{#- For StackLight v1 only #}
-{#-
-parameters:
- reclass:
- storage:
- node:
- <reclass_storage_node_name>:
- classes:
-#}
- {%- include ("{# roles #}/" + 'stacklight_log') %}
- - system.elasticsearch.client.single
\ No newline at end of file
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/stacklight_log_leader_v2" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/stacklight_log_leader_v2"
deleted file mode 100644
index efa482e..0000000
--- "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/stacklight_log_leader_v2"
+++ /dev/null
@@ -1,12 +0,0 @@
-{#- For StackLight v2 only #}
-{#-
-parameters:
- reclass:
- storage:
- node:
- <reclass_storage_node_name>:
- classes:
-#}
- {%- include ("{# roles #}/" + 'stacklight_log') %}
- - system.elasticsearch.client.single
- - system.kibana.client.single
\ No newline at end of file
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/stacklight_monitor" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/stacklight_monitor"
deleted file mode 100644
index 59361cd..0000000
--- "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/stacklight_monitor"
+++ /dev/null
@@ -1,14 +0,0 @@
-{#- For StackLight v1 only #}
-{#-
-parameters:
- reclass:
- storage:
- node:
- <reclass_storage_node_name>:
- classes:
-#}
- - cluster.${_param:cluster_name}.stacklight.monitor
-{{- set_param('keepalived_vip_interface', 'br_ctl') }}
-{{- register_metaparam('redis_cluster_role', 'stacklight_monitor') }}
-{{- register_metaparam('keepalived_vip_priority', 'stacklight_monitor') }}
-{{- register_metaparam('keepalived_vip_virtual_router_id', 'stacklight_monitor') }}
\ No newline at end of file
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/stacklight_monitor_leader" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/stacklight_monitor_leader"
deleted file mode 100644
index 8f7853d..0000000
--- "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/stacklight_monitor_leader"
+++ /dev/null
@@ -1,12 +0,0 @@
-{#- For StackLight v1 only #}
-{#-
-parameters:
- reclass:
- storage:
- node:
- <reclass_storage_node_name>:
- classes:
-#}
- {%- include ("{# roles #}/" + 'stacklight_monitor') %}
- - system.grafana.client.single
- - system.kibana.client.single
\ No newline at end of file
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/stacklight_telemetry" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/stacklight_telemetry"
deleted file mode 100644
index b5b8344..0000000
--- "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/stacklight_telemetry"
+++ /dev/null
@@ -1,15 +0,0 @@
-{#- For StackLight v1 and v2 #}
-{#-
-parameters:
- reclass:
- storage:
- node:
- <reclass_storage_node_name>:
- classes:
-#}
- - cluster.${_param:cluster_name}.stacklight.telemetry
- - service.galera.slave.cluster
-{{- set_param('keepalived_vip_interface', 'br_ctl') }}
-{{- register_metaparam('keepalived_vip_priority', 'stacklight_telemetry') }}
-{{- register_metaparam('keepalived_vip_virtual_router_id', 'stacklight_telemetry') }}
-{{- register_metaparam('mysql_cluster_role', 'stacklight_telemetry') }}
\ No newline at end of file
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/stacklight_telemetry_leader" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/stacklight_telemetry_leader"
deleted file mode 100644
index c439777..0000000
--- "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/stacklight_telemetry_leader"
+++ /dev/null
@@ -1,15 +0,0 @@
-{#- For StackLight v1 and v2 #}
-{#-
-parameters:
- reclass:
- storage:
- node:
- <reclass_storage_node_name>:
- classes:
-#}
- - cluster.${_param:cluster_name}.stacklight.telemetry
- - service.galera.master.cluster
-{{- set_param('keepalived_vip_interface', 'br_ctl') }}
-{{- register_metaparam('keepalived_vip_priority', 'stacklight_telemetry') }}
-{{- register_metaparam('keepalived_vip_virtual_router_id', 'stacklight_telemetry') }}
-{{- register_metaparam('mysql_cluster_role', 'stacklight_telemetry') }}
\ No newline at end of file
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/stacklightv2_server" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/stacklightv2_server"
deleted file mode 100644
index c6032d3..0000000
--- "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/stacklightv2_server"
+++ /dev/null
@@ -1,14 +0,0 @@
-{#-
-parameters:
- reclass:
- storage:
- node:
- <reclass_storage_node_name>:
- classes:
-#}
- - system.docker.swarm.manager
- - cluster.${_param:cluster_name}.stacklight.server
-{{- set_param('keepalived_vip_interface', 'br_ctl') }}
-{{- register_metaparam('redis_cluster_role', 'stacklightv2_server') }}
-{{- register_metaparam('keepalived_vip_priority', 'stacklightv2_server') }}
-{{- register_metaparam('keepalived_vip_virtual_router_id', 'stacklightv2_server') }}
\ No newline at end of file
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/stacklightv2_server_leader" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/stacklightv2_server_leader"
deleted file mode 100644
index a5acec2..0000000
--- "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/stacklightv2_server_leader"
+++ /dev/null
@@ -1,15 +0,0 @@
-{#-
-parameters:
- reclass:
- storage:
- node:
- <reclass_storage_node_name>:
- classes:
-#}
- - system.docker.swarm.master
- - cluster.${_param:cluster_name}.stacklight.server
- - cluster.${_param:cluster_name}.stacklight.client
-{{- set_param('keepalived_vip_interface', 'br_ctl') }}
-{{- register_metaparam('redis_cluster_role', 'stacklightv2_server') }}
-{{- register_metaparam('keepalived_vip_priority', 'stacklightv2_server') }}
-{{- register_metaparam('keepalived_vip_virtual_router_id', 'stacklightv2_server') }}
\ No newline at end of file
diff --git "a/tcp_tests/environment/environment_template/\173\173 cookiecutter._env_name \175\175/features/designate/database.yml" "b/tcp_tests/environment/environment_template/\173\173 cookiecutter._env_name \175\175/features/designate/database.yml"
deleted file mode 100644
index e1e12d7..0000000
--- "a/tcp_tests/environment/environment_template/\173\173 cookiecutter._env_name \175\175/features/designate/database.yml"
+++ /dev/null
@@ -1,3 +0,0 @@
-classes:
-- system.galera.server.database.designate
-- environment.{{ cookiecutter._env_name }}.features.designate
diff --git "a/tcp_tests/environment/environment_template/\173\173 cookiecutter._env_name \175\175/features/designate/init.yml" "b/tcp_tests/environment/environment_template/\173\173 cookiecutter._env_name \175\175/features/designate/init.yml"
deleted file mode 100644
index 33f4259..0000000
--- "a/tcp_tests/environment/environment_template/\173\173 cookiecutter._env_name \175\175/features/designate/init.yml"
+++ /dev/null
@@ -1,38 +0,0 @@
-{%- macro password(size=16) -%}
-{% for index in range(size) %}{{ 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'|random }}{% endfor %}
-{%- endmacro -%}
-parameters:
- _param:
- # Put overrides for any environment-specific variables here
- powerdns_webserver_password: {{ password(12) }}
- powerdns_webserver_port: 8081
- designate_domain_id: 5186883b-91fb-4891-bd49-e6769234a8fc
- designate_pdns_api_key: {{ password(12) }}
- designate_pdns_api_endpoint: "http://${_param:openstack_control_node01_address}:${_param:powerdns_webserver_port}"
- designate_pool_ns_records:
- - hostname: 'ns1.example.org.'
- priority: 10
- designate_pool_nameservers:
- - host: ${_param:openstack_control_node01_address}
- port: 53
- - host: ${_param:openstack_control_node02_address}
- port: 53
- - host: ${_param:openstack_control_node03_address}
- port: 53
- designate_pool_target_type: pdns4
- designate_pool_target_masters:
- - host: ${_param:openstack_control_node01_address}
- port: 5354
- - host: ${_param:openstack_control_node02_address}
- port: 5354
- - host: ${_param:openstack_control_node03_address}
- port: 5354
- designate_pool_target_options:
- host: ${_param:openstack_control_node01_address}
- port: 53
- api_token: ${_param:designate_pdns_api_key}
- api_endpoint: ${_param:designate_pdns_api_endpoint}
- designate_version: ${_param:openstack_version}
- designate_service_host: ${_param:openstack_control_address}
- mysql_designate_password: {{ password(16) }}
- keystone_designate_password: {{ password(16) }}
diff --git "a/tcp_tests/environment/environment_template/\173\173 cookiecutter._env_name \175\175/features/designate/keystone.yml" "b/tcp_tests/environment/environment_template/\173\173 cookiecutter._env_name \175\175/features/designate/keystone.yml"
deleted file mode 100644
index 4726bf6..0000000
--- "a/tcp_tests/environment/environment_template/\173\173 cookiecutter._env_name \175\175/features/designate/keystone.yml"
+++ /dev/null
@@ -1,3 +0,0 @@
-classes:
-- system.keystone.client.service.designate
-- environment.{{ cookiecutter._env_name }}.features.designate
diff --git "a/tcp_tests/environment/environment_template/\173\173 cookiecutter._env_name \175\175/features/designate/system.yml" "b/tcp_tests/environment/environment_template/\173\173 cookiecutter._env_name \175\175/features/designate/system.yml"
deleted file mode 100644
index 3fa26a6..0000000
--- "a/tcp_tests/environment/environment_template/\173\173 cookiecutter._env_name \175\175/features/designate/system.yml"
+++ /dev/null
@@ -1,38 +0,0 @@
-classes:
-- system.designate.server.cluster
-- system.designate.server.backend.pdns
-- environment.{{ cookiecutter._env_name }}.features.designate
-parameters:
- powerdns:
- server:
- axfr_ips:
- - ${_param:openstack_control_node01_address}
- - ${_param:openstack_control_node02_address}
- - ${_param:openstack_control_node03_address}
- - 127.0.0.1
- designate:
- server:
- pools:
- default:
- description: 'test pool'
- targets:
- default:
- description: 'test target1'
- default1:
- type: ${_param:designate_pool_target_type}
- description: 'test target2'
- masters: ${_param:designate_pool_target_masters}
- options:
- host: ${_param:openstack_control_node02_address}
- port: 53
- api_endpoint: "http://${_param:openstack_control_node02_address}:${_param:powerdns_webserver_port}"
- api_token: ${_param:designate_pdns_api_key}
- default2:
- type: ${_param:designate_pool_target_type}
- description: 'test target3'
- masters: ${_param:designate_pool_target_masters}
- options:
- host: ${_param:openstack_control_node03_address}
- port: 53
- api_endpoint: "http://${_param:openstack_control_node03_address}:${_param:powerdns_webserver_port}"
- api_token: ${_param:designate_pdns_api_key}
diff --git "a/tcp_tests/environment/environment_template/\173\173 cookiecutter._env_name \175\175/init.yml" "b/tcp_tests/environment/environment_template/\173\173 cookiecutter._env_name \175\175/init.yml"
deleted file mode 100644
index 89db66f..0000000
--- "a/tcp_tests/environment/environment_template/\173\173 cookiecutter._env_name \175\175/init.yml"
+++ /dev/null
@@ -1,55 +0,0 @@
-{# 'infra_config_classes' list object is dynamically generated from 'roles' to add on the cfg node #}
-{%- set infra_config_classes = [] %}
-{# 'global_metadata' is a global collection of objects shared between nodes #}
-{%- set global_metadata = {} %}
-{%- set common_roles = ['_linux_network_interface', '_metadata_process', '_overrides'] %}
-
-parameters:
- _param:
- _esc: $
- reclass:
- storage:
- node:
- {%- for inventory_node_name, node in nodes.items()|sort %}
- {%- set node_name = inventory_node_name.split('.')[0] %}
- {{ node['reclass_storage_name'] }}:
-
- {#- 'local_metadata' is to collect the data from different roles in a single place inside the node #}
- {%- set local_metadata = {} %}
- {#- 'params' dict object is dynamically generated from 'roles' to add on the node #}
- {%- set params = {} %}
-
- {%- macro register_metaparam(param_name, role_name) %}
- {#- Add the <role_name> into the list attached to <param_name> #}
- {#- to control that the <param_name> is not spreaded across incompatible roles #}
- {%- set _ = local_metadata.update({param_name: local_metadata.get(param_name, []) + [role_name]}) %}
- {%- endmacro %}
-
- {%- macro set_param(param_name, param_value) %}
- {#- Set a parameter for '_param' dict for the node #}
- {%- set _ = params.update({param_name: param_value}) %}
- {%- endmacro %}
-
- classes:
- {#- Default roles are added to each node #}
- {#- 'overrides' must be the very last role for each node #}
- {%- for role in node.get('roles', []) + common_roles %}
- {%- include ("{# roles #}/" + role) %}
- {%- endfor %}
-
- {%- if params %}
- params:
- {%- for param_name, param in params.items() %}
- {{ param_name }}: {{ param }}
- {%- endfor %}
- {%- endif %}
- name: {{ node_name }}
- domain: ${_param:cluster_domain}
- {%- endfor %}
-
-classes:
-# Enable root and *ALL* users access
-- system.openssh.server.team.all
-{%- for infra_config_class in infra_config_classes %}
-- {{ infra_config_class }}
-{%- endfor %}
diff --git "a/tcp_tests/environment/environment_template/\173\173 cookiecutter._env_name \175\175/linux_network_interface.yml" "b/tcp_tests/environment/environment_template/\173\173 cookiecutter._env_name \175\175/linux_network_interface.yml"
deleted file mode 100644
index 8474d17..0000000
--- "a/tcp_tests/environment/environment_template/\173\173 cookiecutter._env_name \175\175/linux_network_interface.yml"
+++ /dev/null
@@ -1,6 +0,0 @@
-parameters:
- # _param:
- # Put overrides for any environment-specific variables here
- linux:
- network:
- interface: ${_param:linux_network_interfaces}
diff --git "a/tcp_tests/environment/environment_template/\173\173 cookiecutter._env_name \175\175/overrides.yml" "b/tcp_tests/environment/environment_template/\173\173 cookiecutter._env_name \175\175/overrides.yml"
deleted file mode 100644
index f35b469..0000000
--- "a/tcp_tests/environment/environment_template/\173\173 cookiecutter._env_name \175\175/overrides.yml"
+++ /dev/null
@@ -1,20 +0,0 @@
-# Do not delete! This file is for global parameters overrides over all the nodes in the environment.
-classes:
-# Enable root and *ALL* users access
-- system.openssh.server.team.all
-
-# Example: salt-call reclass.cluster_meta_set foo bar /path/to/overrides.yml
-#parameters:
-# _param:
-# foo: bar
-parameters:
- _param:
- apt_mk_version: stable # This will be replaced with REPOSITORY_SUITE env variable
- openssh:
- server:
- password_auth: true
- linux:
- system:
- user:
- root:
- password: $6$oV7iKxfx$7DOZUfLw30d/W.pzUf97F0z1JhxgNmFAiKT1HhIodjkWb0M/.dTYAc3uxDCScR3lvCALjRe4/xWtiDyywf8wi1
diff --git "a/tcp_tests/environment/environment_template/\173\173 cookiecutter._env_name \175\175/reclass_datasource_local.yml" "b/tcp_tests/environment/environment_template/\173\173 cookiecutter._env_name \175\175/reclass_datasource_local.yml"
deleted file mode 100644
index 1c334f7..0000000
--- "a/tcp_tests/environment/environment_template/\173\173 cookiecutter._env_name \175\175/reclass_datasource_local.yml"
+++ /dev/null
@@ -1,7 +0,0 @@
-# Switch to the local metadata storage instead of git
-parameters:
-# local storage
- reclass:
- storage:
- data_source:
- engine: local
diff --git "a/tcp_tests/environment/environment_template/\173\173 cookiecutter._env_name \175\175/\173\043 interfaces \043\175" "b/tcp_tests/environment/environment_template/\173\173 cookiecutter._env_name \175\175/\173\043 interfaces \043\175"
deleted file mode 120000
index c95e61c..0000000
--- "a/tcp_tests/environment/environment_template/\173\173 cookiecutter._env_name \175\175/\173\043 interfaces \043\175"
+++ /dev/null
@@ -1 +0,0 @@
-../{# interfaces #}
\ No newline at end of file
diff --git "a/tcp_tests/environment/environment_template/\173\173 cookiecutter._env_name \175\175/\173\043 roles \043\175" "b/tcp_tests/environment/environment_template/\173\173 cookiecutter._env_name \175\175/\173\043 roles \043\175"
deleted file mode 120000
index c52ceb2..0000000
--- "a/tcp_tests/environment/environment_template/\173\173 cookiecutter._env_name \175\175/\173\043 roles \043\175"
+++ /dev/null
@@ -1 +0,0 @@
-../{# roles #}
\ No newline at end of file
diff --git a/tcp_tests/fixtures/openstack_fixtures.py b/tcp_tests/fixtures/openstack_fixtures.py
index a704ee6..98e367c 100644
--- a/tcp_tests/fixtures/openstack_fixtures.py
+++ b/tcp_tests/fixtures/openstack_fixtures.py
@@ -22,17 +22,19 @@
@pytest.fixture(scope='function')
-def openstack_actions(config, underlay, salt_deployed):
+def openstack_actions(config, hardware, underlay, salt_deployed):
"""Fixture that provides various actions for OpenStack
:param config: fixture provides oslo.config
+    :param hardware: fixture provides hardware manager
:param underlay: fixture provides underlay manager
:param salt_deployed: fixture provides salt manager
:rtype: OpenstackManager
For use in tests or fixtures to deploy a custom OpenStack
"""
- return openstack_manager.OpenstackManager(config, underlay, salt_deployed)
+ return openstack_manager.OpenstackManager(config, underlay,
+ hardware, salt_deployed)
@pytest.mark.revert_snapshot(ext.SNAPSHOT.openstack_deployed)
diff --git a/tcp_tests/fixtures/underlay_fixtures.py b/tcp_tests/fixtures/underlay_fixtures.py
index e1420b5..a1476e3 100644
--- a/tcp_tests/fixtures/underlay_fixtures.py
+++ b/tcp_tests/fixtures/underlay_fixtures.py
@@ -196,3 +196,26 @@
underlay = underlay_ssh_manager.UnderlaySSHManager(config)
return underlay
+
+
+@pytest.fixture(scope='function', autouse=True)
+def grab_versions(request, underlay):
+ """Fixture for grab package versions at the end of test
+
+ Marks:
+        grab_versions(name=None) - collect package versions and logs if the
+            test has passed. If the name argument is provided, it is used to
+            name the artifact; otherwise the test function name is used.
+
+ """
+ grab_version = request.keywords.get('grab_versions', None)
+
+ def test_fin():
+ default_name = getattr(request.node.function, '_name',
+ request.node.function.__name__)
+ if hasattr(request.node, 'rep_call') and request.node.rep_call.passed \
+ and grab_version:
+ artifact_name = utils.extract_name_from_mark(grab_version) or \
+ "{}".format(default_name)
+ underlay.get_logs(artifact_name)
+ request.addfinalizer(test_fin)
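
A minimal usage sketch for the new autouse fixture, assuming the standard pytest marker mechanics used elsewhere in tcp_tests; the test name and body below are hypothetical:

    import pytest

    @pytest.mark.grab_versions(name="ctl_warm_restart")
    def test_example(underlay):
        # If this test passes, the autouse 'grab_versions' finalizer above
        # calls underlay.get_logs("ctl_warm_restart"); without the 'name'
        # argument it falls back to the test function name ("test_example").
        assert underlay is not None
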
diff --git a/tcp_tests/managers/envmanager_devops.py b/tcp_tests/managers/envmanager_devops.py
index c800cc2..8afb177 100644
--- a/tcp_tests/managers/envmanager_devops.py
+++ b/tcp_tests/managers/envmanager_devops.py
@@ -330,6 +330,39 @@
raise exceptions.EnvironmentIsNotSet()
self.__env.destroy()
+ def destroy_node(self, node_name):
+ """Destroy node"""
+ node = self.__env.get_node(name=node_name)
+ node.destroy()
+
+ def start_node(self, node_name):
+ """Start node"""
+ node = self.__env.get_node(name=node_name)
+ node.start()
+
+ def reboot_node(self, node_name):
+ """Reboot node"""
+ node = self.__env.get_node(name=node_name)
+ node.reboot()
+
+ def remove_node(self, node_name):
+ """Remove node"""
+ node = self.__env.get_node(name=node_name)
+ node.remove()
+
+ def wait_for_node_state(self, node_name, state, timeout):
+ node = self.__env.get_node(name=node_name)
+ if 'active' in state:
+ helpers.wait(lambda: node.is_active(),
+ timeout=timeout,
+ timeout_msg=('Node {0} failed '
+ 'to become active'.format(node)))
+ else:
+ helpers.wait(lambda: not node.is_active(),
+ timeout=timeout,
+                         timeout_msg=('Node {0} failed '
+                                      'to become inactive'.format(node)))
+
def has_snapshot(self, name):
return self.__env.has_snapshot(name)
@@ -353,6 +386,15 @@
LOG.debug('Trying to get nodes by role {0}'.format(node_role))
return self.__env.get_nodes(role=node_role)
+ def __get_nodes_by_name(self, node_name):
+        """Get nodes by given node name
+
+ :param node_name: string
+ :rtype: devops.models.Node
+ """
+        LOG.debug('Trying to get nodes by name {0}'.format(node_name))
+ return self.__env.get_nodes(name=node_name)
+
@property
def master_nodes(self):
"""Get all master nodes
diff --git a/tcp_tests/managers/openstack_manager.py b/tcp_tests/managers/openstack_manager.py
index 6454bdd..ebcc574 100644
--- a/tcp_tests/managers/openstack_manager.py
+++ b/tcp_tests/managers/openstack_manager.py
@@ -24,10 +24,12 @@
__config = None
__underlay = None
+ __hardware = None
- def __init__(self, config, underlay, salt):
+ def __init__(self, config, underlay, hardware, salt):
self.__config = config
self.__underlay = underlay
+ self.__hardware = hardware
self._salt = salt
super(OpenstackManager, self).__init__(
config=config, underlay=underlay)
@@ -88,3 +90,34 @@
file_name = result['stdout'][0].rstrip()
LOG.debug("Found files {0}".format(file_name))
r.download(destination=file_name, target=os.getcwd())
+
+ def get_node_name_by_subname(self, node_sub_name):
+ return [node_name for node_name
+ in self.__underlay.node_names()
+ if node_sub_name in node_name]
+
+ def warm_shutdown_openstack_nodes(self, node_sub_name, timeout=10 * 60):
+        """Gracefully shut down the nodes matching the given substring"""
+ node_names = self.get_node_name_by_subname(node_sub_name)
+ LOG.info('Shutting down nodes {}'.format(node_names))
+ for node in node_names:
+ LOG.debug('Shutdown node {0}'.format(node))
+ self.__underlay.check_call(cmd="shutdown +1", node_name=node)
+ for node in node_names:
+ LOG.info('Destroy node {}'.format(node))
+ self.__hardware.destroy_node(node)
+ self.__hardware.wait_for_node_state(
+ node, state='offline', timeout=timeout)
+
+ def warm_start_nodes(self, node_sub_name, timeout=10 * 60):
+ node_names = self.get_node_name_by_subname(node_sub_name)
+ LOG.info('Starting nodes {}'.format(node_names))
+ for node in node_names:
+ self.__hardware.start_node(node)
+ self.__hardware.wait_for_node_state(
+ node, state='active', timeout=timeout)
+
+ def warm_restart_nodes(self, node_names, timeout=10 * 60):
+ LOG.info('Reboot (warm restart) nodes {0}'.format(node_names))
+ self.warm_shutdown_openstack_nodes(node_names, timeout=timeout)
+ self.warm_start_nodes(node_names)
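
A hedged example of driving the warm-restart helpers from a test; the test name is hypothetical, and 'openstack_actions' is the fixture extended above to carry the hardware manager:

    import pytest

    @pytest.mark.grab_versions
    def test_warm_restart_ctl_nodes(openstack_actions):
        # Matches every node whose name contains 'ctl', schedules
        # 'shutdown +1' on them, destroys the domains, waits for them to go
        # offline, then starts them again and waits until they are active.
        openstack_actions.warm_restart_nodes('ctl')
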
diff --git a/tcp_tests/managers/underlay_ssh_manager.py b/tcp_tests/managers/underlay_ssh_manager.py
index 53f5aee..c9f2f4b 100644
--- a/tcp_tests/managers/underlay_ssh_manager.py
+++ b/tcp_tests/managers/underlay_ssh_manager.py
@@ -12,6 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+import os
import random
import StringIO
@@ -22,6 +23,7 @@
import yaml
from tcp_tests import logger
+from tcp_tests.helpers import ext
from tcp_tests.helpers import utils
LOG = logger.logger
@@ -390,3 +392,44 @@
}
template = utils.render_template(file_path, options=options)
return yaml.load(template)
+
+ def get_logs(self, artifact_name,
+ node_role=ext.UNDERLAY_NODE_ROLES.salt_master):
+ master_node = [ssh for ssh in self.config_ssh
+ if node_role in ssh['roles']][0]
+ cmd = ("dpkg -l | grep formula > "
+ "/var/log/{0}_packages.output".format(master_node['node_name']))
+
+ tar_cmd = ('tar --absolute-names'
+ ' --warning=no-file-changed '
+ '-czf {t} {d}'.format(
+ t='{0}_log.tar.gz'.format(artifact_name), d='/var/log'))
+ minion_nodes = [ssh for ssh in self.config_ssh
+ if node_role not in ssh['roles']]
+ for node in minion_nodes:
+ try:
+ with self.remote(host=node['host']) as r_node:
+ r_node.check_call(('tar '
+ '--absolute-names '
+ '--warning=no-file-changed '
+ '-czf {t} {d}'.format(
+ t='{0}.tar.gz'.format(node['node_name']),
+ d='/var/log')),
+ verbose=True, raise_on_err=False)
+            except Exception:
+                LOG.info("Cannot ssh to node {}".format(node))
+ with self.remote(master_node['node_name']) as r:
+ for node in minion_nodes:
+ packages_minion_cmd = ("salt '{0}*' cmd.run "
+ "'dpkg -l' > /var/log/"
+ "{0}_packages.output".format(
+ node['node_name']))
+ r.check_call(packages_minion_cmd)
+ r.check_call("rsync {0}:/root/*.tar.gz "
+ "/var/log/".format(node['node_name']),
+ verbose=True, raise_on_err=False)
+ r.check_call(cmd)
+
+ r.check_call(tar_cmd)
+ r.download(destination='{0}_log.tar.gz'.format(artifact_name),
+ target=os.getcwd())
diff --git a/tcp_tests/settings_oslo.py b/tcp_tests/settings_oslo.py
index 8a04c85..7a2d82d 100644
--- a/tcp_tests/settings_oslo.py
+++ b/tcp_tests/settings_oslo.py
@@ -279,7 +279,7 @@
ct.Cfg('kubernetes_virtlet_enabled', ct.Boolean(),
help="", default=False),
ct.Cfg('kubernetes_virtlet_image', ct.String(),
- help="", default='mirantis/virtlet:v0.7.0'),
+ help="", default='mirantis/virtlet:v0.8.0'),
ct.Cfg('kubernetes_externaldns_enabled', ct.Boolean(),
help="", default=False),
ct.Cfg('kubernetes_externaldns_image', ct.String(),
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/common-services.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/common-services.yaml
new file mode 100644
index 0000000..4f44638
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/common-services.yaml
@@ -0,0 +1,125 @@
+{% from 'cookied-bm-mcp-ocata-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
+
+# Install support services
+- description: Install keepalived on ctl01
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keepalived:cluster and *01*' state.sls keepalived
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: true
+
+- description: Install keepalived
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keepalived:cluster' state.sls keepalived
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: true
+
+- description: Check the OpenStack control VIP
+ cmd: |
+ OPENSTACK_CONTROL_ADDRESS=$(salt --out=newline_values_only "ctl01*" pillar.get _param:cluster_vip_address);
+ echo "_param:cluster_vip_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+
+- description: Install glusterfs
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glusterfs:server' state.sls glusterfs.server.service
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Setup glusterfs on primary controller
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check the gluster status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install RabbitMQ on ctl01
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install RabbitMQ
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@rabbitmq:server' state.sls rabbitmq
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check the rabbitmq status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install Galera on first server
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@galera:master' state.sls galera
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install Galera on other servers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@galera:slave' state.sls galera
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check mysql status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+
+- description: Install haproxy
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@haproxy:proxy' state.sls haproxy
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check haproxy status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@haproxy:proxy' service.status haproxy
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install nginx on prx nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@nginx:server' state.sls nginx
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Restart rsyslog
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@haproxy:proxy' service.restart rsyslog
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install memcached on all controllers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@memcached:server' state.sls memcached
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
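
Each entry in this template is a self-contained deployment step: description, salt command, target node, retry policy and a skip_fail flag. A hypothetical stand-alone runner, shown only to illustrate the step structure (it executes commands locally and ignores node_name; the project's real harness renders the Jinja template first and runs the commands over SSH on the cfg node):

    import subprocess
    import time

    import yaml

    def run_steps(path):
        # Assumes the Jinja template has already been rendered to plain YAML.
        with open(path) as f:
            steps = yaml.safe_load(f)
        for step in steps:
            retry = step.get('retry', {'count': 1, 'delay': 0})
            for _ in range(retry['count']):
                if subprocess.call(step['cmd'], shell=True) == 0:
                    break
                time.sleep(retry['delay'])
            else:
                if not step.get('skip_fail', False):
                    raise RuntimeError('Step failed: ' + step['description'])
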
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/lab04-physical-inventory.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/lab04-physical-inventory.yaml
new file mode 100644
index 0000000..8234156
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/lab04-physical-inventory.yaml
@@ -0,0 +1,89 @@
+nodes:
+ # Physical nodes
+
+ kvm01.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: infra_kvm_node01
+ roles:
+ - infra_kvm
+ - linux_system_codename_xenial
+ interfaces:
+ enp2s0f0:
+ role: single_mgm
+ enp2s0f1:
+ role: bond0_ab_ovs_vlan_ctl
+
+ kvm02.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: infra_kvm_node02
+ roles:
+ - infra_kvm
+ - linux_system_codename_xenial
+ interfaces:
+ enp2s0f0:
+ role: single_mgm
+ enp2s0f1:
+ role: bond0_ab_ovs_vlan_ctl
+
+ kvm03.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: infra_kvm_node03
+ roles:
+ - infra_kvm
+ - linux_system_codename_xenial
+ interfaces:
+ enp2s0f0:
+ role: single_mgm
+ enp2s0f1:
+ role: bond0_ab_ovs_vlan_ctl
+
+ cmp001.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: openstack_compute_node01
+ roles:
+ - openstack_compute
+ - linux_system_codename_xenial
+ interfaces:
+ enp2s0f1:
+ role: parametrized_interface
+ deploy_address: 172.16.49.72
+ enp2s0f0:
+ role: parametrized_interface
+ single_address: 10.167.4.72
+ enp5s0f0:
+ role: parametrized_interface
+ tenant_address: 192.168.0.101
+
+ cmp002.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: openstack_compute_node02
+ roles:
+ - openstack_compute
+ - linux_system_codename_xenial
+ interfaces:
+ enp2s0f1:
+ role: parametrized_interface
+ deploy_address: 172.16.49.74
+ enp2s0f0:
+ role: parametrized_interface
+ single_address: 10.167.4.74
+ enp5s0f0:
+ role: parametrized_interface
+ tenant_address: 192.168.0.102
+
+# gtw01.cookied-bm-mcp-ocata-contrail.local:
+# reclass_storage_name: openstack_gateway_node01
+# roles:
+# - openstack_gateway
+# - linux_system_codename_xenial
+# interfaces:
+# enp9s0f0:
+# role: single_mgm
+# enp9s0f1:
+# role: bond0_ab_dvr_vlan_ctl_prv_floating
+
+# gtw02.cookied-bm-mcp-ocata-contrail.local:
+# reclass_storage_name: openstack_gateway_node02
+# roles:
+# - openstack_gateway
+# - linux_system_codename_xenial
+# interfaces:
+# enp10s0f0:
+# role: single_mgm
+# enp10s0f1:
+# role: bond0_ab_dvr_vlan_ctl_prv_floating
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/openstack.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/openstack.yaml
new file mode 100644
index 0000000..ffcf909
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/openstack.yaml
@@ -0,0 +1,444 @@
+{% from 'cookied-bm-mcp-ocata-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-bm-mcp-ocata-contrail/underlay.yaml' import HOSTNAME_CTL01 with context %}
+{% from 'cookied-bm-mcp-ocata-contrail/underlay.yaml' import HOSTNAME_CTL02 with context %}
+{% from 'cookied-bm-mcp-ocata-contrail/underlay.yaml' import HOSTNAME_CTL03 with context %}
+{% from 'cookied-bm-mcp-ocata-contrail/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
+{% set PATTERN = os_env('PATTERN', 'smoke') %}
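+# PATTERN selects the tempest filter for the (currently commented out) "Run tests"
+# step at the end of this file; it defaults to 'smoke'.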
+
+# Install OpenStack control services
+
+- description: Install glance on all controllers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glance:server' state.sls glance -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install keystone service (note that different fernet keys are created on different nodes)
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' state.sls keystone.server -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 15}
+ skip_fail: false
+
+- description: Restart apache due to PROD-10477
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl restart apache2"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 15}
+ skip_fail: false
+
+- description: Check apache status due to PROD-10477
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl status apache2"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 15}
+ skip_fail: false
+
+- description: Mount glusterfs.client volumes (requires the 'keystone' and 'glusterfs' system users to be created)
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glance:server' state.sls glusterfs.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Update fernet keys for keystone server on the mounted glusterfs volume
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' state.sls keystone.server -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Populate keystone services/tenants/admins
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:client' state.sls keystone.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check keystone service-list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack service list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check glance image-list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonerc; glance image-list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+
+- description: Install nova on all controllers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@nova:controller' state.sls nova -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
+- description: Check nova service-list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonerc; nova --debug service-list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+
+
+- description: Install cinder
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@cinder:controller' state.sls cinder -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check cinder list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonerc; cinder list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+
+- description: Install neutron service
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@neutron:server' state.sls neutron -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+# install contrail
+- description: Install contrail db
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@opencontrail:database' state.sls opencontrail.database
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 20}
+ skip_fail: false
+
+- description: Install contrail on 1st node and skip client part
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@opencontrail:control and *01*' state.sls opencontrail exclude=opencontrail.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+
+- description: Install contrail on all nodes, still skipping the client part
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@opencontrail:control' state.sls opencontrail exclude=opencontrail.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
+- description: Install contrail and do client part as well
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@opencontrail:control' state.sls opencontrail
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
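+# The compound target 'I@opencontrail:database:id:1' below matches only the node
+# whose pillar sets opencontrail:database:id to 1, so opencontrail.client is
+# applied exactly once for the initial configuration.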
+- description: Configure contrail
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@opencontrail:database:id:1' state.sls opencontrail.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check contrail status
+ cmd: sleep 15; salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@opencontrail:control' cmd.run contrail-status
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+#- description: Install neutron on gtw node
+# cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+# -C 'I@neutron:gateway' state.sls neutron
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 1, delay: 5}
+# skip_fail: false
+
+# Install designate
+- description: Install powerdns
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'ctl*' state.sls powerdns
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install designate
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@designate:server' state.sls designate -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 5, delay: 10}
+ skip_fail: false
+
+#- description: Check neutron agent-list
+# cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+# -C 'I@keystone:server' cmd.run '. /root/keystonerc; neutron agent-list'
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 1, delay: 5}
+# skip_fail: false
+
+- description: Install heat service
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@heat:server' state.sls heat -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check heat service
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack orchestration resource type list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 5, delay: 10}
+ skip_fail: false
+
+
+- description: Deploy horizon dashboard
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@horizon:server' state.sls horizon
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+- description: Deploy nginx proxy
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@nginx:server' state.sls nginx
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+
+# Install compute node
+
+- description: Apply formulas for compute node
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+- description: Re-apply (as in the docs) formulas for compute node
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+- description: Check IP on computes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
+ 'ip a'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 10, delay: 30}
+ skip_fail: false
+
+
+ # Upload cirros image
+
+- description: Upload cirros image on ctl01
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ 'wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-i386-disk.img'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 30}
+ skip_fail: false
+
+- description: Register image in glance
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; glance image-create --name cirros --visibility public --disk-format qcow2 --container-format bare --progress < /root/cirros-0.3.4-i386-disk.img'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create net04_external
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create subnet_external
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create net04
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron net-create net04'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create subnet_net04
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create router
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron router-create net04_router01'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Set gateway
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Add interface
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Allow all tcp
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; nova secgroup-add-rule default tcp 1 65535 0.0.0.0/0'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Allow all icmp
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Sync time
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
+ 'service ntp stop; ntpd -gq; service ntp start'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Temp workaround of PROD-13167
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run
+ 'apt-get install python-pymysql -y'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+# Configure cinder-volume salt-call PROD-13167
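+# The piped fdisk input below creates a primary partition on /dev/vdb with default
+# boundaries and writes the partition table; the partition is then turned into an
+# LVM physical volume and the 'cinder-volumes' volume group used by the LVM backend
+# enabled further down.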
+- description: Set disks 01
+ cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
+ node_name: {{ HOSTNAME_CTL01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Set disks 02
+ cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
+ node_name: {{ HOSTNAME_CTL02 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Set disks 03
+ cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
+ node_name: {{ HOSTNAME_CTL03 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create partitions 01
+ cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
+ node_name: {{ HOSTNAME_CTL01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create partitions 02
+ cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
+ node_name: {{ HOSTNAME_CTL02 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create partitions 03
+ cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
+ node_name: {{ HOSTNAME_CTL03 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create cinder-volumes volume group
+ cmd: salt "ctl*" cmd.run 'vgcreate cinder-volumes /dev/vdb1'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Install cinder-volume
+ cmd: salt 'ctl*' cmd.run 'apt-get install cinder-volume -y'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Install crudini
+ cmd: salt "ctl*" cmd.run 'apt-get install crudini -y'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Temporary workaround to set enabled_backends value 01
+ cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
+ node_name: {{ HOSTNAME_CTL01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Temporary workaround to set enabled_backends value 02
+ cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
+ node_name: {{ HOSTNAME_CTL02 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Temporary workaround to set enabled_backends value 03
+ cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
+ node_name: {{ HOSTNAME_CTL03 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+#- description: Install docker.io on gtw
+# cmd: salt-call cmd.run 'apt-get install docker.io -y'
+# node_name: {{ HOSTNAME_GTW01 }}
+# retry: {count: 1, delay: 30}
+# skip_fail: false
+
+- description: Copy keystonercv3 rc file from ctl01 to cfg node
+ cmd: scp ctl01:/root/keystonercv3 /root
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+#- description: Copy rc file
+# cmd: scp /root/keystonercv3 gtw01:/root
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 1, delay: 30}
+# skip_fail: false
+
+#- description: Run tests
+# cmd: |
+# if [[ {{ PATTERN }} == "false" ]]; then
+# docker run --rm --net=host -e TEMPEST_CONF=lvm_mcp.conf -e SKIP_LIST=mcp_skip.list -e SOURCE_FILE=keystonercv3 -v /etc/ssl/certs/:/etc/ssl/certs/ -v /root/:/home/rally docker-sandbox.sandbox.mirantis.net/rally-tempest/rally-tempest:with_designate >> image.output
+# else
+# docker run --rm --net=host -e TEMPEST_CONF=lvm_mcp.conf -e SKIP_LIST=mcp_skip.list -e SOURCE_FILE=keystonercv3 -v /etc/ssl/certs/:/etc/ssl/certs/ -e CUSTOM='--pattern {{ PATTERN }}' -v /root/:/home/rally docker-sandbox.sandbox.mirantis.net/rally-tempest/rally-tempest:with_designate >> image.output
+# fi
+# node_name: {{ HOSTNAME_GTW01 }}
+# retry: {count: 1, delay: 5}
+# skip_fail: false
+
+#- description: Download xml results
+# download:
+# remote_path: /root
+# remote_filename: "report_*.xml"
+# local_path: {{ os_env('PWD') }}
+# node_name: {{ HOSTNAME_GTW01 }}
+# skip_fail: true
+
+#- description: Download html results
+# download:
+# remote_path: /root
+# remote_filename: "report_*.html"
+# local_path: {{ os_env('PWD') }}
+# node_name: {{ HOSTNAME_GTW01 }}
+# skip_fail: true
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt-context-cookiecutter-contrail.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt-context-cookiecutter-contrail.yaml
new file mode 100644
index 0000000..da3e22a
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt-context-cookiecutter-contrail.yaml
@@ -0,0 +1,169 @@
+default_context:
+ cicd_enabled: 'False'
+ cluster_domain: cookied-bm-mcp-ocata-contrail.local
+ cluster_name: deployment_name
+ compute_bond_mode: active-backup
+ compute_primary_first_nic: enp5s0f0
+ compute_primary_second_nic: enp5s0f1
+ context_seed: WCQ00jbWQE6qxjDdhHsS7SNGExTJ9HVanC9LXyJHF2IIe0Qj6vtaXFP5FSwEK6jm
+ control_network_netmask: 255.255.255.0
+ control_network_subnet: 10.167.4.0/24
+ control_vlan: '2422'
+ cookiecutter_template_branch: master
+ cookiecutter_template_credentials: gerrit
+ cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/mk/cookiecutter-templates.git
+ deploy_network_gateway: 172.16.49.126
+ deploy_network_netmask: 255.255.255.192
+ deploy_network_subnet: 172.16.49.64/26
+ deployment_type: physical
+ dns_server01: 8.8.8.8
+ dns_server02: 8.8.4.4
+ email_address: sgudz@mirantis.com
+ infra_bond_mode: active-backup
+ infra_deploy_nic: eth0
+ infra_kvm01_control_address: 10.167.4.241
+ infra_kvm01_deploy_address: 172.16.49.67
+ infra_kvm01_hostname: kvm01
+ infra_kvm02_control_address: 10.167.4.242
+ infra_kvm02_deploy_address: 172.16.49.68
+ infra_kvm02_hostname: kvm02
+ infra_kvm03_control_address: 10.167.4.243
+ infra_kvm03_deploy_address: 172.16.49.69
+ infra_kvm03_hostname: kvm03
+ infra_kvm_vip_address: 10.167.4.240
+ infra_primary_first_nic: eth2
+ infra_primary_second_nic: eth3
+ kubernetes_enabled: 'False'
+ local_repositories: 'False'
+ maas_deploy_address: 10.167.4.91
+ maas_hostname: cfg01
+ opencontrail_analytics_address: 10.167.4.30
+ opencontrail_analytics_hostname: nal
+ opencontrail_analytics_node01_address: 10.167.4.31
+ opencontrail_analytics_node01_hostname: nal01
+ opencontrail_analytics_node02_address: 10.167.4.32
+ opencontrail_analytics_node02_hostname: nal02
+ opencontrail_analytics_node03_address: 10.167.4.33
+ opencontrail_analytics_node03_hostname: nal03
+ opencontrail_compute_iface_mask: '24'
+ opencontrail_control_address: 10.167.4.20
+ opencontrail_control_hostname: ntw
+ opencontrail_control_node01_address: 10.167.4.21
+ opencontrail_control_node01_hostname: ntw01
+ opencontrail_control_node02_address: 10.167.4.22
+ opencontrail_control_node02_hostname: ntw02
+ opencontrail_control_node03_address: 10.167.4.23
+ opencontrail_control_node03_hostname: ntw03
+ opencontrail_enabled: 'True'
+ opencontrail_router01_address: 10.167.4.100
+ opencontrail_router01_hostname: rtr01
+ opencontrail_router02_address: 10.167.4.101
+ opencontrail_router02_hostname: rtr02
+ openstack_benchmark_node01_address: 10.167.4.95
+ openstack_benchmark_node01_hostname: bmk01
+ openstack_compute_count: '1'
+ openstack_compute_rack01_hostname: cmpt
+ openstack_compute_rack01_single_subnet: 10.167.4
+ openstack_compute_rack01_tenant_subnet: 192.168.0
+
+ openstack_compute_node01_hostname: cmp001
+ openstack_compute_node02_hostname: cmp002
+ openstack_compute_node01_address: 10.167.4.72
+ openstack_compute_node02_address: 10.167.4.74
+ openstack_compute_node01_single_address: 10.167.4.72
+ openstack_compute_node02_single_address: 10.167.4.74
+ openstack_compute_node01_deploy_address: 172.16.49.72
+ openstack_compute_node02_deploy_address: 172.16.49.74
+
+ openstack_control_address: 10.167.4.10
+ openstack_control_hostname: ctl
+ openstack_control_node01_address: 10.167.4.11
+ openstack_control_node01_hostname: ctl01
+ openstack_control_node02_address: 10.167.4.12
+ openstack_control_node02_hostname: ctl02
+ openstack_control_node03_address: 10.167.4.13
+ openstack_control_node03_hostname: ctl03
+ openstack_database_address: 10.167.4.50
+ openstack_database_hostname: dbs
+ openstack_database_node01_address: 10.167.4.51
+ openstack_database_node01_hostname: dbs01
+ openstack_database_node02_address: 10.167.4.52
+ openstack_database_node02_hostname: dbs02
+ openstack_database_node03_address: 10.167.4.53
+ openstack_database_node03_hostname: dbs03
+ openstack_enabled: 'True'
+ openstack_message_queue_address: 10.167.4.40
+ openstack_message_queue_hostname: msg
+ openstack_message_queue_node01_address: 10.167.4.41
+ openstack_message_queue_node01_hostname: msg01
+ openstack_message_queue_node02_address: 10.167.4.42
+ openstack_message_queue_node02_hostname: msg02
+ openstack_message_queue_node03_address: 10.167.4.43
+ openstack_message_queue_node03_hostname: msg03
+ openstack_network_engine: opencontrail
+ openstack_nfv_dpdk_enabled: 'False'
+ openstack_nfv_sriov_enabled: 'False'
+ openstack_nova_compute_nfv_req_enabled: 'False'
+ openstack_proxy_address: 10.167.4.80
+ openstack_proxy_hostname: prx
+ openstack_proxy_node01_address: 10.167.4.81
+ openstack_proxy_node01_hostname: prx01
+ openstack_proxy_node02_address: 10.167.4.82
+ openstack_proxy_node02_hostname: prx02
+ openstack_telemetry_address: 10.167.4.75
+ openstack_telemetry_hostname: mdb
+ openstack_telemetry_node01_address: 10.167.4.76
+ openstack_telemetry_node01_hostname: mdb01
+ openstack_telemetry_node02_address: 10.167.4.77
+ openstack_telemetry_node02_hostname: mdb02
+ openstack_telemetry_node03_address: 10.167.4.78
+ openstack_telemetry_node03_hostname: mdb03
+ openstack_version: ocata
+ oss_enabled: 'False'
+ oss_notification_app_id: '24'
+ oss_notification_sender_password: password
+ oss_notification_smtp_port: '587'
+ oss_notification_webhook_login_id: '13'
+ platform: openstack_enabled
+ public_host: ${_param:openstack_proxy_address}
+ publication_method: email
+ reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+ salt_api_password: BIDWhCG4zuArfGwwKDuh3q32jdTLrWS1
+ salt_api_password_hash: $6$bGHqshet$Rf1A7.SWK.8VattpFelJ8yC4OHdnXCZdIIRsMLtoOb1ZDDO7meEEpqTWJY4xpQbXaXwO0aLjVDcF34ucfuxpb1
+ salt_master_address: 10.167.4.66
+ salt_master_hostname: cfg01
+ salt_master_management_address: 172.16.49.66
+ shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/salt-models/reclass-system.git
+ stacklight_enabled: 'True'
+ stacklight_log_address: 10.167.4.60
+ stacklight_log_hostname: log
+ stacklight_log_node01_address: 10.167.4.61
+ stacklight_log_node01_hostname: log01
+ stacklight_log_node02_address: 10.167.4.62
+ stacklight_log_node02_hostname: log02
+ stacklight_log_node03_address: 10.167.4.63
+ stacklight_log_node03_hostname: log03
+ stacklight_monitor_address: 10.167.4.70
+ stacklight_monitor_hostname: mon
+ stacklight_monitor_node01_address: 10.167.4.71
+ stacklight_monitor_node01_hostname: mon01
+ stacklight_monitor_node02_address: 10.167.4.72
+ stacklight_monitor_node02_hostname: mon02
+ stacklight_monitor_node03_address: 10.167.4.73
+ stacklight_monitor_node03_hostname: mon03
+ stacklight_notification_address: alerts@localhost
+ stacklight_notification_smtp_host: 127.0.0.1
+ stacklight_telemetry_address: 10.167.4.85
+ stacklight_telemetry_hostname: mtr
+ stacklight_telemetry_node01_address: 10.167.4.86
+ stacklight_telemetry_node01_hostname: mtr01
+ stacklight_telemetry_node02_address: 10.167.4.87
+ stacklight_telemetry_node02_hostname: mtr02
+ stacklight_telemetry_node03_address: 10.167.4.88
+ stacklight_telemetry_node03_hostname: mtr03
+ stacklight_version: '2'
+ tenant_network_gateway: 192.168.0.1
+ tenant_network_netmask: 255.255.255.0
+ tenant_network_subnet: 192.168.0.0/24
+ tenant_vlan: '2423'
+ upstream_proxy_enabled: 'False'
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt-context-environment.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt-context-environment.yaml
new file mode 100644
index 0000000..d9a2193
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt-context-environment.yaml
@@ -0,0 +1,264 @@
+nodes:
+ # Virtual Control Plane nodes
+
+ ctl01.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: openstack_control_node01
+ roles:
+ - openstack_control_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ ctl02.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: openstack_control_node02
+ roles:
+ - openstack_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ ctl03.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: openstack_control_node03
+ roles:
+ - openstack_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ dbs01.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: openstack_database_node01
+ roles:
+ - openstack_database_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ dbs02.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: openstack_database_node02
+ roles:
+ - openstack_database
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ dbs03.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: openstack_database_node03
+ roles:
+ - openstack_database
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ msg01.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: openstack_message_queue_node01
+ roles:
+ - openstack_message_queue
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ msg02.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: openstack_message_queue_node02
+ roles:
+ - openstack_message_queue
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ msg03.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: openstack_message_queue_node03
+ roles:
+ - openstack_message_queue
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ mdb01.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: openstack_telemetry_node01
+ roles:
+ - openstack_telemetry
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ mdb02.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: openstack_telemetry_node02
+ roles:
+ - openstack_telemetry
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ mdb03.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: openstack_telemetry_node03
+ roles:
+ - openstack_telemetry
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ prx01.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: openstack_proxy_node01
+ roles:
+ - openstack_proxy
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ prx02.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: openstack_proxy_node02
+ roles:
+ - openstack_proxy
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ mon01.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: stacklight_server_node01
+ roles:
+ - stacklightv2_server_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ mon02.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: stacklight_server_node02
+ roles:
+ - stacklightv2_server
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ mon03.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: stacklight_server_node03
+ roles:
+ - stacklightv2_server
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ nal01.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: opencontrail_analytics_node01
+ roles:
+ - opencontrail_analytics
+ - linux_system_codename_trusty
+ interfaces:
+ eth1:
+ role: single_ctl
+
+ nal02.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: opencontrail_analytics_node02
+ roles:
+ - opencontrail_analytics
+ - linux_system_codename_trusty
+ interfaces:
+ eth1:
+ role: single_ctl
+
+ nal03.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: opencontrail_analytics_node03
+ roles:
+ - opencontrail_analytics
+ - linux_system_codename_trusty
+ interfaces:
+ eth1:
+ role: single_ctl
+
+ ntw01.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: opencontrail_control_node01
+ roles:
+ - opencontrail_control
+ - linux_system_codename_trusty
+ interfaces:
+ eth1:
+ role: single_ctl
+
+ ntw02.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: opencontrail_control_node02
+ roles:
+ - opencontrail_control
+ - linux_system_codename_trusty
+ interfaces:
+ eth1:
+ role: single_ctl
+
+ ntw03.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: opencontrail_control_node03
+ roles:
+ - opencontrail_control
+ - linux_system_codename_trusty
+ interfaces:
+ eth1:
+ role: single_ctl
+
+
+ mtr01.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: stacklight_telemetry_node01
+ roles:
+ - stacklight_telemetry
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ mtr02.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: stacklight_telemetry_node02
+ roles:
+ - stacklight_telemetry
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ mtr03.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: stacklight_telemetry_node03
+ roles:
+ - stacklight_telemetry
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ log01.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: stacklight_log_node01
+ roles:
+ - stacklight_log_leader_v2
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ log02.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: stacklight_log_node02
+ roles:
+ - stacklight_log
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ log03.cookied-bm-mcp-ocata-contrail.local:
+ reclass_storage_name: stacklight_log_node03
+ roles:
+ - stacklight_log
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt.yaml
new file mode 100644
index 0000000..dfaac94
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt.yaml
@@ -0,0 +1,157 @@
+{% from 'cookied-bm-mcp-ocata-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-bm-mcp-ocata-contrail/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookied-bm-mcp-ocata-contrail/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+# See shared-salt.yaml for other salt model repository parameters
+
+# Name of the context file (without the fixed .yaml extension) used to render the Environment model
+{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','physical-cookied-bm-mcp-ocata-contrail') %}
+# Path to the context files used to render Cluster and Environment models
+{%- set CLUSTER_CONTEXT_NAME = 'salt-context-cookiecutter-contrail.yaml' %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-environment.yaml','lab04-physical-inventory.yaml'] %}
+{%- set CONTROL_VLAN = os_env('CONTROL_VLAN', '2422') %}
+{%- set TENANT_VLAN = os_env('TENANT_VLAN', '2423') %}
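+# CONTROL_VLAN and TENANT_VLAN are passed to MACRO_GENERATE_COOKIECUTTER_MODEL below
+# and should match control_vlan/tenant_vlan in salt-context-cookiecutter-contrail.yaml
+# (2422/2423 by default).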
+
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
+
+{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL(CONTROL_VLAN=CONTROL_VLAN, TENANT_VLAN=TENANT_VLAN) }}
+
+{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
+
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "glusterfs" "xtrabackup" "maas"') }}
+
+- description: "Fix salt-formula/salt VCP creation. Delete as fast as possible"
+ cmd: |
+ set -e;
+ mkdir -p /tmp/fix_vcp;
+ cd /tmp/fix_vcp;
+ git clone https://gerrit.mcp.mirantis.net/salt-formulas/salt;
+ cd salt;
+ git fetch https://gerrit.mcp.mirantis.net/salt-formulas/salt refs/changes/10/11210/1 && git checkout FETCH_HEAD;
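+    # the fetched Gerrit change carries a patched virtng.py module; copy it into the
+    # salt-formulas environment so it is picked up when the VCP VMs are created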
+ cp _modules/virtng.py /usr/share/salt-formulas/env/_modules/;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 1}
+ skip_fail: false
+
+{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
+
+{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
+
+- description: "Workaround for rack01 compute generator"
+ cmd: |
+ set -e;
+ # Remove rack01 key
+ reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
+ # Add openstack_compute_node definition from system
+ reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml --merge;
+ # Set ipaddresses
+# salt-call reclass.cluster_meta_set openstack_compute_node01_single_address 10.167.4.72 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
+# salt-call reclass.cluster_meta_set openstack_compute_node02_single_address 10.167.4.74 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
+# salt-call reclass.cluster_meta_set openstack_compute_node01_deploy_address 172.16.49.72 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
+# salt-call reclass.cluster_meta_set openstack_compute_node02_deploy_address 172.16.49.74 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
+
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+{{ SHARED.MACRO_GENERATE_INVENTORY() }}
+
+{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
+
+
+- description: "Workaround for PROD-14060"
+ cmd: |
+ set -e;
+ # Add tenant and single addresses for computes
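+    # The values below are written directly into the generated node files and must
+    # match lab04-physical-inventory.yaml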
+ salt-call reclass.cluster_meta_set deploy_address 172.16.49.72 /srv/salt/reclass/nodes/_generated/cmp001.cookied-bm-mcp-ocata-contrail.local.yml
+ salt-call reclass.cluster_meta_set tenant_address 192.168.0.101 /srv/salt/reclass/nodes/_generated/cmp001.cookied-bm-mcp-ocata-contrail.local.yml
+ salt-call reclass.cluster_meta_set single_address 10.167.4.72 /srv/salt/reclass/nodes/_generated/cmp001.cookied-bm-mcp-ocata-contrail.local.yml
+
+ salt-call reclass.cluster_meta_set deploy_address 172.16.49.74 /srv/salt/reclass/nodes/_generated/cmp002.cookied-bm-mcp-ocata-contrail.local.yml
+ salt-call reclass.cluster_meta_set tenant_address 192.168.0.102 /srv/salt/reclass/nodes/_generated/cmp002.cookied-bm-mcp-ocata-contrail.local.yml
+ salt-call reclass.cluster_meta_set single_address 10.167.4.74 /srv/salt/reclass/nodes/_generated/cmp002.cookied-bm-mcp-ocata-contrail.local.yml
+
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: "Workaround for PROD-15087"
+ cmd: |
+ set -e;
+ reclass-tools add-key 'classes' 'system.linux.system.repo.mcp.openstack' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/stacklight/telemetry.yml --merge;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+
+{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
+
+########################################
+# Spin up Control Plane VMs on KVM nodes
+########################################
+
+- description: Execute 'libvirt' states to create necessary libvirt networks
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'kvm*' state.sls libvirt
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 10}
+ skip_fail: false
+
+- description: Create VMs for control plane
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'kvm*' state.sls salt.control
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 10}
+ skip_fail: false
+
+- description: '*Workaround*: wait for the control-plane VM keys to appear in salt-key (instead of a fixed sleep)'
+ cmd: |
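+    # Succeeds only once every VM name reported by 'virsh list' on the kvm nodes is
+    # already present among the accepted salt keys (polled up to 20 times with a
+    # 30 second delay).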
+ salt-key -l acc| sort > /tmp/current_keys.txt &&
+ salt 'kvm*' cmd.run 'virsh list --name' | grep -v 'kvm'|sort|xargs -I {} fgrep {} /tmp/current_keys.txt
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 20, delay: 30}
+ skip_fail: false
+
+#########################################
+# Configure all running salt minion nodes
+#########################################
+
+- description: Refresh pillars on all minions
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Sync all salt resources
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all && sleep 5
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Show reclass-salt --top for generated nodes
+ cmd: reclass-salt --top -u /srv/salt/reclass/nodes/_generated/
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
+
+#- description: Hack gtw node
+# cmd: salt 'gtw*' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.110/24 dev ens4; ip addr flush dev ens4";
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 1, delay: 10}
+# skip_fail: false
+
+#- description: Hack cmp01 node
+# cmd: salt 'cmp01*' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.105/24 dev ens4; ip addr flush dev ens4";
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 1, delay: 10}
+# skip_fail: false
+
+#- description: Hack cmp02 node
+# cmd: salt 'cmp02*' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.106/24 dev ens4; ip addr flush dev ens4";
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 1, delay: 10}
+# skip_fail: false
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/sl.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/sl.yaml
new file mode 100644
index 0000000..10d0d9c
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/sl.yaml
@@ -0,0 +1,187 @@
+{% from 'cookied-bm-mcp-ocata-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
+
+# Install docker swarm
+- description: Install keepalived on mon nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'mon*' state.sls keepalived
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Check the VIP on StackLight mon nodes
+ cmd: |
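+    # Resolve the StackLight VIP from pillar and verify that one of the mon nodes
+    # has it assigned; grep -B1 shows which node reports the address.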
+ SL_VIP=$(salt --out=newline_values_only "mon01*" pillar.get _param:cluster_vip_address);
+ echo "_param:cluster_vip_address (vip): ${SL_VIP}";
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Configure docker service
+ cmd: salt -C 'I@docker:swarm' state.sls docker.host
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install docker swarm on master node
+ cmd: salt -C 'I@docker:swarm:role:master' state.sls docker.swarm
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Send grains to the swarm slave nodes
+ cmd: salt -C 'I@docker:swarm' state.sls salt.minion.grains
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Update mine
+ cmd: salt -C 'I@docker:swarm' mine.update
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Refresh modules
+ cmd: salt -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Rerun swarm state on the master node for proper token population
+ cmd: salt -C 'I@docker:swarm:role:master' state.sls docker.swarm
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Configure slave nodes
+ cmd: salt -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: List registered Docker swarm nodes
+ cmd: salt -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+# Install slv2 infra
+- description: Install telegraf
+ cmd: salt -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 10}
+ skip_fail: false
+
+- description: Configure Prometheus exporters, if pillar 'prometheus:exporters' exists on any server
+ cmd: |
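+    # Apply the prometheus state only on minions that actually define the
+    # 'prometheus:exporters' pillar.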
+ if salt -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
+ salt -C 'I@prometheus:exporters' state.sls prometheus
+ fi
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Configure collector
+ cmd: salt -C 'I@heka:log_collector' state.sls heka.log_collector
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install elasticsearch server
+ cmd: salt -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install kibana server
+ cmd: salt -C 'I@kibana:server' state.sls kibana.server -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install elasticsearch client
+ cmd: salt -C 'I@elasticsearch:client' state.sls elasticsearch.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install kibana client
+ cmd: salt -C 'I@kibana:client' state.sls kibana.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Check influxdb
+ cmd: |
+ INFLUXDB_SERVICE=`salt -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
+ echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
+ if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
+ salt -C 'I@influxdb:server' state.sls influxdb
+ fi
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+# Collect grains needed to configure the services
+
+- description: Get grains
+ cmd: salt -C 'I@salt:minion' state.sls salt.minion.grains
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Sync modules
+ cmd: salt -C 'I@salt:minion' saltutil.refresh_modules
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Update mine
+ cmd: salt -C 'I@salt:minion' mine.update; sleep 5;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+# Change environment configuration before deploy
+- description: Set SL docker images deploy parameters
+ cmd: |
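+    # Template-level loop: one reclass.cluster_meta_set call is rendered per
+    # non-empty option in config.sl_deploy.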
+ {% for sl_opt, value in config.sl_deploy.items() %}
+ {% if value|string() %}
+ salt-call reclass.cluster_meta_set {{ sl_opt }} {{ value }};
+ {% endif %}
+ {% endfor %}
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 1}
+ skip_fail: false
+
+# Configure the services running in Docker Swarm
+- description: Install prometheus alertmanager
+ cmd: salt -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Run docker state
+ cmd: salt -C 'I@docker:swarm:role:master' state.sls docker
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Show docker containers
+ cmd: salt -C 'I@docker:swarm' dockerng.ps
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Configure Grafana dashboards and datasources
+ cmd: sleep 30; salt -C 'I@grafana:client' state.sls grafana.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 10}
+ skip_fail: false
+
+- description: Run salt minion to create cert files
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay--meta-data.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay--meta-data.yaml
new file mode 100644
index 0000000..a594a53
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay--meta-data.yaml
@@ -0,0 +1,4 @@
+| # All the data below will be stored as a string object
+ instance-id: iid-local1
+ hostname: {hostname}
+ local-hostname: {hostname}
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay--user-data-cicd.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay--user-data-cfg01.yaml
similarity index 76%
copy from tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay--user-data-cicd.yaml
copy to tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay--user-data-cfg01.yaml
index 3bc891d..cb2ef9f 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay--user-data-cicd.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay--user-data-cfg01.yaml
@@ -1,94 +1,97 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Enable grub menu using updated config below
- - update-grub
-
- # Prepare network connection
- # WARNING! On CID* nodes, admin network is connected to ens4, and control network to ens3 (as in the model)
- # On other nodes (cfg01 and openstack), admin network is connected to ens3, and control network to ens4
- - sudo ifup ens4
-
- # Create swap
- - fallocate -l 8G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
-
- ############## TCP Cloud cfg01 node ##################
- #- sleep 120
- - echo "Preparing base OS"
- - which wget >/dev/null || (apt-get update; apt-get install -y wget)
-
- - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
- - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
- - apt-get clean
- - eatmydata apt-get update && apt-get -y upgrade
-
- # Install common packages
- - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
- # Install salt-minion and stop it until it is configured
- - eatmydata apt-get install -y salt-minion && service salt-minion stop
-
- # Install latest kernel
- - eatmydata apt-get install -y linux-generic-hwe-16.04
-
- ########################################################
- # Node is ready, allow SSH access
- #- echo "Allow SSH access ..."
- #- sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- - reboot
- ########################################################
-
- write_files:
- - path: /etc/default/grub.d/97-enable-grub-menu.cfg
- content: |
- GRUB_RECORDFAIL_TIMEOUT=30
- GRUB_TIMEOUT=3
- GRUB_TIMEOUT_STYLE=menu
-
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet manual
- auto ens4
- iface ens4 inet dhcp
-
+| # All the data below will be stored as a string object
+ #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ ssh_pwauth: True
+ users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ {% for key in config.underlay.ssh_keys %}
+ - ssh-rsa {{ key['public'] }}
+ {% endfor %}
+
+ disable_root: false
+ chpasswd:
+ list: |
+ root:r00tme
+ expire: False
+
+ bootcmd:
+ # Block access to SSH while node is preparing
+ - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+ output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+ runcmd:
+ # Configure dhclient
+ - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+ - sudo resolvconf -u
+
+ # Enable grub menu using updated config below
+ - update-grub
+
+ # Prepare network connection
+ - sudo ifup ens3
+ #- sudo route add default gw {gateway} {interface_name}
+ #- sudo ifup ens4
+
+ # Create swap
+ - fallocate -l 4G /swapfile
+ - chmod 600 /swapfile
+ - mkswap /swapfile
+ - swapon /swapfile
+ - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+
+ ############## TCP Cloud cfg01 node ##################
+ #- sleep 120
+ - echo "Preparing base OS"
+
+ - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
+ - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
+ - which wget >/dev/null || (apt-get update; apt-get install -y wget);
+
+ - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
+ - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
+ - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
+ - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
+
+ - apt-get clean
+ - apt-get update
+
+ # Install common packages
+ - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
+
+ # Install salt-minion and stop it until it is configured
+ - eatmydata apt-get install -y salt-minion && service salt-minion stop
+
+ ########################################################
+ # Node is ready, allow SSH access
+ - echo "Allow SSH access ..."
+ - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+ ########################################################
+
+ write_files:
+ - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+ content: |
+ GRUB_RECORDFAIL_TIMEOUT=30
+ GRUB_TIMEOUT=3
+ GRUB_TIMEOUT_STYLE=menu
+
+ - path: /etc/network/interfaces
+ content: |
+ auto ens3
+ iface ens3 inet dhcp
+
+ - path: /root/.ssh/config
+ owner: root:root
+ permissions: '0600'
+ content: |
+ Host *
+ ServerAliveInterval 300
+ ServerAliveCountMax 10
+ StrictHostKeyChecking no
+ UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay--user-data-cicd.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay--user-data1604.yaml
similarity index 87%
rename from tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay--user-data-cicd.yaml
rename to tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay--user-data1604.yaml
index 3bc891d..a362d86 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay--user-data-cicd.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay--user-data1604.yaml
@@ -1,94 +1,94 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Enable grub menu using updated config below
- - update-grub
-
- # Prepare network connection
- # WARNING! On CID* nodes, admin network is connected to ens4, and control network to ens3 (as in the model)
- # On other nodes (cfg01 and openstack), admin network is connected to ens3, and control network to ens4
- - sudo ifup ens4
-
- # Create swap
- - fallocate -l 8G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
-
- ############## TCP Cloud cfg01 node ##################
- #- sleep 120
- - echo "Preparing base OS"
- - which wget >/dev/null || (apt-get update; apt-get install -y wget)
-
- - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
- - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
- - apt-get clean
- - eatmydata apt-get update && apt-get -y upgrade
-
- # Install common packages
- - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
- # Install salt-minion and stop it until it is configured
- - eatmydata apt-get install -y salt-minion && service salt-minion stop
-
- # Install latest kernel
- - eatmydata apt-get install -y linux-generic-hwe-16.04
-
- ########################################################
- # Node is ready, allow SSH access
- #- echo "Allow SSH access ..."
- #- sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- - reboot
- ########################################################
-
- write_files:
- - path: /etc/default/grub.d/97-enable-grub-menu.cfg
- content: |
- GRUB_RECORDFAIL_TIMEOUT=30
- GRUB_TIMEOUT=3
- GRUB_TIMEOUT_STYLE=menu
-
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet manual
- auto ens4
- iface ens4 inet dhcp
-
+| # All the data below will be stored as a string object
+ #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ ssh_pwauth: True
+ users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ {% for key in config.underlay.ssh_keys %}
+ - ssh-rsa {{ key['public'] }}
+ {% endfor %}
+
+ disable_root: false
+ chpasswd:
+ list: |
+ root:r00tme
+ expire: False
+
+ bootcmd:
+ # Block access to SSH while node is preparing
+ - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+ output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+ runcmd:
+ - export TERM=linux
+ - export LANG=C
+ # Configure dhclient
+ - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+ - sudo resolvconf -u
+
+ # Enable grub menu using updated config below
+ - update-grub
+
+ # Prepare network connection
+ - sudo ifup {interface_name}
+ #- sudo route add default gw {gateway} {interface_name}
+ #- sudo ifup ens4
+
+ # Create swap
+ - fallocate -l 4G /swapfile
+ - chmod 600 /swapfile
+ - mkswap /swapfile
+ - swapon /swapfile
+ - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+
+
+ ############## TCP Cloud cfg01 node ##################
+ #- sleep 120
+ - echo "Preparing base OS"
+ - which wget >/dev/null || (apt-get update; apt-get install -y wget)
+
+ - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
+ - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
+ - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
+ - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
+
+ - apt-get clean
+ - eatmydata apt-get update && apt-get -y upgrade
+
+ # Install common packages
+ - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
+
+ # Install salt-minion and stop it until it is configured
+ - eatmydata apt-get install -y salt-minion && service salt-minion stop
+
+ # Install latest kernel
+ - eatmydata apt-get install -y linux-generic-hwe-16.04
+
+ ########################################################
+ # Node is ready, allow SSH access
+ #- echo "Allow SSH access ..."
+ #- sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+ - reboot
+ ########################################################
+
+ write_files:
+ - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+ content: |
+ GRUB_RECORDFAIL_TIMEOUT=30
+ GRUB_TIMEOUT=3
+ GRUB_TIMEOUT_STYLE=menu
+
+ - path: /etc/network/interfaces
+ content: |
+ # The loopback network interface
+ auto lo
+ iface lo inet loopback
+ auto {interface_name}
+ iface {interface_name} inet dhcp
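
Note on the template above: the double-brace {{ ... }} expressions are Jinja and are rendered when the template is loaded, while single-brace fields such as {interface_name} and {gateway} survive rendering and are presumably filled in later, per node, with Python-style string formatting. A minimal sketch of that second pass, using illustrative values that are not taken from the harness:

    # Hypothetical second pass over the already Jinja-rendered user-data text.
    snippet = (
        '   # Configure dhclient\n'
        '   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base\n'
        '   # Prepare network connection\n'
        '   - sudo ifup {interface_name}\n'
    )
    print(snippet.format(
        interface_name="enp2s0f0",   # assumed admin/PXE NIC of one bare-metal node
        gateway="172.16.49.126",     # assumed admin network gateway
    ))
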
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay.yaml
new file mode 100644
index 0000000..f464e3d
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay.yaml
@@ -0,0 +1,488 @@
+# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
+
+#{% set DOMAIN_NAME = os_env('LAB_CONFIG_NAME', 'physical_mcp11_ovs_dpdk') + '.local' %}
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-bm-mcp-ocata-contrail') %}
+{% set DOMAIN_NAME = 'cookied-bm-mcp-ocata-contrail.local' %}
+{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM01 = os_env('HOSTNAME_KVM01', 'kvm01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM02 = os_env('HOSTNAME_KVM02', 'kvm02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM03', 'kvm03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP001 = os_env('HOSTNAME_CMP001', 'cmp001.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP002 = os_env('HOSTNAME_CMP002', 'cmp002.' + DOMAIN_NAME) %}
+# {% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
+# {% set HOSTNAME_GTW02 = os_env('HOSTNAME_GTW02', 'gtw02.' + DOMAIN_NAME) %}
+
+{% set ETH1_IP_ADDRESS_CFG01 = os_env('ETH1_IP_ADDRESS_CFG01', '172.16.49.66') %}
+{% set ETH0_IP_ADDRESS_KVM01 = os_env('ETH0_IP_ADDRESS_KVM01', '172.16.49.67') %}
+{% set ETH0_IP_ADDRESS_KVM02 = os_env('ETH0_IP_ADDRESS_KVM02', '172.16.49.68') %}
+{% set ETH0_IP_ADDRESS_KVM03 = os_env('ETH0_IP_ADDRESS_KVM03', '172.16.49.69') %}
+{% set ETH0_IP_ADDRESS_CMP001 = os_env('ETH0_IP_ADDRESS_CMP001', '172.16.49.72') %}
+{% set ETH0_IP_ADDRESS_CMP002 = os_env('ETH0_IP_ADDRESS_CMP002', '172.16.49.74') %}
+# {% set ETH0_IP_ADDRESS_CMP003 = os_env('ETH0_IP_ADDRESS_CMP003', '172.16.167.140') %}
+# {% set ETH0_IP_ADDRESS_GTW01 = os_env('ETH0_IP_ADDRESS_GTW01', '172.16.49.5') %}
+# {% set ETH0_IP_ADDRESS_GTW02 = os_env('ETH0_IP_ADDRESS_GTW02', '172.16.49.4') %}
+
+{% import 'cookied-bm-mcp-ocata-contrail/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
+{% import 'cookied-bm-mcp-ocata-contrail/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
+{% import 'cookied-bm-mcp-ocata-contrail/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA with context %}
+
+---
+aliases:
+ - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
+ - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
+ - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
+ - &cloudinit_user_data {{ CLOUDINIT_USER_DATA }}
+
+
+template:
+ devops_settings:
+ env_name: {{ os_env('ENV_NAME', 'cookied-bm-mcp-ocata-contrail_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+
+ address_pools:
+ admin-pool01:
+ net: {{ os_env('ADMIN_ADDRESS_POOL01', '172.16.49.64/26:26') }}
+ params:
+ ip_reserved:
+ gateway: +62
+ l2_network_device: +61
+ default_{{ HOSTNAME_CFG01 }}: {{ ETH1_IP_ADDRESS_CFG01 }}
+ default_{{ HOSTNAME_KVM01 }}: {{ ETH0_IP_ADDRESS_KVM01 }}
+ default_{{ HOSTNAME_KVM02 }}: {{ ETH0_IP_ADDRESS_KVM02 }}
+ default_{{ HOSTNAME_KVM03 }}: {{ ETH0_IP_ADDRESS_KVM03 }}
+ default_{{ HOSTNAME_CMP001 }}: {{ ETH0_IP_ADDRESS_CMP001 }}
+ default_{{ HOSTNAME_CMP002 }}: {{ ETH0_IP_ADDRESS_CMP002 }}
+            # default_{{ HOSTNAME_CMP003 }}: {{ ETH0_IP_ADDRESS_CMP003 }}
+ # default_{{ HOSTNAME_GTW01 }}: {{ ETH0_IP_ADDRESS_GTW01 }}
+ # default_{{ HOSTNAME_GTW02 }}: {{ ETH0_IP_ADDRESS_GTW02 }}
+ virtual_{{ HOSTNAME_CFG01 }}: {{ ETH1_IP_ADDRESS_CFG01 }}
+ virtual_{{ HOSTNAME_KVM01 }}: {{ ETH0_IP_ADDRESS_KVM01 }}
+ virtual_{{ HOSTNAME_KVM02 }}: {{ ETH0_IP_ADDRESS_KVM02 }}
+ virtual_{{ HOSTNAME_KVM03 }}: {{ ETH0_IP_ADDRESS_KVM03 }}
+ virtual_{{ HOSTNAME_CMP001 }}: {{ ETH0_IP_ADDRESS_CMP001 }}
+ virtual_{{ HOSTNAME_CMP002 }}: {{ ETH0_IP_ADDRESS_CMP002 }}
+ # virtual_{{ HOSTNAME_CMP003 }}: {{ ETH0_IP_ADDRESS_CMP003 }}
+ # virtual_{{ HOSTNAME_GTW01 }}: {{ ETH0_IP_ADDRESS_GTW01 }}
+ # virtual_{{ HOSTNAME_GTW02 }}: {{ ETH0_IP_ADDRESS_GTW02 }}
+ #ip_ranges:
+ # dhcp: [+2, -4]
+ private-pool01:
+ net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.167.8.0/24:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+
+ tenant-pool01:
+ net: {{ os_env('TENANT_ADDRESS_POOL01', '10.167.10.0/24:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+
+ external-pool01:
+ net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '172.17.42.192/26:26') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: -2
+
+ groups:
+
+ - name: virtual
+ driver:
+ name: devops.driver.libvirt
+ params:
+ connection_string: !os_env CONNECTION_STRING, qemu:///system
+ storage_pool_name: !os_env STORAGE_POOL_NAME, default
+ stp: False
+ hpet: False
+ enable_acpi: true
+ use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
+
+ network_pools:
+ admin: admin-pool01
+
+ l2_network_devices:
+ # Ironic management interface
+ admin:
+ address_pool: admin-pool01
+ dhcp: false
+ parent_iface:
+ phys_dev: !os_env IRONIC_LAB_PXE_IFACE_0
+
+ group_volumes:
+    - name: cloudimage1604  # This name is used as the 'backing_store' option for node volumes.
+ source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img or
+ # http://apt.tcpcloud.eu/images/ubuntu-16-04-x64-201608231004.qcow2
+ format: qcow2
+
+ nodes:
+ - name: {{ HOSTNAME_CFG01 }}
+ role: salt_master
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 4
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                     # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_cfg01
+
+ interfaces:
+ - label: ens3
+ l2_network_device: admin
+ interface_model: *interface_model
+ mac_address: !os_env ETH1_MAC_ADDRESS_CFG01
+ #- label: ens4
+ # l2_network_device: private
+ # interface_model: *interface_model
+ network_config:
+ ens3:
+ networks:
+ - admin
+ #ens4:
+ # networks:
+ # - private
+
+
+ - name: default
+ driver:
+ name: devops_driver_ironic
+ params:
+ os_auth_token: fake-token
+ ironic_url: !os_env IRONIC_URL # URL that will be used by fuel-devops
+                                        # to access the Ironic API
+        # Agent URLs that are accessible from the deploying node when nodes
+        # are bootstrapped with PXE. Usually the PXE/provision network address is used.
+ agent_kernel_url: !os_env IRONIC_AGENT_KERNEL_URL
+ agent_ramdisk_url: !os_env IRONIC_AGENT_RAMDISK_URL
+
+ network_pools:
+ admin: admin-pool01
+
+ nodes:
+
+ # - name: {{ HOSTNAME_CFG01 }}
+ # role: salt_master
+ # params:
+ # ipmi_user: !os_env IPMI_USER
+ # ipmi_password: !os_env IPMI_PASSWORD
+ # ipmi_previlegies: OPERATOR
+ # ipmi_host: !os_env IPMI_HOST_CFG01 # hostname or IP address
+ # ipmi_lan_interface: lanplus
+ # ipmi_port: 623
+
+ # root_volume_name: system # see 'volumes' below
+ # cloud_init_volume_name: iso # see 'volumes' below
+ # cloud_init_iface_up: enp3s0f1 # see 'interfaces' below.
+ # volumes:
+ # - name: system
+ # capacity: !os_env NODE_VOLUME_SIZE, 200
+
+    # # As with the agent URLs, this is a URL to the image that should be
+    # # used to deploy the node. It should also be accessible from the deploying
+    # # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
+ # source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+ # source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+ # - name: iso # Volume with name 'iso' will be used
+    # #                 # to store the image with cloud-init metadata.
+
+ # cloudinit_meta_data: *cloudinit_meta_data
+ # cloudinit_user_data: *cloudinit_user_data_cfg01
+
+ # interfaces:
+ # - label: enp3s0f0 # Infra interface
+ # mac_address: !os_env ETH0_MAC_ADDRESS_CFG01
+ # - label: enp3s0f1
+ # l2_network_device: admin
+ # mac_address: !os_env ETH1_MAC_ADDRESS_CFG01
+
+ # network_config:
+ # enp3s0f0:
+ # networks:
+ # - infra
+ # enp3s0f1:
+ # networks:
+ # - admin
+
+ - name: {{ HOSTNAME_KVM01 }}
+ role: salt_minion
+ params:
+ ipmi_user: !os_env IPMI_USER
+ ipmi_password: !os_env IPMI_PASSWORD
+ ipmi_previlegies: OPERATOR
+ ipmi_host: !os_env IPMI_HOST_KVM01 # hostname or IP address
+ ipmi_lan_interface: lanplus
+ ipmi_port: 623
+
+ root_volume_name: system # see 'volumes' below
+ cloud_init_volume_name: iso # see 'volumes' below
+ cloud_init_iface_up: enp2s0f0 # see 'interfaces' below.
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 200
+
+          # As with the agent URLs, this is a URL to the image that should be
+          # used to deploy the node. It should also be accessible from the deploying
+          # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
+ source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+ source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+ - name: iso # Volume with name 'iso' will be used
+                       # to store the image with cloud-init metadata.
+
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data
+
+ interfaces:
+ - label: enp2s0f0
+ l2_network_device: admin
+ mac_address: !os_env ETH0_MAC_ADDRESS_KVM01
+ - label: enp2s0f1
+ mac_address: !os_env ETH1_MAC_ADDRESS_KVM01
+
+ network_config:
+ enp2s0f0:
+ networks:
+ - admin
+ bond0:
+ networks:
+ - control
+ aggregation: active-backup
+ parents:
+ - enp2s0f1
+
+ - name: {{ HOSTNAME_KVM02 }}
+ role: salt_minion
+ params:
+ ipmi_user: !os_env IPMI_USER
+ ipmi_password: !os_env IPMI_PASSWORD
+ ipmi_previlegies: OPERATOR
+ ipmi_host: !os_env IPMI_HOST_KVM02 # hostname or IP address
+ ipmi_lan_interface: lanplus
+ ipmi_port: 623
+
+ root_volume_name: system # see 'volumes' below
+ cloud_init_volume_name: iso # see 'volumes' below
+ cloud_init_iface_up: enp2s0f0 # see 'interfaces' below.
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 200
+
+          # As with the agent URLs, this is a URL to the image that should be
+          # used to deploy the node. It should also be accessible from the deploying
+          # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
+ source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+ source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+ - name: iso # Volume with name 'iso' will be used
+                       # to store the image with cloud-init metadata.
+
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data
+
+ interfaces:
+ - label: enp2s0f0
+ l2_network_device: admin
+ mac_address: !os_env ETH0_MAC_ADDRESS_KVM02
+ - label: enp2s0f1
+ mac_address: !os_env ETH1_MAC_ADDRESS_KVM02
+
+ network_config:
+ enp2s0f0:
+ networks:
+ - admin
+ bond0:
+ networks:
+ - control
+ aggregation: active-backup
+ parents:
+ - enp2s0f1
+
+ - name: {{ HOSTNAME_KVM03 }}
+ role: salt_minion
+ params:
+ ipmi_user: !os_env IPMI_USER
+ ipmi_password: !os_env IPMI_PASSWORD
+ ipmi_previlegies: OPERATOR
+ ipmi_host: !os_env IPMI_HOST_KVM03 # hostname or IP address
+ ipmi_lan_interface: lanplus
+ ipmi_port: 623
+
+ root_volume_name: system # see 'volumes' below
+ cloud_init_volume_name: iso # see 'volumes' below
+ # cloud_init_iface_up: eno1 # see 'interfaces' below.
+ cloud_init_iface_up: enp2s0f0 # see 'interfaces' below.
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 200
+
+          # As with the agent URLs, this is a URL to the image that should be
+          # used to deploy the node. It should also be accessible from the deploying
+          # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
+ source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+ source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+ - name: iso # Volume with name 'iso' will be used
+                       # to store the image with cloud-init metadata.
+
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data
+
+ interfaces:
+ # - label: eno1
+ - label: enp2s0f0
+ l2_network_device: admin
+ mac_address: !os_env ETH0_MAC_ADDRESS_KVM03
+ # - label: eno2
+ - label: enp2s0f1
+ mac_address: !os_env ETH1_MAC_ADDRESS_KVM03
+
+ network_config:
+ # eno1:
+ enp2s0f0:
+ networks:
+ - admin
+ bond0:
+ networks:
+ - control
+ aggregation: active-backup
+ parents:
+ - enp2s0f1
+
+
+ - name: {{ HOSTNAME_CMP001 }}
+ role: salt_minion
+ params:
+ ipmi_user: !os_env IPMI_USER
+ ipmi_password: !os_env IPMI_PASSWORD
+ ipmi_previlegies: OPERATOR
+ ipmi_host: !os_env IPMI_HOST_CMP001 # hostname or IP address
+ ipmi_lan_interface: lanplus
+ ipmi_port: 623
+
+ root_volume_name: system # see 'volumes' below
+ cloud_init_volume_name: iso # see 'volumes' below
+ # cloud_init_iface_up: enp3s0f0 # see 'interfaces' below.
+ cloud_init_iface_up: enp2s0f1 # see 'interfaces' below.
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 200
+
+          # As with the agent URLs, this is a URL to the image that should be
+          # used to deploy the node. It should also be accessible from the deploying
+          # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
+ source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+ source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+ - name: iso # Volume with name 'iso' will be used
+                       # to store the image with cloud-init metadata.
+
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data
+
+ interfaces:
+ - label: enp2s0f0
+ mac_address: !os_env ETH0_MAC_ADDRESS_CMP001
+ - label: enp2s0f1
+ l2_network_device: admin
+ mac_address: !os_env ETH1_MAC_ADDRESS_CMP001
+ - label: enp5s0f0
+ mac_address: !os_env ETH2_MAC_ADDRESS_CMP001
+ features: ['dpdk', 'dpdk_pci: 0000:05:00.0']
+ - label: enp5s0f1
+ mac_address: !os_env ETH3_MAC_ADDRESS_CMP001
+ features: ['dpdk', 'dpdk_pci: 0000:05:00.1']
+ # - label: enp5s0f2
+ # mac_address: !os_env ETH4_MAC_ADDRESS_CMP001
+ # features: ['dpdk', 'dpdk_pci: 0000:05:00.2']
+
+ network_config:
+ enp2s0f0:
+ networks:
+ - admin
+ bond0:
+ networks:
+ - control
+ aggregation: active-backup
+ parents:
+ - enp5s0f0
+ - enp5s0f1
+
+
+
+ - name: {{ HOSTNAME_CMP002 }}
+ role: salt_minion
+ params:
+ ipmi_user: !os_env IPMI_USER
+ ipmi_password: !os_env IPMI_PASSWORD
+ ipmi_previlegies: OPERATOR
+ ipmi_host: !os_env IPMI_HOST_CMP002 # hostname or IP address
+ ipmi_lan_interface: lanplus
+ ipmi_port: 623
+
+ root_volume_name: system # see 'volumes' below
+ cloud_init_volume_name: iso # see 'volumes' below
+ # cloud_init_iface_up: eno1 # see 'interfaces' below.
+ cloud_init_iface_up: enp2s0f1 # see 'interfaces' below.
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 200
+
+          # As with the agent URLs, this is a URL to the image that should be
+          # used to deploy the node. It should also be accessible from the deploying
+          # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
+ source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+ source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+ - name: iso # Volume with name 'iso' will be used
+                       # to store the image with cloud-init metadata.
+
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data
+
+ interfaces:
+ # - label: eno1
+ - label: enp2s0f0
+ mac_address: !os_env ETH0_MAC_ADDRESS_CMP002
+ # - label: eth0
+ - label: enp2s0f1
+ l2_network_device: admin
+ mac_address: !os_env ETH1_MAC_ADDRESS_CMP002
+ # - label: eth3
+ - label: enp5s0f0
+ mac_address: !os_env ETH2_MAC_ADDRESS_CMP002
+ features: ['dpdk', 'dpdk_pci: 0000:05:00.0']
+ # - label: eth2
+ - label: enp5s0f1
+ mac_address: !os_env ETH3_MAC_ADDRESS_CMP002
+ features: ['dpdk', 'dpdk_pci: 0000:05:00.1']
+ # - label: eth4
+ # mac_address: !os_env ETH4_MAC_ADDRESS_CMP002
+ # features: ['dpdk', 'dpdk_pci: 0000:0b:00.0']
+
+ network_config:
+ enp2s0f1:
+ networks:
+ - admin
+ bond0:
+ networks:
+ - control
+ aggregation: active-backup
+ parents:
+ - enp5s0f0
+ - enp5s0f1
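
The address_pools entries above use relative offsets rather than literal IPs: positive values are conventionally counted from the network address of the pool and negative ones from the end of the range. A small worked example for admin-pool01 (172.16.49.64/26), under that assumption:

    import ipaddress

    # Assumed convention: '+N' is the network address plus N, '-N' counts back
    # from the broadcast address of the pool.
    net = ipaddress.ip_network("172.16.49.64/26")

    def offset(n):
        base = net.network_address if n >= 0 else net.broadcast_address
        return base + n

    print(offset(62))  # gateway           -> 172.16.49.126
    print(offset(61))  # l2_network_device -> 172.16.49.125
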
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/openstack.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/openstack.yaml
index 891fb2e..bfe5673 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/openstack.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/openstack.yaml
@@ -283,44 +283,14 @@
skip_fail: false
# Configure cinder-volume salt-call PROD-13167
-- description: Set disks 01
- cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set disks 02
- cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
- node_name: {{ HOSTNAME_CTL02 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-#- description: Set disks 03
-# cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
-# node_name: {{ HOSTNAME_CTL03 }}
-# retry: {count: 1, delay: 30}
-# skip_fail: false
-
-- description: Create partitions 01
- cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create partitions 02
- cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create partitions 03
- cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL01 }}
+- description: Create physical volumes on a second disk
+ cmd: salt 'ctl*' cmd.run 'pvcreate -y /dev/vdb'
+ node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 30}
skip_fail: false
- description: create volume_group
- cmd: salt "ctl*" cmd.run 'vgcreate cinder-volumes /dev/vdb1'
+ cmd: salt "ctl*" cmd.run 'vgcreate cinder-volumes /dev/vdb'
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 30}
skip_fail: false
@@ -349,11 +319,11 @@
retry: {count: 1, delay: 30}
skip_fail: false
-#- description: Temporary WR set enabled backends value 03
-# cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
-# node_name: {{ HOSTNAME_CTL03 }}
-# retry: {count: 1, delay: 30}
-# skip_fail: false
+- description: Temporary WR set enabled backends value 03
+ cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
+ node_name: {{ HOSTNAME_CTL03 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
- description: Install docker.io on gtw
cmd: salt-call cmd.run 'apt-get install docker.io -y'
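
Each entry in these deploy templates is a step with a description, a shell command, a target node, a retry policy, and a skip_fail flag. A rough sketch of the retry semantics, assuming a helper ssh_run(node, cmd) that returns the command's exit code (the real tcp-qa runner is more involved):

    import time

    def run_step(step, ssh_run):
        # Retry the command up to retry.count times, waiting retry.delay seconds
        # between attempts; raise only if all attempts fail and skip_fail is false.
        for _ in range(step["retry"]["count"]):
            if ssh_run(step["node_name"], step["cmd"]) == 0:
                return True
            time.sleep(step["retry"]["delay"])
        if not step.get("skip_fail", False):
            raise RuntimeError("Step failed: " + step["description"])
        return False
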
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/oss.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/oss.yaml
index 5b9b09d..33ad49b 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/oss.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/oss.yaml
@@ -53,6 +53,20 @@
retry: {count: 1, delay: 5}
skip_fail: false
+# Elasticsearch (system service)
+#-------------------------------
+- description: Setup Elasticsearch server
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 10}
+ skip_fail: false
+
+- description: Setup Elasticsearch client
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 10}
+ skip_fail: false
+
# Setup Docker Swarm
#-------------------
@@ -255,23 +269,21 @@
retry: {count: 3, delay: 10}
skip_fail: false
-# Elasticsearch
+# Elasticsearch (in container, disabled until https://mirantis.jira.com/browse/PROD-15297 is fixed)
#--------------
-
-- description: 'Waiting for Elasticsearch to come up in container...'
- cmd: timeout 30 salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' cmd.run
- 'export CICD_CONTROL_ADDRESS=$(salt-call --out=newline_values_only pillar.get _param:cluster_vip_address);
- while true; do curl -sf http://${CICD_CONTROL_ADDRESS}:9200/?pretty && break; sleep 2; done'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
-
-- description: Setup Elasticsearch
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
-
+#- description: 'Waiting for Elasticsearch to come up in container...'
+# cmd: timeout 30 salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' cmd.run
+# 'export CICD_CONTROL_ADDRESS=$(salt-call --out=newline_values_only pillar.get _param:cluster_vip_address);
+# while true; do curl -sf http://${CICD_CONTROL_ADDRESS}:9200/?pretty && break; sleep 2; done'
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 3, delay: 10}
+# skip_fail: false
+#
+#- description: Setup Elasticsearch
+# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 3, delay: 10}
+# skip_fail: false
# Generate docs
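
The salt calls above rely on two standard salt CLI features: -C enables compound targeting, so 'I@elasticsearch:server' selects minions by pillar data, and -b 1 applies the state in batches of one minion at a time. Assembling the same command line in code, for illustration:

    # Builds the command string used in the 'Setup Elasticsearch server' step above.
    target = "I@elasticsearch:server"
    cmd = " ".join([
        "salt", "--hard-crash", "--state-output=mixed", "--state-verbose=False",
        "-C", "'{}'".format(target),
        "state.sls", "elasticsearch.server", "-b", "1",
    ])
    print(cmd)
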
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt-context-cookiecutter-cicd_oss.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt-context-cookiecutter-cicd_oss.yaml
index cd2180b..f2128d9 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt-context-cookiecutter-cicd_oss.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt-context-cookiecutter-cicd_oss.yaml
@@ -53,16 +53,16 @@
infra_deploy_nic: eth0
maas_deploy_address: 10.167.4.91
maas_hostname: mas01
- infra_kvm01_control_address: 10.167.4.91
+ infra_kvm01_control_address: ${_param:cicd_control_node01_address}
infra_kvm01_deploy_address: 10.167.5.91
- infra_kvm01_hostname: kvm01
- infra_kvm02_control_address: 10.167.4.92
+ infra_kvm01_hostname: ${_param:cicd_control_node01_hostname}
+ infra_kvm02_control_address: ${_param:cicd_control_node02_address}
infra_kvm02_deploy_address: 10.167.5.92
- infra_kvm02_hostname: kvm02
- infra_kvm03_control_address: 10.167.4.93
+ infra_kvm02_hostname: ${_param:cicd_control_node02_hostname}
+ infra_kvm03_control_address: ${_param:cicd_control_node03_address}
infra_kvm03_deploy_address: 10.167.5.93
- infra_kvm03_hostname: kvm03
- infra_kvm_vip_address: 10.167.4.90
+ infra_kvm03_hostname: ${_param:cicd_control_node03_hostname}
+ infra_kvm_vip_address: ${_param:cicd_control_vip_address}
infra_primary_first_nic: eth1
infra_primary_second_nic: eth2
kubernetes_enabled: 'False'
@@ -82,14 +82,14 @@
openstack_control_node02_hostname: ctl02
openstack_control_node03_address: 10.167.4.13
openstack_control_node03_hostname: ctl03
- openstack_database_address: 10.167.4.10
- openstack_database_hostname: dbs
- openstack_database_node01_address: 10.167.4.11
- openstack_database_node01_hostname: dbs01
- openstack_database_node02_address: 10.167.4.12
- openstack_database_node02_hostname: dbs02
- openstack_database_node03_address: 10.167.4.13
- openstack_database_node03_hostname: dbs03
+ openstack_database_address: ${_param:openstack_control_address}
+ openstack_database_hostname: ${_param:openstack_control_hostname}
+ openstack_database_node01_address: ${_param:openstack_control_node01_address}
+ openstack_database_node01_hostname: ${_param:openstack_control_node01_hostname}
+ openstack_database_node02_address: ${_param:openstack_control_node02_address}
+ openstack_database_node02_hostname: ${_param:openstack_control_node02_hostname}
+ openstack_database_node03_address: ${_param:openstack_control_node03_address}
+ openstack_database_node03_hostname: ${_param:openstack_control_node03_hostname}
openstack_enabled: 'True'
openstack_gateway_node01_address: 10.167.4.224
openstack_gateway_node01_hostname: gtw01
@@ -100,35 +100,35 @@
openstack_gateway_node03_address: 10.167.4.226
openstack_gateway_node03_hostname: gtw03
openstack_gateway_node03_tenant_address: 10.167.6.8
- openstack_message_queue_address: 10.167.4.10
- openstack_message_queue_hostname: msg
- openstack_message_queue_node01_address: 10.167.4.11
- openstack_message_queue_node01_hostname: msg01
- openstack_message_queue_node02_address: 10.167.4.12
- openstack_message_queue_node02_hostname: msg02
- openstack_message_queue_node03_address: 10.167.4.13
- openstack_message_queue_node03_hostname: msg03
+ openstack_message_queue_address: ${_param:openstack_control_address}
+ openstack_message_queue_hostname: ${_param:openstack_control_hostname}
+ openstack_message_queue_node01_address: ${_param:openstack_control_node01_address}
+ openstack_message_queue_node01_hostname: ${_param:openstack_control_node01_hostname}
+ openstack_message_queue_node02_address: ${_param:openstack_control_node02_address}
+ openstack_message_queue_node02_hostname: ${_param:openstack_control_node02_hostname}
+ openstack_message_queue_node03_address: ${_param:openstack_control_node03_address}
+ openstack_message_queue_node03_hostname: ${_param:openstack_control_node03_hostname}
openstack_network_engine: ovs
openstack_nfv_dpdk_enabled: 'False'
openstack_nfv_sriov_enabled: 'False'
openstack_ovs_dvr_enabled: 'True'
openstack_neutron_qos: 'False'
openstack_ovs_encapsulation_type: vlan
- openstack_ovs_encapsulation_vlan_range: 2416:2420
+ openstack_ovs_encapsulation_vlan_range: 2418:2420
openstack_proxy_address: 10.167.4.80
openstack_proxy_hostname: prx
openstack_proxy_node01_address: 10.167.4.81
openstack_proxy_node01_hostname: prx01
openstack_proxy_node02_address: 10.167.4.82
openstack_proxy_node02_hostname: prx02
- openstack_telemetry_address: 10.167.4.10
- openstack_telemetry_hostname: mdb
- openstack_telemetry_node01_address: 10.167.4.11
- openstack_telemetry_node01_hostname: mdb01
- openstack_telemetry_node02_address: 10.167.4.12
- openstack_telemetry_node02_hostname: mdb02
- openstack_telemetry_node03_address: 10.167.4.13
- openstack_telemetry_node03_hostname: mdb03
+ openstack_telemetry_address: ${_param:openstack_control_address}
+ openstack_telemetry_hostname: ${_param:openstack_control_hostname}
+ openstack_telemetry_node01_address: ${_param:openstack_control_node01_address}
+ openstack_telemetry_node01_hostname: ${_param:openstack_control_node01_hostname}
+ openstack_telemetry_node02_address: ${_param:openstack_control_node02_address}
+ openstack_telemetry_node02_hostname: ${_param:openstack_control_node02_hostname}
+ openstack_telemetry_node03_address: ${_param:openstack_control_node03_address}
+ openstack_telemetry_node03_hostname: ${_param:openstack_control_node03_hostname}
openstack_benchmark_node01_hostname: bmk01
openstack_benchmark_node01_address: 10.167.4.85
openstack_version: ocata
@@ -139,14 +139,6 @@
salt_master_management_address: 10.167.5.15
stacklight_enabled: 'True'
stacklight_version: '2'
- stacklight_log_address: 10.167.4.70
- stacklight_log_hostname: log
- stacklight_log_node01_address: 10.167.4.71
- stacklight_log_node01_hostname: log01
- stacklight_log_node02_address: 10.167.4.72
- stacklight_log_node02_hostname: log02
- stacklight_log_node03_address: 10.167.4.73
- stacklight_log_node03_hostname: log03
stacklight_monitor_address: 10.167.4.70
stacklight_monitor_hostname: mon
stacklight_monitor_node01_address: 10.167.4.71
@@ -155,14 +147,22 @@
stacklight_monitor_node02_hostname: mon02
stacklight_monitor_node03_address: 10.167.4.73
stacklight_monitor_node03_hostname: mon03
- stacklight_telemetry_address: 10.167.4.70
- stacklight_telemetry_hostname: mtr
- stacklight_telemetry_node01_address: 10.167.4.71
- stacklight_telemetry_node01_hostname: mtr01
- stacklight_telemetry_node02_address: 10.167.4.72
- stacklight_telemetry_node02_hostname: mtr02
- stacklight_telemetry_node03_address: 10.167.4.73
- stacklight_telemetry_node03_hostname: mtr03
+ stacklight_log_address: ${_param:stacklight_monitor_address}
+ stacklight_log_hostname: ${_param:stacklight_monitor_hostname}
+ stacklight_log_node01_address: ${_param:stacklight_monitor_node01_address}
+ stacklight_log_node01_hostname: ${_param:stacklight_monitor_node01_hostname}
+ stacklight_log_node02_address: ${_param:stacklight_monitor_node02_address}
+ stacklight_log_node02_hostname: ${_param:stacklight_monitor_node02_hostname}
+ stacklight_log_node03_address: ${_param:stacklight_monitor_node03_address}
+ stacklight_log_node03_hostname: ${_param:stacklight_monitor_node03_hostname}
+ stacklight_telemetry_address: ${_param:stacklight_monitor_address}
+ stacklight_telemetry_hostname: ${_param:stacklight_monitor_hostname}
+ stacklight_telemetry_node01_address: ${_param:stacklight_monitor_node01_address}
+ stacklight_telemetry_node01_hostname: ${_param:stacklight_monitor_node01_hostname}
+ stacklight_telemetry_node02_address: ${_param:stacklight_monitor_node02_address}
+ stacklight_telemetry_node02_hostname: ${_param:stacklight_monitor_node02_hostname}
+ stacklight_telemetry_node03_address: ${_param:stacklight_monitor_node03_address}
+ stacklight_telemetry_node03_hostname: ${_param:stacklight_monitor_node03_hostname}
tenant_network_gateway: ''
tenant_network_netmask: 255.255.255.0
tenant_vlan: '20'
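
The edits above replace duplicated literal addresses and hostnames with reclass-style ${_param:...} references, so the database, message-queue, telemetry, and log roles all resolve to the control and monitor nodes from a single source of truth. A toy resolver to show the effect (real reclass interpolation is recursive and scope-aware):

    import re

    params = {
        "openstack_control_node01_address": "10.167.4.11",
        "openstack_database_node01_address": "${_param:openstack_control_node01_address}",
    }

    def resolve(value, params):
        # Replace each ${_param:key} reference with the referenced value.
        return re.sub(r"\$\{_param:([^}]+)\}", lambda m: params[m.group(1)], value)

    print(resolve(params["openstack_database_node01_address"], params))  # 10.167.4.11
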
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay--user-data-cfg01.yaml
index dff52d9..51d430d 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay--user-data-cfg01.yaml
@@ -40,7 +40,7 @@
#- sudo ifup ens4
# Create swap
- - fallocate -l 4G /swapfile
+ - fallocate -l 16G /swapfile
- chmod 600 /swapfile
- mkswap /swapfile
- swapon /swapfile
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay--user-data1604.yaml
index 677c392..a5f6916 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay--user-data1604.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay--user-data1604.yaml
@@ -42,7 +42,7 @@
#- sudo ifup ens4
# Create swap
- - fallocate -l 4G /swapfile
+ - fallocate -l 16G /swapfile
- chmod 600 /swapfile
- mkswap /swapfile
- swapon /swapfile
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay.yaml
index 4cf97fa..9893151 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay.yaml
@@ -20,7 +20,6 @@
{% import 'cookied-mcp-ocata-dop-sl2/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
{% import 'cookied-mcp-ocata-dop-sl2/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'cookied-mcp-ocata-dop-sl2/underlay--user-data-cicd.yaml' as CLOUDINIT_USER_DATA_CICD with context %}
{% import 'cookied-mcp-ocata-dop-sl2/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
---
@@ -28,7 +27,6 @@
- &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data_cicd {{ CLOUDINIT_USER_DATA_CICD }}
- &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
template:
@@ -174,13 +172,13 @@
driver:
name: devops.driver.libvirt
params:
- connection_string: !os_env CONNECTION_STRING, qemu:///system
- storage_pool_name: !os_env STORAGE_POOL_NAME, default
+ connection_string: {{ os_env('CONNECTION_STRING', 'qemu:///system') }}
+ storage_pool_name: {{ os_env('STORAGE_POOL_NAME', 'default') }}
stp: False
hpet: False
enable_acpi: true
- use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
- use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
+ use_host_cpu: {{ os_env('DRIVER_USE_HOST_CPU', true) }}
+ use_hugepages: {{ os_env('DRIVER_USE_HUGEPAGES', false) }}
network_pools:
admin: admin-pool01
@@ -212,7 +210,7 @@
group_volumes:
- name: cloudimage1604 # This name is used for 'backing_store' option for node volumes.
- source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img or
+ source_image: {{ os_env('IMAGE_PATH1604') }} # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img or
# http://apt.tcpcloud.eu/images/ubuntu-16-04-x64-201608231004.qcow2
format: qcow2
@@ -220,15 +218,15 @@
- name: {{ HOSTNAME_CFG01 }}
role: salt_master
params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 3072
+ vcpu: {{ os_env('CFG_NODE_CPU', 2) }}
+ memory: {{ os_env('CFG_NODE_MEMORY', 4096) }}
boot:
- hd
cloud_init_volume_name: iso
cloud_init_iface_up: ens3
volumes:
- name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
+ capacity: {{ os_env('CFG_NODE_VOLUME_SIZE', 150) }}
backing_store: cloudimage1604
format: qcow2
- name: iso # Volume with name 'iso' will be used
@@ -258,15 +256,15 @@
- name: {{ HOSTNAME_CID01 }}
role: salt_minion
params:
- vcpu: !os_env SLAVE_NODE_CPU, 4
- memory: !os_env SLAVE_NODE_MEMORY, 4096
+ vcpu: {{ os_env('CID_NODE_CPU', 3) }}
+ memory: {{ os_env('CID_NODE_MEMORY', 12288) }}
boot:
- hd
cloud_init_volume_name: iso
cloud_init_iface_up: ens3
volumes:
- name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
+ capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
backing_store: cloudimage1604
format: qcow2
- name: iso # Volume with name 'iso' will be used
@@ -296,15 +294,15 @@
- name: {{ HOSTNAME_CID02 }}
role: salt_minion
params:
- vcpu: !os_env SLAVE_NODE_CPU, 4
- memory: !os_env SLAVE_NODE_MEMORY, 4096
+ vcpu: {{ os_env('CID_NODE_CPU', 3) }}
+ memory: {{ os_env('CID_NODE_MEMORY', 12288) }}
boot:
- hd
cloud_init_volume_name: iso
cloud_init_iface_up: ens3
volumes:
- name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
+ capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
backing_store: cloudimage1604
format: qcow2
- name: iso # Volume with name 'iso' will be used
@@ -322,15 +320,15 @@
- name: {{ HOSTNAME_CID03 }}
role: salt_minion
params:
- vcpu: !os_env SLAVE_NODE_CPU, 4
- memory: !os_env SLAVE_NODE_MEMORY, 6144
+ vcpu: {{ os_env('CID_NODE_CPU', 3) }}
+ memory: {{ os_env('CID_NODE_MEMORY', 12288) }}
boot:
- hd
cloud_init_volume_name: iso
cloud_init_iface_up: ens3
volumes:
- name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
+ capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
backing_store: cloudimage1604
format: qcow2
- name: iso # Volume with name 'iso' will be used
@@ -348,15 +346,15 @@
- name: {{ HOSTNAME_CTL01 }}
role: salt_minion
params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 6144
+ vcpu: {{ os_env('CTL_NODE_CPU', 3) }}
+ memory: {{ os_env('CTL_NODE_MEMORY', 12288) }}
boot:
- hd
cloud_init_volume_name: iso
cloud_init_iface_up: ens3
volumes:
- name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
+ capacity: {{ os_env('CTL_NODE_VOLUME_SIZE', 150) }}
backing_store: cloudimage1604
format: qcow2
- name: cinder
@@ -377,15 +375,15 @@
- name: {{ HOSTNAME_CTL02 }}
role: salt_minion
params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 6144
+ vcpu: {{ os_env('CTL_NODE_CPU', 3) }}
+ memory: {{ os_env('CTL_NODE_MEMORY', 12288) }}
boot:
- hd
cloud_init_volume_name: iso
cloud_init_iface_up: ens3
volumes:
- name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
+ capacity: {{ os_env('CTL_NODE_VOLUME_SIZE', 150) }}
backing_store: cloudimage1604
format: qcow2
- name: cinder
@@ -406,15 +404,15 @@
- name: {{ HOSTNAME_CTL03 }}
role: salt_minion
params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 6144
+ vcpu: {{ os_env('CTL_NODE_CPU', 3) }}
+ memory: {{ os_env('CTL_NODE_MEMORY', 12288) }}
boot:
- hd
cloud_init_volume_name: iso
cloud_init_iface_up: ens3
volumes:
- name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
+ capacity: {{ os_env('CTL_NODE_VOLUME_SIZE', 150) }}
backing_store: cloudimage1604
format: qcow2
- name: cinder
@@ -435,15 +433,15 @@
- name: {{ HOSTNAME_MON01 }}
role: salt_minion
params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
+ vcpu: {{ os_env('MON_NODE_CPU', 2) }}
+ memory: {{ os_env('MON_NODE_MEMORY', 12288) }}
boot:
- hd
cloud_init_volume_name: iso
cloud_init_iface_up: ens3
volumes:
- name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
+ capacity: {{ os_env('MON_NODE_VOLUME_SIZE', 150) }}
backing_store: cloudimage1604
format: qcow2
- name: cinder
@@ -464,15 +462,15 @@
- name: {{ HOSTNAME_MON02 }}
role: salt_minion
params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 6144
+ vcpu: {{ os_env('MON_NODE_CPU', 2) }}
+ memory: {{ os_env('MON_NODE_MEMORY', 12288) }}
boot:
- hd
cloud_init_volume_name: iso
cloud_init_iface_up: ens3
volumes:
- name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
+ capacity: {{ os_env('MON_NODE_VOLUME_SIZE', 150) }}
backing_store: cloudimage1604
format: qcow2
- name: cinder
@@ -493,15 +491,15 @@
- name: {{ HOSTNAME_MON03 }}
role: salt_minion
params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
+ vcpu: {{ os_env('MON_NODE_CPU', 2) }}
+ memory: {{ os_env('MON_NODE_MEMORY', 12288) }}
boot:
- hd
cloud_init_volume_name: iso
cloud_init_iface_up: ens3
volumes:
- name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
+ capacity: {{ os_env('MON_NODE_VOLUME_SIZE', 150) }}
backing_store: cloudimage1604
format: qcow2
- name: cinder
@@ -522,15 +520,15 @@
- name: {{ HOSTNAME_PRX01 }}
role: salt_minion
params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
+      vcpu: {{ os_env('PRX_NODE_CPU', 1) }}
+      memory: {{ os_env('PRX_NODE_MEMORY', 4096) }}
boot:
- hd
cloud_init_volume_name: iso
cloud_init_iface_up: ens3
volumes:
- name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
+        capacity: {{ os_env('PRX_NODE_VOLUME_SIZE', 150) }}
backing_store: cloudimage1604
format: qcow2
- name: cinder
@@ -551,15 +549,15 @@
- name: {{ HOSTNAME_CMP01 }}
role: salt_minion
params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 3072
+ vcpu: {{ os_env('CMP_NODE_CPU', 2) }}
+ memory: {{ os_env('CMP_NODE_MEMORY', 3072) }}
boot:
- hd
cloud_init_volume_name: iso
cloud_init_iface_up: ens3
volumes:
- name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
+ capacity: {{ os_env('CMP_NODE_VOLUME_SIZE', 150) }}
backing_store: cloudimage1604
format: qcow2
- name: iso # Volume with name 'iso' will be used
@@ -599,28 +597,28 @@
networks:
- external
-# - name: {{ HOSTNAME_CMP02 }}
-# role: salt_minion
-# params:
-# vcpu: !os_env SLAVE_NODE_CPU, 2
-# memory: !os_env SLAVE_NODE_MEMORY, 3072
-# boot:
-# - hd
-# cloud_init_volume_name: iso
-# cloud_init_iface_up: ens3
-# volumes:
-# - name: system
-# capacity: !os_env NODE_VOLUME_SIZE, 150
-# backing_store: cloudimage1604
-# format: qcow2
-# - name: iso # Volume with name 'iso' will be used
-# # for store image with cloud-init metadata.
-# capacity: 1
-# format: raw
-# device: cdrom
-# bus: ide
-# cloudinit_meta_data: *cloudinit_meta_data
-# cloudinit_user_data: *cloudinit_user_data_1604
+ - name: {{ HOSTNAME_CMP02 }}
+ role: salt_minion
+ params:
+ vcpu: {{ os_env('CMP_NODE_CPU', 2) }}
+ memory: {{ os_env('CMP_NODE_MEMORY', 3072) }}
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: {{ os_env('CMP_NODE_VOLUME_SIZE', 150) }}
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                       # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
interfaces: *all_interfaces
network_config: *all_network_config
@@ -628,15 +626,15 @@
- name: {{ HOSTNAME_GTW01 }}
role: salt_minion
params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
+ vcpu: {{ os_env('GTW_NODE_CPU', 1) }}
+ memory: {{ os_env('GTW_NODE_MEMORY', 2048) }}
boot:
- hd
cloud_init_volume_name: iso
cloud_init_iface_up: ens3
volumes:
- name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
+ capacity: {{ os_env('GTW_NODE_VOLUME_SIZE', 150) }}
backing_store: cloudimage1604
format: qcow2
- name: iso # Volume with name 'iso' will be used
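
The underlay change above swaps the !os_env YAML tag for the os_env() Jinja helper; both read an environment variable with a default, but the Jinja form is resolved once at render time and allows per-role knobs such as CTL_NODE_MEMORY or CID_NODE_CPU. A minimal sketch of the assumed helper behaviour (the real implementation lives in the tcp-qa settings machinery and may differ in type handling):

    import os

    def os_env(name, default=None):
        # Assumed semantics: return the environment variable if set, else the default.
        return os.environ.get(name, default)

    print(os_env("CTL_NODE_MEMORY", 12288))  # -> 12288 unless CTL_NODE_MEMORY is exported
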
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/openstack.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/openstack.yaml
index 79ef4da..1a4a5f5 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/openstack.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/openstack.yaml
@@ -295,11 +295,11 @@
retry: {count: 1, delay: 30}
skip_fail: false
-#- description: Set disks 03
-# cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
-# node_name: {{ HOSTNAME_CTL03 }}
-# retry: {count: 1, delay: 30}
-# skip_fail: false
+- description: Set disks 03
+ cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
+ node_name: {{ HOSTNAME_CTL03 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
- description: Create partitions 01
cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
@@ -309,13 +309,13 @@
- description: Create partitions 02
cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL01 }}
+ node_name: {{ HOSTNAME_CTL02 }}
retry: {count: 1, delay: 30}
skip_fail: false
- description: Create partitions 03
cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL01 }}
+ node_name: {{ HOSTNAME_CTL03 }}
retry: {count: 1, delay: 30}
skip_fail: false
@@ -349,11 +349,11 @@
retry: {count: 1, delay: 30}
skip_fail: false
-#- description: Temporary WR set enabled backends value 03
-# cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
-# node_name: {{ HOSTNAME_CTL03 }}
-# retry: {count: 1, delay: 30}
-# skip_fail: false
+- description: Temporary WR set enabled backends value 03
+ cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
+ node_name: {{ HOSTNAME_CTL03 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
- description: Install docker.io on gtw
cmd: salt-call cmd.run 'apt-get install docker.io -y'
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/salt-context-environment.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/salt-context-environment.yaml
index c520c5c..332473c 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/salt-context-environment.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/salt-context-environment.yaml
@@ -1,5 +1,5 @@
nodes:
- cfg01.mcp11-ovs-dpdk.local:
+ cfg01.mcp-ocata-dvr-vxlan.local:
reclass_storage_name: infra_config_node01
roles:
- infra_config
@@ -10,7 +10,7 @@
ens4:
role: single_vlan_ctl
- kvm01.mcp11-ovs-dpdk.local:
+ kvm01.mcp-ocata-dvr-vxlan.local:
reclass_storage_name: infra_kvm_node01
roles:
- infra_kvm
@@ -21,7 +21,7 @@
ens4:
role: single_vlan_ctl
- kvm02.mcp11-ovs-dpdk.local:
+ kvm02.mcp-ocata-dvr-vxlan.local:
reclass_storage_name: infra_kvm_node02
roles:
- infra_kvm
@@ -32,7 +32,7 @@
ens4:
role: single_vlan_ctl
- kvm03.mcp11-ovs-dpdk.local:
+ kvm03.mcp-ocata-dvr-vxlan.local:
reclass_storage_name: infra_kvm_node03
roles:
- infra_kvm
@@ -43,7 +43,7 @@
ens4:
role: single_vlan_ctl
- cid01.mcp11-ovs-dpdk.local:
+ cid01.mcp-ocata-dvr-vxlan.local:
reclass_storage_name: cicd_control_node01
roles:
- cicd_control_leader
@@ -54,7 +54,7 @@
ens4:
role: single_vlan_ctl
- cid02.mcp11-ovs-dpdk.local:
+ cid02.mcp-ocata-dvr-vxlan.local:
reclass_storage_name: cicd_control_node02
roles:
- cicd_control_manager
@@ -65,7 +65,7 @@
ens4:
role: single_vlan_ctl
- cid03.mcp11-ovs-dpdk.local:
+ cid03.mcp-ocata-dvr-vxlan.local:
reclass_storage_name: cicd_control_node03
roles:
- cicd_control_manager
@@ -76,7 +76,7 @@
ens4:
role: single_vlan_ctl
- ctl01.mcp11-ovs-dpdk.local:
+ ctl01.mcp-ocata-dvr-vxlan.local:
reclass_storage_name: openstack_control_node01
roles:
- openstack_control_leader
@@ -89,7 +89,7 @@
ens4:
role: single_vlan_ctl
- ctl02.mcp11-ovs-dpdk.local:
+ ctl02.mcp-ocata-dvr-vxlan.local:
reclass_storage_name: openstack_control_node02
roles:
- openstack_control
@@ -101,7 +101,7 @@
ens4:
role: single_vlan_ctl
- ctl03.mcp11-ovs-dpdk.local:
+ ctl03.mcp-ocata-dvr-vxlan.local:
reclass_storage_name: openstack_control_node03
roles:
- openstack_control
@@ -113,7 +113,7 @@
ens4:
role: single_vlan_ctl
- dbs01.mcp11-ovs-dpdk.local:
+ dbs01.mcp-ocata-dvr-vxlan.local:
reclass_storage_name: openstack_database_node01
roles:
- openstack_database_leader
@@ -125,7 +125,7 @@
ens4:
role: single_vlan_ctl
- dbs02.mcp11-ovs-dpdk.local:
+ dbs02.mcp-ocata-dvr-vxlan.local:
reclass_storage_name: openstack_database_node02
roles:
- openstack_database
@@ -137,7 +137,7 @@
ens4:
role: single_vlan_ctl
- dbs03.mcp11-ovs-dpdk.local:
+ dbs03.mcp-ocata-dvr-vxlan.local:
reclass_storage_name: openstack_database_node03
roles:
- openstack_database
@@ -149,7 +149,7 @@
ens4:
role: single_vlan_ctl
- msg01.mcp11-ovs-dpdk.local:
+ msg01.mcp-ocata-dvr-vxlan.local:
reclass_storage_name: openstack_message_queue_node01
roles:
- openstack_message_queue
@@ -160,7 +160,7 @@
ens4:
role: single_vlan_ctl
- msg02.mcp11-ovs-dpdk.local:
+ msg02.mcp-ocata-dvr-vxlan.local:
reclass_storage_name: openstack_message_queue_node02
roles:
- openstack_message_queue
@@ -171,7 +171,7 @@
ens4:
role: single_vlan_ctl
- msg03.mcp11-ovs-dpdk.local:
+ msg03.mcp-ocata-dvr-vxlan.local:
reclass_storage_name: openstack_message_queue_node03
roles:
- openstack_message_queue
@@ -182,7 +182,7 @@
ens4:
role: single_vlan_ctl
- mdb01.mcp11-ovs-dpdk.local:
+ mdb01.mcp-ocata-dvr-vxlan.local:
reclass_storage_name: openstack_telemetry_node01
roles:
- openstack_telemetry
@@ -193,7 +193,7 @@
ens4:
role: single_vlan_ctl
- mdb02.mcp11-ovs-dpdk.local:
+ mdb02.mcp-ocata-dvr-vxlan.local:
reclass_storage_name: openstack_telemetry_node02
roles:
- openstack_telemetry
@@ -204,7 +204,7 @@
ens4:
role: single_vlan_ctl
- mdb03.mcp11-ovs-dpdk.local:
+ mdb03.mcp-ocata-dvr-vxlan.local:
reclass_storage_name: openstack_telemetry_node03
roles:
- openstack_telemetry
@@ -215,7 +215,7 @@
ens4:
role: single_vlan_ctl
- prx01.mcp11-ovs-dpdk.local:
+ prx01.mcp-ocata-dvr-vxlan.local:
reclass_storage_name: openstack_proxy_node01
roles:
- openstack_proxy
@@ -226,7 +226,7 @@
ens4:
role: single_vlan_ctl
- prx02.mcp11-ovs-dpdk.local:
+ prx02.mcp-ocata-dvr-vxlan.local:
reclass_storage_name: openstack_proxy_node02
roles:
- openstack_proxy
@@ -237,7 +237,40 @@
ens4:
role: single_vlan_ctl
- mtr01.mcp11-ovs-dpdk.local:
+ mon01.mcp-ocata-dvr-vxlan.local:
+ reclass_storage_name: stacklight_server_node01
+ roles:
+ - stacklightv2_server_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_vlan_ctl
+
+ mon02.mcp-ocata-dvr-vxlan.local:
+ reclass_storage_name: stacklight_server_node02
+ roles:
+ - stacklightv2_server
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_vlan_ctl
+
+ mon03.mcp-ocata-dvr-vxlan.local:
+ reclass_storage_name: stacklight_server_node03
+ roles:
+ - stacklightv2_server
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_vlan_ctl
+
+ mtr01.mcp-ocata-dvr-vxlan.local:
reclass_storage_name: stacklight_telemetry_node01
roles:
- stacklight_telemetry_leader
@@ -248,7 +281,7 @@
ens4:
role: single_vlan_ctl
- mtr02.mcp11-ovs-dpdk.local:
+ mtr02.mcp-ocata-dvr-vxlan.local:
reclass_storage_name: stacklight_telemetry_node02
roles:
- stacklight_telemetry
@@ -259,7 +292,7 @@
ens4:
role: single_vlan_ctl
- mtr03.mcp11-ovs-dpdk.local:
+ mtr03.mcp-ocata-dvr-vxlan.local:
reclass_storage_name: stacklight_telemetry_node03
roles:
- stacklight_telemetry
@@ -270,7 +303,7 @@
ens4:
role: single_vlan_ctl
- log01.mcp11-ovs-dpdk.local:
+ log01.mcp-ocata-dvr-vxlan.local:
reclass_storage_name: stacklight_log_node01
roles:
- stacklight_log_leader_v2
@@ -281,7 +314,7 @@
ens4:
role: single_vlan_ctl
- log02.mcp11-ovs-dpdk.local:
+ log02.mcp-ocata-dvr-vxlan.local:
reclass_storage_name: stacklight_log_node02
roles:
- stacklight_log
@@ -292,7 +325,7 @@
ens4:
role: single_vlan_ctl
- log03.mcp11-ovs-dpdk.local:
+ log03.mcp-ocata-dvr-vxlan.local:
reclass_storage_name: stacklight_log_node03
roles:
- stacklight_log
@@ -304,7 +337,7 @@
role: single_vlan_ctl
# Generator-based computes. For compatibility only
- cmp<<count>>.mcp11-ovs-dpdk.local:
+ cmp<<count>>.mcp-ocata-dvr-vxlan.local:
reclass_storage_name: openstack_compute_rack01
roles:
- openstack_compute
@@ -319,7 +352,7 @@
ens6:
role: bond1_ab_ovs_floating
- gtw01.mcp11-ovs-dpdk.local:
+ gtw01.mcp-ocata-dvr-vxlan.local:
reclass_storage_name: openstack_gateway_node01
roles:
- openstack_gateway
@@ -334,7 +367,7 @@
ens6:
role: bond1_ab_ovs_floating
- gtw02.mcp11-ovs-dpdk.local:
+ gtw02.mcp-ocata-dvr-vxlan.local:
reclass_storage_name: openstack_gateway_node02
roles:
- openstack_gateway
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay--user-data-cicd.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay--user-data-cicd.yaml
deleted file mode 100644
index 3bc891d..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay--user-data-cicd.yaml
+++ /dev/null
@@ -1,94 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Enable grub menu using updated config below
- - update-grub
-
- # Prepare network connection
- # WARNING! On CID* nodes, admin network is connected to ens4, and control network to ens3 (as in the model)
- # On other nodes (cfg01 and openstack), admin network is connected to ens3, and control network to ens4
- - sudo ifup ens4
-
- # Create swap
- - fallocate -l 8G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
-
- ############## TCP Cloud cfg01 node ##################
- #- sleep 120
- - echo "Preparing base OS"
- - which wget >/dev/null || (apt-get update; apt-get install -y wget)
-
- - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
- - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
- - apt-get clean
- - eatmydata apt-get update && apt-get -y upgrade
-
- # Install common packages
- - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
- # Install salt-minion and stop it until it is configured
- - eatmydata apt-get install -y salt-minion && service salt-minion stop
-
- # Install latest kernel
- - eatmydata apt-get install -y linux-generic-hwe-16.04
-
- ########################################################
- # Node is ready, allow SSH access
- #- echo "Allow SSH access ..."
- #- sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- - reboot
- ########################################################
-
- write_files:
- - path: /etc/default/grub.d/97-enable-grub-menu.cfg
- content: |
- GRUB_RECORDFAIL_TIMEOUT=30
- GRUB_TIMEOUT=3
- GRUB_TIMEOUT_STYLE=menu
-
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet manual
- auto ens4
- iface ens4 inet dhcp
-
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay--user-data1604.yaml
index 677c392..a5f6916 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay--user-data1604.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay--user-data1604.yaml
@@ -42,7 +42,7 @@
#- sudo ifup ens4
# Create swap
- - fallocate -l 4G /swapfile
+ - fallocate -l 16G /swapfile
- chmod 600 /swapfile
- mkswap /swapfile
- swapon /swapfile
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay.yaml
index 35309f0..823002f 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay.yaml
@@ -4,21 +4,38 @@
{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-mcp-ocata-dvr-vxlan') %}
{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CID = os_env('HOSTNAME_CID', 'cid.' + DOMAIN_NAME) %}
{% set HOSTNAME_CID01 = os_env('HOSTNAME_CID01', 'cid01.' + DOMAIN_NAME) %}
{% set HOSTNAME_CID02 = os_env('HOSTNAME_CID02', 'cid02.' + DOMAIN_NAME) %}
{% set HOSTNAME_CID03 = os_env('HOSTNAME_CID03', 'cid03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM = os_env('HOSTNAME_KVM', 'kvm.' + DOMAIN_NAME) %}
{% set HOSTNAME_KVM01 = os_env('HOSTNAME_KVM01', 'kvm01.' + DOMAIN_NAME) %}
{% set HOSTNAME_KVM02 = os_env('HOSTNAME_KVM02', 'kvm02.' + DOMAIN_NAME) %}
{% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM03', 'kvm03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL = os_env('HOSTNAME_CTL', 'ctl.' + DOMAIN_NAME) %}
{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_DBS = os_env('HOSTNAME_DBS', 'dbs.' + DOMAIN_NAME) %}
{% set HOSTNAME_DBS01 = os_env('HOSTNAME_DBS01', 'dbs01.' + DOMAIN_NAME) %}
{% set HOSTNAME_DBS02 = os_env('HOSTNAME_DBS02', 'dbs02.' + DOMAIN_NAME) %}
{% set HOSTNAME_DBS03 = os_env('HOSTNAME_DBS03', 'dbs03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MSG = os_env('HOSTNAME_MSG', 'msg.' + DOMAIN_NAME) %}
{% set HOSTNAME_MSG01 = os_env('HOSTNAME_MSG01', 'msg01.' + DOMAIN_NAME) %}
{% set HOSTNAME_MSG02 = os_env('HOSTNAME_MSG02', 'msg02.' + DOMAIN_NAME) %}
{% set HOSTNAME_MSG03 = os_env('HOSTNAME_MSG03', 'msg03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON = os_env('HOSTNAME_MON', 'mon.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_LOG = os_env('HOSTNAME_LOG', 'log.' + DOMAIN_NAME) %}
+{% set HOSTNAME_LOG01 = os_env('HOSTNAME_LOG01', 'log01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_LOG02 = os_env('HOSTNAME_LOG02', 'log02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_LOG03 = os_env('HOSTNAME_LOG03', 'log03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MTR = os_env('HOSTNAME_MTR', 'mtr.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MTR01 = os_env('HOSTNAME_MTR01', 'mtr01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MTR02 = os_env('HOSTNAME_MTR02', 'mtr02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MTR03 = os_env('HOSTNAME_MTR03', 'mtr03.' + DOMAIN_NAME) %}
{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp001.' + DOMAIN_NAME) %}
{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp002.' + DOMAIN_NAME) %}
{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
@@ -26,7 +43,6 @@
{% import 'cookied-mcp-ocata-dvr-vxlan/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
{% import 'cookied-mcp-ocata-dvr-vxlan/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'cookied-mcp-ocata-dvr-vxlan/underlay--user-data-cicd.yaml' as CLOUDINIT_USER_DATA_CICD with context %}
{% import 'cookied-mcp-ocata-dvr-vxlan/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
---
@@ -34,7 +50,6 @@
- &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data_cicd {{ CLOUDINIT_USER_DATA_CICD }}
- &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
template:
@@ -84,6 +99,10 @@
default_{{ HOSTNAME_MDB03 }}: +78
default_{{ HOSTNAME_BMK01 }}: +85
+ default_{{ HOSTNAME_MON }}: +70
+ default_{{ HOSTNAME_MON01 }}: +71
+ default_{{ HOSTNAME_MON02 }}: +72
+ default_{{ HOSTNAME_MON03 }}: +73
default_{{ HOSTNAME_LOG }}: +60
default_{{ HOSTNAME_LOG01 }}: +61
default_{{ HOSTNAME_LOG02 }}: +62
@@ -140,6 +159,10 @@
default_{{ HOSTNAME_MDB03 }}: +78
default_{{ HOSTNAME_BMK01 }}: +85
+ default_{{ HOSTNAME_MON }}: +70
+ default_{{ HOSTNAME_MON01 }}: +71
+ default_{{ HOSTNAME_MON02 }}: +72
+ default_{{ HOSTNAME_MON03 }}: +73
default_{{ HOSTNAME_LOG }}: +60
default_{{ HOSTNAME_LOG01 }}: +61
default_{{ HOSTNAME_LOG02 }}: +62
@@ -197,6 +220,10 @@
default_{{ HOSTNAME_MDB03 }}: +78
default_{{ HOSTNAME_BMK01 }}: +85
+ default_{{ HOSTNAME_MON }}: +70
+ default_{{ HOSTNAME_MON01 }}: +71
+ default_{{ HOSTNAME_MON02 }}: +72
+ default_{{ HOSTNAME_MON03 }}: +73
default_{{ HOSTNAME_LOG }}: +60
default_{{ HOSTNAME_LOG01 }}: +61
default_{{ HOSTNAME_LOG02 }}: +62
@@ -253,6 +280,10 @@
default_{{ HOSTNAME_MDB03 }}: +78
default_{{ HOSTNAME_BMK01 }}: +85
+ default_{{ HOSTNAME_MON }}: +70
+ default_{{ HOSTNAME_MON01 }}: +71
+ default_{{ HOSTNAME_MON02 }}: +72
+ default_{{ HOSTNAME_MON03 }}: +73
default_{{ HOSTNAME_LOG }}: +60
default_{{ HOSTNAME_LOG01 }}: +61
default_{{ HOSTNAME_LOG02 }}: +62
@@ -392,57 +423,57 @@
networks:
- private
-# - name: {{ HOSTNAME_CID02 }}
-# role: salt_minion
-# params:
-## vcpu: !os_env SLAVE_NODE_CPU, 2
-# memory: !os_env SLAVE_NODE_MEMORY, 6144
-# boot:
-# - hd
-# cloud_init_volume_name: iso
-# cloud_init_iface_up: ens3
-# volumes:
-# - name: system
-# capacity: !os_env NODE_VOLUME_SIZE, 150
-# backing_store: cloudimage1604
-# format: qcow2
-# - name: iso # Volume with name 'iso' will be used
-# # for store image with cloud-init metadata.
-# capacity: 1
-# format: raw
-# device: cdrom
-# bus: ide
-# cloudinit_meta_data: *cloudinit_meta_data
-# cloudinit_user_data: *cloudinit_user_data_1604
-#
-# interfaces: *interfaces
-# network_config: *network_config
+ - name: {{ HOSTNAME_CID02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
-# - name: {{ HOSTNAME_CID03 }}
-# role: salt_minion
-# params:
-# vcpu: !os_env SLAVE_NODE_CPU, 2
-# memory: !os_env SLAVE_NODE_MEMORY, 6144
-# boot:
-# - hd
-# cloud_init_volume_name: iso
-# cloud_init_iface_up: ens3
-# volumes:
-# - name: system
-# capacity: !os_env NODE_VOLUME_SIZE, 150
-# backing_store: cloudimage1604
-# format: qcow2
-# - name: iso # Volume with name 'iso' will be used
-# # for store image with cloud-init metadata.
-# capacity: 1
-# format: raw
-# device: cdrom
-# bus: ide
-# cloudinit_meta_data: *cloudinit_meta_data
-# cloudinit_user_data: *cloudinit_user_data_1604
-#
-# interfaces: *interfaces
-# network_config: *network_config
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_CID03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
# KVM* nodes required for services like glusterfs.server
- name: {{ HOSTNAME_KVM01 }}
@@ -627,57 +658,57 @@
interfaces: *interfaces
network_config: *network_config
-# - name: {{ HOSTNAME_MSG02 }}
-# role: salt_minion
-# params:
-# vcpu: !os_env SLAVE_NODE_CPU, 1
-# memory: !os_env SLAVE_NODE_MEMORY, 2048
-# boot:
-# - hd
-# cloud_init_volume_name: iso
-# cloud_init_iface_up: ens3
-# volumes:
-# - name: system
-# capacity: !os_env NODE_VOLUME_SIZE, 150
-# backing_store: cloudimage1604
-# format: qcow2
-# - name: iso # Volume with name 'iso' will be used
-# # for store image with cloud-init metadata.
-# capacity: 1
-# format: raw
-# device: cdrom
-# bus: ide
-# cloudinit_meta_data: *cloudinit_meta_data
-# cloudinit_user_data: *cloudinit_user_data_1604
-#
-# interfaces: *interfaces
-# network_config: *network_config
+ - name: {{ HOSTNAME_MSG02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
-# - name: {{ HOSTNAME_MSG03 }}
-# role: salt_minion
-# params:
-# vcpu: !os_env SLAVE_NODE_CPU, 1
-# memory: !os_env SLAVE_NODE_MEMORY, 2048
-# boot:
-# - hd
-# cloud_init_volume_name: iso
-# cloud_init_iface_up: ens3
-# volumes:
-# - name: system
-# capacity: !os_env NODE_VOLUME_SIZE, 150
-# backing_store: cloudimage1604
-# format: qcow2
-# - name: iso # Volume with name 'iso' will be used
-# # for store image with cloud-init metadata.
-# capacity: 1
-# format: raw
-# device: cdrom
-# bus: ide
-# cloudinit_meta_data: *cloudinit_meta_data
-# cloudinit_user_data: *cloudinit_user_data_1604
-#
-# interfaces: *interfaces
-# network_config: *network_config
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MSG03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
- name: {{ HOSTNAME_CTL01 }}
role: salt_minion
@@ -737,34 +768,268 @@
interfaces: *interfaces
network_config: *network_config
-# - name: {{ HOSTNAME_CTL03 }}
-# role: salt_minion
-# params:
-# vcpu: !os_env SLAVE_NODE_CPU, 2
-# memory: !os_env SLAVE_NODE_MEMORY, 6144
-# boot:
-# - hd
-# cloud_init_volume_name: iso
-# cloud_init_iface_up: ens3
-# volumes:
-# - name: system
-# capacity: !os_env NODE_VOLUME_SIZE, 150
-# backing_store: cloudimage1604
-# format: qcow2
-# - name: cinder
-# capacity: 50
-# format: qcow2
-# - name: iso # Volume with name 'iso' will be used
-# # for store image with cloud-init metadata.
-# capacity: 1
-# format: raw
-# device: cdrom
-# bus: ide
-# cloudinit_meta_data: *cloudinit_meta_data
-# cloudinit_user_data: *cloudinit_user_data_1604
-#
-# interfaces: *interfaces
-# network_config: *network_config
+ - name: {{ HOSTNAME_CTL03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 6144
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MON01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 6144
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MON02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 6144
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MON03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 6144
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_LOG01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_LOG02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_LOG03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MTR01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MTR02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MTR03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
- name: {{ HOSTNAME_PRX01 }}
role: salt_minion
diff --git a/tcp_tests/templates/shared-salt.yaml b/tcp_tests/templates/shared-salt.yaml
index eb108c9..5b27e3a 100644
--- a/tcp_tests/templates/shared-salt.yaml
+++ b/tcp_tests/templates/shared-salt.yaml
@@ -170,7 +170,7 @@
{%- endmacro %}
-{%- macro MACRO_GENERATE_COOKIECUTTER_MODEL(IS_CONTRAIL_LAB=false) %}
+{%- macro MACRO_GENERATE_COOKIECUTTER_MODEL(IS_CONTRAIL_LAB=false, CONTROL_VLAN=None, TENANT_VLAN=None) %}
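+{#- A minimal usage sketch (hypothetical caller; assumes this file is imported as SHARED):
+      {{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL(CONTROL_VLAN='2416', TENANT_VLAN='2417') }}
+    If CONTROL_VLAN or TENANT_VLAN is not passed, the corresponding value already present
+    in the uploaded cookiecutter context is left untouched. #}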
{###################################################################}
{%- set CLUSTER_CONTEXT_PATH = '/tmp/' + CLUSTER_CONTEXT_NAME %}
- description: "[EXPERIMENTAL] Upload cookiecutter-templates context to {{ HOSTNAME_CFG01 }}"
@@ -194,12 +194,16 @@
# Override some context parameters
sed -i 's/cluster_name:.*/cluster_name: {{ LAB_CONFIG_NAME }}/g' {{ CLUSTER_CONTEXT_PATH }}
sed -i 's/cluster_domain:.*/cluster_domain: {{ DOMAIN_NAME }}/g' {{ CLUSTER_CONTEXT_PATH }}
- sed -i 's/control_vlan:.*/control_vlan: \"2416\"/g' {{ CLUSTER_CONTEXT_PATH }}
- sed -i 's/tenant_vlan:.*/tenant_vlan: \"2417\"/g' {{ CLUSTER_CONTEXT_PATH }}
+ {%- if CONTROL_VLAN %}
+ sed -i 's/control_vlan:.*/control_vlan: {{ CONTROL_VLAN }}/g' {{ CLUSTER_CONTEXT_PATH }}
+ {%- endif %}
+ {%- if TENANT_VLAN %}
+ sed -i 's/tenant_vlan:.*/tenant_vlan: {{ TENANT_VLAN }}/g' {{ CLUSTER_CONTEXT_PATH }}
+ {%- endif %}
# Temporary workaround (with hardcoded address .90 -> .15) of bug https://mirantis.jira.com/browse/PROD-14377
- sed -i 's/salt_master_address:.*/salt_master_address: {{ IPV4_NET_CONTROL_PREFIX }}.15/g' {{ CLUSTER_CONTEXT_PATH }}
- sed -i 's/salt_master_management_address:.*/salt_master_management_address: {{ IPV4_NET_ADMIN_PREFIX }}.15/g' {{ CLUSTER_CONTEXT_PATH }}
+ # sed -i 's/salt_master_address:.*/salt_master_address: {{ IPV4_NET_CONTROL_PREFIX }}.15/g' {{ CLUSTER_CONTEXT_PATH }}
+ # sed -i 's/salt_master_management_address:.*/salt_master_management_address: {{ IPV4_NET_ADMIN_PREFIX }}.15/g' {{ CLUSTER_CONTEXT_PATH }}
# Replace firstly to an intermediate value to avoid intersection between
# already replaced and replacing networks.
@@ -270,10 +274,11 @@
{%- macro MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() %}
{########################################################}
-- description: "[EXPERIMENTAL] Upload 'environment' to {{ HOSTNAME_CFG01 }}"
- upload:
- local_path: {{ config.salt_deploy.environment_template_dir }}
- remote_path: /tmp/environment/
+- description: "[EXPERIMENTAL] Clone 'environment-template' repository to {{ HOSTNAME_CFG01 }}"
+ cmd: |
+ set -e;
+ mkdir -p /tmp/environment/;
+ export GIT_SSL_NO_VERIFY=true; git clone https://github.com/Mirantis/environment-template /tmp/environment/environment_template
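+    # The cloned templates are consumed later by 'reclass-tools render --template-dir /tmp/environment/environment_template/'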
node_name: {{ HOSTNAME_CFG01 }}
skip_fail: false
@@ -317,8 +322,6 @@
- description: "[EXPERIMENTAL] Create environment model for virtual environment"
cmd: |
set -e;
- ln -s '/tmp/environment/environment_template/{{ '{# interfaces #}' }}' '/tmp/environment/environment_template/{{ '{{ cookiecutter._env_name }}' }}/';
- ln -s '/tmp/environment/environment_template/{{ '{# roles #}' }}' '/tmp/environment/environment_template/{{ '{{ cookiecutter._env_name }}' }}/';
reclass-tools render --template-dir /tmp/environment/environment_template/ \
--output-dir /srv/salt/reclass/classes/environment/ \
{% for ENVIRONMENT_CONTEXT_NAME in ENVIRONMENT_CONTEXT_NAMES %} --context /tmp/environment/{{ENVIRONMENT_CONTEXT_NAME}}{% endfor %} \
@@ -326,6 +329,15 @@
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
+
+- description: Modify generated model and reclass-system
+ cmd: |
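+    # Pin apt_mk_version in the generated model and reclass-system to the repository suite under test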
+ export REPLACE_DIRS="/srv/salt/reclass/classes/ /srv/salt/reclass/nodes/"
+ find ${REPLACE_DIRS} -type f -exec sed -i 's/apt_mk_version:.*/apt_mk_version: {{ REPOSITORY_SUITE }}/g' {} +
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 1}
+ skip_fail: false
+
{%- endmacro %}
@@ -457,9 +469,10 @@
- description: Replace needed formulas to desired version
cmd: |
set -e;
- {%- for formula in SALT_FORMULAS_REFS.split(' ') %}
- {% set formula_name = formula.0 %}
- {% set formula_ref = formula.1 %}
+ {%- for formula_set in SALT_FORMULAS_REFS.split(' ') %}
+ {% set formula = formula_set.split(':') %}
+ {% set formula_name = formula[0] %}
+ {% set formula_ref = formula[1] %}
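+    {#- SALT_FORMULAS_REFS is expected as space-separated "<formula_name>:<git_ref>" pairs, e.g. "linux:refs/changes/54/2354/16" (illustrative value only) #}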
{% set formula_dir = '/tmp/salt-formula-' + formula_name %}
git clone {{ SALT_FORMULAS_REPO }}/{{ formula_name }} {{ formula_dir }} &&
pushd {{ formula_dir }} &&
@@ -469,8 +482,8 @@
if [ -d "{{ formula_dir }}" ]; then
echo "Going to replace packaged formula {{ formula_name }}" &&
rm -rfv /usr/share/salt-formulas/{env,reclass/service}/{{ formula_name }} &&
- ln -v -s \"{{ formula_dir }}/{{ formula_name }}\" \"/usr/share/salt-formulas/env/{{ formula_name }}\" &&
- ln -v -s \"{{ formula_dir }}/metadata/service/\" \"/usr/share/salt-formulas/reclass/service/{{ formula_name }}\";
+ ln -v -s "{{ formula_dir }}/{{ formula_name }}" "/usr/share/salt-formulas/env/{{ formula_name }}" &&
+ ln -v -s "{{ formula_dir }}/metadata/service/" "/usr/share/salt-formulas/reclass/service/{{ formula_name }}";
else
echo "Stopped, directory /root/salt-formula-{{ formula_name }} does not exist!";
fi
@@ -600,15 +613,15 @@
{%- macro MACRO_NETWORKING_WORKAROUNDS() %}
{#########################################}
-- description: '*Workaround 1/2* of the bug PROD-9576 to get bond0-connectivity *without* rebooting nodes'
- cmd: salt-call --hard-crash --state-output=mixed --state-verbose=False cmd.run
- "mkdir -p /tmp/PROD-9576; cd /tmp/PROD-9576; git clone https://gerrit.mcp.mirantis.net/salt-formulas/linux; cd linux;
- git fetch https://gerrit.mcp.mirantis.net/salt-formulas/linux refs/changes/54/2354/16 && git checkout FETCH_HEAD;
- cp -f linux/network/interface.sls /srv/salt/env/prd/linux/network/;
- cp -f linux/map.jinja /srv/salt/env/prd/linux/;"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+#- description: '*Workaround 1/2* of the bug PROD-9576 to get bond0-connectivity *without* rebooting nodes'
+# cmd: salt-call --hard-crash --state-output=mixed --state-verbose=False cmd.run
+# "mkdir -p /tmp/PROD-9576; cd /tmp/PROD-9576; git clone https://gerrit.mcp.mirantis.net/salt-formulas/linux; cd linux;
+# git fetch https://gerrit.mcp.mirantis.net/salt-formulas/linux refs/changes/54/2354/16 && git checkout FETCH_HEAD;
+# cp -f linux/network/interface.sls /srv/salt/env/prd/linux/network/;
+# cp -f linux/map.jinja /srv/salt/env/prd/linux/;"
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 1, delay: 5}
+# skip_fail: false
- description: '*Workaround: Load bonding module before call state.linux'
cmd: salt -C "I@linux:network:interface:*:type:bond" cmd.run 'modprobe bonding'
diff --git a/tcp_tests/templates/virtual-mcp-ocata-cicd/openstack.yaml b/tcp_tests/templates/virtual-mcp-ocata-cicd/openstack.yaml
index fa6203e..4e7e234 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-cicd/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-cicd/openstack.yaml
@@ -294,12 +294,12 @@
- description: Create partitions 02
cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL01 }}
+ node_name: {{ HOSTNAME_CTL02 }}
retry: {count: 1, delay: 30}
skip_fail: false
- description: Create partitions 03
cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL01 }}
+ node_name: {{ HOSTNAME_CTL03 }}
retry: {count: 1, delay: 30}
skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/openstack.yaml b/tcp_tests/templates/virtual-mcp-ocata-dvr/openstack.yaml
index dad874b..6a5aa50 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-dvr/openstack.yaml
@@ -320,13 +320,13 @@
- description: Create partitions 02
cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL01 }}
+ node_name: {{ HOSTNAME_CTL02 }}
retry: {count: 1, delay: 30}
skip_fail: false
- description: Create partitions 03
cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL01 }}
+ node_name: {{ HOSTNAME_CTL03 }}
retry: {count: 1, delay: 30}
skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/openstack.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs/openstack.yaml
index c78e1b7..569ae2d 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ovs/openstack.yaml
@@ -302,13 +302,13 @@
- description: Create partitions 02
cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL01 }}
+ node_name: {{ HOSTNAME_CTL02 }}
retry: {count: 1, delay: 30}
skip_fail: false
- description: Create partitions 03
cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL01 }}
+ node_name: {{ HOSTNAME_CTL03 }}
retry: {count: 1, delay: 30}
skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp10-ovs/openstack.yaml b/tcp_tests/templates/virtual-mcp10-ovs/openstack.yaml
index 5b2d223..4a54200 100644
--- a/tcp_tests/templates/virtual-mcp10-ovs/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp10-ovs/openstack.yaml
@@ -270,12 +270,12 @@
- description: Create partitions 02
cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL01 }}
+ node_name: {{ HOSTNAME_CTL02 }}
retry: {count: 1, delay: 30}
skip_fail: false
- description: Create partitions 03
cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL01 }}
+ node_name: {{ HOSTNAME_CTL03 }}
retry: {count: 1, delay: 30}
skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp11-dvr/openstack.yaml b/tcp_tests/templates/virtual-mcp11-dvr/openstack.yaml
index d967df3..41ab7aa 100644
--- a/tcp_tests/templates/virtual-mcp11-dvr/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp11-dvr/openstack.yaml
@@ -288,13 +288,13 @@
- description: Create partitions 02
cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL01 }}
+ node_name: {{ HOSTNAME_CTL02 }}
retry: {count: 1, delay: 30}
skip_fail: false
- description: Create partitions 03
cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL01 }}
+ node_name: {{ HOSTNAME_CTL03 }}
retry: {count: 1, delay: 30}
skip_fail: false
@@ -350,4 +350,4 @@
cmd: scp /root/keystonercv3 gtw01:/root
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 30}
- skip_fail: false
\ No newline at end of file
+ skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp11-ovs-dpdk/openstack.yaml b/tcp_tests/templates/virtual-mcp11-ovs-dpdk/openstack.yaml
index c6dbe35..658ddd5 100644
--- a/tcp_tests/templates/virtual-mcp11-ovs-dpdk/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp11-ovs-dpdk/openstack.yaml
@@ -206,7 +206,7 @@
- description: Create net04
cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04'
+ '. /root/keystonercv3; neutron net-create net04 --provider:network_type gre'
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 30}
skip_fail: false
@@ -220,7 +220,7 @@
- description: Create router
cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-create net04_router01'
+ '. /root/keystonercv3; neutron router-create net04_router01 --ha False'
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 30}
skip_fail: false
@@ -280,12 +280,12 @@
- description: Create partitions 02
cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL01 }}
+ node_name: {{ HOSTNAME_CTL02 }}
retry: {count: 1, delay: 30}
skip_fail: false
- description: Create partitions 03
cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL01 }}
+ node_name: {{ HOSTNAME_CTL03 }}
retry: {count: 1, delay: 30}
skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp11-ovs/openstack.yaml b/tcp_tests/templates/virtual-mcp11-ovs/openstack.yaml
index 9971bd6..821e44e 100644
--- a/tcp_tests/templates/virtual-mcp11-ovs/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp11-ovs/openstack.yaml
@@ -288,13 +288,13 @@
- description: Create partitions 02
cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL01 }}
+ node_name: {{ HOSTNAME_CTL02 }}
retry: {count: 1, delay: 30}
skip_fail: false
- description: Create partitions 03
cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL01 }}
+ node_name: {{ HOSTNAME_CTL03 }}
retry: {count: 1, delay: 30}
skip_fail: false
@@ -350,4 +350,4 @@
cmd: scp /root/keystonercv3 gtw01:/root
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 30}
- skip_fail: false
\ No newline at end of file
+ skip_fail: false
diff --git a/tcp_tests/tests/system/test_failover.py b/tcp_tests/tests/system/test_failover.py
new file mode 100644
index 0000000..a8bb6b8
--- /dev/null
+++ b/tcp_tests/tests/system/test_failover.py
@@ -0,0 +1,205 @@
+# Copyright 2017 Mirantis, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import pytest
+
+from tcp_tests import logger
+
+LOG = logger.logger
+
+
+class TestFailover(object):
+ """Test class for testing OpenStack nodes failover"""
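+    # NOTE: steps 1-3 of every scenario ("Prepare salt", "Setup controller/compute
+    # nodes") are expected to be covered by the requested fixtures (underlay,
+    # openstack_deployed and, where used, sl_deployed); the test bodies only log
+    # them via show_step() before triggering the failover action under test.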
+
+ @pytest.mark.grab_versions
+ @pytest.mark.fail_snapshot
+ def test_warm_shutdown_ctl01_node(self, underlay, openstack_deployed,
+ openstack_actions, show_step):
+ """Test warm shutdown ctl01
+
+ Scenario:
+ 1. Prepare salt on hosts
+ 2. Setup controller nodes
+ 3. Setup compute nodes
+ 4. Shutdown ctl01
+ 5. Run tempest smoke after failover
+
+
+ """
+ # STEP #1,2,3
+ show_step(1)
+ show_step(2)
+ show_step(3)
+ # STEP #4
+ show_step(4)
+ openstack_actions.warm_shutdown_openstack_nodes('ctl01')
+ # STEP #5
+ show_step(5)
+ openstack_actions.run_tempest(pattern='smoke')
+
+ LOG.info("*************** DONE **************")
+
+ @pytest.mark.grab_versions
+ @pytest.mark.fail_snapshot
+ def test_restart_ctl01_node(self, underlay, openstack_deployed,
+ openstack_actions, show_step):
+ """Test restart ctl01
+
+ Scenario:
+ 1. Prepare salt on hosts
+ 2. Setup controller nodes
+ 3. Setup compute nodes
+ 4. Restart ctl01
+ 5. Run tempest smoke after failover
+
+
+ """
+ # STEP #1,2,3
+ show_step(1)
+ show_step(2)
+ show_step(3)
+
+ # STEP #4
+ show_step(4)
+ openstack_actions.warm_restart_nodes('ctl01')
+ # STEP #5
+ show_step(5)
+ openstack_actions.run_tempest(pattern='smoke')
+
+ LOG.info("*************** DONE **************")
+
+ @pytest.mark.grab_versions
+ @pytest.mark.fail_snapshot
+ def test_warm_shutdown_cmp01_node(self, underlay, openstack_deployed,
+ openstack_actions, show_step):
+ """Test warm shutdown cmp01
+
+ Scenario:
+ 1. Prepare salt on hosts
+ 2. Setup controller nodes
+ 3. Setup compute nodes
+ 4. Shutdown cmp01
+ 5. Run tempest smoke after failover
+
+
+ """
+ # STEP #1,2,3
+ show_step(1)
+ show_step(2)
+ show_step(3)
+
+ # STEP #4
+ show_step(4)
+ openstack_actions.warm_shutdown_openstack_nodes('cmp01')
+ # STEP #5
+ show_step(5)
+ openstack_actions.run_tempest(pattern='smoke')
+
+ LOG.info("*************** DONE **************")
+
+ @pytest.mark.grab_versions
+ @pytest.mark.fail_snapshot
+ def test_restart_cmp01_node(self, underlay, openstack_deployed,
+ openstack_actions, show_step):
+ """Test restart cmp01
+
+ Scenario:
+ 1. Prepare salt on hosts
+ 2. Setup controller nodes
+ 3. Setup compute nodes
+ 4. Restart cmp01
+ 5. Run tempest smoke after failover
+
+
+ """
+ # STEP #1,2,3
+ show_step(1)
+ show_step(2)
+ show_step(3)
+
+ # STEP #4
+ show_step(4)
+ openstack_actions.warm_restart_nodes('cmp01')
+ # STEP #5
+ show_step(5)
+ openstack_actions.run_tempest(pattern='smoke')
+
+ LOG.info("*************** DONE **************")
+
+ @pytest.mark.grab_versions
+ @pytest.mark.fail_snapshot
+ def test_restart_mon01_node(self, underlay, openstack_deployed,
+ openstack_actions, sl_deployed,
+ show_step):
+ """Test restart mon01
+
+ Scenario:
+ 1. Prepare salt on hosts
+ 2. Setup controller nodes
+ 3. Setup compute, monitoring nodes
+ 4. Restart mon01
+ 5. Run LMA smoke after failover
+
+
+ """
+ # STEP #1,2,3
+ show_step(1)
+ show_step(2)
+ show_step(3)
+
+ # STEP #4
+ show_step(4)
+ openstack_actions.warm_restart_nodes('mon01')
+ # STEP #5
+ show_step(5)
+        # Run SL component tests
+ sl_deployed.run_sl_functional_tests(
+ 'cfg01',
+ '/root/stacklight-pytest/stacklight_tests/',
+ 'tests/prometheus/test_smoke.py',
+ 'test_alerts.py')
+ LOG.info("*************** DONE **************")
+
+ @pytest.mark.grab_versions
+ @pytest.mark.fail_snapshot
+ def test_warm_shutdown_mon01_node(self, underlay, openstack_deployed,
+ openstack_actions, sl_deployed,
+ show_step):
+ """Test warm shutdown mon01
+
+ Scenario:
+ 1. Prepare salt on hosts
+ 2. Setup controller nodes
+ 3. Setup compute, monitoring nodes
+ 4. Shutdown mon01
+ 5. Run LMA smoke after failover
+
+
+ """
+ # STEP #1,2,3
+ show_step(1)
+ show_step(2)
+ show_step(3)
+
+ # STEP #4
+ show_step(4)
+ openstack_actions.warm_shutdown_openstack_nodes('mon01')
+ # STEP #5
+ show_step(5)
+ sl_deployed.run_sl_functional_tests(
+ 'cfg01',
+ '/root/stacklight-pytest/stacklight_tests/',
+ 'tests/prometheus/test_smoke.py',
+ 'test_alerts.py')
+ LOG.info("*************** DONE **************")
diff --git a/tcp_tests/tests/system/test_install_cookied_ocata.py b/tcp_tests/tests/system/test_install_cookied_ocata.py
index 125168d..22f4b93 100644
--- a/tcp_tests/tests/system/test_install_cookied_ocata.py
+++ b/tcp_tests/tests/system/test_install_cookied_ocata.py
@@ -23,6 +23,7 @@
class Test_Mcp11_install(object):
"""Test class for testing mcp11 vxlan deploy"""
+ @pytest.mark.grab_versions
@pytest.mark.fail_snapshot
def test_cookied_ocata_ovs_install(self, underlay, openstack_deployed,
show_step):
@@ -35,6 +36,7 @@
"""
LOG.info("*************** DONE **************")
+ @pytest.mark.grab_versions
@pytest.mark.fail_snapshot
def test_cookied_ocata_dvr_install(self, underlay, openstack_deployed,
show_step):
@@ -47,9 +49,10 @@
"""
LOG.info("*************** DONE **************")
+ @pytest.mark.grab_versions
@pytest.mark.fail_snapshot
- def test_cookied_ocata_cicd_oss_install(self, underlay, oss_deployed,
- openstack_deployed, sl_deployed,
+ def test_cookied_ocata_cicd_oss_install(self, underlay, openstack_deployed,
+ oss_deployed, sl_deployed,
show_step):
"""Test for deploying an mcp environment and check it
Scenario:
diff --git a/tcp_tests/tests/system/test_install_k8s.py b/tcp_tests/tests/system/test_install_k8s.py
index 6c926d8..7109497 100644
--- a/tcp_tests/tests/system/test_install_k8s.py
+++ b/tcp_tests/tests/system/test_install_k8s.py
@@ -24,6 +24,7 @@
class Testk8sInstall(object):
"""Test class for testing Kubernetes deploy"""
+ @pytest.mark.grab_versions
@pytest.mark.fail_snapshot
@pytest.mark.cz8116
def test_k8s_install_calico(self, config, show_step,
@@ -126,6 +127,7 @@
k8s_actions.run_conformance()
LOG.info("*************** DONE **************")
+ @pytest.mark.grab_versions
@pytest.mark.fail_snapshot
@pytest.mark.cz8115
def test_k8s_install_contrail(self, config, show_step,
@@ -186,6 +188,7 @@
k8s_actions.run_conformance()
LOG.info("*************** DONE **************")
+ @pytest.mark.grab_versions
@pytest.mark.fail_snapshot
def test_only_k8s_install(self, config, k8s_deployed, k8s_actions):
"""Test for deploying MCP environment with k8s and check it
diff --git a/tcp_tests/tests/system/test_install_mcp11_ovs_newton.py b/tcp_tests/tests/system/test_install_mcp11_ovs_newton.py
index 99f96e2..9235edb 100644
--- a/tcp_tests/tests/system/test_install_mcp11_ovs_newton.py
+++ b/tcp_tests/tests/system/test_install_mcp11_ovs_newton.py
@@ -24,6 +24,7 @@
class TestMcp11Install(object):
"""Test class for testing mcp11 vxlan deploy"""
+ @pytest.mark.grab_versions
@pytest.mark.fail_snapshot
def test_mcp11_newton_ovs_install(self, underlay, openstack_deployed,
openstack_actions, show_step):
@@ -45,6 +46,7 @@
LOG.info("*************** DONE **************")
+ @pytest.mark.grab_versions
@pytest.mark.fail_snapshot
def test_mcp11_newton_dvr_install(self, underlay, openstack_deployed,
openstack_actions, show_step):
diff --git a/tcp_tests/tests/system/test_install_mcp11_ovs_ocata.py b/tcp_tests/tests/system/test_install_mcp11_ovs_ocata.py
index 7f2daf5..9579139 100644
--- a/tcp_tests/tests/system/test_install_mcp11_ovs_ocata.py
+++ b/tcp_tests/tests/system/test_install_mcp11_ovs_ocata.py
@@ -24,6 +24,7 @@
class Test_Mcp11_install(object):
"""Test class for testing mcp11 vxlan deploy"""
+ @pytest.mark.grab_versions
@pytest.mark.fail_snapshot
@pytest.mark.cz8119
def test_mcp11_ocata_ovs_install(self, underlay,
@@ -46,6 +47,7 @@
openstack_actions.download_tempest_report()
LOG.info("*************** DONE **************")
+ @pytest.mark.grab_versions
@pytest.mark.fail_snapshot
@pytest.mark.cz8119
def test_mcp11_ocata_ovs_sl_install(self, underlay, config,
@@ -80,6 +82,7 @@
'/root/stacklight-pytest/stacklight_tests/report.xml')
LOG.info("*************** DONE **************")
+ @pytest.mark.grab_versions
@pytest.mark.fail_snapshot
@pytest.mark.cz8120
def test_mcp11_ocata_dvr_install(self,
@@ -102,6 +105,7 @@
openstack_actions.download_tempest_report()
LOG.info("*************** DONE **************")
+ @pytest.mark.grab_versions
@pytest.mark.fail_snapshot
@pytest.mark.cz8120
def test_mcp11_ocata_dvr_sl_install(self, underlay, config,
diff --git a/tcp_tests/tests/system/test_k8s_actions.py b/tcp_tests/tests/system/test_k8s_actions.py
index ae3fa89..d7ee5a1 100644
--- a/tcp_tests/tests/system/test_k8s_actions.py
+++ b/tcp_tests/tests/system/test_k8s_actions.py
@@ -23,6 +23,7 @@
class TestMCPK8sActions(object):
"""Test class for different k8s actions"""
+ @pytest.mark.grab_versions
@pytest.mark.fail_snapshot
def test_k8s_externaldns_coredns(self, show_step, config, k8s_deployed):
"""Test externaldns integration with coredns
diff --git a/tcp_tests/tests/system/test_openstack_service_policy.py b/tcp_tests/tests/system/test_openstack_service_policy.py
index bf1ebb1..3b0e611 100644
--- a/tcp_tests/tests/system/test_openstack_service_policy.py
+++ b/tcp_tests/tests/system/test_openstack_service_policy.py
@@ -243,6 +243,7 @@
"""Test class for testing OpenStack services policy"""
# https://github.com/salt-formulas/salt-formula-nova/pull/17 - Merged
+ @pytest.mark.grab_versions
@pytest.mark.fail_snapshot
def test_policy_for_nova(self, underlay, openstack_deployed, salt_actions,
show_step):
@@ -264,6 +265,7 @@
LOG.info("*************** DONE **************")
# https://github.com/salt-formulas/salt-formula-cinder/pull/13 - Merged
+ @pytest.mark.grab_versions
@pytest.mark.fail_snapshot
def test_policy_for_cinder(self, underlay, openstack_deployed,
salt_actions, show_step):
@@ -289,6 +291,7 @@
LOG.info("*************** DONE **************")
# https://github.com/salt-formulas/salt-formula-heat/pull/5 - Merged
+ @pytest.mark.grab_versions
@pytest.mark.fail_snapshot
def test_policy_for_heat(self, underlay, openstack_deployed, salt_actions,
show_step):
@@ -314,6 +317,7 @@
LOG.info("*************** DONE **************")
# https://github.com/salt-formulas/salt-formula-glance/pull/9 - Merged
+ @pytest.mark.grab_versions
@pytest.mark.fail_snapshot
def test_policy_for_glance(self, underlay, openstack_deployed,
salt_actions, show_step):
@@ -339,6 +343,7 @@
LOG.info("*************** DONE **************")
# https://github.com/salt-formulas/salt-formula-ceilometer/pull/2 - Merged
+ @pytest.mark.grab_versions
@pytest.mark.fail_snapshot
@pytest.mark.skip(reason="Skipped due no have ceilometer in environment")
def test_policy_for_ceilometer(self, underlay, openstack_deployed,
@@ -365,6 +370,7 @@
LOG.info("*************** DONE **************")
# https://github.com/salt-formulas/salt-formula-neutron/pull/8 - Merged
+ @pytest.mark.grab_versions
@pytest.mark.fail_snapshot
def test_policy_for_neutron(self, underlay, openstack_deployed,
salt_actions, show_step):
@@ -390,6 +396,7 @@
LOG.info("*************** DONE **************")
# https://github.com/salt-formulas/salt-formula-keystone/pull/11 - Merged
+ @pytest.mark.grab_versions
@pytest.mark.fail_snapshot
def test_policy_for_keystone(self, underlay, openstack_deployed,
salt_actions, show_step):
diff --git a/tcp_tests/tests/system/test_oss_install.py b/tcp_tests/tests/system/test_oss_install.py
index ec6a6fb..f62f1c8 100644
--- a/tcp_tests/tests/system/test_oss_install.py
+++ b/tcp_tests/tests/system/test_oss_install.py
@@ -22,6 +22,7 @@
class TestOSSInstaller(object):
"""Test class for testing Operational Support System Tools deployment"""
+ @pytest.mark.grab_versions
@pytest.mark.fail_snapshot
def test_oss_install_default(self, underlay, show_step,
oss_deployed, openstack_deployed,