Add 'cookied-mcp-ocata-dop-sl2' lab for DOP+SLv2
* add workflow template and fuel-devops config
* add new environment inventory file with mixed roles on nodes
* add MAKE_SNAPSHOT_STAGES (default=true); set it to false to skip
  snapshots during deploy and avoid timesync and keepalived issues
Change-Id: I29a504b783a017ef01d0b05535c3a03e7b1574c9
Reviewed-on: https://review.gerrithub.io/378776
Reviewed-by: Dennis Dmitriev <dis.xcom@gmail.com>
Tested-by: Dennis Dmitriev <dis.xcom@gmail.com>
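
To exercise the new knob, a deployment run without intermediate snapshots
could be driven roughly like this (a hedged sketch: the test selector is
hypothetical, only the environment variables come from this change):

    import os
    import subprocess

    # Hypothetical invocation: deploy the new lab without per-stage snapshots.
    env = dict(os.environ,
               LAB_CONFIG_NAME='cookied-mcp-ocata-dop-sl2',
               MAKE_SNAPSHOT_STAGES='false')  # skip snapshots between stages
    subprocess.check_call(['py.test', '-k', 'test_deploy'], env=env)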
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/_metadata_process" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/_metadata_process"
index ed7dc6e..5809398 100644
--- "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/_metadata_process"
+++ "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/_metadata_process"
@@ -1,11 +1,3 @@
-{#- Collect interface roles and params into the following dict:
- # interface_role: # filename that will be included
- # interface_name: # interface (eth0)
- # interface_param1: value # optional parameters or empty dict
- # interface_param2: value
- # ...
- #}
-
{#-
1. Check if 'local_metadata' matches something in 'global_metadata'.
If yes, fetch and process the data.
@@ -17,51 +9,116 @@
global_metadata keeps state across the nodes
local_metadata keeps state for the current node only
+
+Example of local_metadata and global_metadata runtime content:
+
+ local_metadata:
+ keepalived_vip_priority:
+ - openstack_control
+ - openstack_database
+ - openstack_message_queue
+ rabbitmq_cluster_role:
+ - openstack_message_queue
+
+ global_metadata:
+ keepalived_vip_priority: # Separate counters
+ openstack_control|openstack_database|openstack_message_queue: 254
+ cicd_control|infra_kvm: 254
+ keepalived_vip_virtual_router_id: # Common counter
+ __latest: 11
+ openstack_control|openstack_database|openstack_message_queue: 10
+ cicd_control|infra_kvm: 11
+ mysql_cluster_role:
+ openstack_database: master
#}
-{%- macro stateful_counter(counter_name, counter_start, counter_end, counter_step) %}
+{%- macro stateful_roles_check(counter_name) %}
+{#- ####################################### -#}
+
+ {#- 1. Check that there is no intersection between different groups of roles for the <counter_name> #}
+ {%- for names, counter in global_metadata.get(counter_name, {}).items() %}
+ {%- set global_roles = names.split('|') %}
+ {%- for local_counter_role_name in local_metadata.get(counter_name, []) %}
+ {%- if local_counter_role_name in global_roles %}
+ {%- set adding_names = local_metadata.get(counter_name, [])|sort|join('|') %}
+ {%- if names != adding_names %}
+ {#- Found an unexpected combination of roles; cause a template rendering exception #}
+ {%- include("======> NODE ROLES MAPPING ERROR! Please check the roles for the node '" + inventory_node_name + "' , metaparam '" + counter_name + "':\n======> Existing roles: " + names + "\n======> Adding roles: " + adding_names) %}
+ {%- endif %}
+ {%- endif %}
+ {%- endfor %}
+ {%- endfor %}
+{%- endmacro %}
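
For readers less fluent in Jinja, a rough Python equivalent of
stateful_roles_check (an illustrative sketch, not part of the change):

    def stateful_roles_check(counter_name, local_metadata, global_metadata,
                             inventory_node_name):
        """Fail when a node registers a role that already belongs to a
        different role combination for this metaparam."""
        adding = '|'.join(sorted(local_metadata.get(counter_name, [])))
        for names in global_metadata.get(counter_name, {}):
            existing = set(names.split('|'))
            if existing & set(local_metadata.get(counter_name, [])) \
                    and names != adding:
                raise ValueError(
                    "NODE ROLES MAPPING ERROR! Node '{0}', metaparam '{1}': "
                    "existing roles: {2}; adding roles: {3}".format(
                        inventory_node_name, counter_name, names, adding))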
+
+{%- macro stateful_counter(counter_name, counter_start, counter_end, counter_step, uniq_per_node=True) %}
{#- ############################################################################# -#}
{%- if counter_name in local_metadata %}
+ {{- stateful_roles_check(counter_name) }}
+
{%- if counter_name not in global_metadata %}
- {#- Set default value for <counter_name> = <counter_start> #}
- {%- set _ = global_metadata.update({counter_name: counter_start}) %}
- {%- else %}
- {#- Increment or decrement value <counter_name> #}
- {%- set _ = global_metadata.update({counter_name: global_metadata[counter_name] + counter_step}) %}
- {%- if global_metadata[counter_name] == counter_end %}
- {# Cause a jinja render exception and make visible the message with correct counter_name #}
- {%- if counter_name == 'cicd_database_id' %}
- {{ "======> VALUE_ERROR: 'cicd_database_id' is too high!"/0 }}
- {%- elif counter_name == 'opencontrail_database_id' %}
- {{ "======> VALUE_ERROR: 'opencontrail_database_id' is too high!"/0 }}
- {%- elif counter_name == 'keepalived_vip_priority' %}
- {{ "======> VALUE_ERROR: 'keepalived_vip_priority' is too low!"/0 }}
- {%- else %}
- {{ "======> VALUE_ERROR: <counter_name> is too high!"/0 }}
+ {%- set _ = global_metadata.update({counter_name: {}}) %}
+ {%- endif %}
+ {%- set counter_roles_name = local_metadata[counter_name]|sort|join('|') %}
+
+ {%- if uniq_per_node == True %}
+
+ {%- if counter_roles_name not in global_metadata[counter_name] %}
+ {#- Set default value for <counter_roles_name> = <counter_start> #}
+ {%- set _ = global_metadata[counter_name].update({counter_roles_name: counter_start}) %}
+ {%- else %}
+ {#- Increment or decrement value <counter_roles_name> #}
+ {%- set _ = global_metadata[counter_name].update({counter_roles_name: global_metadata[counter_name][counter_roles_name] + counter_step}) %}
+ {%- if global_metadata[counter_name][counter_roles_name] == counter_end %}
+ {# Cause a Jinja render exception to make the message with the correct counter_name visible #}
+ {%- include("======> VALUE_ERROR: " + counter_name + "=" + counter_end|string + " is out of bounds!") %}
{%- endif %}
{%- endif %}
+
+ {%- else %}
+
+ {%- if '__latest' not in global_metadata[counter_name] %}
+ {#- Set the value for __latest = <counter_start> #}
+ {%- set _ = global_metadata[counter_name].update({'__latest': counter_start}) %}
+ {%- endif %}
+ {%- if counter_roles_name not in global_metadata[counter_name] %}
+ {%- set _ = global_metadata[counter_name].update({'__latest': global_metadata[counter_name]['__latest'] + counter_step}) %}
+ {%- if global_metadata[counter_name]['__latest'] == counter_end %}
+ {# Cause a Jinja render exception to make the message with the correct counter_name visible #}
+ {%- include("======> VALUE_ERROR: " + counter_name + "=" + counter_end|string + " is out of bounds!") %}
+ {%- endif %}
+ {%- set _ = global_metadata[counter_name].update({counter_roles_name: global_metadata[counter_name]['__latest']}) %}
+ {%- endif %}
+
{%- endif %}
- {%- set _ = params.update({counter_name: global_metadata[counter_name]}) %}
+ {%- set _ = params.update({counter_name: global_metadata[counter_name][counter_roles_name]}) %}
{%- endif %}
{%- endmacro %}
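
The two counter modes map to plain Python roughly as follows (an
illustrative sketch of the macro's logic, not part of the change):

    def stateful_counter(name, start, end, step, uniq_per_node=True, *,
                         local_metadata, global_metadata, params):
        if name not in local_metadata:
            return
        counters = global_metadata.setdefault(name, {})
        group = '|'.join(sorted(local_metadata[name]))
        if uniq_per_node:
            # Separate counter per role combination: every node gets its
            # own value (e.g. keepalived_vip_priority: 254, 253, 252, ...).
            if group not in counters:
                counters[group] = start
            else:
                counters[group] += step
                if counters[group] == end:
                    raise ValueError('{0}={1} is out of bounds!'.format(name, end))
        else:
            # Common '__latest' counter: each *new* role combination bumps
            # it once, and all nodes of that combination share the value
            # (e.g. keepalived_vip_virtual_router_id).
            counters.setdefault('__latest', start)
            if group not in counters:
                counters['__latest'] += step
                if counters['__latest'] == end:
                    raise ValueError('{0}={1} is out of bounds!'.format(name, end))
                counters[group] = counters['__latest']
        params[name] = counters[group]

With the sample metadata above, three nodes sharing the
openstack_control|openstack_database|openstack_message_queue roles get
priorities 254, 253 and 252, while all three share one common
virtual_router_id value.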
{%- macro stateful_masterslave(masterslave_name, master_name='master', slave_name='slave') %}
{#- ##################################################################################### -#}
{%- if masterslave_name in local_metadata %}
+ {{- stateful_roles_check(masterslave_name) }}
+
{%- if masterslave_name not in global_metadata %}
- {#- Set first value <masterslave_name> = <master_name> #}
- {%- set _ = global_metadata.update({masterslave_name: master_name}) %}
- {%- else %}
- {#- Set value <masterslave_name> = <slave_name> #}
- {%- set _ = global_metadata.update({masterslave_name: slave_name}) %}
+ {%- set _ = global_metadata.update({masterslave_name: {}}) %}
{%- endif %}
- {%- set _ = params.update({masterslave_name: global_metadata[masterslave_name]}) %}
+ {%- set masterslave_roles_name = local_metadata[masterslave_name]|sort|join('|') %}
+
+ {%- if masterslave_roles_name not in global_metadata[masterslave_name] %}
+ {#- Set first value <masterslave_roles_name> = <master_name> #}
+ {%- set _ = global_metadata[masterslave_name].update({masterslave_roles_name: master_name}) %}
+ {%- else %}
+ {#- Set value <masterslave_roles_name> = <slave_name> #}
+ {%- set _ = global_metadata[masterslave_name].update({masterslave_roles_name: slave_name}) %}
+ {%- endif %}
+ {%- set _ = params.update({masterslave_name: global_metadata[masterslave_name][masterslave_roles_name]}) %}
{%- endif %}
{%- endmacro %}
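
stateful_masterslave follows the same pattern (illustrative sketch): the
first node to register a role combination becomes the master, every later
one a slave.

    def stateful_masterslave(name, master='master', slave='slave', *,
                             local_metadata, global_metadata, params):
        if name not in local_metadata:
            return
        roles = global_metadata.setdefault(name, {})
        group = '|'.join(sorted(local_metadata[name]))
        # First registrant of a role combination wins the master slot.
        roles[group] = master if group not in roles else slave
        params[name] = roles[group]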
{{- stateful_counter('cicd_database_id', counter_start=1, counter_end=255, counter_step=1) }}
{{- stateful_counter('opencontrail_database_id', counter_start=1, counter_end=255, counter_step=1) }}
{{- stateful_counter('keepalived_vip_priority', counter_start=254, counter_end=1, counter_step=-1) }}
+{{- stateful_counter('keepalived_vip_virtual_router_id', counter_start=159, counter_end=250, counter_step=1, uniq_per_node=False) }}
{{- stateful_masterslave('rabbitmq_cluster_role') }}
{{- stateful_masterslave('mysql_cluster_role') }}
-{{- stateful_masterslave('redis_cluster_role') }}
+{{- stateful_masterslave('redis_cluster_role') }}
\ No newline at end of file
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/ceph_mon" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/ceph_mon"
index 26cb72a..b07032f 100644
--- "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/ceph_mon"
+++ "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/ceph_mon"
@@ -8,4 +8,5 @@
#}
- cluster.${_param:cluster_name}.ceph.mon
{{- set_param('keepalived_vip_interface', 'br_ctl') }}
-{{- register_metaparam('keepalived_vip_priority', 'ceph_mon') }}
\ No newline at end of file
+{{- register_metaparam('keepalived_vip_priority', 'ceph_mon') }}
+{{- register_metaparam('keepalived_vip_virtual_router_id', 'ceph_mon') }}
\ No newline at end of file
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/cicd_control_leader" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/cicd_control_leader"
index 37fcbd3..8f92bdd 100644
--- "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/cicd_control_leader"
+++ "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/cicd_control_leader"
@@ -9,4 +9,5 @@
- cluster.${_param:cluster_name}.cicd.control.leader
{{- set_param('keepalived_vip_interface', 'br_ctl') }}
{{- register_metaparam('keepalived_vip_priority', 'cicd_control') }}
+{{- register_metaparam('keepalived_vip_virtual_router_id', 'cicd_control') }}
{{- register_metaparam('cicd_database_id', 'cicd_control') }}
\ No newline at end of file
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/cicd_control_manager" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/cicd_control_manager"
index b2909c5..9028ddd 100644
--- "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/cicd_control_manager"
+++ "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/cicd_control_manager"
@@ -9,4 +9,5 @@
- cluster.${_param:cluster_name}.cicd.control.manager
{{- set_param('keepalived_vip_interface', 'br_ctl') }}
{{- register_metaparam('keepalived_vip_priority', 'cicd_control') }}
+{{- register_metaparam('keepalived_vip_virtual_router_id', 'cicd_control') }}
{{- register_metaparam('cicd_database_id', 'cicd_control') }}
\ No newline at end of file
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/infra_config" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/infra_config"
index 95b64b6..0bddd76 100644
--- "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/infra_config"
+++ "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/infra_config"
@@ -8,4 +8,4 @@
#}
- cluster.${_param:cluster_name}.infra.config
- environment.{{ cookiecutter._env_name }}.reclass_datasource_local
- - environment.{{ cookiecutter._env_name }}
+ - environment.{{ cookiecutter._env_name }}
\ No newline at end of file
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/infra_kvm" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/infra_kvm"
index 6945d69..d0f8666 100644
--- "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/infra_kvm"
+++ "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/infra_kvm"
@@ -8,4 +8,5 @@
#}
- cluster.${_param:cluster_name}.infra.kvm
{{- set_param('keepalived_vip_interface', 'br_ctl') }}
-{{- register_metaparam('keepalived_vip_priority', 'infra_kvm') }}
\ No newline at end of file
+{{- register_metaparam('keepalived_vip_priority', 'infra_kvm') }}
+{{- register_metaparam('keepalived_vip_virtual_router_id', 'infra_kvm') }}
\ No newline at end of file
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/infra_proxy" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/infra_proxy"
index 0386173..1bd8693 100644
--- "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/infra_proxy"
+++ "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/infra_proxy"
@@ -9,4 +9,5 @@
- cluster.${_param:cluster_name}.openstack.proxy
- cluster.${_param:cluster_name}.stacklight.proxy
{{- set_param('keepalived_vip_interface', 'br_ctl') }}
-{{- register_metaparam('keepalived_vip_priority', 'infra_proxy') }}
\ No newline at end of file
+{{- register_metaparam('keepalived_vip_priority', 'infra_proxy') }}
+{{- register_metaparam('keepalived_vip_virtual_router_id', 'infra_proxy') }}
\ No newline at end of file
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/kubernetes_control" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/kubernetes_control"
index e8b9db7..0af2378 100644
--- "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/kubernetes_control"
+++ "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/kubernetes_control"
@@ -8,4 +8,5 @@
#}
- cluster.${_param:cluster_name}.kubernetes.control
{{- set_param('keepalived_vip_interface', 'ens3') }}
-{{- register_metaparam('keepalived_vip_priority', 'kubernetes_control') }}
\ No newline at end of file
+{{- register_metaparam('keepalived_vip_priority', 'kubernetes_control') }}
+{{- register_metaparam('keepalived_vip_virtual_router_id', 'kubernetes_control') }}
\ No newline at end of file
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/opencontrail_analytics" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/opencontrail_analytics"
index 140be07..c8a8921 100644
--- "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/opencontrail_analytics"
+++ "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/opencontrail_analytics"
@@ -9,4 +9,5 @@
- cluster.${_param:cluster_name}.opencontrail.analytics
{{- set_param('keepalived_vip_interface', 'br_ctl') }}
{{- register_metaparam('opencontrail_database_id', 'opencontrail_analytics') }}
-{{- register_metaparam('keepalived_vip_priority', 'opencontrail_analytics') }}
\ No newline at end of file
+{{- register_metaparam('keepalived_vip_priority', 'opencontrail_analytics') }}
+{{- register_metaparam('keepalived_vip_virtual_router_id', 'opencontrail_analytics') }}
\ No newline at end of file
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/opencontrail_control" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/opencontrail_control"
index 4684427..e74a9cf 100644
--- "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/opencontrail_control"
+++ "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/opencontrail_control"
@@ -9,4 +9,5 @@
- cluster.${_param:cluster_name}.opencontrail.control
{{- set_param('keepalived_vip_interface', 'br_ctl') }}
{{- register_metaparam('opencontrail_database_id', 'opencontrail_control') }}
-{{- register_metaparam('keepalived_vip_priority', 'opencontrail_control') }}
\ No newline at end of file
+{{- register_metaparam('keepalived_vip_priority', 'opencontrail_control') }}
+{{- register_metaparam('keepalived_vip_virtual_router_id', 'opencontrail_control') }}
\ No newline at end of file
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/opencontrail_tor" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/opencontrail_tor"
index 0af03a4..d42a19a 100644
--- "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/opencontrail_tor"
+++ "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/opencontrail_tor"
@@ -8,4 +8,5 @@
#}
- cluster.${_param:cluster_name}.opencontrail.tor
{{- set_param('keepalived_vip_interface', 'br_ctl') }}
-{{- register_metaparam('keepalived_vip_priority', 'opencontrail_tor') }}
\ No newline at end of file
+{{- register_metaparam('keepalived_vip_priority', 'opencontrail_tor') }}
+{{- register_metaparam('keepalived_vip_virtual_router_id', 'opencontrail_tor') }}
\ No newline at end of file
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_baremetal" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_baremetal"
index 33c0294..f6739e4 100644
--- "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_baremetal"
+++ "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_baremetal"
@@ -8,4 +8,5 @@
#}
- cluster.${_param:cluster_name}.openstack.baremetal
{{- set_param('keepalived_vip_interface', 'br_ctl') }}
-{{- register_metaparam('keepalived_vip_priority', 'openstack_baremetal') }}
\ No newline at end of file
+{{- register_metaparam('keepalived_vip_priority', 'openstack_baremetal') }}
+{{- register_metaparam('keepalived_vip_virtual_router_id', 'openstack_baremetal') }}
\ No newline at end of file
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_catalog" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_catalog"
index 8d95fce..e4612af 100644
--- "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_catalog"
+++ "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_catalog"
@@ -8,4 +8,5 @@
#}
- cluster.${_param:cluster_name}.openstack.catalog
{{- set_param('keepalived_vip_interface', 'br_ctl') }}
-{{- register_metaparam('keepalived_vip_priority', 'openstack_catalog') }}
\ No newline at end of file
+{{- register_metaparam('keepalived_vip_priority', 'openstack_catalog') }}
+{{- register_metaparam('keepalived_vip_virtual_router_id', 'openstack_catalog') }}
\ No newline at end of file
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_control" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_control"
index cf49380..00a8c57 100644
--- "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_control"
+++ "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_control"
@@ -9,3 +9,4 @@
- cluster.${_param:cluster_name}.openstack.control
{{- set_param('keepalived_vip_interface', 'br_ctl') }}
{{- register_metaparam('keepalived_vip_priority', 'openstack_control') }}
+{{- register_metaparam('keepalived_vip_virtual_router_id', 'openstack_control') }}
\ No newline at end of file
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_database" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_database"
index c5594e7..5bc6aee 100644
--- "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_database"
+++ "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_database"
@@ -9,4 +9,5 @@
- cluster.${_param:cluster_name}.openstack.database
{{- set_param('keepalived_vip_interface', 'br_ctl') }}
{{- register_metaparam('mysql_cluster_role', 'openstack_database') }}
-{{- register_metaparam('keepalived_vip_priority', 'openstack_database') }}
\ No newline at end of file
+{{- register_metaparam('keepalived_vip_priority', 'openstack_database') }}
+{{- register_metaparam('keepalived_vip_virtual_router_id', 'openstack_database') }}
\ No newline at end of file
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_dns" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_dns"
index b5cc3d2..07ee063 100644
--- "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_dns"
+++ "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_dns"
@@ -8,4 +8,5 @@
#}
- cluster.${_param:cluster_name}.openstack.dns
{{- set_param('keepalived_vip_interface', 'br_ctl') }}
-{{- register_metaparam('keepalived_vip_priority', 'openstack_dns') }}
\ No newline at end of file
+{{- register_metaparam('keepalived_vip_priority', 'openstack_dns') }}
+{{- register_metaparam('keepalived_vip_virtual_router_id', 'openstack_dns') }}
\ No newline at end of file
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_message_queue" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_message_queue"
index 13bea9c..d59d3f1 100644
--- "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_message_queue"
+++ "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_message_queue"
@@ -9,4 +9,5 @@
- cluster.${_param:cluster_name}.openstack.message_queue
{{- set_param('keepalived_vip_interface', 'br_ctl') }}
{{- register_metaparam('rabbitmq_cluster_role', 'openstack_message_queue') }}
-{{- register_metaparam('keepalived_vip_priority', 'openstack_message_queue') }}
\ No newline at end of file
+{{- register_metaparam('keepalived_vip_priority', 'openstack_message_queue') }}
+{{- register_metaparam('keepalived_vip_virtual_router_id', 'openstack_message_queue') }}
\ No newline at end of file
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_proxy" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_proxy"
index 0fda65e..f342287 100644
--- "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_proxy"
+++ "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_proxy"
@@ -9,3 +9,4 @@
- cluster.${_param:cluster_name}.openstack.proxy
{{- set_param('keepalived_vip_interface', 'br_ctl') }}
{{- register_metaparam('keepalived_vip_priority', 'openstack_proxy') }}
+{{- register_metaparam('keepalived_vip_virtual_router_id', 'openstack_proxy') }}
\ No newline at end of file
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_telemetry" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_telemetry"
index 59d6343..02f8e2b 100644
--- "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_telemetry"
+++ "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/openstack_telemetry"
@@ -9,3 +9,4 @@
- cluster.${_param:cluster_name}.openstack.telemetry
{{- set_param('keepalived_vip_interface', 'br_ctl') }}
{{- register_metaparam('keepalived_vip_priority', 'openstack_telemetry') }}
+{{- register_metaparam('keepalived_vip_virtual_router_id', 'openstack_telemetry') }}
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/stacklight_log" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/stacklight_log"
index de09752..293effe 100644
--- "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/stacklight_log"
+++ "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/stacklight_log"
@@ -1,3 +1,4 @@
+{#- For StackLight v1 and v2 #}
{#-
parameters:
reclass:
@@ -9,4 +10,5 @@
- cluster.${_param:cluster_name}.stacklight.log
- cluster.${_param:cluster_name}.stacklight.log_curator
{{- set_param('keepalived_vip_interface', 'br_ctl') }}
-{{- register_metaparam('keepalived_vip_priority', 'stacklight_log') }}
\ No newline at end of file
+{{- register_metaparam('keepalived_vip_priority', 'stacklight_log') }}
+{{- register_metaparam('keepalived_vip_virtual_router_id', 'stacklight_log') }}
\ No newline at end of file
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/stacklight_log_leader" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/stacklight_log_leader_v1"
similarity index 76%
rename from "tcp_tests/environment/environment_template/\173\043 roles \043\175/stacklight_log_leader"
rename to "tcp_tests/environment/environment_template/\173\043 roles \043\175/stacklight_log_leader_v1"
index 83b59d6..da974b3 100644
--- "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/stacklight_log_leader"
+++ "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/stacklight_log_leader_v1"
@@ -1,3 +1,4 @@
+{#- For StackLight v1 only #}
{#-
parameters:
reclass:
@@ -7,5 +8,4 @@
classes:
#}
{%- include ("{# roles #}/" + 'stacklight_log') %}
- - cluster.${_param:cluster_name}.stacklight.log_curator
- system.elasticsearch.client.single
\ No newline at end of file
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/stacklight_log_leader" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/stacklight_log_leader_v2"
similarity index 75%
copy from "tcp_tests/environment/environment_template/\173\043 roles \043\175/stacklight_log_leader"
copy to "tcp_tests/environment/environment_template/\173\043 roles \043\175/stacklight_log_leader_v2"
index 83b59d6..efa482e 100644
--- "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/stacklight_log_leader"
+++ "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/stacklight_log_leader_v2"
@@ -1,3 +1,4 @@
+{#- For StackLight v2 only #}
{#-
parameters:
reclass:
@@ -7,5 +8,5 @@
classes:
#}
{%- include ("{# roles #}/" + 'stacklight_log') %}
- - cluster.${_param:cluster_name}.stacklight.log_curator
- - system.elasticsearch.client.single
\ No newline at end of file
+ - system.elasticsearch.client.single
+ - system.kibana.client.single
\ No newline at end of file
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/stacklight_monitor" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/stacklight_monitor"
index f508816..59361cd 100644
--- "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/stacklight_monitor"
+++ "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/stacklight_monitor"
@@ -1,3 +1,4 @@
+{#- For StackLight v1 only #}
{#-
parameters:
reclass:
@@ -9,4 +10,5 @@
- cluster.${_param:cluster_name}.stacklight.monitor
{{- set_param('keepalived_vip_interface', 'br_ctl') }}
{{- register_metaparam('redis_cluster_role', 'stacklight_monitor') }}
-{{- register_metaparam('keepalived_vip_priority', 'stacklight_monitor') }}
\ No newline at end of file
+{{- register_metaparam('keepalived_vip_priority', 'stacklight_monitor') }}
+{{- register_metaparam('keepalived_vip_virtual_router_id', 'stacklight_monitor') }}
\ No newline at end of file
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/stacklight_monitor_leader" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/stacklight_monitor_leader"
index ae6d3c4..8f7853d 100644
--- "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/stacklight_monitor_leader"
+++ "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/stacklight_monitor_leader"
@@ -1,3 +1,4 @@
+{#- For StackLight v1 only #}
{#-
parameters:
reclass:
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/stacklight_telemetry" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/stacklight_telemetry"
index e5b9c7b..b5b8344 100644
--- "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/stacklight_telemetry"
+++ "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/stacklight_telemetry"
@@ -1,3 +1,4 @@
+{#- For StackLight v1 and v2 #}
{#-
parameters:
reclass:
@@ -7,5 +8,8 @@
classes:
#}
- cluster.${_param:cluster_name}.stacklight.telemetry
+ - service.galera.slave.cluster
{{- set_param('keepalived_vip_interface', 'br_ctl') }}
-{{- register_metaparam('keepalived_vip_priority', 'stacklight_telemetry') }}
\ No newline at end of file
+{{- register_metaparam('keepalived_vip_priority', 'stacklight_telemetry') }}
+{{- register_metaparam('keepalived_vip_virtual_router_id', 'stacklight_telemetry') }}
+{{- register_metaparam('mysql_cluster_role', 'stacklight_telemetry') }}
\ No newline at end of file
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/stacklight_telemetry_leader" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/stacklight_telemetry_leader"
new file mode 100644
index 0000000..c439777
--- /dev/null
+++ "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/stacklight_telemetry_leader"
@@ -0,0 +1,15 @@
+{#- For StackLight v1 and v2 #}
+{#-
+parameters:
+ reclass:
+ storage:
+ node:
+ <reclass_storage_node_name>:
+ classes:
+#}
+ - cluster.${_param:cluster_name}.stacklight.telemetry
+ - service.galera.master.cluster
+{{- set_param('keepalived_vip_interface', 'br_ctl') }}
+{{- register_metaparam('keepalived_vip_priority', 'stacklight_telemetry') }}
+{{- register_metaparam('keepalived_vip_virtual_router_id', 'stacklight_telemetry') }}
+{{- register_metaparam('mysql_cluster_role', 'stacklight_telemetry') }}
\ No newline at end of file
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/stacklightv2_server" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/stacklightv2_server"
index 773dc1c..c6032d3 100644
--- "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/stacklightv2_server"
+++ "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/stacklightv2_server"
@@ -10,4 +10,5 @@
- cluster.${_param:cluster_name}.stacklight.server
{{- set_param('keepalived_vip_interface', 'br_ctl') }}
{{- register_metaparam('redis_cluster_role', 'stacklightv2_server') }}
-{{- register_metaparam('keepalived_vip_priority', 'stacklightv2_server') }}
\ No newline at end of file
+{{- register_metaparam('keepalived_vip_priority', 'stacklightv2_server') }}
+{{- register_metaparam('keepalived_vip_virtual_router_id', 'stacklightv2_server') }}
\ No newline at end of file
diff --git "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/stacklightv2_server_leader" "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/stacklightv2_server_leader"
index 7e4f4ca..a5acec2 100644
--- "a/tcp_tests/environment/environment_template/\173\043 roles \043\175/stacklightv2_server_leader"
+++ "b/tcp_tests/environment/environment_template/\173\043 roles \043\175/stacklightv2_server_leader"
@@ -6,5 +6,10 @@
<reclass_storage_node_name>:
classes:
#}
- {%- include ("{# roles #}/" + 'stacklightv2_server') %}
- - cluster.${_param:cluster_name}.stacklight.client
\ No newline at end of file
+ - system.docker.swarm.master
+ - cluster.${_param:cluster_name}.stacklight.server
+ - cluster.${_param:cluster_name}.stacklight.client
+{{- set_param('keepalived_vip_interface', 'br_ctl') }}
+{{- register_metaparam('redis_cluster_role', 'stacklightv2_server') }}
+{{- register_metaparam('keepalived_vip_priority', 'stacklightv2_server') }}
+{{- register_metaparam('keepalived_vip_virtual_router_id', 'stacklightv2_server') }}
\ No newline at end of file
diff --git a/tcp_tests/fixtures/underlay_fixtures.py b/tcp_tests/fixtures/underlay_fixtures.py
index 1e17518..4f1186b 100644
--- a/tcp_tests/fixtures/underlay_fixtures.py
+++ b/tcp_tests/fixtures/underlay_fixtures.py
@@ -136,17 +136,17 @@
and snapshot_needed:
snapshot_name = utils.extract_name_from_mark(snapshot_needed) or \
"{}_passed".format(default_snapshot_name)
- hardware.create_snapshot(snapshot_name)
+ hardware.create_snapshot(snapshot_name, force=True)
elif hasattr(request.node, 'rep_setup') and \
request.node.rep_setup.failed and fail_snapshot:
snapshot_name = "{0}_prep_failed".format(default_snapshot_name)
- hardware.create_snapshot(snapshot_name)
+ hardware.create_snapshot(snapshot_name, force=True)
elif hasattr(request.node, 'rep_call') and \
request.node.rep_call.failed and fail_snapshot:
snapshot_name = "{0}_failed".format(default_snapshot_name)
- hardware.create_snapshot(snapshot_name)
+ hardware.create_snapshot(snapshot_name, force=True)
request.addfinalizer(test_fin)
diff --git a/tcp_tests/managers/envmanager_devops.py b/tcp_tests/managers/envmanager_devops.py
index 958e7f1..c800cc2 100644
--- a/tcp_tests/managers/envmanager_devops.py
+++ b/tcp_tests/managers/envmanager_devops.py
@@ -167,7 +167,7 @@
config_ssh.append(ssh_data)
return config_ssh
- def create_snapshot(self, name, description=None):
+ def create_snapshot(self, name, description=None, force=False):
"""Create named snapshot of current env.
- Create libvirt snapshots for all nodes in the environment
@@ -175,6 +175,11 @@
:param name: string
"""
+ if not settings.MAKE_SNAPSHOT_STAGES and not force:
+ msg = ("[ SKIP snapshot '{0}' because MAKE_SNAPSHOT_STAGES=false ]"
+ " {1}".format(name, description or ''))
+ LOG.info("\n\n{0}\n{1}".format(msg, '*' * len(msg)))
+ return
msg = "[ Create snapshot '{0}' ] {1}".format(name, description or '')
LOG.info("\n\n{0}\n{1}".format(msg, '*' * len(msg)))
@@ -233,6 +238,11 @@
:param name: string
"""
+ if not settings.MAKE_SNAPSHOT_STAGES:
+ LOG.info("SKIP reverting from snapshot '{0}' "
+ "because MAKE_SNAPSHOT_STAGES=false".format(name))
+ return
+
LOG.info("Reverting from snapshot named '{0}'".format(name))
if self.__env is not None:
self.__env.revert(name=name)
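
Taken together, the intended semantics look like this (a usage sketch:
'env' stands for an environment manager instance, the snapshot names are
illustrative and the revert method name is assumed):

    # With MAKE_SNAPSHOT_STAGES=false in the environment:
    env.create_snapshot('salt_deployed')             # logged and skipped
    env.create_snapshot('tests_failed', force=True)  # still taken, for failure diagnostics
    env.revert_snapshot('salt_deployed')             # logged and skipped as well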
diff --git a/tcp_tests/managers/envmanager_empty.py b/tcp_tests/managers/envmanager_empty.py
index b9ab8e1..b543c87 100644
--- a/tcp_tests/managers/envmanager_empty.py
+++ b/tcp_tests/managers/envmanager_empty.py
@@ -54,7 +54,7 @@
raise Exception("EnvironmentManagerEmpty doesn't have SSH details. "
"Please provide SSH details in config.underlay.ssh")
- def create_snapshot(self, name, description=None):
+ def create_snapshot(self, name, **kwargs):
"""Store environmetn state into the config object
- Store the state of the environment <name> to the 'config' object
diff --git a/tcp_tests/settings.py b/tcp_tests/settings.py
index f4f7334..ef946a2 100644
--- a/tcp_tests/settings.py
+++ b/tcp_tests/settings.py
@@ -30,6 +30,8 @@
VIRTUAL_ENV = os.environ.get("VIRTUAL_ENV", None)
ENV_NAME = os.environ.get("ENV_NAME", None)
+MAKE_SNAPSHOT_STAGES = get_var_as_bool("MAKE_SNAPSHOT_STAGES", True)
+SHUTDOWN_ENV_ON_TEARDOWN = get_var_as_bool('SHUTDOWN_ENV_ON_TEARDOWN', True)
LAB_CONFIG_NAME = os.environ.get('LAB_CONFIG_NAME', 'mk22-lab-basic')
#LAB_CONFIGS_NAME = os.environ.get('LAB_NAME', 'mk22-lab-advanced')
@@ -39,8 +41,6 @@
SSH_NODE_CREDENTIALS = {"login": SSH_LOGIN,
"password": SSH_PASSWORD}
-SHUTDOWN_ENV_ON_TEARDOWN = get_var_as_bool('SHUTDOWN_ENV_ON_TEARDOWN', True)
-
# public_iface = IFACES[0]
# private_iface = IFACES[1]
IFACES = [
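
get_var_as_bool is defined elsewhere in settings.py; a plausible sketch of
its contract (an assumption, not the verbatim helper):

    import os

    _BOOLEAN_STATES = {'1': True, 'yes': True, 'true': True,
                       '0': False, 'no': False, 'false': False}

    def get_var_as_bool(name, default):
        # Assumed behavior: map common truthy/falsy strings, else default.
        value = os.environ.get(name, '')
        return _BOOLEAN_STATES.get(value.lower(), default)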
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/common-services.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/common-services.yaml
new file mode 100644
index 0000000..ea2997a
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/common-services.yaml
@@ -0,0 +1,125 @@
+{% from 'cookied-mcp-ocata-dop-sl2/underlay.yaml' import HOSTNAME_CFG01 with context %}
+
+# Install support services
+- description: Install keepalived on ctl01
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keepalived:cluster and *01*' state.sls keepalived
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: true
+
+- description: Install keepalived
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keepalived:cluster' state.sls keepalived
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: true
+
+- description: Check the OpenStack control VIP
+ cmd: |
+ OPENSTACK_CONTROL_ADDRESS=$(salt --out=newline_values_only "ctl01*" pillar.get _param:cluster_vip_address);
+ echo "_param:cluster_vip_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+
+- description: Install glusterfs
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glusterfs:server' state.sls glusterfs.server.service
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Setup glusterfs on primary controller
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check the gluster status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install RabbitMQ on ctl01
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install RabbitMQ
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@rabbitmq:server' state.sls rabbitmq
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check the rabbitmq status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install Galera on first server
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@galera:master' state.sls galera
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install Galera on other servers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@galera:slave' state.sls galera
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check mysql status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+
+- description: Install haproxy
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@haproxy:proxy' state.sls haproxy
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check haproxy status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@haproxy:proxy' service.status haproxy
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install nginx on prx nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@nginx:server' state.sls nginx
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Restart rsyslog
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@haproxy:proxy' service.restart rsyslog
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install memcached on all controllers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@memcached:server' state.sls memcached
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/openstack.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/openstack.yaml
new file mode 100644
index 0000000..891fb2e
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/openstack.yaml
@@ -0,0 +1,401 @@
+{% from 'cookied-mcp-ocata-dop-sl2/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-mcp-ocata-dop-sl2/underlay.yaml' import HOSTNAME_CTL01 with context %}
+{% from 'cookied-mcp-ocata-dop-sl2/underlay.yaml' import HOSTNAME_CTL02 with context %}
+{% from 'cookied-mcp-ocata-dop-sl2/underlay.yaml' import HOSTNAME_CTL03 with context %}
+{% from 'cookied-mcp-ocata-dop-sl2/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
+{% set PATTERN = os_env('PATTERN', 'smoke') %}
+
+# Install OpenStack control services
+
+- description: Install glance on all controllers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glance:server' state.sls glance -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install keystone service (note that different fernet keys are created on different nodes)
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' state.sls keystone.server -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 15}
+ skip_fail: false
+
+- description: Restart apache due to PROD-10477
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl restart apache2"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 15}
+ skip_fail: false
+
+- description: Check apache status due to PROD-10477
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl status apache2"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 15}
+ skip_fail: false
+
+- description: Mount glusterfs.client volumes (requires the 'keystone' and 'glusterfs' system users to be created)
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glance:server' state.sls glusterfs.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Update fernet keys for keystone server on the mounted glusterfs volume
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' state.sls keystone.server -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Populate keystone services/tenants/admins
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:client' state.sls keystone.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check keystone service-list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack service list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check glance image-list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonerc; glance image-list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+
+- description: Install nova on all controllers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@nova:controller' state.sls nova -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
+- description: Check nova service-list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonerc; nova --debug service-list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+
+
+- description: Install cinder
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@cinder:controller' state.sls cinder -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check cinder list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonerc; cinder list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+
+- description: Install neutron service
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@neutron:server' state.sls neutron -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install neutron on gtw node
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@neutron:gateway' state.sls neutron
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+# Install Designate
+- description: Install powerdns
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'ctl*' state.sls powerdns
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install designate
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@designate:server' state.sls designate -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 5, delay: 10}
+ skip_fail: false
+
+- description: Check neutron agent-list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonerc; neutron agent-list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install heat service
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@heat:server' state.sls heat -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check heat service
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack orchestration resource type list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 5, delay: 10}
+ skip_fail: false
+
+
+- description: Deploy horizon dashboard
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@horizon:server' state.sls horizon
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+- description: Deploy nginx proxy
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@nginx:server' state.sls nginx
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+
+# Install compute node
+
+- description: Apply formulas for compute node
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+- description: Re-apply (as in the docs) formulas for compute node
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+- description: Check IP on computes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
+ 'ip a'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 10, delay: 30}
+ skip_fail: false
+
+
+# Upload cirros image
+
+- description: Upload cirros image on ctl01
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ 'wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-i386-disk.img'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 30}
+ skip_fail: false
+
+- description: Register image in glance
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; glance image-create --name cirros --visibility public --disk-format qcow2 --container-format bare --progress < /root/cirros-0.3.4-i386-disk.img'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create net04_external
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create subnet_external
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create net04
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron net-create net04'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create subnet_net04
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create router
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron router-create net04_router01'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Set gateway
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Add interface
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Allow all tcp
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; nova secgroup-add-rule default tcp 1 65535 0.0.0.0/0'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Allow all icmp
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Sync time
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
+ 'service ntp stop; ntpd -gq; service ntp start'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Temp workaround for PROD-13167
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run
+ 'apt-get install python-pymysql -y'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+# Configure cinder-volume via salt-call (PROD-13167)
+- description: Set disks 01
+ cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
+ node_name: {{ HOSTNAME_CTL01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Set disks 02
+ cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
+ node_name: {{ HOSTNAME_CTL02 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+#- description: Set disks 03
+# cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
+# node_name: {{ HOSTNAME_CTL03 }}
+# retry: {count: 1, delay: 30}
+# skip_fail: false
+
+- description: Create partitions 01
+ cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
+ node_name: {{ HOSTNAME_CTL01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create partitions 02
+ cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
+ node_name: {{ HOSTNAME_CTL02 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+#- description: Create partitions 03
+# cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
+# node_name: {{ HOSTNAME_CTL03 }}
+# retry: {count: 1, delay: 30}
+# skip_fail: false
+
+- description: Create volume group
+ cmd: salt "ctl*" cmd.run 'vgcreate cinder-volumes /dev/vdb1'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Install cinder-volume
+ cmd: salt 'ctl*' cmd.run 'apt-get install cinder-volume -y'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Install crudini
+ cmd: salt "ctl*" cmd.run 'apt-get install crudini -y'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Temporary workaround to set enabled_backends value 01
+ cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
+ node_name: {{ HOSTNAME_CTL01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Temporary workaround to set enabled_backends value 02
+ cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
+ node_name: {{ HOSTNAME_CTL02 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+#- description: Temporary WR set enabled backends value 03
+# cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
+# node_name: {{ HOSTNAME_CTL03 }}
+# retry: {count: 1, delay: 30}
+# skip_fail: false
+
+- description: Install docker.io on gtw
+ cmd: salt-call cmd.run 'apt-get install docker.io -y'
+ node_name: {{ HOSTNAME_GTW01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create rc file on cfg
+ cmd: scp ctl01:/root/keystonercv3 /root
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Copy rc file
+ cmd: scp /root/keystonercv3 gtw01:/root
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Run tests
+ cmd: |
+ if [[ {{ PATTERN }} == "false" ]]; then
+ docker run --rm --net=host -e TEMPEST_CONF=lvm_mcp.conf -e SKIP_LIST=mcp_skip.list -e SOURCE_FILE=keystonercv3 -v /etc/ssl/certs/:/etc/ssl/certs/ -v /root/:/home/rally docker-sandbox.sandbox.mirantis.net/rally-tempest/rally-tempest:with_designate >> image.output
+ else
+ docker run --rm --net=host -e TEMPEST_CONF=lvm_mcp.conf -e SKIP_LIST=mcp_skip.list -e SOURCE_FILE=keystonercv3 -v /etc/ssl/certs/:/etc/ssl/certs/ -e CUSTOM='--pattern {{ PATTERN }}' -v /root/:/home/rally docker-sandbox.sandbox.mirantis.net/rally-tempest/rally-tempest:with_designate >> image.output
+ fi
+ node_name: {{ HOSTNAME_GTW01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Download xml results
+ download:
+ remote_path: /root
+ remote_filename: "report_*.xml"
+ local_path: {{ os_env('PWD') }}
+ node_name: {{ HOSTNAME_GTW01 }}
+ skip_fail: true
+
+- description: Download html results
+ download:
+ remote_path: /root
+ remote_filename: "report_*.html"
+ local_path: {{ os_env('PWD') }}
+ node_name: {{ HOSTNAME_GTW01 }}
+ skip_fail: true
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/oss.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/oss.yaml
new file mode 100644
index 0000000..85085e2
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/oss.yaml
@@ -0,0 +1,305 @@
+{% from 'cookied-mcp-ocata-dop-sl2/underlay.yaml' import HOSTNAME_CFG01 with context %}
+
+# Install OSS: Operational Support System Tools
+
+# Keepalived
+#-----------
+- description: Install keepalived
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster:enabled:True' state.sls keepalived
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install haproxy
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@haproxy:proxy:enabled:True' state.sls haproxy
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Check the CICD VIP
+ cmd: |
+ CICD_CONTROL_ADDRESS=`salt --out=newline_values_only -C 'I@haproxy:proxy and I@jenkins:client' pillar.get _param:cluster_vip_address`;
+ echo "_param:cluster_vip_address (vip): ${CICD_CONTROL_ADDRESS}";
+ salt --hard-crash --state-output=mixed --state-verbose=False -C "I@keepalived:cluster:instance:*:address:${CICD_CONTROL_ADDRESS}" cmd.run "ip a | grep ${CICD_CONTROL_ADDRESS}" | grep -B1 ${CICD_CONTROL_ADDRESS}
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+# Glusterfs
+#-----------
+
+- description: Prepare glusterfs service
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@glusterfs:server:enabled:True' state.sls glusterfs.server.service
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Setup glusterfs server
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@glusterfs:server:enabled:True' state.sls glusterfs.server.setup -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
+- description: Setup glusterfs client
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@glusterfs:client:enabled:True' state.sls glusterfs.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check the gluster status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glusterfs:client:enabled:True' cmd.run 'gluster peer status; gluster volume status' -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+# Setup Docker Swarm
+#-------------------
+
+- description: "Workaround: create /var/lib/jenkins to get Jenkins slaves working"
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@jenkins:client' cmd.run 'mkdir -p /var/lib/jenkins'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Prepare Docker host
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:host:enabled:True' state.sls docker.host
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install Docker Swarm master
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Collect grains
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls salt.minion.grains &&
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' mine.flush &&
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' mine.update &&
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' saltutil.refresh_modules &&
+ sleep 10
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 15}
+ skip_fail: false
+
+- description: Install Docker Swarm on other nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.swarm
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
+- description: Show Docker Swarm nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+# Configure OSS services
+#-----------------------
+
+- description: Setup devops portal
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@devops_portal:config:enabled' state.sls devops_portal.config
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Setup Rundeck server
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@rundeck:server' state.sls rundeck.server
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+# Deploy Docker services
+#-----------------------
+
+# Original comment from pipeline: XXX: for some weird unknown reason, refresh_pillar is required to execute here
+
+- description: "Workaround from the pipeline: XXX: for some weird unknown reason, refresh_pillar is required to execute here"
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@aptly:publisher' saltutil.refresh_pillar
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: "Workaround from the pipeline: We need /etc/aptly-publisher.yaml to be present before services are deployed. [dd: there were issues when /etc/aptly-publisher.yaml becomes a directory, so this step should be considered]"
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@aptly:publisher' state.sls aptly.publisher
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install Docker client
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+
+- description: "Workaround from the pipeline: sync all salt objects"
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all && sleep 5
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+
+# Aptly
+#------
+
+- description: "Wait for Aptly to come up in container..."
+ cmd: timeout 300 salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@aptly:server' cmd.run
+ 'export CICD_CONTROL_ADDRESS=$(salt-call --out=newline_values_only pillar.get _param:cluster_vip_address);
+ while true; do curl -sf http://${CICD_CONTROL_ADDRESS}:8084/api/version && break; sleep 2; done'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 15}
+ skip_fail: false
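+# The "wait" steps in this file share one polling pattern: resolve the CICD
+# control VIP from pillar on the target minion, then loop with 'curl -sf'
+# until the service endpoint answers, bounded by the outer 'timeout'.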
+
+- description: "Setup Aptly"
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@aptly:server' state.sls aptly
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 10}
+ skip_fail: false
+
+# OpenLDAP
+#---------
+
+- description: "Waiting for OpenLDAP to come up in container..."
+ cmd: timeout 60 salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@openldap:client' cmd.run
+ 'export CICD_CONTROL_ADDRESS=$(salt-call --out=newline_values_only pillar.get _param:cluster_vip_address);
+ while true; do curl -sf ldap://${CICD_CONTROL_ADDRESS} && break; sleep 2; done'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 10}
+ skip_fail: false
+
+- description: "Setup OpenLDAP"
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@openldap:client' state.sls openldap &&
+ sleep 20
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 10}
+ skip_fail: false
+
+# Gerrit
+#-------
+
+- description: "Waiting for Gerrit to come up in container..."
+ cmd: timeout 60 salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@gerrit:client' cmd.run
+ 'export CICD_CONTROL_ADDRESS=$(salt-call --out=newline_values_only pillar.get _param:cluster_vip_address);
+ while true; do curl -sf http://${CICD_CONTROL_ADDRESS}:8080/config/server/version && break; sleep 2; done'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 10}
+ skip_fail: false
+
+- description: "Setup Gerrit"
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@gerrit:client' state.sls gerrit
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 10}
+ skip_fail: false
+
+# Jenkins
+#--------
+
+- description: "Waiting for Jenkins to come up in container..."
+ cmd: timeout 60 salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@jenkins:client' cmd.run
+ 'export CICD_CONTROL_ADDRESS=$(salt-call --out=newline_values_only pillar.get _param:cluster_vip_address);
+ export JENKINS_CLIENT_USER=$(salt-call --out=newline_values_only pillar.get _param:jenkins_client_user);
+ export JENKINS_CLIENT_PASSWORD=$(salt-call --out=newline_values_only pillar.get _param:jenkins_client_password);
+ while true; do
+ curl -f -u ${JENKINS_CLIENT_USER}:${JENKINS_CLIENT_PASSWORD} http://${CICD_CONTROL_ADDRESS}:8081/api/json?pretty=true && break;
+ sleep 2;
+ done'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 10}
+ skip_fail: false
+
+- description: "Setup Jenkins"
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@jenkins:client' state.sls jenkins
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 10}
+ skip_fail: false
+
+# Postgres && Pushkin
+#--------------------
+
+- description: "Waiting for postgresql database to come up in container..."
+# cmd: timeout 300 salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@postgresql:client' cmd.run
+# 'while true; do if docker service logs postgresql_db | grep -q "ready to accept"; then break; else sleep 5; fi; done'
+ cmd: timeout 300 salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@postgresql:client' cmd.run
+ 'while true; do if docker service logs postgresql_postgresql-db | grep -q "ready to accept"; then break; else sleep 5; fi; done'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 10}
+ skip_fail: false
+
+- description: ("Create PostgreSQL databases, it fails at first run because of known deadlock:\n"
+ "1. State postgresql.client cannot insert values into 'pushkin' database because it is created empty,\n"
+ "2. Container with Pushkin cannot start and fill the database scheme until state postgresql.client created users.")
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@postgresql:client' state.sls postgresql.client -b 1 &&
+ timeout 300 salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@postgresql:client' cmd.run
+ 'export CICD_CONTROL_ADDRESS=$(salt-call --out=newline_values_only pillar.get _param:cluster_vip_address);
+ while true; do curl -sf http://${CICD_CONTROL_ADDRESS}:8887/apps && break; sleep 2; done'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 100}
+ skip_fail: false
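+# A minimal manual walkthrough of the deadlock resolution above (hypothetical
+# commands and service name, for illustration only):
+#   salt -C 'I@postgresql:client' state.sls postgresql.client  # run 1: users created, inserts fail
+#   docker service ps pushkin_pushkin-api                      # Pushkin starts and fills the schema
+#   salt -C 'I@postgresql:client' state.sls postgresql.client  # run 2 (retry): inserts succeed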
+
+# Rundeck
+#--------
+
+- description: Waiting for Rundeck to come up in container...
+ cmd: timeout 30 salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@rundeck:client' cmd.run
+ 'export CICD_CONTROL_ADDRESS=$(salt-call --out=newline_values_only pillar.get _param:cluster_vip_address);
+ while true; do curl -sf http://${CICD_CONTROL_ADDRESS}:4440 && break; sleep 2; done'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 10}
+ skip_fail: false
+
+- description: Setup Rundeck
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@rundeck:client' state.sls rundeck.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 10}
+ skip_fail: false
+
+# Elasticsearch
+#--------------
+
+- description: 'Waiting for Elasticsearch to come up in container...'
+ cmd: timeout 30 salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' cmd.run
+ 'export CICD_CONTROL_ADDRESS=$(salt-call --out=newline_values_only pillar.get _param:cluster_vip_address);
+ while true; do curl -sf http://${CICD_CONTROL_ADDRESS}:9200/?pretty && break; sleep 2; done'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 10}
+ skip_fail: false
+
+- description: Setup Elasticsearch
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 10}
+ skip_fail: false
+
+
+
+# Generate docs
+#--------------
+
+- description: Install sphinx (may fail depending on the model)
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@sphinx:server' state.sls sphinx
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: true
+
+- description: Generate docs (may fail depending on the model)
+ cmd: salt-run state.orchestrate sphinx.orch.generate_doc
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: true
+
+- description: Install nginx
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@nginx:server' state.sls nginx
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 10}
+ skip_fail: false
+
+# Final checks
+#-------------
+
+- description: Check for system services in failed state
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run "systemctl --failed | grep -E 'loaded[ \t]+failed' && echo 'Command execution failed' || true"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
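+
+# Note: the trailing '|| true' above keeps the salt command itself successful,
+# so this step reports failed units (by printing 'Command execution failed')
+# without aborting the deployment; it is informational rather than gating.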
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt-context-cicd_oss.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt-context-cicd_oss.yaml
new file mode 100644
index 0000000..680c08c
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt-context-cicd_oss.yaml
@@ -0,0 +1,219 @@
+default_context:
+ cicd_control_node01_address: 10.167.4.91
+ cicd_control_node01_hostname: cid01
+ cicd_control_node02_address: 10.167.4.92
+ cicd_control_node02_hostname: cid02
+ cicd_control_node03_address: 10.167.4.93
+ cicd_control_node03_hostname: cid03
+ cicd_control_vip_address: 10.167.4.90
+ cicd_control_vip_hostname: cid
+ cicd_enabled: 'True'
+ cicd_private_key: |-
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIEowIBAAKCAQEAxgROyM+RnJiDhS/qfXayxDbGmMqIGWsWPnc1RdMcJ9xlBM4a
+ bj4iNB7wmj19oMRBXKvrvMsbnhOJ1Z1tWH1jwm3rZ7ziJlDUo1Ms/wAPXV67+ayu
+ LCSp6JGTzaxo/4YTfzUvvnCJvPKuIf+BjxE6/Rzdzrp6b4FYuvOfkxN/pK4HfrrR
+ wJjyQCCeXGrDcq3vKvBaZ/19MN5JtrrCRx4L42UFy1kAkNCCRir+YsK+tiDM3Tfo
+ r95PNXdMyeKzMEc858D9XxK+UyNFjGrO2hZm6fmRjgWMuNnaGnVotmT1z1pB91d3
+ 5q7n60d3Q7KRn6p+xStrwB7rB9+Jsi3L6q+VEQIDAQABAoIBAExCJnExdvtexO/K
+ 9zxgNRJZofv/q5BWbFecIxkD50j2mLTUBtvD8/osnC5aVgJO8nkVAJFRiw5Cqgdp
+ PE4i5ANhv5HQ7SsiX/GSO7bst/4WWMAbn2wCpqiZP9mqdzlI0kNgIUXvIyxwLV2M
+ f8GwOg52Snmt2S8OGjTNU/wZO0QYzzi44tf2/q0QWy0EV4g2oLq66T/kKpx5FmZQ
+ 0cD9GiESfmzWiq2Aivy4if7VmW4fCxTIvmUypSQf+M4J7ZR6QYUbkr19wNEiYAUq
+ k9aitJNIVW0johbZwexTTF1YiIVuvSwOI/lHGz1e7iVu/hZxx35JtkzLzF9Dd01q
+ M0IMXz0CgYEA95aOjqJTp2KQT++Q4uPl/K1FLNquqZ02SyUNVglkuVn6THHsTC8Q
+ MfO+l39bh1QGTK/bh4dyXub2jEYfTSn1K8YMOYp57tgHTJ0Y8AZbtYaEP0g3BeO5
+ Myd1/YUY+vM6h58wyoqhDLwRql5u5GM8HAibK32d+Fnrf3VSM0i4jT8CgYEAzL6Y
+ c8Fu4ezRiKR1x7jSgbePADRZa7xvLKenuMMYmtg+AixEp5nmm9/vBtmrhE+RQNXw
+ mQvt8EId/XGcJhv83Y+QeYg3AhsdGMIYmlGhFGJ3FtcA72wt3FTGOa2KMtmI6khL
+ WqYohvESfLtCumW0XPRRUVNKF73UKjMa8VnsOa8CgYBto/CRXXUqJM2/eFlzAHUy
+ hhCiIl1Co2oNsOTM+u/t3NiozbJUsmq7lDMMp8uCjEUV5LKUu/h76k+4Ir1t0GzP
+ 664yNQ52JJhm5xLKCCbIpj8ePv6Ozx+OdaUclbpQNzHuKSLULrvPBeHUzmjRHtjZ
+ mT4N7lzsQ/WzxeKW71c6xQKBgDGrj1qNs7O1ewO2OiiQqujzOgrnqEXdue7QYX0O
+ P3rZOPnX+XPbfzmTcu5rghOgJfHftPW8EiY2NAZXOHV6Vrb9bCQ/qnClWUK3W7ac
+ VQKX/KIa2Mw8p0eLfWditWMuqOuFTFqacryB4WVHHKIRqFbgopWjKhdmYwE10rR4
+ hzlbAoGBAMpZ+D08Us5wrsbVlYfOobuHgq2ENPvQnZqJfTobAPGtrMk/M7M4Ga1U
+ +zeO8VA0Tj5jK2qI+MIB2hZmgjp49FbejKFAD+q3srkyqwkGerNXkWOiDGmvYhKR
+ UbC4GcycVQsIZK4bw0K7Pl40/u9artsAFmWOoUunyO4QH8J8EDXJ
+ -----END RSA PRIVATE KEY-----
+ cicd_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDGBE7Iz5GcmIOFL+p9drLENsaYyogZaxY+dzVF0xwn3GUEzhpuPiI0HvCaPX2gxEFcq+u8yxueE4nVnW1YfWPCbetnvOImUNSjUyz/AA9dXrv5rK4sJKnokZPNrGj/hhN/NS++cIm88q4h/4GPETr9HN3OunpvgVi685+TE3+krgd+utHAmPJAIJ5casNyre8q8Fpn/X0w3km2usJHHgvjZQXLWQCQ0IJGKv5iwr62IMzdN+iv3k81d0zJ4rMwRzznwP1fEr5TI0WMas7aFmbp+ZGOBYy42doadWi2ZPXPWkH3V3fmrufrR3dDspGfqn7FK2vAHusH34myLcvqr5UR
+ cluster_domain: cicd-sl2.local
+ cluster_name: integration-dop-sl2
+ deployment_type: physical
+ compute_primary_first_nic: eth1
+ compute_primary_second_nic: eth2
+ control_network_netmask: 255.255.255.0
+ control_vlan: '10'
+ deploy_network_gateway: ''
+ deploy_network_netmask: 255.255.255.0
+ dns_server01: 8.8.8.8
+ dns_server02: 8.8.4.4
+ gateway_primary_first_nic: eth1
+ gateway_primary_second_nic: eth2
+ infra_deploy_nic: eth0
+ maas_deploy_address: 10.167.4.91
+ maas_hostname: mas01
+ infra_kvm01_control_address: 10.167.4.91
+ infra_kvm01_deploy_address: 10.167.5.91
+ infra_kvm01_hostname: kvm01
+ infra_kvm02_control_address: 10.167.4.92
+ infra_kvm02_deploy_address: 10.167.5.92
+ infra_kvm02_hostname: kvm02
+ infra_kvm03_control_address: 10.167.4.93
+ infra_kvm03_deploy_address: 10.167.5.93
+ infra_kvm03_hostname: kvm03
+ infra_kvm_vip_address: 10.167.4.90
+ infra_primary_first_nic: eth1
+ infra_primary_second_nic: eth2
+ kubernetes_enabled: 'False'
+ opencontrail_enabled: 'False'
+  openldap_domain: cicd-sl2.local  # Must be plain text because cookiecutter-templates splits it by dots
+ openldap_enabled: 'True'
+ openldap_organisation: ${_param:cluster_name}
+ openstack_compute_count: '100'
+ openstack_compute_rack01_hostname: cmp
+ openstack_compute_rack01_single_subnet: 10.167.4
+ openstack_compute_rack01_tenant_subnet: 10.167.6
+ openstack_control_address: 10.167.4.10
+ openstack_control_hostname: ctl
+ openstack_control_node01_address: 10.167.4.11
+ openstack_control_node01_hostname: ctl01
+ openstack_control_node02_address: 10.167.4.12
+ openstack_control_node02_hostname: ctl02
+ openstack_control_node03_address: 10.167.4.13
+ openstack_control_node03_hostname: ctl03
+ openstack_database_address: 10.167.4.10
+ openstack_database_hostname: dbs
+ openstack_database_node01_address: 10.167.4.11
+ openstack_database_node01_hostname: dbs01
+ openstack_database_node02_address: 10.167.4.12
+ openstack_database_node02_hostname: dbs02
+ openstack_database_node03_address: 10.167.4.13
+ openstack_database_node03_hostname: dbs03
+ openstack_enabled: 'True'
+ openstack_gateway_node01_address: 10.167.4.224
+ openstack_gateway_node01_hostname: gtw01
+ openstack_gateway_node01_tenant_address: 10.167.6.6
+ openstack_gateway_node02_address: 10.167.4.225
+ openstack_gateway_node02_hostname: gtw02
+ openstack_gateway_node02_tenant_address: 10.167.6.7
+ openstack_gateway_node03_address: 10.167.4.226
+ openstack_gateway_node03_hostname: gtw03
+ openstack_gateway_node03_tenant_address: 10.167.6.8
+ openstack_message_queue_address: 10.167.4.10
+ openstack_message_queue_hostname: msg
+ openstack_message_queue_node01_address: 10.167.4.11
+ openstack_message_queue_node01_hostname: msg01
+ openstack_message_queue_node02_address: 10.167.4.12
+ openstack_message_queue_node02_hostname: msg02
+ openstack_message_queue_node03_address: 10.167.4.13
+ openstack_message_queue_node03_hostname: msg03
+ openstack_network_engine: ovs
+ openstack_nfv_dpdk_enabled: 'False'
+ openstack_nfv_sriov_enabled: 'False'
+ openstack_ovs_dvr_enabled: 'True'
+ openstack_neutron_qos: 'False'
+ openstack_ovs_encapsulation_type: vlan
+ openstack_ovs_encapsulation_vlan_range: 2416:2420
+ openstack_proxy_address: 10.167.4.80
+ openstack_proxy_hostname: prx
+ openstack_proxy_node01_address: 10.167.4.81
+ openstack_proxy_node01_hostname: prx01
+ openstack_proxy_node02_address: 10.167.4.82
+ openstack_proxy_node02_hostname: prx02
+ openstack_telemetry_address: 10.167.4.10
+ openstack_telemetry_hostname: mdb
+ openstack_telemetry_node01_address: 10.167.4.11
+ openstack_telemetry_node01_hostname: mdb01
+ openstack_telemetry_node02_address: 10.167.4.12
+ openstack_telemetry_node02_hostname: mdb02
+ openstack_telemetry_node03_address: 10.167.4.13
+ openstack_telemetry_node03_hostname: mdb03
+ openstack_benchmark_node01_hostname: bmk01
+ openstack_benchmark_node01_address: 10.167.4.85
+ openstack_version: ocata
+ public_host: ${_param:openstack_proxy_address}
+ reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+ salt_master_address: 10.167.4.15
+ salt_master_hostname: cfg01
+ salt_master_management_address: 10.167.5.15
+ stacklight_enabled: 'True'
+ stacklight_version: '2'
+ stacklight_log_address: 10.167.4.70
+ stacklight_log_hostname: log
+ stacklight_log_node01_address: 10.167.4.71
+ stacklight_log_node01_hostname: log01
+ stacklight_log_node02_address: 10.167.4.72
+ stacklight_log_node02_hostname: log02
+ stacklight_log_node03_address: 10.167.4.73
+ stacklight_log_node03_hostname: log03
+ stacklight_monitor_address: 10.167.4.70
+ stacklight_monitor_hostname: mon
+ stacklight_monitor_node01_address: 10.167.4.71
+ stacklight_monitor_node01_hostname: mon01
+ stacklight_monitor_node02_address: 10.167.4.72
+ stacklight_monitor_node02_hostname: mon02
+ stacklight_monitor_node03_address: 10.167.4.73
+ stacklight_monitor_node03_hostname: mon03
+ stacklight_telemetry_address: 10.167.4.70
+ stacklight_telemetry_hostname: mtr
+ stacklight_telemetry_node01_address: 10.167.4.71
+ stacklight_telemetry_node01_hostname: mtr01
+ stacklight_telemetry_node02_address: 10.167.4.72
+ stacklight_telemetry_node02_hostname: mtr02
+ stacklight_telemetry_node03_address: 10.167.4.73
+ stacklight_telemetry_node03_hostname: mtr03
+ tenant_network_gateway: ''
+ tenant_network_netmask: 255.255.255.0
+ tenant_vlan: '20'
+ oss_enabled: 'True'
+ oss_openstack_auth_url: http://10.100.0.10:5000/v3
+ oss_openstack_username: admin
+ oss_openstack_password: password
+ oss_openstack_project: admin
+ oss_openstack_domain_id: default
+ oss_openstack_cert: ''
+ oss_runbook_private_key: |-
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIEowIBAAKCAQEAxgROyM+RnJiDhS/qfXayxDbGmMqIGWsWPnc1RdMcJ9xlBM4a
+ bj4iNB7wmj19oMRBXKvrvMsbnhOJ1Z1tWH1jwm3rZ7ziJlDUo1Ms/wAPXV67+ayu
+ LCSp6JGTzaxo/4YTfzUvvnCJvPKuIf+BjxE6/Rzdzrp6b4FYuvOfkxN/pK4HfrrR
+ wJjyQCCeXGrDcq3vKvBaZ/19MN5JtrrCRx4L42UFy1kAkNCCRir+YsK+tiDM3Tfo
+ r95PNXdMyeKzMEc858D9XxK+UyNFjGrO2hZm6fmRjgWMuNnaGnVotmT1z1pB91d3
+ 5q7n60d3Q7KRn6p+xStrwB7rB9+Jsi3L6q+VEQIDAQABAoIBAExCJnExdvtexO/K
+ 9zxgNRJZofv/q5BWbFecIxkD50j2mLTUBtvD8/osnC5aVgJO8nkVAJFRiw5Cqgdp
+ PE4i5ANhv5HQ7SsiX/GSO7bst/4WWMAbn2wCpqiZP9mqdzlI0kNgIUXvIyxwLV2M
+ f8GwOg52Snmt2S8OGjTNU/wZO0QYzzi44tf2/q0QWy0EV4g2oLq66T/kKpx5FmZQ
+ 0cD9GiESfmzWiq2Aivy4if7VmW4fCxTIvmUypSQf+M4J7ZR6QYUbkr19wNEiYAUq
+ k9aitJNIVW0johbZwexTTF1YiIVuvSwOI/lHGz1e7iVu/hZxx35JtkzLzF9Dd01q
+ M0IMXz0CgYEA95aOjqJTp2KQT++Q4uPl/K1FLNquqZ02SyUNVglkuVn6THHsTC8Q
+ MfO+l39bh1QGTK/bh4dyXub2jEYfTSn1K8YMOYp57tgHTJ0Y8AZbtYaEP0g3BeO5
+ Myd1/YUY+vM6h58wyoqhDLwRql5u5GM8HAibK32d+Fnrf3VSM0i4jT8CgYEAzL6Y
+ c8Fu4ezRiKR1x7jSgbePADRZa7xvLKenuMMYmtg+AixEp5nmm9/vBtmrhE+RQNXw
+ mQvt8EId/XGcJhv83Y+QeYg3AhsdGMIYmlGhFGJ3FtcA72wt3FTGOa2KMtmI6khL
+ WqYohvESfLtCumW0XPRRUVNKF73UKjMa8VnsOa8CgYBto/CRXXUqJM2/eFlzAHUy
+ hhCiIl1Co2oNsOTM+u/t3NiozbJUsmq7lDMMp8uCjEUV5LKUu/h76k+4Ir1t0GzP
+ 664yNQ52JJhm5xLKCCbIpj8ePv6Ozx+OdaUclbpQNzHuKSLULrvPBeHUzmjRHtjZ
+ mT4N7lzsQ/WzxeKW71c6xQKBgDGrj1qNs7O1ewO2OiiQqujzOgrnqEXdue7QYX0O
+ P3rZOPnX+XPbfzmTcu5rghOgJfHftPW8EiY2NAZXOHV6Vrb9bCQ/qnClWUK3W7ac
+ VQKX/KIa2Mw8p0eLfWditWMuqOuFTFqacryB4WVHHKIRqFbgopWjKhdmYwE10rR4
+ hzlbAoGBAMpZ+D08Us5wrsbVlYfOobuHgq2ENPvQnZqJfTobAPGtrMk/M7M4Ga1U
+ +zeO8VA0Tj5jK2qI+MIB2hZmgjp49FbejKFAD+q3srkyqwkGerNXkWOiDGmvYhKR
+ UbC4GcycVQsIZK4bw0K7Pl40/u9artsAFmWOoUunyO4QH8J8EDXJ
+ -----END RSA PRIVATE KEY-----
+ oss_runbook_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDGBE7Iz5GcmIOFL+p9drLENsaYyogZaxY+dzVF0xwn3GUEzhpuPiI0HvCaPX2gxEFcq+u8yxueE4nVnW1YfWPCbetnvOImUNSjUyz/AA9dXrv5rK4sJKnokZPNrGj/hhN/NS++cIm88q4h/4GPETr9HN3OunpvgVi685+TE3+krgd+utHAmPJAIJ5casNyre8q8Fpn/X0w3km2usJHHgvjZQXLWQCQ0IJGKv5iwr62IMzdN+iv3k81d0zJ4rMwRzznwP1fEr5TI0WMas7aFmbp+ZGOBYy42doadWi2ZPXPWkH3V3fmrufrR3dDspGfqn7FK2vAHusH34myLcvqr5UR
+ oss_notification_email_from: ''
+ oss_notification_email_recipients: ''
+ oss_notification_salesforce_username: ''
+ oss_notification_sender_password: ''
+ oss_notification_smtp_host: ''
+ oss_notification_smtp_port: ''
+ oss_notification_webhook_login_id: '13'
+ oss_notification_webhook_app_id: '24'
+ oss_cis_enabled: 'True'
+ oss_cis_jobs_repository: https://github.com/Mirantis/rundeck-cis-jobs.git
+ oss_cis_jobs_repository_branch: master
+ oss_security_audit_enabled: 'True'
+ oss_security_audit_os_ssl_verify: 'True'
+ oss_security_audit_os_cacert_path: '/srv/volumes/rundeck/storage/content/keys/cis/openstack/cert.pem'
+ oss_cleanup_service_enabled: 'True'
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt-context-environment.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt-context-environment.yaml
new file mode 100644
index 0000000..16f4fb1
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt-context-environment.yaml
@@ -0,0 +1,203 @@
+nodes:
+ cfg01.mcp11-ovs-dpdk.local:
+ reclass_storage_name: infra_config_node01
+ roles:
+ - infra_config
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_vlan_ctl
+
+ cid01.mcp11-ovs-dpdk.local:
+ reclass_storage_name: cicd_control_node01
+ roles:
+ - infra_kvm
+ - cicd_control_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_vlan_ctl
+
+ cid02.mcp11-ovs-dpdk.local:
+ reclass_storage_name: cicd_control_node02
+ roles:
+ - infra_kvm
+ - cicd_control_manager
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_vlan_ctl
+
+ cid03.mcp11-ovs-dpdk.local:
+ reclass_storage_name: cicd_control_node03
+ roles:
+ - infra_kvm
+ - cicd_control_manager
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_vlan_ctl
+
+ ctl01.mcp11-ovs-dpdk.local:
+ reclass_storage_name: openstack_control_node01
+ roles:
+ - openstack_control_leader
+ - openstack_database_leader
+ - openstack_message_queue
+ - openstack_telemetry
+ - features_designate_database
+ - features_designate
+ - features_designate_keystone
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_vlan_ctl
+
+ ctl02.mcp11-ovs-dpdk.local:
+ reclass_storage_name: openstack_control_node02
+ roles:
+ - openstack_control
+ - openstack_database
+ - openstack_message_queue
+ - openstack_telemetry
+ - features_designate_database
+ - features_designate
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_vlan_ctl
+
+ ctl03.mcp11-ovs-dpdk.local:
+ reclass_storage_name: openstack_control_node03
+ roles:
+ - openstack_control
+ - openstack_database
+ - openstack_message_queue
+ - openstack_telemetry
+ - features_designate_database
+ - features_designate
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_vlan_ctl
+
+ prx01.mcp11-ovs-dpdk.local:
+ reclass_storage_name: openstack_proxy_node01
+ roles:
+ - openstack_proxy
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_vlan_ctl
+
+ prx02.mcp11-ovs-dpdk.local:
+ reclass_storage_name: openstack_proxy_node02
+ roles:
+ - openstack_proxy
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_vlan_ctl
+
+ mon01.mcp11-ovs-dpdk.local:
+ reclass_storage_name: stacklight_server_node01
+ roles:
+ - stacklightv2_server_leader
+ - stacklight_telemetry_leader
+ - stacklight_log_leader_v2
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_vlan_ctl
+
+ mon02.mcp11-ovs-dpdk.local:
+ reclass_storage_name: stacklight_server_node02
+ roles:
+ - stacklightv2_server
+ - stacklight_telemetry
+ - stacklight_log
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_vlan_ctl
+
+ mon03.mcp11-ovs-dpdk.local:
+ reclass_storage_name: stacklight_server_node03
+ roles:
+ - stacklightv2_server
+ - stacklight_telemetry
+ - stacklight_log
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_vlan_ctl
+
+ # Generator-based computes. For compatibility only
+ cmp<<count>>.mcp11-ovs-dpdk.local:
+ reclass_storage_name: openstack_compute_rack01
+ roles:
+ - openstack_compute
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: bond0_ab_ovs_vxlan_ctl_mesh
+ ens5:
+ role: bond0_ab_ovs_vxlan_ctl_mesh
+ ens6:
+ role: bond1_ab_ovs_floating
+
+ gtw01.mcp11-ovs-dpdk.local:
+ reclass_storage_name: openstack_gateway_node01
+ roles:
+ - openstack_gateway
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: bond0_ab_ovs_vxlan_ctl_mesh
+ ens5:
+ role: bond0_ab_ovs_vxlan_ctl_mesh
+ ens6:
+ role: bond1_ab_ovs_floating
+
+ gtw02.mcp11-ovs-dpdk.local:
+ reclass_storage_name: openstack_gateway_node02
+ roles:
+ - openstack_gateway
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: bond0_ab_ovs_vxlan_ctl_mesh
+ ens5:
+ role: bond0_ab_ovs_vxlan_ctl_mesh
+ ens6:
+ role: bond1_ab_ovs_floating
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt.yaml
new file mode 100644
index 0000000..8b1b7ef
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt.yaml
@@ -0,0 +1,165 @@
+{% from 'cookied-mcp-ocata-dop-sl2/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-mcp-ocata-dop-sl2/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookied-mcp-ocata-dop-sl2/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+# See shared-salt.yaml for the other salt model repository parameters
+
+# Name of the context file (without the fixed .yaml extension) used to render the Environment model
+{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','virtual-mcp-ocata-dop-sl2') %}
+# Path to the context files used to render Cluster and Environment models
+{%- set CLUSTER_CONTEXT_PATH = '/tmp/environment/salt-context-cicd_oss.yaml' %}
+{%- set ENVIRONMENT_CONTEXT_PATH = '/tmp/environment/salt-context-environment.yaml' %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
+
+- description: "[EXPERIMENTAL] Upload 'environment' to {{ HOSTNAME_CFG01 }}"
+ upload:
+ local_path: {{ config.salt_deploy.environment_template_dir }}
+ remote_path: /tmp/environment/
+ node_name: {{ HOSTNAME_CFG01 }}
+ skip_fail: false
+
+- description: "[EXPERIMENTAL] Upload cookiecutter-templates context to {{ HOSTNAME_CFG01 }}"
+ upload:
+ local_path: {{ config.salt_deploy.templates_dir }}{{ LAB_CONFIG_NAME }}/
+ local_filename: salt-context-cicd_oss.yaml
+ remote_path: /tmp/environment/
+ node_name: {{ HOSTNAME_CFG01 }}
+
+- description: "[EXPERIMENTAL] Upload environment inventory to {{ HOSTNAME_CFG01 }}"
+ upload:
+ local_path: {{ config.salt_deploy.templates_dir }}{{ LAB_CONFIG_NAME }}/
+ local_filename: salt-context-environment.yaml
+ remote_path: /tmp/environment/
+ node_name: {{ HOSTNAME_CFG01 }}
+
+{#{ SHARED.MACRO_CLONE_RECLASS_MODELS() }#}
+- description: Create cluster model from cookiecutter templates
+ cmd: |
+ set -e;
+ pip install cookiecutter
+ export GIT_SSL_NO_VERIFY=true; git clone https://gerrit.mcp.mirantis.net/mk/cookiecutter-templates /tmp/cookiecutter-templates
+ mkdir -p /srv/salt/reclass/classes/cluster/
+ mkdir -p /srv/salt/reclass/classes/system/
+ mkdir -p /srv/salt/reclass/classes/service/
+ mkdir -p /srv/salt/reclass/nodes/_generated
+
+ # Override some context parameters
+ sed -i 's/cluster_name:.*/cluster_name: {{ LAB_CONFIG_NAME }}/g' {{ CLUSTER_CONTEXT_PATH }}
+ sed -i 's/cluster_domain:.*/cluster_domain: {{ DOMAIN_NAME }}/g' {{ CLUSTER_CONTEXT_PATH }}
+ sed -i 's/control_vlan:.*/control_vlan: \"2416\"/g' {{ CLUSTER_CONTEXT_PATH }}
+ sed -i 's/tenant_vlan:.*/tenant_vlan: \"2417\"/g' {{ CLUSTER_CONTEXT_PATH }}
+
+  # Temporary workaround (with hardcoded address .90 -> .15) for bug https://mirantis.jira.com/browse/PROD-14377
+ sed -i 's/salt_master_address:.*/salt_master_address: {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.15/g' {{ CLUSTER_CONTEXT_PATH }}
+ sed -i 's/salt_master_management_address:.*/salt_master_management_address: {{ SHARED.IPV4_NET_ADMIN_PREFIX }}.15/g' {{ CLUSTER_CONTEXT_PATH }}
+
+  # Replace with an intermediate keyword first, to avoid collisions between
+  # already replaced and not-yet-replaced networks.
+  # For example, if the generated IPV4_NET_ADMIN_PREFIX=10.16.0, there is a risk of replacing twice:
+  #   192.168.10 -> 10.16.0 (generated network for admin)
+  #   10.16.0 -> <external network>
+  # So the constant networks are first replaced with keywords, and then the keywords with the desired networks.
+ sed -i 's/10\.167\.5/==IPV4_NET_ADMIN_PREFIX==/g' {{ CLUSTER_CONTEXT_PATH }}
+ sed -i 's/10\.167\.4/==IPV4_NET_CONTROL_PREFIX==/g' {{ CLUSTER_CONTEXT_PATH }}
+ sed -i 's/10\.167\.6/==IPV4_NET_TENANT_PREFIX==/g' {{ CLUSTER_CONTEXT_PATH }}
+ sed -i 's/172\.17\.16\./==IPV4_NET_EXTERNAL_PREFIX==/g' {{ CLUSTER_CONTEXT_PATH }}
+
+ sed -i 's/==IPV4_NET_ADMIN_PREFIX==/{{ SHARED.IPV4_NET_ADMIN_PREFIX }}/g' {{ CLUSTER_CONTEXT_PATH }}
+ sed -i 's/==IPV4_NET_CONTROL_PREFIX==/{{ SHARED.IPV4_NET_CONTROL_PREFIX }}/g' {{ CLUSTER_CONTEXT_PATH }}
+ sed -i 's/==IPV4_NET_TENANT_PREFIX==/{{ SHARED.IPV4_NET_TENANT_PREFIX }}/g' {{ CLUSTER_CONTEXT_PATH }}
+ sed -i 's/==IPV4_NET_EXTERNAL_PREFIX==/{{ SHARED.IPV4_NET_EXTERNAL_PREFIX }}./g' {{ CLUSTER_CONTEXT_PATH }}
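+  # Note: unlike the other prefixes, the external prefix is matched and restored
+  # with a trailing dot (172.17.16. rather than 172.17.16), presumably to anchor
+  # the match to the full three-octet prefix so that longer prefixes such as
+  # 172.17.160.x are not accidentally rewritten.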
+
+ for i in $(ls /tmp/cookiecutter-templates/cluster_product/); do
+ python /tmp/cookiecutter-templates/generate.py \
+ --template /tmp/cookiecutter-templates/cluster_product/$i \
+ --config-file {{ CLUSTER_CONTEXT_PATH }} \
+ --output-dir /srv/salt/reclass/classes/cluster/;
+ done
+
+ export GIT_SSL_NO_VERIFY=true; git clone https://gerrit.mcp.mirantis.net/salt-models/reclass-system /srv/salt/reclass/classes/system/
+
+  # Create the cfg01 node definition and disable checking out the model from the remote repository
+ cat << 'EOF' >> /srv/salt/reclass/nodes/_generated/{{ HOSTNAME_CFG01 }}.yml
+ classes:
+ - system.openssh.server.team.all
+ - cluster.{{ LAB_CONFIG_NAME }}.infra.config
+ - environment.{{ ENVIRONMENT_MODEL_INVENTORY_NAME }}.reclass_datasource_local
+ - environment.{{ ENVIRONMENT_MODEL_INVENTORY_NAME }}
+
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 1}
+ skip_fail: false
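+
+# The generated node class above pins cfg01 to the locally rendered model:
+# the 'environment.<inventory>.reclass_datasource_local' class makes reclass
+# use the local /srv/salt/reclass tree instead of checking the model out from
+# a remote repository.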
+
+- description: "[EXPERIMENTAL] Remove linux.network.interface object from the cluster/system models and use fixed 'environment' model instead"
+ cmd: |
+ set -e;
+ apt-get -y install python-virtualenv python-pip build-essential python-dev libssl-dev;
+ pip install git+https://github.com/dis-xcom/reclass-tools;
+ reclass-tools del-key parameters.linux.network.interface /srv/salt/reclass/classes/cluster/;
+ reclass-tools del-key parameters.linux.network.interface /srv/salt/reclass/classes/system/;
+ reclass-tools del-key parameters.linux.network.interface /usr/share/salt-formulas/reclass/;
+
+ #if ! reclass-tools get-key 'classes' /srv/salt/reclass/nodes/{{ HOSTNAME_CFG01 }}.yml | grep -q "environment.{{ ENVIRONMENT_MODEL_INVENTORY_NAME }}$"; then
+ # reclass-tools add-key 'classes' 'environment.{{ ENVIRONMENT_MODEL_INVENTORY_NAME }}' /srv/salt/reclass/nodes/{{ HOSTNAME_CFG01 }}.yml --merge ;
+ #fi;
+
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: "Workaround for PROD-14756: all roles must use service.keepalived.cluster.single with the default 'VIP' instance"
+ cmd: |
+ set -e;
+ find /srv/salt/reclass/classes/cluster/ -type f -exec sed -i 's/system.keepalived.*/service.keepalived.cluster.single/g' {} +
+ find /srv/salt/reclass/classes/system/ -type f -exec sed -i 's/system.keepalived.*/service.keepalived.cluster.single/g' {} +
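+  # Effect of the rewrite above (with a hypothetical class name): a class line such as
+  #   - system.keepalived.cluster.openstack_web_public_vip
+  # becomes
+  #   - service.keepalived.cluster.single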
+
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: "[EXPERIMENTAL] Create environment model for virtual environment"
+ cmd: |
+ set -e;
+ ln -s '/tmp/environment/environment_template/{{ '{# interfaces #}' }}' '/tmp/environment/environment_template/{{ '{{ cookiecutter._env_name }}' }}/';
+ ln -s '/tmp/environment/environment_template/{{ '{# roles #}' }}' '/tmp/environment/environment_template/{{ '{{ cookiecutter._env_name }}' }}/';
+ reclass-tools render --template-dir /tmp/environment/environment_template/ \
+ --output-dir /srv/salt/reclass/classes/environment/ \
+ --context {{ ENVIRONMENT_CONTEXT_PATH }} \
+ --env-name {{ ENVIRONMENT_MODEL_INVENTORY_NAME }}
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
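+
+# The two 'ln -s' calls above expose the shared "interfaces" and "roles"
+# template directories inside the rendered env directory, so that
+# 'reclass-tools render' can include them while generating the environment
+# model from the salt-context-environment.yaml inventory.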
+
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "glusterfs" "xtrabackup"') }}
+
+{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
+
+{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
+
+{{ SHARED.MACRO_GENERATE_INVENTORY() }}
+
+{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
+
+{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
+
+#- description: Hack gtw node
+# cmd: salt 'gtw*' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.110/24 dev ens4; ip addr flush dev ens4";
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 1, delay: 10}
+# skip_fail: false
+
+#- description: Hack cmp01 node
+# cmd: salt 'cmp01*' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.105/24 dev ens4; ip addr flush dev ens4";
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 1, delay: 10}
+# skip_fail: false
+
+#- description: Hack cmp02 node
+# cmd: salt 'cmp02*' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.106/24 dev ens4; ip addr flush dev ens4";
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 1, delay: 10}
+# skip_fail: false
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/sl.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/sl.yaml
new file mode 100644
index 0000000..c7a708d
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/sl.yaml
@@ -0,0 +1,187 @@
+{% from 'cookied-mcp-ocata-dop-sl2/underlay.yaml' import HOSTNAME_CFG01 with context %}
+
+# Install docker swarm
+- description: Install keepalived on mon nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'mon*' state.sls keepalived
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Check the VIP on StackLight mon nodes
+ cmd: |
+ SL_VIP=$(salt --out=newline_values_only "mon01*" pillar.get _param:cluster_vip_address);
+ echo "_param:cluster_vip_address (vip): ${SL_VIP}";
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Configure docker service
+ cmd: salt -C 'I@docker:swarm' state.sls docker.host
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install docker swarm on master node
+ cmd: salt -C 'I@docker:swarm:role:master' state.sls docker.swarm
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Send grains to the swarm slave nodes
+ cmd: salt -C 'I@docker:swarm' state.sls salt.minion.grains
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Update mine
+ cmd: salt -C 'I@docker:swarm' mine.update
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Refresh modules
+ cmd: salt -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Rerun swarm state on master so that the join tokens are populated for the slaves
+ cmd: salt -C 'I@docker:swarm:role:master' state.sls docker.swarm
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Configure slave nodes
+ cmd: salt -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: List registered Docker swarm nodes
+ cmd: salt -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
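+
+# Hypothetical output of the check above once the swarm is formed (one line
+# per mon node, with a single Leader):
+#   ID           HOSTNAME               STATUS  AVAILABILITY  MANAGER STATUS
+#   abcd1234...  mon01.cicd-sl2.local   Ready   Active        Leader
+#   efgh5678...  mon02.cicd-sl2.local   Ready   Active        Reachable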
+
+# Install StackLight v2 (slv2) infra
+- description: Install telegraf
+ cmd: salt -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 10}
+ skip_fail: false
+
+- description: Configure Prometheus exporters, if pillar 'prometheus:exporters' exists on any server
+ cmd: |
+ if salt -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
+ salt -C 'I@prometheus:exporters' state.sls prometheus
+ fi
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Configure collector
+ cmd: salt -C 'I@heka:log_collector' state.sls heka.log_collector
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install elasticsearch server
+ cmd: salt -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install kibana server
+ cmd: salt -C 'I@kibana:server' state.sls kibana.server -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install elasticsearch client
+ cmd: salt -C 'I@elasticsearch:client' state.sls elasticsearch.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install kibana client
+ cmd: salt -C 'I@kibana:client' state.sls kibana.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Check InfluxDB
+ cmd: |
+ INFLUXDB_SERVICE=`salt -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
+ echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
+ if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
+ salt -C 'I@influxdb:server' state.sls influxdb
+ fi
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+# Collect grains needed to configure the services
+
+- description: Get grains
+ cmd: salt -C 'I@salt:minion' state.sls salt.minion.grains
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Sync modules
+ cmd: salt -C 'I@salt:minion' saltutil.refresh_modules
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Update mine
+ cmd: salt -C 'I@salt:minion' mine.update; sleep 5;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+# Change environment configuration before deploy
+- description: Set SL docker images deploy parameters
+ cmd: |
+ {% for sl_opt, value in config.sl_deploy.items() %}
+ {% if value|string() %}
+ salt-call reclass.cluster_meta_set {{ sl_opt }} {{ value }};
+ {% endif %}
+ {% endfor %}
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 1}
+ skip_fail: false
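+
+# The Jinja loop above expands at render time into one 'salt-call
+# reclass.cluster_meta_set <option> <value>' call per sl_deploy option that
+# has a non-empty value, e.g. (hypothetical option name):
+#   salt-call reclass.cluster_meta_set docker_image_alertmanager <image-url>;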
+
+# Configure the services running in Docker Swarm
+- description: Install prometheus alertmanager
+ cmd: salt -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Run docker state
+ cmd: salt -C 'I@docker:swarm:role:master' state.sls docker
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: docker ps
+ cmd: salt -C 'I@docker:swarm' dockerng.ps
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Configure Grafana dashboards and datasources
+ cmd: sleep 30; salt -C 'I@grafana:client' state.sls grafana.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 10}
+ skip_fail: false
+
+- description: Run salt minion to create cert files
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay--meta-data.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay--meta-data.yaml
new file mode 100644
index 0000000..3699401
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay--meta-data.yaml
@@ -0,0 +1,4 @@
+| # All the data below will be stored as a string object
+ instance-id: iid-local1
+ hostname: {hostname}
+ local-hostname: {hostname}
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay--user-data-cfg01.yaml
new file mode 100644
index 0000000..8cb2c2d
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay--user-data-cfg01.yaml
@@ -0,0 +1,97 @@
+| # All the data below will be stored as a string object
+ #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ ssh_pwauth: True
+ users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ {% for key in config.underlay.ssh_keys %}
+ - ssh-rsa {{ key['public'] }}
+ {% endfor %}
+
+ disable_root: false
+ chpasswd:
+ list: |
+ root:r00tme
+ expire: False
+
+ bootcmd:
+ # Block access to SSH while node is preparing
+ - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+ output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+ runcmd:
+ # Configure dhclient
+ - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+ - sudo resolvconf -u
+
+ # Enable grub menu using updated config below
+ - update-grub
+
+ # Prepare network connection
+ - sudo ifup ens3
+ #- sudo route add default gw {gateway} {interface_name}
+ #- sudo ifup ens4
+
+ # Create swap
+ - fallocate -l 4G /swapfile
+ - chmod 600 /swapfile
+ - mkswap /swapfile
+ - swapon /swapfile
+ - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+
+ ############## TCP Cloud cfg01 node ##################
+ #- sleep 120
+ - echo "Preparing base OS"
+
+  - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
+  - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
+ - which wget >/dev/null || (apt-get update; apt-get install -y wget);
+
+ - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
+ - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
+ - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
+ - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
+
+ - apt-get clean
+ - apt-get update
+
+ # Install common packages
+ - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
+
+ # Install salt-minion and stop it until it is configured
+ - eatmydata apt-get install -y salt-minion && service salt-minion stop
+
+ ########################################################
+ # Node is ready, allow SSH access
+ - echo "Allow SSH access ..."
+ - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+ ########################################################
+
+ write_files:
+ - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+ content: |
+ GRUB_RECORDFAIL_TIMEOUT=30
+ GRUB_TIMEOUT=3
+ GRUB_TIMEOUT_STYLE=menu
+
+ - path: /etc/network/interfaces
+ content: |
+ auto ens3
+ iface ens3 inet dhcp
+
+ - path: /root/.ssh/config
+ owner: root:root
+ permissions: '0600'
+ content: |
+ Host *
+ ServerAliveInterval 300
+ ServerAliveCountMax 10
+ StrictHostKeyChecking no
+ UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay--user-data-cicd.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay--user-data-cicd.yaml
new file mode 100644
index 0000000..3bc891d
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay--user-data-cicd.yaml
@@ -0,0 +1,94 @@
+| # All the data below will be stored as a string object
+ #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ ssh_pwauth: True
+ users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ {% for key in config.underlay.ssh_keys %}
+ - ssh-rsa {{ key['public'] }}
+ {% endfor %}
+
+ disable_root: false
+ chpasswd:
+ list: |
+ root:r00tme
+ expire: False
+
+ bootcmd:
+ # Block access to SSH while node is preparing
+ - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+ output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+ runcmd:
+ - export TERM=linux
+ - export LANG=C
+ # Configure dhclient
+ - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+ - sudo resolvconf -u
+
+ # Enable grub menu using updated config below
+ - update-grub
+
+ # Prepare network connection
+ # WARNING! On CID* nodes, admin network is connected to ens4, and control network to ens3 (as in the model)
+ # On other nodes (cfg01 and openstack), admin network is connected to ens3, and control network to ens4
+ - sudo ifup ens4
+
+ # Create swap
+ - fallocate -l 8G /swapfile
+ - chmod 600 /swapfile
+ - mkswap /swapfile
+ - swapon /swapfile
+ - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+
+
+ ############## TCP Cloud cfg01 node ##################
+ #- sleep 120
+ - echo "Preparing base OS"
+ - which wget >/dev/null || (apt-get update; apt-get install -y wget)
+
+ - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
+ - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
+ - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
+ - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
+
+ - apt-get clean
+ - eatmydata apt-get update && apt-get -y upgrade
+
+ # Install common packages
+ - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
+
+ # Install salt-minion and stop it until it is configured
+ - eatmydata apt-get install -y salt-minion && service salt-minion stop
+
+ # Install latest kernel
+ - eatmydata apt-get install -y linux-generic-hwe-16.04
+
+ ########################################################
+ # Node is ready, allow SSH access
+ #- echo "Allow SSH access ..."
+ #- sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+ - reboot
+ ########################################################
+
+ write_files:
+ - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+ content: |
+ GRUB_RECORDFAIL_TIMEOUT=30
+ GRUB_TIMEOUT=3
+ GRUB_TIMEOUT_STYLE=menu
+
+ - path: /etc/network/interfaces
+ content: |
+ auto ens3
+ iface ens3 inet manual
+ auto ens4
+ iface ens4 inet dhcp
+
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay--user-data1604.yaml
new file mode 100644
index 0000000..677c392
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay--user-data1604.yaml
@@ -0,0 +1,92 @@
+| # All the data below will be stored as a string object
+ #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ ssh_pwauth: True
+ users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ {% for key in config.underlay.ssh_keys %}
+ - ssh-rsa {{ key['public'] }}
+ {% endfor %}
+
+ disable_root: false
+ chpasswd:
+ list: |
+ root:r00tme
+ expire: False
+
+ bootcmd:
+ # Block access to SSH while node is preparing
+ - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+ output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+ runcmd:
+ - export TERM=linux
+ - export LANG=C
+ # Configure dhclient
+ - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+ - sudo resolvconf -u
+
+ # Enable grub menu using updated config below
+ - update-grub
+
+ # Prepare network connection
+ - sudo ifup ens3
+ #- sudo route add default gw {gateway} {interface_name}
+ #- sudo ifup ens4
+
+ # Create swap
+ - fallocate -l 4G /swapfile
+ - chmod 600 /swapfile
+ - mkswap /swapfile
+ - swapon /swapfile
+ - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+
+
+ ############## TCP Cloud cfg01 node ##################
+ #- sleep 120
+ - echo "Preparing base OS"
+ - which wget >/dev/null || (apt-get update; apt-get install -y wget)
+
+ - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
+ - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
+ - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
+ - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
+
+ - apt-get clean
+ - eatmydata apt-get update && apt-get -y upgrade
+
+ # Install common packages
+ - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
+
+ # Install salt-minion and stop it until it is configured
+ - eatmydata apt-get install -y salt-minion && service salt-minion stop
+
+ # Install latest kernel
+ - eatmydata apt-get install -y linux-generic-hwe-16.04
+
+ ########################################################
+ # Node is ready, allow SSH access
+ #- echo "Allow SSH access ..."
+ #- sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+ - reboot
+ ########################################################
+
+ write_files:
+ - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+ content: |
+ GRUB_RECORDFAIL_TIMEOUT=30
+ GRUB_TIMEOUT=3
+ GRUB_TIMEOUT_STYLE=menu
+
+ - path: /etc/network/interfaces
+ content: |
+ auto ens3
+ iface ens3 inet dhcp
+
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay.yaml
new file mode 100644
index 0000000..93ac360
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay.yaml
@@ -0,0 +1,652 @@
+# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
+
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-mcp-ocata-dop-sl2') %}
+{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
+{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CID01 = os_env('HOSTNAME_CID01', 'cid01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CID02 = os_env('HOSTNAME_CID02', 'cid02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CID03 = os_env('HOSTNAME_CID03', 'cid03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp001.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp002.' + DOMAIN_NAME) %}
+{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
+# The aggregate/VIP and remaining per-node hostnames below are referenced in
+# the address pools; the defaults follow the same pattern as above.
+{% set HOSTNAME_CID = os_env('HOSTNAME_CID', 'cid.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON = os_env('HOSTNAME_MON', 'mon.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL = os_env('HOSTNAME_CTL', 'ctl.' + DOMAIN_NAME) %}
+{% set HOSTNAME_PRX = os_env('HOSTNAME_PRX', 'prx.' + DOMAIN_NAME) %}
+{% set HOSTNAME_GTW02 = os_env('HOSTNAME_GTW02', 'gtw02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_PRX02 = os_env('HOSTNAME_PRX02', 'prx02.' + DOMAIN_NAME) %}
+
+{% import 'cookied-mcp-ocata-dop-sl2/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
+{% import 'cookied-mcp-ocata-dop-sl2/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
+{% import 'cookied-mcp-ocata-dop-sl2/underlay--user-data-cicd.yaml' as CLOUDINIT_USER_DATA_CICD with context %}
+{% import 'cookied-mcp-ocata-dop-sl2/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
+
+---
+aliases:
+ - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
+ - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
+ - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
+ - &cloudinit_user_data_cicd {{ CLOUDINIT_USER_DATA_CICD }}
+ - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
+
+template:
+ devops_settings:
+ env_name: {{ os_env('ENV_NAME', LAB_CONFIG_NAME + '_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+
+ address_pools:
+ private-pool01:
+ net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ default_{{ HOSTNAME_CFG01 }}: +15
+
+ default_{{ HOSTNAME_CID }}: +90
+ default_{{ HOSTNAME_CID01 }}: +91
+ default_{{ HOSTNAME_CID02 }}: +92
+ default_{{ HOSTNAME_CID03 }}: +93
+
+ default_{{ HOSTNAME_MON }}: +70
+ default_{{ HOSTNAME_MON01 }}: +71
+ default_{{ HOSTNAME_MON02 }}: +72
+ default_{{ HOSTNAME_MON03 }}: +73
+
+ default_{{ HOSTNAME_CTL }}: +10
+ default_{{ HOSTNAME_CTL01 }}: +11
+ default_{{ HOSTNAME_CTL02 }}: +12
+ default_{{ HOSTNAME_CTL03 }}: +13
+ default_{{ HOSTNAME_GTW01 }}: +224
+ default_{{ HOSTNAME_GTW02 }}: +225
+ default_{{ HOSTNAME_PRX }}: +80
+ default_{{ HOSTNAME_PRX01 }}: +81
+ default_{{ HOSTNAME_PRX02 }}: +82
+
+ default_{{ HOSTNAME_CMP01 }}: +101
+ default_{{ HOSTNAME_CMP02 }}: +102
+ ip_ranges:
+ dhcp: [+90, -10]
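+        # Note (fuel-devops convention): the '+N' entries above are resolved
+        # as the N-th address from the start of the allocated subnet, and
+        # '-N' counts back from its end, so 'dhcp: [+90, -10]' spans
+        # <net>+90 up to <broadcast>-10 of the generated /24.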
+
+ admin-pool01:
+ net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ default_{{ HOSTNAME_CFG01 }}: +15
+
+ default_{{ HOSTNAME_CID }}: +90
+ default_{{ HOSTNAME_CID01 }}: +91
+ default_{{ HOSTNAME_CID02 }}: +92
+ default_{{ HOSTNAME_CID03 }}: +93
+
+ default_{{ HOSTNAME_MON }}: +70
+ default_{{ HOSTNAME_MON01 }}: +71
+ default_{{ HOSTNAME_MON02 }}: +72
+ default_{{ HOSTNAME_MON03 }}: +73
+
+ default_{{ HOSTNAME_CTL }}: +10
+ default_{{ HOSTNAME_CTL01 }}: +11
+ default_{{ HOSTNAME_CTL02 }}: +12
+ default_{{ HOSTNAME_CTL03 }}: +13
+ default_{{ HOSTNAME_GTW01 }}: +224
+ default_{{ HOSTNAME_GTW02 }}: +225
+ default_{{ HOSTNAME_PRX }}: +80
+ default_{{ HOSTNAME_PRX01 }}: +81
+ default_{{ HOSTNAME_PRX02 }}: +82
+
+ default_{{ HOSTNAME_CMP01 }}: +101
+ default_{{ HOSTNAME_CMP02 }}: +102
+ ip_ranges:
+ dhcp: [+90, -10]
+
+ tenant-pool01:
+ net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ default_{{ HOSTNAME_CFG01 }}: +15
+
+ default_{{ HOSTNAME_CID }}: +90
+ default_{{ HOSTNAME_CID01 }}: +91
+ default_{{ HOSTNAME_CID02 }}: +92
+ default_{{ HOSTNAME_CID03 }}: +93
+
+ default_{{ HOSTNAME_MON }}: +70
+ default_{{ HOSTNAME_MON01 }}: +71
+ default_{{ HOSTNAME_MON02 }}: +72
+ default_{{ HOSTNAME_MON03 }}: +73
+
+ default_{{ HOSTNAME_CTL }}: +10
+ default_{{ HOSTNAME_CTL01 }}: +11
+ default_{{ HOSTNAME_CTL02 }}: +12
+ default_{{ HOSTNAME_CTL03 }}: +13
+ default_{{ HOSTNAME_GTW01 }}: +6
+ default_{{ HOSTNAME_GTW02 }}: +7
+ default_{{ HOSTNAME_PRX }}: +80
+ default_{{ HOSTNAME_PRX01 }}: +81
+ default_{{ HOSTNAME_PRX02 }}: +82
+
+ default_{{ HOSTNAME_CMP01 }}: +101
+ default_{{ HOSTNAME_CMP02 }}: +102
+ ip_ranges:
+ dhcp: [+10, -10]
+
+ external-pool01:
+ net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ default_{{ HOSTNAME_CFG01 }}: +15
+
+ default_{{ HOSTNAME_CID }}: +90
+ default_{{ HOSTNAME_CID01 }}: +91
+ default_{{ HOSTNAME_CID02 }}: +92
+ default_{{ HOSTNAME_CID03 }}: +93
+
+ default_{{ HOSTNAME_MON }}: +70
+ default_{{ HOSTNAME_MON01 }}: +71
+ default_{{ HOSTNAME_MON02 }}: +72
+ default_{{ HOSTNAME_MON03 }}: +73
+
+ default_{{ HOSTNAME_CTL }}: +10
+ default_{{ HOSTNAME_CTL01 }}: +11
+ default_{{ HOSTNAME_CTL02 }}: +12
+ default_{{ HOSTNAME_CTL03 }}: +13
+ default_{{ HOSTNAME_GTW01 }}: +224
+ default_{{ HOSTNAME_GTW02 }}: +225
+ default_{{ HOSTNAME_PRX }}: +80
+ default_{{ HOSTNAME_PRX01 }}: +81
+ default_{{ HOSTNAME_PRX02 }}: +82
+
+ default_{{ HOSTNAME_CMP01 }}: +101
+ default_{{ HOSTNAME_CMP02 }}: +102
+ ip_ranges:
+ dhcp: [+10, -10]
+
+
+ groups:
+ - name: default
+ driver:
+ name: devops.driver.libvirt
+ params:
+ connection_string: !os_env CONNECTION_STRING, qemu:///system
+ storage_pool_name: !os_env STORAGE_POOL_NAME, default
+            stp: false
+            hpet: false
+ enable_acpi: true
+ use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
+ use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
+
+ network_pools:
+ admin: admin-pool01
+ private: private-pool01
+ tenant: tenant-pool01
+ external: external-pool01
+
+ l2_network_devices:
+ admin:
+ address_pool: admin-pool01
+ dhcp: true
+ forward:
+ mode: nat
+
+ private:
+ address_pool: private-pool01
+ dhcp: false
+
+ #tenant:
+ # address_pool: tenant-pool01
+ # dhcp: false
+
+ external:
+ address_pool: external-pool01
+ dhcp: false
+ forward:
+ mode: nat
+
+
+ group_volumes:
+     - name: cloudimage1604  # This name is used for the 'backing_store' option of node volumes.
+ source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img or
+ # http://apt.tcpcloud.eu/images/ubuntu-16-04-x64-201608231004.qcow2
+ format: qcow2
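+       # Node 'system' volumes reference this image as a copy-on-write
+       # qcow2 backing file through their 'backing_store' option.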
+
+ nodes:
+ - name: {{ HOSTNAME_CFG01 }}
+ role: salt_master
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 3072
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+         - name: iso   # The volume named 'iso' is used to store
+                       # the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_cfg01
+
+ interfaces:
+ - label: ens3
+ l2_network_device: admin
+ interface_model: *interface_model
+ - label: ens4
+ l2_network_device: private
+ interface_model: *interface_model
+ network_config:
+ ens3:
+ networks:
+ - admin
+ ens4:
+ networks:
+ - private
+
+ - name: {{ HOSTNAME_CID01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 4
+ memory: !os_env SLAVE_NODE_MEMORY, 6144
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+         - name: iso   # The volume named 'iso' is used to store
+                       # the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
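+        # The '&interfaces' / '&network_config' anchors defined here are
+        # shared by the remaining two-NIC nodes (cid/ctl/mon/prx) below.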
+ interfaces: &interfaces
+ - label: ens3
+ l2_network_device: admin
+ interface_model: *interface_model
+ - label: ens4
+ l2_network_device: private
+ interface_model: *interface_model
+ network_config: &network_config
+ ens3:
+ networks:
+ - admin
+ ens4:
+ networks:
+ - private
+
+ - name: {{ HOSTNAME_CID02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 4
+ memory: !os_env SLAVE_NODE_MEMORY, 6144
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+         - name: iso   # The volume named 'iso' is used to store
+                       # the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_CID03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 4
+ memory: !os_env SLAVE_NODE_MEMORY, 6144
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+         - name: iso   # The volume named 'iso' is used to store
+                       # the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_CTL01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 6144
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+         - name: iso   # The volume named 'iso' is used to store
+                       # the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_CTL02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 6144
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+         - name: iso   # The volume named 'iso' is used to store
+                       # the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_CTL03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 6144
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+         - name: iso   # The volume named 'iso' is used to store
+                       # the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MON01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+         - name: iso   # The volume named 'iso' is used to store
+                       # the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MON02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+         - name: iso   # The volume named 'iso' is used to store
+                       # the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MON03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+         - name: iso   # The volume named 'iso' is used to store
+                       # the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_PRX01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+         - name: iso   # The volume named 'iso' is used to store
+                       # the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_CMP01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 3072
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+         - name: iso   # The volume named 'iso' is used to store
+                       # the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
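+        # Compute and gateway nodes use a four-NIC layout, shared below via
+        # the '&all_interfaces' / '&all_network_config' anchors.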
+ interfaces: &all_interfaces
+ - label: ens3
+ l2_network_device: admin
+ interface_model: *interface_model
+ - label: ens4
+ l2_network_device: private
+ interface_model: *interface_model
+ - label: ens5
+ l2_network_device: private
+ interface_model: *interface_model
+ - label: ens6
+ l2_network_device: external
+ interface_model: *interface_model
+ network_config: &all_network_config
+ ens3:
+ networks:
+ - admin
+ ens4:
+ networks:
+ - private
+ ens5:
+ networks:
+ - private
+ ens6:
+ networks:
+ - external
+
+ - name: {{ HOSTNAME_CMP02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 3072
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+         - name: iso   # The volume named 'iso' is used to store
+                       # the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
+
+ - name: {{ HOSTNAME_GTW01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+         - name: iso   # The volume named 'iso' is used to store
+                       # the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
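
Note for reviewers: the '+N'/'-N' values in the address pools above are relative
offsets that fuel-devops resolves inside the subnet carved out of the pool's
network (e.g. a /24 from 10.70.0.0/16). Below is a minimal sketch of that
resolution, assuming positive offsets count from the network address and
negative ones from the broadcast address; the helper name 'resolve_offset' is
illustrative only, not fuel-devops API:

    import ipaddress

    def resolve_offset(subnet, offset):
        """Resolve a fuel-devops style '+N'/'-N' offset within 'subnet'."""
        net = ipaddress.ip_network(subnet)
        n = int(offset)  # int('+91') == 91, int('-10') == -10
        base = net.network_address if n >= 0 else net.broadcast_address
        return base + n

    # Assuming the first /24 carved from ADMIN_ADDRESS_POOL01 (10.70.0.0/16):
    print(resolve_offset('10.70.0.0/24', '+91'))  # 10.70.0.91  -> cid01
    print(resolve_offset('10.70.0.0/24', '-10'))  # 10.70.0.245 -> upper DHCP bound
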
diff --git a/tcp_tests/tests/system/test_install_cookied_ocata.py b/tcp_tests/tests/system/test_install_cookied_ocata.py
index 41adb89..c3ac83f 100644
--- a/tcp_tests/tests/system/test_install_cookied_ocata.py
+++ b/tcp_tests/tests/system/test_install_cookied_ocata.py
@@ -46,3 +46,49 @@
"""
LOG.info("*************** DONE **************")
+
+ @pytest.mark.fail_snapshot
+ def test_cookied_ocata_cicd_oss_install(self, underlay, oss_deployed,
+ openstack_deployed, sl_deployed,
+ show_step):
+        """Test for deploying an MCP environment and checking it.
+ Scenario:
+ 1. Prepare salt on hosts
+ 2. Setup CICD nodes
+ 3. Setup OpenStack
+ 4. Setup StackLight v2
+ 5. Get monitoring nodes
+ 6. Check that docker services are running
+ 7. Check current prometheus targets are UP
+ 8. Run SL component tests
+ 9. Download SL component tests report
+ """
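+        # StackLight v2 components run as Docker Swarm services on the
+        # mon* nodes; steps 5-7 assert that all of them are up.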
+ expected_service_list = ['monitoring_remote_storage_adapter',
+ 'monitoring_server',
+ 'monitoring_remote_agent',
+ 'dashboard_grafana',
+ 'monitoring_alertmanager',
+ 'monitoring_remote_collector',
+ 'monitoring_pushgateway']
+ show_step(5)
+ mon_nodes = sl_deployed.get_monitoring_nodes()
+ LOG.debug('Mon nodes list {0}'.format(mon_nodes))
+
+ show_step(6)
+ sl_deployed.check_docker_services(mon_nodes, expected_service_list)
+
+ show_step(7)
+ sl_deployed.check_prometheus_targets(mon_nodes)
+
+ show_step(8)
+        # Run SL component tests
+ sl_deployed.run_sl_functional_tests(
+ 'cfg01',
+ '/root/stacklight-pytest/stacklight_tests/tests/prometheus')
+
+ show_step(9)
+ # Download report
+ sl_deployed.download_sl_test_report(
+ 'cfg01',
+ '/root/stacklight-pytest/stacklight_tests')
+ LOG.info("*************** DONE **************")
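
For context on step 6: a minimal sketch of the kind of comparison
'check_docker_services' performs, under the assumption that the StackLight
services run as Docker Swarm services on the mon* nodes and can be listed with
'docker service ls'. The 'run_cmd' callable is hypothetical glue standing in
for the underlay SSH transport, not the framework's API:

    def check_docker_services(run_cmd, nodes, expected_services):
        """Assert that every expected Docker service is present on each node."""
        for node in nodes:
            # run_cmd(node, cmd) -> stdout string (hypothetical SSH helper)
            out = run_cmd(node, "docker service ls --format '{{.Name}}'")
            running = set(out.splitlines())
            missing = set(expected_services) - running
            assert not missing, (
                'Node {0} is missing docker services: {1}'.format(
                    node, sorted(missing)))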