Merge "Fix regex"
diff --git a/tcp_tests/helpers/exceptions.py b/tcp_tests/helpers/exceptions.py
index f6c2310..7bc4abc 100644
--- a/tcp_tests/helpers/exceptions.py
+++ b/tcp_tests/helpers/exceptions.py
@@ -133,3 +133,14 @@
def __str__(self):
return ("Salt pillar '{0}' error on minion {1}: {2}"
.format(self.minion_id, self.pillar, self.message))
+
+
+class EnvironmentNodeIsNotStarted(BaseException):
+ def __init__(self, node_name, message=''):
+ super(EnvironmentNodeIsNotStarted, self).__init__()
+ self.node_name = node_name
+ self.message = message
+
+ def __str__(self):
+ return ("Cloud-init failed on node {0} with error: \n{1}"
+ .format(self.node_name, self.message))
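For reference, the new exception follows the module's existing pattern (fields stored in __init__, rendered in __str__). A minimal sketch of how it surfaces, with an illustrative node name and message (not taken from the patch):

    from tcp_tests.helpers import exceptions

    try:
        # "host:port" is what envmanager_devops passes as node_name below
        raise exceptions.EnvironmentNodeIsNotStarted(
            '10.0.0.15:22', 'bootstrap script failed, see log tail')
    except exceptions.EnvironmentNodeIsNotStarted as err:
        print(err)
        # Cloud-init failed on node 10.0.0.15:22 with error:
        # bootstrap script failed, see log tail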
diff --git a/tcp_tests/managers/envmanager_devops.py b/tcp_tests/managers/envmanager_devops.py
index d02cff5..79bec94 100644
--- a/tcp_tests/managers/envmanager_devops.py
+++ b/tcp_tests/managers/envmanager_devops.py
@@ -309,6 +309,7 @@
LOG.info('Environment "{0}" started'.format(self.__env.name))
check_cloudinit_started = '[ -f /is_cloud_init_started ]'
check_cloudinit_finished = '[ -f /is_cloud_init_finished ]'
+ check_cloudinit_failed = 'cat /is_cloud_init_failed'
passed = {}
for node in self.__env.get_nodes(role__in=underlay_node_roles):
LOG.info("Waiting for SSH on node '{0}' / {1} ...".format(
@@ -329,6 +330,12 @@
# If '/is_cloud_init_started' exists, then wait for
# the flag /is_cloud_init_finished
if ssh.execute(check_cloudinit_started)['exit_code'] == 0:
+ result = ssh.execute(check_cloudinit_failed)
+ if result['exit_code'] == 0:
+ raise exceptions.EnvironmentNodeIsNotStarted(
+ "{0}:{1}".format(host, port),
+ result.stdout_str)
+
status = ssh.execute(
check_cloudinit_finished)['exit_code'] == 0
# Else, just wait for SSH
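The updated wait loop now distinguishes three flag files on each node. A simplified sketch of the decision flow (it assumes a devops-style ssh.execute() returning a dict-like result with 'exit_code' and a stdout_str attribute, as in fuel-devops ExecResult; the helper name is illustrative):

    from tcp_tests.helpers import exceptions

    def cloudinit_status(ssh, host, port):
        # No '/is_cloud_init_started' flag: an image without the wrapper,
        # so a working SSH connection is the only readiness criterion.
        if ssh.execute('[ -f /is_cloud_init_started ]')['exit_code'] != 0:
            return True
        # The user-data wrapper writes '/is_cloud_init_failed' with a log
        # tail on error; fail fast instead of polling for a 'finished'
        # flag that will never appear.
        result = ssh.execute('cat /is_cloud_init_failed')
        if result['exit_code'] == 0:
            raise exceptions.EnvironmentNodeIsNotStarted(
                "{0}:{1}".format(host, port), result.stdout_str)
        # Otherwise keep polling until '/is_cloud_init_finished' appears.
        return ssh.execute('[ -f /is_cloud_init_finished ]')['exit_code'] == 0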
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay--user-data-cfg01.yaml
index 77c18d1..4b134b7 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay--user-data-cfg01.yaml
@@ -61,19 +61,19 @@
#- cp /root/config-drive/user-data /root/user-data
#- sed -i '/^reboot$/d' /root/user-data
#- set -x; cd /root && /bin/bash -xe ./user-data
- - set -x; cd /root/config-drive && /bin/bash -xe ./user-data
-
- #- echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
+ - |
+ set -x
+ cd /root/config-drive
+ if /bin/bash -xe ./user-data > /var/log/cloud-init-user-data.log 2>&1; then
+ touch /is_cloud_init_finished
+ else
+ set +x
+ echo "bootstrap script /root/config-drive/user-data failed. tail -n 10 /var/log/cloud-init-user-data.log:\n" > /is_cloud_init_failed
+ tail -n 10 /var/log/cloud-init-user-data.log >> /is_cloud_init_failed
+ fi
# Enable root access (after reboot)
- sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- #- service sshd stop
-
- ########################################################
- # Node is ready, allow SSH access
- - touch /is_cloud_init_finished
- #- reboot
- ########################################################
write_files:
- path: /etc/default/grub.d/97-enable-grub-menu.cfg
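The same wrapper logic in plain Python, for readers who prefer it to the shell form above (paths mirror the template; this subprocess rendition is a sketch, not part of the patch):

    import subprocess

    LOG = '/var/log/cloud-init-user-data.log'

    with open(LOG, 'w') as log:
        # run the generated bootstrap script, capturing stdout and stderr
        rc = subprocess.call(['/bin/bash', '-xe', './user-data'],
                             cwd='/root/config-drive', stdout=log, stderr=log)

    if rc == 0:
        open('/is_cloud_init_finished', 'w').close()  # success flag
    else:
        with open('/is_cloud_init_failed', 'w') as fail:
            fail.write('bootstrap script /root/config-drive/user-data failed. '
                       'tail -n 10 {0}:\n'.format(LOG))
            with open(LOG) as log:
                fail.writelines(log.readlines()[-10:])  # last 10 log lines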
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico/underlay--user-data-cfg01.yaml
index 77c18d1..4b134b7 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-calico/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico/underlay--user-data-cfg01.yaml
@@ -61,19 +61,19 @@
#- cp /root/config-drive/user-data /root/user-data
#- sed -i '/^reboot$/d' /root/user-data
#- set -x; cd /root && /bin/bash -xe ./user-data
- - set -x; cd /root/config-drive && /bin/bash -xe ./user-data
-
- #- echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
+ - |
+ set -x
+ cd /root/config-drive
+ if /bin/bash -xe ./user-data > /var/log/cloud-init-user-data.log 2>&1; then
+ touch /is_cloud_init_finished
+ else
+ set +x
+ echo "bootstrap script /root/config-drive/user-data failed. tail -n 10 /var/log/cloud-init-user-data.log:\n" > /is_cloud_init_failed
+ tail -n 10 /var/log/cloud-init-user-data.log >> /is_cloud_init_failed
+ fi
# Enable root access (after reboot)
- sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- #- service sshd stop
-
- ########################################################
- # Node is ready, allow SSH access
- - touch /is_cloud_init_finished
- #- reboot
- ########################################################
write_files:
- path: /etc/default/grub.d/97-enable-grub-menu.cfg
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr/_context-environment.yaml
index 91208a4..0f806cf 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr/_context-environment.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr/_context-environment.yaml
@@ -17,9 +17,9 @@
- openstack_control_leader
- openstack_database_leader
- openstack_message_queue
- - features_designate_pool_manager_database
- - features_designate_pool_manager
- - features_designate_pool_manager_keystone
+# - features_designate_pool_manager_database
+# - features_designate_pool_manager
+# - features_designate_pool_manager_keystone
- linux_system_codename_xenial
interfaces:
ens3:
@@ -34,8 +34,8 @@
- openstack_control
- openstack_database
- openstack_message_queue
- - features_designate_pool_manager_database
- - features_designate_pool_manager
+# - features_designate_pool_manager_database
+# - features_designate_pool_manager
- linux_system_codename_xenial
interfaces:
ens3:
@@ -50,8 +50,8 @@
- openstack_control
- openstack_database
- openstack_message_queue
- - features_designate_pool_manager_database
- - features_designate_pool_manager
+# - features_designate_pool_manager_database
+# - features_designate_pool_manager
- linux_system_codename_xenial
interfaces:
ens3:
@@ -63,7 +63,7 @@
reclass_storage_name: openstack_proxy_node01
roles:
- openstack_proxy
- - features_designate_pool_manager_proxy
+# - features_designate_pool_manager_proxy
- linux_system_codename_xenial
interfaces:
ens3:
@@ -201,36 +201,36 @@
ens6:
role: bond1_ab_ovs_floating
- dns01.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_dns_node01
- roles:
- - features_designate_pool_manager_dns
- - linux_system_codename_xenial
- classes:
- - system.linux.system.repo.mcp.extra
- - system.linux.system.repo.mcp.apt_mirantis.openstack
- - system.linux.system.repo.mcp.apt_mirantis.ubuntu
- - system.linux.system.repo.mcp.apt_mirantis.saltstack
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- single_address: ${_param:openstack_dns_node01_address}
-
- dns02.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_dns_node02
- roles:
- - features_designate_pool_manager_dns
- - linux_system_codename_xenial
- classes:
- - system.linux.system.repo.mcp.extra
- - system.linux.system.repo.mcp.apt_mirantis.openstack
- - system.linux.system.repo.mcp.apt_mirantis.ubuntu
- - system.linux.system.repo.mcp.apt_mirantis.saltstack
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- single_address: ${_param:openstack_dns_node02_address}
+# dns01.mcp11-ovs-dpdk.local:
+# reclass_storage_name: openstack_dns_node01
+# roles:
+# - features_designate_pool_manager_dns
+# - linux_system_codename_xenial
+# classes:
+# - system.linux.system.repo.mcp.extra
+# - system.linux.system.repo.mcp.apt_mirantis.openstack
+# - system.linux.system.repo.mcp.apt_mirantis.ubuntu
+# - system.linux.system.repo.mcp.apt_mirantis.saltstack
+# interfaces:
+# ens3:
+# role: single_dhcp
+# ens4:
+# role: single_ctl
+# single_address: ${_param:openstack_dns_node01_address}
+#
+# dns02.mcp11-ovs-dpdk.local:
+# reclass_storage_name: openstack_dns_node02
+# roles:
+# - features_designate_pool_manager_dns
+# - linux_system_codename_xenial
+# classes:
+# - system.linux.system.repo.mcp.extra
+# - system.linux.system.repo.mcp.apt_mirantis.openstack
+# - system.linux.system.repo.mcp.apt_mirantis.ubuntu
+# - system.linux.system.repo.mcp.apt_mirantis.saltstack
+# interfaces:
+# ens3:
+# role: single_dhcp
+# ens4:
+# role: single_ctl
+# single_address: ${_param:openstack_dns_node02_address}
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr/openstack.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr/openstack.yaml
index 26d7992..5b8bbf7 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr/openstack.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr/openstack.yaml
@@ -141,19 +141,19 @@
skip_fail: false
# install designate
-- description: Install powerdns
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@powerdns:server' state.sls powerdns.server
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+#- description: Install powerdns
+# cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+# -C 'I@powerdns:server' state.sls powerdns.server
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 1, delay: 5}
+# skip_fail: false
-- description: Install designate
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@designate:server' state.sls designate -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 10}
- skip_fail: false
+#- description: Install designate
+# cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+# -C 'I@designate:server' state.sls designate -b 1
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 5, delay: 10}
+# skip_fail: false
- description: Check neutron agent-list
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr/underlay.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr/underlay.yaml
index d8ed2c1..f135f1e 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr/underlay.yaml
@@ -30,8 +30,8 @@
{% set HOSTNAME_MTR02 = os_env('HOSTNAME_MTR02', 'mtr02.' + DOMAIN_NAME) %}
{% set HOSTNAME_MTR03 = os_env('HOSTNAME_MTR03', 'mtr03.' + DOMAIN_NAME) %}
{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_DNS01 = os_env('HOSTNAME_DNS01', 'dns01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_DNS02 = os_env('HOSTNAME_DNS02', 'dns02.' + DOMAIN_NAME) %}
+#{% set HOSTNAME_DNS01 = os_env('HOSTNAME_DNS01', 'dns01.' + DOMAIN_NAME) %}
+#{% set HOSTNAME_DNS02 = os_env('HOSTNAME_DNS02', 'dns02.' + DOMAIN_NAME) %}
{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
template:
@@ -61,8 +61,8 @@
default_{{ HOSTNAME_MTR02 }}: +87
default_{{ HOSTNAME_MTR03 }}: +88
default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_DNS01 }}: +111
- default_{{ HOSTNAME_DNS02 }}: +112
+# default_{{ HOSTNAME_DNS01 }}: +111
+# default_{{ HOSTNAME_DNS02 }}: +112
default_{{ HOSTNAME_PRX01 }}: +121
ip_ranges:
dhcp: [+90, -10]
@@ -89,8 +89,8 @@
default_{{ HOSTNAME_MTR02 }}: +87
default_{{ HOSTNAME_MTR03 }}: +88
default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_DNS01 }}: +111
- default_{{ HOSTNAME_DNS02 }}: +112
+# default_{{ HOSTNAME_DNS01 }}: +111
+# default_{{ HOSTNAME_DNS02 }}: +112
default_{{ HOSTNAME_PRX01 }}: +121
ip_ranges:
dhcp: [+90, -10]
@@ -117,8 +117,8 @@
default_{{ HOSTNAME_MTR02 }}: +87
default_{{ HOSTNAME_MTR03 }}: +88
default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_DNS01 }}: +111
- default_{{ HOSTNAME_DNS02 }}: +112
+# default_{{ HOSTNAME_DNS01 }}: +111
+# default_{{ HOSTNAME_DNS02 }}: +112
default_{{ HOSTNAME_PRX01 }}: +121
ip_ranges:
dhcp: [+10, -10]
@@ -145,8 +145,8 @@
default_{{ HOSTNAME_MTR02 }}: +87
default_{{ HOSTNAME_MTR03 }}: +88
default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_DNS01 }}: +111
- default_{{ HOSTNAME_DNS02 }}: +112
+# default_{{ HOSTNAME_DNS01 }}: +111
+# default_{{ HOSTNAME_DNS02 }}: +112
default_{{ HOSTNAME_PRX01 }}: +121
ip_ranges:
dhcp: [+10, -10]
@@ -711,54 +711,54 @@
interfaces: *all_interfaces
network_config: *all_network_config
- - name: {{ HOSTNAME_DNS01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_DNS02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
+# - name: {{ HOSTNAME_DNS01 }}
+# role: salt_minion
+# params:
+# vcpu: !os_env SLAVE_NODE_CPU, 1
+# memory: !os_env SLAVE_NODE_MEMORY, 2048
+# boot:
+# - hd
+# cloud_init_volume_name: iso
+# cloud_init_iface_up: ens3
+# volumes:
+# - name: system
+# capacity: !os_env NODE_VOLUME_SIZE, 150
+# backing_store: mcp_ubuntu_1604_image
+# format: qcow2
+# - name: iso # Volume with name 'iso' will be used
+# # for store image with cloud-init metadata.
+# capacity: 1
+# format: raw
+# device: cdrom
+# bus: ide
+# cloudinit_meta_data: *cloudinit_meta_data
+# cloudinit_user_data: *cloudinit_user_data_1604
+#
+# interfaces: *all_interfaces
+# network_config: *all_network_config
+#
+# - name: {{ HOSTNAME_DNS02 }}
+# role: salt_minion
+# params:
+# vcpu: !os_env SLAVE_NODE_CPU, 1
+# memory: !os_env SLAVE_NODE_MEMORY, 2048
+# boot:
+# - hd
+# cloud_init_volume_name: iso
+# cloud_init_iface_up: ens3
+# volumes:
+# - name: system
+# capacity: !os_env NODE_VOLUME_SIZE, 150
+# backing_store: mcp_ubuntu_1604_image
+# format: qcow2
+# - name: iso # Volume with name 'iso' will be used
+# # for store image with cloud-init metadata.
+# capacity: 1
+# format: raw
+# device: cdrom
+# bus: ide
+# cloudinit_meta_data: *cloudinit_meta_data
+# cloudinit_user_data: *cloudinit_user_data_1604
+#
+# interfaces: *all_interfaces
+# network_config: *all_network_config
diff --git a/tcp_tests/templates/cookied-mcp-pike-ovs/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-pike-ovs/_context-environment.yaml
index 8ac0a05..4c7091b 100644
--- a/tcp_tests/templates/cookied-mcp-pike-ovs/_context-environment.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-ovs/_context-environment.yaml
@@ -17,10 +17,10 @@
- openstack_control_leader
- openstack_database_leader
- openstack_message_queue
- - features_designate_bind9_database
- - features_designate_bind9_dns
- - features_designate_bind9
- - features_designate_bind9_keystone
+ # - features_designate_bind9_database
+ # - features_designate_bind9_dns
+ # - features_designate_bind9
+ # - features_designate_bind9_keystone
- linux_system_codename_xenial
interfaces:
ens3:
@@ -35,9 +35,9 @@
- openstack_control
- openstack_database
- openstack_message_queue
- - features_designate_bind9_database
- - features_designate_bind9_dns
- - features_designate_bind9
+ # - features_designate_bind9_database
+ # - features_designate_bind9_dns
+ # - features_designate_bind9
- linux_system_codename_xenial
interfaces:
ens3:
@@ -52,8 +52,8 @@
- openstack_control
- openstack_database
- openstack_message_queue
- - features_designate_bind9_database
- - features_designate_bind9
+ # - features_designate_bind9_database
+ # - features_designate_bind9
- linux_system_codename_xenial
interfaces:
ens3:
@@ -65,7 +65,7 @@
reclass_storage_name: openstack_proxy_node01
roles:
- openstack_proxy
- - features_designate_bind9_proxy
+ # - features_designate_bind9_proxy
- linux_system_codename_xenial
interfaces:
ens3:
diff --git a/tcp_tests/templates/cookied-mcp-pike-ovs/openstack.yaml b/tcp_tests/templates/cookied-mcp-pike-ovs/openstack.yaml
index 9a8e2ab..b1642b2 100644
--- a/tcp_tests/templates/cookied-mcp-pike-ovs/openstack.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-ovs/openstack.yaml
@@ -119,19 +119,19 @@
skip_fail: false
# install designate
-- description: Install bind
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@bind:server' state.sls bind
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+#- description: Install bind
+# cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+# -C 'I@bind:server' state.sls bind
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 1, delay: 5}
+# skip_fail: false
-- description: Install designate
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@designate:server' state.sls designate -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 10}
- skip_fail: false
+#- description: Install designate
+# cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+# -C 'I@designate:server' state.sls designate -b 1
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 5, delay: 10}
+# skip_fail: false
- description: Check neutron agent-list
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr.yaml
index 2f340e0..a21a23c 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr.yaml
@@ -47,10 +47,10 @@
reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
# reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- salt-call reclass.cluster_meta_set name='openstack_dns_node01_address' value='{{ SHARED.IPV4_NET_CONTROL_PREFIX }}.111' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
- salt-call reclass.cluster_meta_set name='openstack_dns_node02_address' value='{{ SHARED.IPV4_NET_CONTROL_PREFIX }}.112' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
+ # salt-call reclass.cluster_meta_set name='openstack_dns_node01_address' value='{{ SHARED.IPV4_NET_CONTROL_PREFIX }}.111' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
+ # salt-call reclass.cluster_meta_set name='openstack_dns_node02_address' value='{{ SHARED.IPV4_NET_CONTROL_PREFIX }}.112' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
# Workaround of missing reclass.system for dns role
- salt-call reclass.cluster_meta_set name='salt_master_host' value='${_param:infra_config_deploy_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
+ # salt-call reclass.cluster_meta_set name='salt_master_host' value='${_param:infra_config_deploy_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-ovs.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-ovs.yaml
index 0e607dc..0c82575 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-ovs.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-ovs.yaml
@@ -48,8 +48,8 @@
# reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
# Bind9 services are placed on the first two ctl nodes
- salt-call reclass.cluster_meta_set name='openstack_dns_node01_address' value='${_param:openstack_control_node01_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
- salt-call reclass.cluster_meta_set name='openstack_dns_node02_address' value='${_param:openstack_control_node02_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
+ # salt-call reclass.cluster_meta_set name='openstack_dns_node01_address' value='${_param:openstack_control_node01_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
+ # salt-call reclass.cluster_meta_set name='openstack_dns_node02_address' value='${_param:openstack_control_node02_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
diff --git a/tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/common-services.yaml b/tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/common-services.yaml
deleted file mode 100644
index fbf3a06..0000000
--- a/tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/common-services.yaml
+++ /dev/null
@@ -1,118 +0,0 @@
-{% from 'mk24_lab_ovs_dvr_vlan_bm/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Check the VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
- echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Install glusterfs
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Setup glusterfs on primary controller
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check the gluster status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
diff --git a/tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/openstack.yaml b/tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/openstack.yaml
deleted file mode 100644
index 441b300..0000000
--- a/tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/openstack.yaml
+++ /dev/null
@@ -1,169 +0,0 @@
-{% from 'mk24_lab_ovs_dvr_vlan_bm/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-# Install OpenStack control services
-
-- description: Install glance on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glance:server' state.sls glance -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install keystone service (note that different fernet keys are created on different nodes)
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' state.sls keystone.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 15}
- skip_fail: false
-
-- description: Restart apache due to PROD-10477
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl restart apache2"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 15}
- skip_fail: false
-
-- description: Check apache status to PROD-10477
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl status apache2"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 15}
- skip_fail: false
-
-- description: Mount glusterfs.client volumes (requires created 'keystone' and 'glusterfs' system users)
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glance:server' state.sls glusterfs.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Update fernet keys for keystone server on the mounted glusterfs volume
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' state.sls keystone.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Populate keystone services/tenants/admins
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:client' state.sls keystone.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check keystone service-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack service list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check glance image-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; glance image-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Install nova on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nova:controller' state.sls nova -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check nova service-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; nova service-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Install cinder
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@cinder:controller' state.sls cinder -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check cinder list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; cinder list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Install neutron service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@neutron:server' state.sls neutron -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install neutron on gtw node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@neutron:gateway' state.sls neutron
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Check neutron agent-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; neutron agent-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Install heat service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@heat:server' state.sls heat -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check heat service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; heat resource-type-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Deploy horizon dashboard
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@horizon:server' state.sls horizon
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Deploy nginx proxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-# Install compute node
-
-- description: Apply formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Re-apply(as in doc) formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check IP on computes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
- 'ip a'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 10, delay: 30}
- skip_fail: false
diff --git a/tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/salt.yaml b/tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/salt.yaml
deleted file mode 100644
index 3dbefb8..0000000
--- a/tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/salt.yaml
+++ /dev/null
@@ -1,109 +0,0 @@
-{% from 'mk24_lab_ovs_dvr_vlan_bm/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'mk24_lab_ovs_dvr_vlan_bm/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'mk24_lab_ovs_dvr_vlan_bm/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/qa') %}
-# Other salt model repository parameters see in shared-salt.yaml
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch"') }}
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-########################################
-# Spin up Control Plane VMs on KVM nodes
-########################################
-
-- description: '*Workaround 1/2* of the bug PROD-9576 to get bond0-connectivity *without* rebooting nodes'
- cmd: salt-call --hard-crash --state-output=mixed --state-verbose=False cmd.run
- "mkdir -p /tmp/PROD-9576; cd /tmp/PROD-9576; git clone https://gerrit.mcp.mirantis.net/salt-formulas/linux; cd linux;
- git fetch https://gerrit.mcp.mirantis.net/salt-formulas/linux refs/changes/54/2354/16 && git checkout FETCH_HEAD;
- cp -f linux/network/interface.sls /srv/salt/env/prd/linux/network/;
- cp -f linux/map.jinja /srv/salt/env/prd/linux/;"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: '*Workaround 2/2* of the bug PROD-9576 to get bond0-connectivity on cfg01 *without* reboot'
- cmd: cat /etc/network/interfaces | grep bond-slaves | awk '{print $2}' | xargs -I {} ifenslave bond0 {}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Refresh pillars for present baremetal nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Sync all salt resources for present baremetal nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all && sleep 5
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Configure linux for present baremetal nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C '* and not
- cfg01*' state.sls linux
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 5}
- skip_fail: false
-
-- description: ovs-dvr-vlan model specific Execute 'libvirt' states to create necessary libvirt networks
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'kvm*' state.sls libvirt
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Create VMs for control plane
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'kvm*' state.sls salt.control
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
-
-- description: '*Workaround* for waiting the control-plane VMs in the salt-key (instead of sleep)'
- cmd: |
- salt-key -l acc| sort > /tmp/current_keys.txt &&
- salt 'kvm*' cmd.run 'virsh list --name' | grep -v 'kvm'|sort|xargs -I {} fgrep {} /tmp/current_keys.txt
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 20, delay: 30}
- skip_fail: false
-
-#########################################
-# Configure all running salt minion nodes
-#########################################
-
-- description: Refresh pillars on all minions
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Sync all salt resources
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all && sleep 5
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Show reclass-salt --top for generated nodes
- cmd: reclass-salt --top -u /srv/salt/reclass/nodes/_generated/
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-#- description: Execute salt.minion.cert
-# cmd: salt-call --no-color state.sls salt.minion.cert -l info;
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 5}
-# skip_fail: false
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
diff --git a/tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/underlay--meta-data.yaml b/tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
- instance-id: iid-local1
- hostname: {hostname}
- local-hostname: {hostname}
diff --git a/tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/underlay--user-data-cfg01.yaml b/tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/underlay--user-data-cfg01.yaml
deleted file mode 100644
index e386337..0000000
--- a/tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,86 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifup enp8s0f0
- #- sudo route add default gw {gateway} {interface_name}
- - sudo ifup enp8s0f1
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- ############## TCP Cloud cfg01 node ##################
- #- sleep 120
- - echo "Preparing base OS"
-
- - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
- - which wget >/dev/null || (apt-get update; apt-get install -y wget);
-
- - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
- - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
- - apt-get clean
- - apt-get update
-
- # Install common packages
- - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree
-
- ########################################################
- # Node is ready, allow SSH access
- - echo "Allow SSH access ..."
- - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto enp8s0f0
- iface enp8s0f0 inet dhcp
- auto enp8s0f1
- iface enp8s0f1 inet manual
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/underlay--user-data1604.yaml b/tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/underlay--user-data1604.yaml
deleted file mode 100644
index a426f10..0000000
--- a/tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,77 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifup enp8s0f0
- #- sudo route add default gw {gateway} {interface_name}
- - sudo ifup enp8s0f1
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
-
- ############## TCP Cloud cfg01 node ##################
- #- sleep 120
- - echo "Preparing base OS"
- - which wget >/dev/null || (apt-get update; apt-get install -y wget)
-
- - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list
- - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -
-
- - apt-get clean
- - eatmydata apt-get update && apt-get -y upgrade
-
- # Install common packages
- - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
- ########################################################
- # Node is ready, allow SSH access
- - echo "Allow SSH access ..."
- - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto enp8s0f0
- iface enp8s0f0 inet dhcp
- auto enp8s0f1
- iface enp8s0f1 inet dhcp
diff --git a/tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/underlay.yaml b/tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/underlay.yaml
deleted file mode 100644
index 6b3b0c5..0000000
--- a/tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/underlay.yaml
+++ /dev/null
@@ -1,366 +0,0 @@
-# Set the repository suite, one of the: 'nightly', 'testing', 'stable', or any other required
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
-{% import 'mk24_lab_ovs_dvr_vlan_bm/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'mk24_lab_ovs_dvr_vlan_bm/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'mk24_lab_ovs_dvr_vlan_bm/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data {{ CLOUDINIT_USER_DATA_1604 }}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'mk24_lab_ovs_dvr_vlan_bm') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
-{% set HOSTNAME_KVM01 = os_env('HOSTNAME_KVM01', 'kvm01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KVM02 = os_env('HOSTNAME_KVM02', 'kvm02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM03', 'kvm03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-
-template:
- devops_settings:
- env_name: {{ os_env('ENV_NAME', 'mk24_lab_ovs_dvr_vlan_bm_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
- address_pools:
- admin-pool01:
- net: {{ os_env('ADMIN_ADDRESS_POOL01', '172.18.173.96/27:27') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_KVM01 }}: 172.18.173.112
- default_{{ HOSTNAME_KVM02 }}: 172.18.173.113
- default_{{ HOSTNAME_KVM03 }}: 172.18.173.114
- default_{{ HOSTNAME_CMP01 }}: 172.18.173.115
- default_{{ HOSTNAME_CMP02 }}: 172.18.173.116
- default_{{ HOSTNAME_GTW01 }}: 172.18.173.117
- default_{{ HOSTNAME_CFG01 }}: 172.18.173.118
-
- groups:
- - name: default
- driver:
- name: devops_driver_ironic
- params:
- os_auth_token: fake-token
- ironic_url: !os_env IRONIC_URL # URL that will be used by fuel-devops
- # to access Ironic API
- # Agent URL that is accessible from deploying node when nodes
- # are bootstrapped with PXE. Usually PXE/provision network address is used.
- agent_kernel_url: !os_env IRONIC_AGENT_KERNEL_URL
- agent_ramdisk_url: !os_env IRONIC_AGENT_RAMDISK_URL
-
- network_pools:
- admin: admin-pool01
-
- l2_network_devices:
- admin:
- address_pool: admin-pool01
-
-
- nodes:
-
- - name: {{ HOSTNAME_CFG01 }}
- role: salt_master
- params:
- ipmi_user: !os_env IPMI_USER
- ipmi_password: !os_env IPMI_PASSWORD
- ipmi_previlegies: OPERATOR
- ipmi_host: !os_env IPMI_HOST_CFG01 # hostname or IP address
- ipmi_lan_interface: lanplus
- ipmi_port: 623
-
- root_volume_name: system # see 'volumes' below
- cloud_init_volume_name: iso # see 'volumes' below
- cloud_init_iface_up: enp8s0f1 # see 'interfaces' below.
- # this interface is passed to 'user-data'
- # to substitute {interface_name} variable if it is used there
-
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 200
-
- # The same as for agent URL, here is an URL to the image that should be
- # used for deploy the node. It should also be accessible from deploying
- # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
- source_image: !os_env IRONIC_SOURCE_IMAGE_URL
- source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
-
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_cfg01
-
- interfaces:
- - label: enp8s0f1
- l2_network_device: admin
- mac_address: !os_env PXE_MAC_ADDRESS_CFG01
-
- network_config:
- enp8s0f1:
- networks:
- - admin
-
- - name: {{ HOSTNAME_KVM01 }}
- role: salt_minion
- params:
- ipmi_user: !os_env IPMI_USER
- ipmi_password: !os_env IPMI_PASSWORD
- ipmi_previlegies: OPERATOR
- ipmi_host: !os_env IPMI_HOST_KVM01 # hostname or IP address
- ipmi_lan_interface: lanplus
- ipmi_port: 623
-
- root_volume_name: system # see 'volumes' below
- cloud_init_volume_name: iso # see 'volumes' below
- cloud_init_iface_up: enp8s0f1 # see 'interfaces' below.
- # this interface is passed to 'user-data'
- # to substitute {interface_name} variable if it is used there
-
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 200
-
- # The same as for agent URL, here is an URL to the image that should be
- # used for deploy the node. It should also be accessible from deploying
- # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
- source_image: !os_env IRONIC_SOURCE_IMAGE_URL
- source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
-
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data
-
- interfaces:
- - label: enp8s0f1
- l2_network_device: admin
- mac_address: !os_env PXE_MAC_ADDRESS_KVM01
-
- network_config:
- enp8s0f1:
- networks:
- - admin
-
-
- - name: {{ HOSTNAME_KVM02 }}
- role: salt_minion
- params:
- ipmi_user: !os_env IPMI_USER
- ipmi_password: !os_env IPMI_PASSWORD
- ipmi_previlegies: OPERATOR
- ipmi_host: !os_env IPMI_HOST_KVM02 # hostname or IP address
- ipmi_lan_interface: lanplus
- ipmi_port: 623
-
- root_volume_name: system # see 'volumes' below
- cloud_init_volume_name: iso # see 'volumes' below
- cloud_init_iface_up: enp8s0f1 # see 'interfaces' below.
- # this interface is passed to 'user-data'
- # to substitute {interface_name} variable if it is used there
-
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 200
-
- # The same as for agent URL, here is an URL to the image that should be
- # used for deploy the node. It should also be accessible from deploying
- # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
- source_image: !os_env IRONIC_SOURCE_IMAGE_URL
- source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
-
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data
-
- interfaces:
- - label: enp8s0f1
- l2_network_device: admin
- mac_address: !os_env PXE_MAC_ADDRESS_KVM02
-
- network_config:
- enp8s0f1:
- networks:
- - admin
-
-
- - name: {{ HOSTNAME_KVM03 }}
- role: salt_minion
- params:
- ipmi_user: !os_env IPMI_USER
- ipmi_password: !os_env IPMI_PASSWORD
- ipmi_previlegies: OPERATOR
- ipmi_host: !os_env IPMI_HOST_KVM03 # hostname or IP address
- ipmi_lan_interface: lanplus
- ipmi_port: 623
-
- root_volume_name: system # see 'volumes' below
- cloud_init_volume_name: iso # see 'volumes' below
- cloud_init_iface_up: enp8s0f1 # see 'interfaces' below.
- # this interface is passed to 'user-data'
- # to substitute {interface_name} variable if it is used there
-
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 200
-
- # The same as for agent URL, here is an URL to the image that should be
- # used for deploy the node. It should also be accessible from deploying
- # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
- source_image: !os_env IRONIC_SOURCE_IMAGE_URL
- source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
-
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data
-
- interfaces:
- - label: enp8s0f1
- l2_network_device: admin
- mac_address: !os_env PXE_MAC_ADDRESS_KVM03
-
- network_config:
- enp8s0f1:
- networks:
- - admin
-
-
- - name: {{ HOSTNAME_CMP01 }}
- role: salt_minion
- params:
- ipmi_user: !os_env IPMI_USER
- ipmi_password: !os_env IPMI_PASSWORD
- ipmi_previlegies: OPERATOR
- ipmi_host: !os_env IPMI_HOST_CMP01 # hostname or IP address
- ipmi_lan_interface: lanplus
- ipmi_port: 623
-
- root_volume_name: system # see 'volumes' below
- cloud_init_volume_name: iso # see 'volumes' below
- cloud_init_iface_up: enp8s0f1 # see 'interfaces' below.
- # this interface is passed to 'user-data'
- # to substitute {interface_name} variable if it is used there
-
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 200
-
- # The same as for agent URL, here is an URL to the image that should be
- # used for deploy the node. It should also be accessible from deploying
- # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
- source_image: !os_env IRONIC_SOURCE_IMAGE_URL
- source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
-
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data
-
- interfaces:
- - label: enp8s0f1
- l2_network_device: admin
- mac_address: !os_env PXE_MAC_ADDRESS_CMP01
-
- network_config:
- enp8s0f1:
- networks:
- - admin
-
-
- - name: {{ HOSTNAME_CMP02 }}
- role: salt_minion
- params:
- ipmi_user: !os_env IPMI_USER
- ipmi_password: !os_env IPMI_PASSWORD
- ipmi_previlegies: OPERATOR
- ipmi_host: !os_env IPMI_HOST_CMP02 # hostname or IP address
- ipmi_lan_interface: lanplus
- ipmi_port: 623
-
- root_volume_name: system # see 'volumes' below
- cloud_init_volume_name: iso # see 'volumes' below
- cloud_init_iface_up: enp8s0f1 # see 'interfaces' below.
- # this interface is passed to 'user-data'
- # to substitute {interface_name} variable if it is used there
-
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 200
-
- # The same as for agent URL, here is an URL to the image that should be
- # used for deploy the node. It should also be accessible from deploying
- # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
- source_image: !os_env IRONIC_SOURCE_IMAGE_URL
- source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
-
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data
-
- interfaces:
- - label: enp8s0f1
- l2_network_device: admin
- mac_address: !os_env PXE_MAC_ADDRESS_CMP02
-
- network_config:
- enp8s0f1:
- networks:
- - admin
-
-
- - name: {{ HOSTNAME_GTW01 }}
- role: salt_minion
- params:
- ipmi_user: !os_env IPMI_USER
- ipmi_password: !os_env IPMI_PASSWORD
- ipmi_previlegies: OPERATOR
- ipmi_host: !os_env IPMI_HOST_GTW01 # hostname or IP address
- ipmi_lan_interface: lanplus
- ipmi_port: 623
-
- root_volume_name: system # see 'volumes' below
- cloud_init_volume_name: iso # see 'volumes' below
- cloud_init_iface_up: enp8s0f1 # see 'interfaces' below.
- # this interface is passed to 'user-data'
- # to substitute {interface_name} variable if it is used there
-
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 200
-
- # The same as for agent URL, here is an URL to the image that should be
- # used for deploy the node. It should also be accessible from deploying
- # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
- source_image: !os_env IRONIC_SOURCE_IMAGE_URL
- source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
-
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data
-
- interfaces:
- - label: enp8s0f1
- l2_network_device: admin
- mac_address: !os_env PXE_MAC_ADDRESS_GTW01
-
- network_config:
- enp8s0f1:
- networks:
- - admin
-
-
diff --git a/tcp_tests/templates/physical-mcp-ocata-offline-ovs/openstack.yaml b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/openstack.yaml
index 745df96..4f2df46 100644
--- a/tcp_tests/templates/physical-mcp-ocata-offline-ovs/openstack.yaml
+++ b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/openstack.yaml
@@ -1,4 +1,4 @@
-{% from 'physical_mcp11_ovs_dpdk/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'physical-mcp-ocata-offline-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
# Install OpenStack control services
diff --git a/tcp_tests/templates/physical_mcp11_dvr/Readme.txt b/tcp_tests/templates/physical_mcp11_dvr/Readme.txt
deleted file mode 100644
index a3297a8..0000000
--- a/tcp_tests/templates/physical_mcp11_dvr/Readme.txt
+++ /dev/null
@@ -1 +0,0 @@
-PoC templates. Do not use!
\ No newline at end of file
diff --git a/tcp_tests/templates/physical_mcp11_dvr/common-services.yaml b/tcp_tests/templates/physical_mcp11_dvr/common-services.yaml
deleted file mode 100644
index cb176c1..0000000
--- a/tcp_tests/templates/physical_mcp11_dvr/common-services.yaml
+++ /dev/null
@@ -1,118 +0,0 @@
-{% from 'physical_mcp11_dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Check the VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
- echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Install glusterfs
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Setup glusterfs on primary controller
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check the gluster status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
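Every entry in the deleted common-services.yaml above follows one step schema: a human-readable description, a shell cmd executed on node_name, a retry policy of count attempts with delay seconds between them, and skip_fail to tolerate steps that are allowed to fail (such as the keepalived runs). A hedged sketch of a driver for that schema, not the real tcp-qa runner — it executes locally rather than over SSH on node_name:

```python
# Sketch of the step semantics used throughout these deploy templates.
import subprocess
import time

def run_step(step):
    retry = step.get('retry', {'count': 1, 'delay': 0})
    for _ in range(retry['count']):
        if subprocess.run(step['cmd'], shell=True).returncode == 0:
            return True
        time.sleep(retry['delay'])
    if step.get('skip_fail'):
        # skip_fail: true downgrades a persistent failure to a warning
        print('WARNING, ignoring failed step: {0}'.format(step['description']))
        return False
    raise RuntimeError('Step failed: {0}'.format(step['description']))

# Example: the keepalived step above, with Jinja variables already rendered.
run_step({'description': 'Install keepalived',
          'cmd': "salt -C 'I@keepalived:cluster' state.sls keepalived",
          'retry': {'count': 1, 'delay': 10},
          'skip_fail': True})
```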
diff --git a/tcp_tests/templates/physical_mcp11_dvr/openstack.yaml b/tcp_tests/templates/physical_mcp11_dvr/openstack.yaml
deleted file mode 100644
index e01ee0f..0000000
--- a/tcp_tests/templates/physical_mcp11_dvr/openstack.yaml
+++ /dev/null
@@ -1,169 +0,0 @@
-{% from 'physical_mcp11_dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-# Install OpenStack control services
-
-- description: Install glance on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glance:server' state.sls glance -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install keystone service (note that different fernet keys are created on different nodes)
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' state.sls keystone.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 15}
- skip_fail: false
-
-- description: Restart apache due to PROD-10477
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl restart apache2"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 15}
- skip_fail: false
-
-- description: Check apache status due to PROD-10477
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl status apache2"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 15}
- skip_fail: false
-
-- description: Mount glusterfs.client volumes (requires the 'keystone' and 'glusterfs' system users to exist)
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glance:server' state.sls glusterfs.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Update fernet keys for keystone server on the mounted glusterfs volume
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' state.sls keystone.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Populate keystone services/tenants/admins
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:client' state.sls keystone.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check keystone service-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack service list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check glance image-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; glance image-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Install nova on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nova:controller' state.sls nova -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check nova service-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; nova service-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Install cinder
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@cinder:controller' state.sls cinder -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check cinder list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; cinder list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Install neutron service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@neutron:server' state.sls neutron -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install neutron on gtw node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@neutron:gateway' state.sls neutron
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Check neutron agent-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; neutron agent-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Install heat service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@heat:server' state.sls heat -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check heat service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; heat resource-type-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Deploy horizon dashboard
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@horizon:server' state.sls horizon
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Deploy nginx proxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-# Install compute node
-
-- description: Apply formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Re-apply (as in the docs) formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check IP on computes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
- 'ip a'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 10, delay: 30}
- skip_fail: false
diff --git a/tcp_tests/templates/physical_mcp11_dvr/salt.yaml b/tcp_tests/templates/physical_mcp11_dvr/salt.yaml
deleted file mode 100644
index 65e7458..0000000
--- a/tcp_tests/templates/physical_mcp11_dvr/salt.yaml
+++ /dev/null
@@ -1,79 +0,0 @@
-{% from 'physical_mcp11_dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'physical_mcp11_dvr/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'physical_mcp11_dvr/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-baremetal-lab') %}
-# See shared-salt.yaml for other salt model repository parameters
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch"') }}
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-
-- description: Configure linux for present baremetal nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C '* and not
- cfg01*' state.sls linux
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 5}
- skip_fail: false
-
-- description: '*Workaround* for the bug https://mirantis.jira.com/browse/PROD-9576 to get bond0 connectivity *without* rebooting nodes'
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
- "cat /etc/network/interfaces | grep bond-slaves | awk '{print \$2}' | xargs -I {} ifenslave bond0 {}"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: ovs-dvr-vlan model specific: execute 'libvirt' states to create the necessary libvirt networks
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'kvm*' state.sls libvirt
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Create VMs for control plane
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'kvm*' state.sls salt.control
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
-
-
-
-- description: Refresh pillars on all minions
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Sync all salt resources
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all && sleep 5
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Show reclass-salt --top for generated nodes
- cmd: reclass-salt --top -u /srv/salt/reclass/nodes/_generated/
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Execute salt.minion.cert
- cmd: salt-call --no-color state.sls salt.minion.cert -l info;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
diff --git a/tcp_tests/templates/physical_mcp11_dvr/underlay--meta-data.yaml b/tcp_tests/templates/physical_mcp11_dvr/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/physical_mcp11_dvr/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
- instance-id: iid-local1
- hostname: {hostname}
- local-hostname: {hostname}
diff --git a/tcp_tests/templates/physical_mcp11_dvr/underlay--user-data-cfg01.yaml b/tcp_tests/templates/physical_mcp11_dvr/underlay--user-data-cfg01.yaml
deleted file mode 100644
index f64ad5e..0000000
--- a/tcp_tests/templates/physical_mcp11_dvr/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,87 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifup eth0
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- ############## TCP Cloud cfg01 node ##################
- #- sleep 120
- - echo "Preparing base OS"
-
- - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
- - which wget >/dev/null || (apt-get update; apt-get install -y wget);
-
- # Configure Ubuntu mirrors
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
- - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/{{ SALT_VERSION }} xenial main" > /etc/apt/sources.list.d/saltstack.list;
- - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/{{ SALT_VERSION }}/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
- - apt-get clean
- - apt-get update
-
- # Install common packages
- - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree
-
- ########################################################
- # Node is ready, allow SSH access
- - echo "Allow SSH access ..."
- - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto eth0
- iface eth0 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
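Note the two placeholder styles in the deleted user-data files: `{{ ... }}` expressions such as REPOSITORY_SUITE are expanded by Jinja when the template is loaded, while brace placeholders such as {gateway}, {hostname} and {interface_name} survive into the stored string (see the '# All the data below will be stored as a string object' header) and are substituted later with per-node values. A small sketch of that second stage; fill_per_node is a hypothetical helper, not the fuel-devops API:

```python
# Sketch of per-node substitution for brace-style placeholders that remain
# in the user-data string after the Jinja pass. 'fill_per_node' is assumed.
def fill_per_node(user_data, **node_params):
    return user_data.format(**node_params)

jinja_rendered = 'echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base'
print(fill_per_node(jinja_rendered, gateway='172.16.164.1'))
```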
diff --git a/tcp_tests/templates/physical_mcp11_dvr/underlay--user-data1604.yaml b/tcp_tests/templates/physical_mcp11_dvr/underlay--user-data1604.yaml
deleted file mode 100644
index 43d9175..0000000
--- a/tcp_tests/templates/physical_mcp11_dvr/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,78 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifup eth0
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
-
- ############## TCP Cloud cfg01 node ##################
- #- sleep 120
- - echo "Preparing base OS"
- - which wget >/dev/null || (apt-get update; apt-get install -y wget)
-
- # Configure Ubuntu mirrors
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
- - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/{{ SALT_VERSION }} xenial main" > /etc/apt/sources.list.d/saltstack.list
- - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/{{ SALT_VERSION }}/SALTSTACK-GPG-KEY.pub | apt-key add -
-
- - apt-get clean
- - eatmydata apt-get update && apt-get -y upgrade
-
- # Install common packages
- - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
- ########################################################
- # Node is ready, allow SSH access
- - echo "Allow SSH access ..."
- - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto eth0
- iface eth0 inet dhcp
diff --git a/tcp_tests/templates/physical_mcp11_dvr/underlay.yaml b/tcp_tests/templates/physical_mcp11_dvr/underlay.yaml
deleted file mode 100644
index dbd5f35..0000000
--- a/tcp_tests/templates/physical_mcp11_dvr/underlay.yaml
+++ /dev/null
@@ -1,458 +0,0 @@
-# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
-{% import 'physical_mcp11_dvr/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'physical_mcp11_dvr/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'physical_mcp11_dvr/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data {{ CLOUDINIT_USER_DATA_1604 }}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'physical_mcp11_dvr') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KVM01 = os_env('HOSTNAME_KVM01', 'kvm01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KVM02 = os_env('HOSTNAME_KVM02', 'kvm02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM03', 'kvm03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP001 = os_env('HOSTNAME_CMP001', 'cmp001.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP002 = os_env('HOSTNAME_CMP002', 'cmp002.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW02 = os_env('HOSTNAME_GTW02', 'gtw02.' + DOMAIN_NAME) %}
-
-template:
- devops_settings:
- env_name: {{ os_env('ENV_NAME', 'physical_mcp11_dvr_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
- address_pools:
- admin-pool01:
- net: {{ os_env('ADMIN_ADDRESS_POOL01', '172.16.164.0/26:26') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: 172.16.164.10
- default_{{ HOSTNAME_KVM01 }}: 172.16.164.6
- default_{{ HOSTNAME_KVM02 }}: 172.16.164.7
- default_{{ HOSTNAME_KVM03 }}: 172.16.164.8
- default_{{ HOSTNAME_CMP001 }}: 172.16.164.2
- default_{{ HOSTNAME_CMP002 }}: 172.16.164.3
- default_{{ HOSTNAME_GTW01 }}: 172.16.164.61
- default_{{ HOSTNAME_GTW02 }}: 172.16.164.5
-
- groups:
- - name: default
- driver:
- name: devops_driver_ironic
- params:
- os_auth_token: fake-token
-            ironic_url: !os_env IRONIC_URL # URL that will be used by fuel-devops
-                                           # to access the Ironic API
-            # Agent URL that is accessible from the deploying node when nodes
-            # are bootstrapped with PXE. Usually the PXE/provision network address is used.
- agent_kernel_url: !os_env IRONIC_AGENT_KERNEL_URL
- agent_ramdisk_url: !os_env IRONIC_AGENT_RAMDISK_URL
-
- network_pools:
- admin: admin-pool01
-
- l2_network_devices:
- admin:
- address_pool: admin-pool01
-
-
- nodes:
-
- - name: {{ HOSTNAME_CFG01 }}
- role: salt_master
- params:
- ipmi_user: !os_env IPMI_USER
- ipmi_password: !os_env IPMI_PASSWORD
- ipmi_previlegies: OPERATOR
- ipmi_host: !os_env IPMI_HOST_CFG01 # hostname or IP address
- ipmi_lan_interface: lanplus
- ipmi_port: 623
-
- root_volume_name: system # see 'volumes' below
- cloud_init_volume_name: iso # see 'volumes' below
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 200
-
-                  # The same as for the agent URL, this is a URL to the image that should be
-                  # used to deploy the node. It should also be accessible from the deploying
-                  # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
-                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
-                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_cfg01
-
- interfaces:
- - label: eth0
- mac_address: !os_env ETH0_MAC_ADDRESS_CFG01
- - label: eth1
- l2_network_device: admin
- mac_address: !os_env ETH1_MAC_ADDRESS_CFG01
-
- network_config:
- eth0:
- networks:
- - infra
- eth1:
- networks:
- - admin
-
- - name: {{ HOSTNAME_KVM01 }}
- role: salt_minion
- params:
- ipmi_user: !os_env IPMI_USER
- ipmi_password: !os_env IPMI_PASSWORD
- ipmi_previlegies: OPERATOR
- ipmi_host: !os_env IPMI_HOST_KVM01 # hostname or IP address
- ipmi_lan_interface: lanplus
- ipmi_port: 623
-
- root_volume_name: system # see 'volumes' below
- cloud_init_volume_name: iso # see 'volumes' below
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 200
-
-                  # The same as for the agent URL, this is a URL to the image that should be
-                  # used to deploy the node. It should also be accessible from the deploying
-                  # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
-                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
-                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data
-
- interfaces:
- - label: eth0
- l2_network_device: admin
- mac_address: !os_env ETH0_MAC_ADDRESS_KVM01
- - label: eth1
- mac_address: !os_env ETH1_MAC_ADDRESS_KVM01
- # there is no eth2 interface on the node
- #- label: eth2
- # mac_address: !os_env ETH2_MAC_ADDRESS_KVM01
-
- network_config:
- eth0:
- networks:
- - admin
- bond0:
- networks:
- - control
- aggregation: active-backup
- parents:
- - eth1
- #- eth2
-
- - name: {{ HOSTNAME_KVM02 }}
- role: salt_minion
- params:
- ipmi_user: !os_env IPMI_USER
- ipmi_password: !os_env IPMI_PASSWORD
- ipmi_previlegies: OPERATOR
- ipmi_host: !os_env IPMI_HOST_KVM02 # hostname or IP address
- ipmi_lan_interface: lanplus
- ipmi_port: 623
-
- root_volume_name: system # see 'volumes' below
- cloud_init_volume_name: iso # see 'volumes' below
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 200
-
-                  # The same as for the agent URL, this is a URL to the image that should be
-                  # used to deploy the node. It should also be accessible from the deploying
-                  # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
-                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
-                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data
-
- interfaces:
- - label: eth0
- l2_network_device: admin
- mac_address: !os_env ETH0_MAC_ADDRESS_KVM02
- - label: eth1
- mac_address: !os_env ETH1_MAC_ADDRESS_KVM02
- - label: eth2
- mac_address: !os_env ETH2_MAC_ADDRESS_KVM02
-
- network_config:
- eth0:
- networks:
- - admin
- bond0:
- networks:
- - control
- aggregation: active-backup
- parents:
- - eth1
- - eth2
-
- - name: {{ HOSTNAME_KVM03 }}
- role: salt_minion
- params:
- ipmi_user: !os_env IPMI_USER
- ipmi_password: !os_env IPMI_PASSWORD
- ipmi_previlegies: OPERATOR
- ipmi_host: !os_env IPMI_HOST_KVM03 # hostname or IP address
- ipmi_lan_interface: lanplus
- ipmi_port: 623
-
- root_volume_name: system # see 'volumes' below
- cloud_init_volume_name: iso # see 'volumes' below
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 200
-
-                  # The same as for the agent URL, this is a URL to the image that should be
-                  # used to deploy the node. It should also be accessible from the deploying
-                  # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
-                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
-                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data
-
- interfaces:
- - label: eth0
- l2_network_device: admin
- mac_address: !os_env ETH0_MAC_ADDRESS_KVM03
- - label: eth1
- mac_address: !os_env ETH1_MAC_ADDRESS_KVM03
- - label: eth2
- mac_address: !os_env ETH2_MAC_ADDRESS_KVM03
-
- network_config:
- eth0:
- networks:
- - admin
- bond0:
- networks:
- - control
- aggregation: active-backup
- parents:
- - eth1
- - eth2
-
-
- - name: {{ HOSTNAME_CMP001 }}
- role: salt_minion
- params:
- ipmi_user: !os_env IPMI_USER
- ipmi_password: !os_env IPMI_PASSWORD
- ipmi_previlegies: OPERATOR
- ipmi_host: !os_env IPMI_HOST_CMP001 # hostname or IP address
- ipmi_lan_interface: lanplus
- ipmi_port: 623
-
- root_volume_name: system # see 'volumes' below
- cloud_init_volume_name: iso # see 'volumes' below
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 200
-
-                  # The same as for the agent URL, this is a URL to the image that should be
-                  # used to deploy the node. It should also be accessible from the deploying
-                  # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
-                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
-                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data
-
- interfaces:
- - label: eth0
- l2_network_device: admin
- mac_address: !os_env ETH0_MAC_ADDRESS_CMP001
- - label: eth1
- mac_address: !os_env ETH1_MAC_ADDRESS_CMP001
- - label: eth2
- mac_address: !os_env ETH2_MAC_ADDRESS_CMP001
-
- network_config:
- eth0:
- networks:
- - admin
- bond0:
- networks:
- - control
- aggregation: active-backup
- parents:
- - eth1
- - eth2
-
-
- - name: {{ HOSTNAME_CMP002 }}
- role: salt_minion
- params:
- ipmi_user: !os_env IPMI_USER
- ipmi_password: !os_env IPMI_PASSWORD
- ipmi_previlegies: OPERATOR
- ipmi_host: !os_env IPMI_HOST_CMP002 # hostname or IP address
- ipmi_lan_interface: lanplus
- ipmi_port: 623
-
- root_volume_name: system # see 'volumes' below
- cloud_init_volume_name: iso # see 'volumes' below
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 200
-
-                  # The same as for the agent URL, this is a URL to the image that should be
-                  # used to deploy the node. It should also be accessible from the deploying
-                  # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
-                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
-                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data
-
- interfaces:
- - label: eth0
- l2_network_device: admin
- mac_address: !os_env ETH0_MAC_ADDRESS_CMP002
- - label: eth1
- mac_address: !os_env ETH1_MAC_ADDRESS_CMP002
- - label: eth2
- mac_address: !os_env ETH2_MAC_ADDRESS_CMP002
-
- network_config:
- eth0:
- networks:
- - admin
- bond0:
- networks:
- - control
- aggregation: active-backup
- parents:
- - eth1
- - eth2
-
-
- - name: {{ HOSTNAME_GTW01 }}
- role: salt_minion
- params:
- ipmi_user: !os_env IPMI_USER
- ipmi_password: !os_env IPMI_PASSWORD
- ipmi_previlegies: OPERATOR
- ipmi_host: !os_env IPMI_HOST_GTW01 # hostname or IP address
- ipmi_lan_interface: lanplus
- ipmi_port: 623
-
- root_volume_name: system # see 'volumes' below
- cloud_init_volume_name: iso # see 'volumes' below
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 200
-
-                  # The same as for the agent URL, this is a URL to the image that should be
-                  # used to deploy the node. It should also be accessible from the deploying
-                  # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
-                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
-                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data
-
- interfaces:
- - label: eth0
- l2_network_device: admin
- mac_address: !os_env ETH0_MAC_ADDRESS_GTW01
- - label: eth1
- mac_address: !os_env ETH1_MAC_ADDRESS_GTW01
- - label: eth2
- mac_address: !os_env ETH2_MAC_ADDRESS_GTW01
-
- network_config:
- eth0:
- networks:
- - admin
- bond0:
- networks:
- - control
- aggregation: active-backup
- parents:
- - eth1
- - eth2
-
- - name: {{ HOSTNAME_GTW02 }}
- role: salt_minion
- params:
- ipmi_user: !os_env IPMI_USER
- ipmi_password: !os_env IPMI_PASSWORD
- ipmi_previlegies: OPERATOR
- ipmi_host: !os_env IPMI_HOST_GTW02 # hostname or IP address
- ipmi_lan_interface: lanplus
- ipmi_port: 623
-
- root_volume_name: system # see 'volumes' below
- cloud_init_volume_name: iso # see 'volumes' below
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 200
-
-                  # The same as for the agent URL, this is a URL to the image that should be
-                  # used to deploy the node. It should also be accessible from the deploying
-                  # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
-                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
-                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
-                - name: iso  # Volume with name 'iso' will be used
-                             # to store the image with cloud-init metadata.
-
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data
-
- interfaces:
- - label: eth0
- l2_network_device: admin
- mac_address: !os_env ETH0_MAC_ADDRESS_GTW02
- - label: eth1
- mac_address: !os_env ETH1_MAC_ADDRESS_GTW02
- # there is no eth2 interface on the node
- #- label: eth2
- # mac_address: !os_env ETH2_MAC_ADDRESS_GTW02
-
- network_config:
- eth0:
- networks:
- - admin
- bond0:
- networks:
- - control
- aggregation: active-backup
- parents:
- - eth1
- #- eth2
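Values throughout the deleted underlay.yaml come from the environment via the `!os_env` YAML tag, optionally with a fallback, as in `capacity: !os_env NODE_VOLUME_SIZE, 200`. The real constructor is provided by fuel-devops; the following stand-in only sketches the contract:

```python
# Sketch of an '!os_env VAR[, default]' constructor; illustrative only.
import os
import yaml

def os_env_constructor(loader, node):
    parts = [p.strip() for p in loader.construct_scalar(node).split(',', 1)]
    default = parts[1] if len(parts) > 1 else None
    value = os.environ.get(parts[0], default)
    if value is None:
        raise ValueError('Environment variable {0} is not set'.format(parts[0]))
    return value

yaml.SafeLoader.add_constructor('!os_env', os_env_constructor)

# Falls back to '200' when NODE_VOLUME_SIZE is unset in the environment.
print(yaml.safe_load('capacity: !os_env NODE_VOLUME_SIZE, 200'))
```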
diff --git a/tcp_tests/templates/physical_mcp11_ovs_dpdk/Readme.txt b/tcp_tests/templates/physical_mcp11_ovs_dpdk/Readme.txt
deleted file mode 100644
index a3297a8..0000000
--- a/tcp_tests/templates/physical_mcp11_ovs_dpdk/Readme.txt
+++ /dev/null
@@ -1 +0,0 @@
-PoC templates. Do not use!
\ No newline at end of file
diff --git a/tcp_tests/templates/physical_mcp11_ovs_dpdk/common-services.yaml b/tcp_tests/templates/physical_mcp11_ovs_dpdk/common-services.yaml
deleted file mode 100644
index 0c30920..0000000
--- a/tcp_tests/templates/physical_mcp11_ovs_dpdk/common-services.yaml
+++ /dev/null
@@ -1,131 +0,0 @@
-{% from 'physical_mcp11_ovs_dpdk/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-- description: Refresh grains
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 15}
- skip_fail: false
-
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Check the VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
- echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Install glusterfs
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Setup glusterfs on primary controller
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check the gluster status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Deploy nginx proxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
diff --git a/tcp_tests/templates/physical_mcp11_ovs_dpdk/openstack.yaml b/tcp_tests/templates/physical_mcp11_ovs_dpdk/openstack.yaml
deleted file mode 100644
index 83674b2..0000000
--- a/tcp_tests/templates/physical_mcp11_ovs_dpdk/openstack.yaml
+++ /dev/null
@@ -1,161 +0,0 @@
-{% from 'physical_mcp11_ovs_dpdk/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-# Install OpenStack control services
-
-- description: Install glance on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glance:server' state.sls glance -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install keystone service (note that different fernet keys are created on different nodes)
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' state.sls keystone.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 15}
- skip_fail: false
-
-- description: Restart apache due to PROD-10477
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl restart apache2"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 15}
- skip_fail: false
-
-- description: Check apache status due to PROD-10477
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl status apache2"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 15}
- skip_fail: false
-
-- description: Mount glusterfs.client volumes (requires the 'keystone' and 'glusterfs' system users to exist)
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glance:server' state.sls glusterfs.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Update fernet keys for keystone server on the mounted glusterfs volume
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' state.sls keystone.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Populate keystone services/tenants/admins
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:client' state.sls keystone.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check keystone service-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack service list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check glance image-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; glance image-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Install nova on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nova:controller' state.sls nova -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check nova service-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; nova service-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Install cinder
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@cinder:controller' state.sls cinder -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check cinder list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; cinder list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Install neutron service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@neutron:server' state.sls neutron -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install neutron on gtw node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@neutron:gateway' state.sls neutron
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Check neutron agent-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; neutron agent-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Install heat service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@heat:server' state.sls heat -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check heat service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack orchestration resource type list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Deploy horizon dashboard
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@horizon:server' state.sls horizon
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-# Install compute node
-
-- description: Apply formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Re-apply (as in the docs) formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check IP on computes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
- 'ip a'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 10, delay: 30}
- skip_fail: false
diff --git a/tcp_tests/templates/physical_mcp11_ovs_dpdk/salt.yaml b/tcp_tests/templates/physical_mcp11_ovs_dpdk/salt.yaml
deleted file mode 100644
index 1a14b23..0000000
--- a/tcp_tests/templates/physical_mcp11_ovs_dpdk/salt.yaml
+++ /dev/null
@@ -1,136 +0,0 @@
-{% from 'physical_mcp11_ovs_dpdk/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'physical_mcp11_ovs_dpdk/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'physical_mcp11_ovs_dpdk/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-baremetal-lab') %}
-# See shared-salt.yaml for other salt model repository parameters
-
-# Environment model name stored in https://github.com/Mirantis/tcp-qa/tree/master/tcp_tests/environments
-{% set ENVIRONMENT_MODEL_NAME = os_env('ENVIRONMENT_MODEL_NAME','lab03_ovs_dpdk') %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch"') }}
-
-- description: "[EXPERIMENTAL] Remove linux.network.interface object from the cluster/system models and use fixed 'environment' model instead"
- cmd: |
- apt-get -y install python-virtualenv python-pip build-essential python-dev libssl-dev;
- [[ -d /root/venv-reclass-tools ]] || virtualenv /root/venv-reclass-tools;
- . /root/venv-reclass-tools/bin/activate;
- pip install git+https://github.com/dis-xcom/reclass-tools;
- reclass-tools del-key parameters.linux.network.interface /srv/salt/reclass/classes/cluster/;
- reclass-tools del-key parameters.linux.network.interface /srv/salt/reclass/classes/system/;
- reclass-tools del-key parameters.linux.network.interface /usr/share/salt-formulas/reclass/;
- git clone https://github.com/Mirantis/tcp-qa /tmp/tcp-qa;
- ln -s /tmp/tcp-qa/tcp_tests/environment/ /srv/salt/reclass/classes;
- if ! reclass-tools get-key 'classes' /srv/salt/reclass/nodes/{{ HOSTNAME_CFG01 }}.yml | grep -q "environment.{{ ENVIRONMENT_MODEL_NAME }}$"; then
- reclass-tools add-key 'classes' 'environment.{{ ENVIRONMENT_MODEL_NAME }}' /srv/salt/reclass/nodes/{{ HOSTNAME_CFG01 }}.yml --merge ;
- fi;
-
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-########################################
-# Spin up Control Plane VMs on KVM nodes
-########################################
-
-{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
-
-- description: Refresh pillars for present baremetal nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: '*Workaround* to enable hugepages on the cmp* nodes for OVS setup in the linux formula'
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
- 'sudo apt-get install -y hugepages; sudo echo 2048 > /proc/sys/vm/nr_hugepages'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Sync all salt resources for present baremetal nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all && sleep 5
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-#- description: '*Workaround* Avoid reboot when IP addresses are doubled on interfaces and bridges at the same time. For test environments only!'
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'kvm*' cmd.run
-# "salt-call state.sls linux.network.interface && ls -1 /var/run/ | grep dhclient | awk -F'.' '{print \$2}' | xargs -I {} ifconfig {} 0.0.0.0"
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 3, delay: 5}
-# skip_fail: false
-
-- description: Configure linux for present baremetal nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C '* and not
- cfg01*' state.sls linux
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 5}
- skip_fail: false
-
-- description: ovs-dvr-vlan model specific: execute 'libvirt' states to create the necessary libvirt networks
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'kvm*' state.sls libvirt
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Create VMs for control plane
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'kvm*' state.sls salt.control
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
-
-- description: '*Workaround* to wait until the control-plane VMs appear in salt-key (instead of a fixed sleep)'
- cmd: |
- salt-key -l acc| sort > /tmp/current_keys.txt &&
- salt 'kvm*' cmd.run 'virsh list --name' | grep -v 'kvm'|sort|xargs -I {} fgrep {} /tmp/current_keys.txt
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 20, delay: 30}
- skip_fail: false
-
-#########################################
-# Configure all running salt minion nodes
-#########################################
-
-- description: Refresh pillars on all minions
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Sync all salt resources
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all && sleep 5
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Show reclass-salt --top for generated nodes
- cmd: reclass-salt --top -u /srv/salt/reclass/nodes/_generated/
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Execute salt.minion on config node to generate certificate
- cmd: salt-call --hard-crash --state-output=mixed --state-verbose=False state.sls salt.minion;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
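The salt-key workaround in the deleted salt.yaml above retries up to 20 times with a 30-second delay until every VM name reported by `virsh list --name` on the kvm nodes appears among the accepted salt keys. A hedged Python equivalent of that wait loop; the command invocations and output parsing are simplified:

```python
# Sketch: wait until all control-plane VMs have accepted salt keys.
import subprocess
import time

def accepted_keys():
    # 'salt-key -l acc' prints an 'Accepted Keys:' header, then minion ids.
    out = subprocess.check_output(['salt-key', '-l', 'acc']).decode()
    return {l.strip() for l in out.splitlines()
            if l.strip() and not l.endswith(':')}

def control_plane_vms():
    # VM names defined on the kvm hosts; 'kvm' lines are the hosts themselves.
    out = subprocess.check_output(
        ['salt', 'kvm*', 'cmd.run', 'virsh list --name']).decode()
    return {l.strip() for l in out.splitlines()
            if l.strip() and not l.strip().startswith('kvm')}

for _ in range(20):                      # retry: {count: 20, delay: 30}
    if control_plane_vms() <= accepted_keys():
        break
    time.sleep(30)
else:
    raise RuntimeError('control-plane VMs did not register their salt keys')
```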
diff --git a/tcp_tests/templates/physical_mcp11_ovs_dpdk/underlay--meta-data.yaml b/tcp_tests/templates/physical_mcp11_ovs_dpdk/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/physical_mcp11_ovs_dpdk/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
- instance-id: iid-local1
- hostname: {hostname}
- local-hostname: {hostname}
diff --git a/tcp_tests/templates/physical_mcp11_ovs_dpdk/underlay--user-data-cfg01.yaml b/tcp_tests/templates/physical_mcp11_ovs_dpdk/underlay--user-data-cfg01.yaml
deleted file mode 100644
index ee878a2..0000000
--- a/tcp_tests/templates/physical_mcp11_ovs_dpdk/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,92 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifdown eth0
- - sudo ip r d default || true # remove existing default route to get it from dhcp
- - sudo ifup eth0
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- ############## TCP Cloud cfg01 node ##################
- #- sleep 120
- - echo "Preparing base OS"
-
- - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
- - which wget >/dev/null || (apt-get update; apt-get install -y wget);
-
- # Configure Ubuntu mirrors
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
- - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/{{ SALT_VERSION }} xenial main" > /etc/apt/sources.list.d/saltstack.list;
- - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/{{ SALT_VERSION }}/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
- - apt-get clean
- - apt-get update
-
- # Install common packages
- - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree sshuttle
-
- # Use sshuttle to allow SSH access to the model-related control network 10.167.4.0/24 on baremetal/VM nodes from cfg01
- - sshuttle -r {{ ETH0_IP_ADDRESS_KVM01 }} 10.167.4.0/24 -D
-
- ########################################################
- # Node is ready, allow SSH access
- - echo "Allow SSH access ..."
- - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto eth0
- iface eth0 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/physical_mcp11_ovs_dpdk/underlay--user-data-hwe.yaml b/tcp_tests/templates/physical_mcp11_ovs_dpdk/underlay--user-data-hwe.yaml
deleted file mode 100644
index a8e8250..0000000
--- a/tcp_tests/templates/physical_mcp11_ovs_dpdk/underlay--user-data-hwe.yaml
+++ /dev/null
@@ -1,116 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Block SSH access while the node is being prepared
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- #- sudo ifup eth0
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
-
- ############## TCP Cloud cfg01 node ##################
- #- sleep 120
- - echo "Preparing base OS"
- - which wget >/dev/null || (apt-get update; apt-get install -y wget)
-
- # Configure Ubuntu mirrors
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
- - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/{{ SALT_VERSION }} xenial main" > /etc/apt/sources.list.d/saltstack.list
- - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/{{ SALT_VERSION }}/SALTSTACK-GPG-KEY.pub | apt-key add -
-
- - apt-get clean
- - eatmydata apt-get update && apt-get -y upgrade
-
- # Install common packages
- - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
- ########################################################
- # Node is ready, allow SSH access
- #- echo "Allow SSH access ..."
- #- sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- - apt-get install {{ os_env('LINUX_KERNEL_HWE_PACKAGE_NAME', 'linux-image-extra-4.10.0-42-generic') }} -y
- - reboot
- ########################################################
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- # The loopback network interface
- auto lo
- iface lo inet loopback
-
- auto {interface_name}
- iface {interface_name} inet dhcp
-
- - path: /etc/udev/rules.d/70-persistent-net.rules
- owner: root:root
- permissions: '0644'
- content: |
- # kvm01
- SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH0_MAC_ADDRESS_KVM01') }}", NAME="enp2s0f0"
- SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH1_MAC_ADDRESS_KVM01') }}", NAME="enp2s0f1"
- # kvm02
- SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH0_MAC_ADDRESS_KVM02') }}", NAME="enp2s0f0"
- SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH1_MAC_ADDRESS_KVM02') }}", NAME="enp2s0f1"
- # kvm03
- SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH0_MAC_ADDRESS_KVM03') }}", NAME="eno1"
- SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH1_MAC_ADDRESS_KVM03') }}", NAME="eno2"
- # cmp001
- SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH0_MAC_ADDRESS_CMP001') }}", NAME="enp3s0f0"
- SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH1_MAC_ADDRESS_CMP001') }}", NAME="enp3s0f1"
- SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH2_MAC_ADDRESS_CMP001') }}", NAME="enp5s0f0"
- SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH3_MAC_ADDRESS_CMP001') }}", NAME="enp5s0f1"
- SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH4_MAC_ADDRESS_CMP001') }}", NAME="enp5s0f2"
- # cmp002
- SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH0_MAC_ADDRESS_CMP002') }}", NAME="eno1"
- SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH1_MAC_ADDRESS_CMP002') }}", NAME="eth0"
- SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH2_MAC_ADDRESS_CMP002') }}", NAME="eth3"
- SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH3_MAC_ADDRESS_CMP002') }}", NAME="eth2"
- SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH4_MAC_ADDRESS_CMP002') }}", NAME="eth4"
- # gtw01
- SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH0_MAC_ADDRESS_GTW01') }}", NAME="enp3s0f0"
- SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH1_MAC_ADDRESS_GTW01') }}", NAME="enp3s0f1"
- # gtw02
- SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH0_MAC_ADDRESS_GTW02') }}", NAME="eno1"
- SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH1_MAC_ADDRESS_GTW02') }}", NAME="eno2"
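
The doubled braces in `ATTR{{ '{{address}}' }}` follow from those same two layers: Jinja emits the literal text {{address}}, and the later str.format() pass collapses the doubled braces, leaving the udev match key ATTR{address}. A sketch of that chain under this assumption (requires the jinja2 package; the MAC value is invented):

```python
# Two-stage rendering that yields a literal udev key ATTR{address}.
import os
from jinja2 import Template

os.environ.setdefault("ETH0_MAC_ADDRESS_KVM01", "52:54:00:aa:bb:cc")

def os_env(name, default=None):
    # Simplified stand-in for the template helper of the same name.
    return os.environ.get(name, default)

line = ('SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", '
        "ATTR{{ '{{address}}' }}==\"{{ os_env('ETH0_MAC_ADDRESS_KVM01') }}\", "
        'NAME="enp2s0f0"')

stage1 = Template(line).render(os_env=os_env)  # -> ATTR{{address}}=="52:54:..."
stage2 = stage1.format()                       # -> ATTR{address}=="52:54:..."
print(stage2)
```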
diff --git a/tcp_tests/templates/physical_mcp11_ovs_dpdk/underlay--user-data.yaml b/tcp_tests/templates/physical_mcp11_ovs_dpdk/underlay--user-data.yaml
deleted file mode 100644
index e555d3c..0000000
--- a/tcp_tests/templates/physical_mcp11_ovs_dpdk/underlay--user-data.yaml
+++ /dev/null
@@ -1,114 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Block SSH access while the node is being prepared
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- #- sudo ifup eth0
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
-
- ############## TCP Cloud cfg01 node ##################
- #- sleep 120
- - echo "Preparing base OS"
- - which wget >/dev/null || (apt-get update; apt-get install -y wget)
-
- # Configure Ubuntu mirrors
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
- - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/{{ SALT_VERSION }} xenial main" > /etc/apt/sources.list.d/saltstack.list
- - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/{{ SALT_VERSION }}/SALTSTACK-GPG-KEY.pub | apt-key add -
-
- - apt-get clean
- - eatmydata apt-get update && apt-get -y upgrade
-
- # Install common packages
- - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
- ########################################################
- # Node is ready, allow SSH access
- - echo "Allow SSH access ..."
- - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- # The loopback network interface
- auto lo
- iface lo inet loopback
-
- auto {interface_name}
- iface {interface_name} inet dhcp
-
- - path: /etc/udev/rules.d/70-persistent-net.rules
- owner: root:root
- permissions: '0644'
- content: |
- # kvm01
- SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH0_MAC_ADDRESS_KVM01') }}", NAME="enp2s0f0"
- SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH1_MAC_ADDRESS_KVM01') }}", NAME="enp2s0f1"
- # kvm02
- SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH0_MAC_ADDRESS_KVM02') }}", NAME="enp2s0f0"
- SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH1_MAC_ADDRESS_KVM02') }}", NAME="enp2s0f1"
- # kvm03
- SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH0_MAC_ADDRESS_KVM03') }}", NAME="eno1"
- SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH1_MAC_ADDRESS_KVM03') }}", NAME="eno2"
- # cmp001
- SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH0_MAC_ADDRESS_CMP001') }}", NAME="enp3s0f0"
- SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH1_MAC_ADDRESS_CMP001') }}", NAME="enp3s0f1"
- SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH2_MAC_ADDRESS_CMP001') }}", NAME="enp5s0f0"
- SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH3_MAC_ADDRESS_CMP001') }}", NAME="enp5s0f1"
- SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH4_MAC_ADDRESS_CMP001') }}", NAME="enp5s0f2"
- # cmp002
- SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH0_MAC_ADDRESS_CMP002') }}", NAME="eno1"
- SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH1_MAC_ADDRESS_CMP002') }}", NAME="eth0"
- SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH2_MAC_ADDRESS_CMP002') }}", NAME="eth3"
- SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH3_MAC_ADDRESS_CMP002') }}", NAME="eth2"
- SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH4_MAC_ADDRESS_CMP002') }}", NAME="eth4"
- # gtw01
- SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH0_MAC_ADDRESS_GTW01') }}", NAME="enp3s0f0"
- SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH1_MAC_ADDRESS_GTW01') }}", NAME="enp3s0f1"
- # gtw02
- SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH0_MAC_ADDRESS_GTW02') }}", NAME="eno1"
- SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH1_MAC_ADDRESS_GTW02') }}", NAME="eno2"
diff --git a/tcp_tests/templates/physical_mcp11_ovs_dpdk/underlay.yaml b/tcp_tests/templates/physical_mcp11_ovs_dpdk/underlay.yaml
deleted file mode 100644
index c27ccb4..0000000
--- a/tcp_tests/templates/physical_mcp11_ovs_dpdk/underlay.yaml
+++ /dev/null
@@ -1,572 +0,0 @@
-# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'physical_mcp11_ovs_dpdk') %}
-{# set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' #}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', 'mcp11-ovs-dpdk.local') %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KVM01 = os_env('HOSTNAME_KVM01', 'kvm01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KVM02 = os_env('HOSTNAME_KVM02', 'kvm02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM03', 'kvm03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP001 = os_env('HOSTNAME_CMP001', 'cmp001.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP002 = os_env('HOSTNAME_CMP002', 'cmp002.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW02 = os_env('HOSTNAME_GTW02', 'gtw02.' + DOMAIN_NAME) %}
-
-{% set ETH1_IP_ADDRESS_CFG01 = os_env('ETH1_IP_ADDRESS_CFG01', '172.16.49.2') %}
-{% set ETH0_IP_ADDRESS_KVM01 = os_env('ETH0_IP_ADDRESS_KVM01', '172.16.49.11') %}
-{% set ETH0_IP_ADDRESS_KVM02 = os_env('ETH0_IP_ADDRESS_KVM02', '172.16.49.12') %}
-{% set ETH0_IP_ADDRESS_KVM03 = os_env('ETH0_IP_ADDRESS_KVM03', '172.16.49.13') %}
-{% set ETH0_IP_ADDRESS_CMP001 = os_env('ETH0_IP_ADDRESS_CMP001', '172.16.49.3') %}
-{% set ETH0_IP_ADDRESS_CMP002 = os_env('ETH0_IP_ADDRESS_CMP002', '172.16.49.31') %}
-{% set ETH0_IP_ADDRESS_GTW01 = os_env('ETH0_IP_ADDRESS_GTW01', '172.16.49.5') %}
-{% set ETH0_IP_ADDRESS_GTW02 = os_env('ETH0_IP_ADDRESS_GTW02', '172.16.49.4') %}
-
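
Every parameter above follows the same pattern: os_env(NAME, default) lets an environment variable override the template default. A minimal sketch of such a helper, assuming it is a thin wrapper over os.environ (the repository's actual implementation may differ):

```python
# Minimal os_env-style helper: environment variable wins, default otherwise.
import os

def os_env(var_name, default=None):
    return os.environ.get(var_name, default)

DOMAIN_NAME = os_env("DOMAIN_NAME", "mcp11-ovs-dpdk.local")
HOSTNAME_CFG01 = os_env("HOSTNAME_CFG01", "cfg01." + DOMAIN_NAME)
print(HOSTNAME_CFG01)  # "cfg01.mcp11-ovs-dpdk.local" unless overridden
```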
-{% import 'physical_mcp11_ovs_dpdk/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'physical_mcp11_ovs_dpdk/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'physical_mcp11_ovs_dpdk/underlay--user-data.yaml' as CLOUDINIT_USER_DATA with context %}
-{% import 'physical_mcp11_ovs_dpdk/underlay--user-data-hwe.yaml' as CLOUDINIT_USER_DATA_HWE with context %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data {{ CLOUDINIT_USER_DATA }}
- - &cloudinit_user_data_hwe {{ CLOUDINIT_USER_DATA_HWE }}
-
-
-template:
- devops_settings:
- env_name: {{ os_env('ENV_NAME', 'physical_mcp11_ovs_dpdk_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
- address_pools:
- admin-pool01:
- net: {{ os_env('ADMIN_ADDRESS_POOL01', '172.16.49.0/26:26') }}
- params:
- ip_reserved:
- gateway: +62
- l2_network_device: +61
- default_{{ HOSTNAME_CFG01 }}: {{ ETH1_IP_ADDRESS_CFG01 }}
- default_{{ HOSTNAME_KVM01 }}: {{ ETH0_IP_ADDRESS_KVM01 }}
- default_{{ HOSTNAME_KVM02 }}: {{ ETH0_IP_ADDRESS_KVM02 }}
- default_{{ HOSTNAME_KVM03 }}: {{ ETH0_IP_ADDRESS_KVM03 }}
- default_{{ HOSTNAME_CMP001 }}: {{ ETH0_IP_ADDRESS_CMP001 }}
- default_{{ HOSTNAME_CMP002 }}: {{ ETH0_IP_ADDRESS_CMP002 }}
- default_{{ HOSTNAME_GTW01 }}: {{ ETH0_IP_ADDRESS_GTW01 }}
- default_{{ HOSTNAME_GTW02 }}: {{ ETH0_IP_ADDRESS_GTW02 }}
- virtual_{{ HOSTNAME_CFG01 }}: {{ ETH1_IP_ADDRESS_CFG01 }}
- virtual_{{ HOSTNAME_KVM01 }}: {{ ETH0_IP_ADDRESS_KVM01 }}
- virtual_{{ HOSTNAME_KVM02 }}: {{ ETH0_IP_ADDRESS_KVM02 }}
- virtual_{{ HOSTNAME_KVM03 }}: {{ ETH0_IP_ADDRESS_KVM03 }}
- virtual_{{ HOSTNAME_CMP001 }}: {{ ETH0_IP_ADDRESS_CMP001 }}
- virtual_{{ HOSTNAME_CMP002 }}: {{ ETH0_IP_ADDRESS_CMP002 }}
- virtual_{{ HOSTNAME_GTW01 }}: {{ ETH0_IP_ADDRESS_GTW01 }}
- virtual_{{ HOSTNAME_GTW02 }}: {{ ETH0_IP_ADDRESS_GTW02 }}
- #ip_ranges:
- # dhcp: [+2, -4]
- private-pool01:
- net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.167.4.0/24:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
-
- tenant-pool01:
- net: {{ os_env('TENANT_ADDRESS_POOL01', '10.167.6.0/24:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
-
- external-pool01:
- net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '172.17.42.128/26:26') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: -2
-
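
The ip_reserved entries above use offsets relative to the pool: positive values appear to count from the network address, negative ones from the broadcast end, so `gateway: +62` in the /26 admin pool resolves to 172.16.49.62. A sketch of that arithmetic with the stdlib ipaddress module; the exact negative-offset convention is a fuel-devops detail and is assumed here:

```python
# Resolving relative ip_reserved offsets against an address pool.
# The ':26' suffix in the template nets is a pool-size hint and is
# dropped here; the negative-offset convention is an assumption.
import ipaddress

def resolve(pool_cidr, offset):
    net = ipaddress.ip_network(pool_cidr)
    if offset >= 0:
        return net.network_address + offset
    return net.broadcast_address + offset  # offset is negative

print(resolve("172.16.49.0/26", 62))    # gateway: +62 -> 172.16.49.62
print(resolve("172.16.49.0/26", 61))    # l2_network_device: +61 -> .61
print(resolve("172.17.42.128/26", -2))  # external l2_network_device: -2
```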
- groups:
-
- - name: virtual
- driver:
- name: devops.driver.libvirt
- params:
- connection_string: !os_env CONNECTION_STRING, qemu:///system
- storage_pool_name: !os_env STORAGE_POOL_NAME, default
- stp: False
- hpet: False
- enable_acpi: true
- use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
-
- network_pools:
- admin: admin-pool01
-
- l2_network_devices:
- # Ironic management interface
- admin:
- address_pool: admin-pool01
- dhcp: false
- parent_iface:
- phys_dev: !os_env IRONIC_LAB_PXE_IFACE_0
-
- group_volumes:
- - name: cloudimage1604 # This name is used for the 'backing_store' option of node volumes.
- source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- format: qcow2
- - name: cfg01_day01_image # Pre-configured day01 image
- source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2, or falls back to IMAGE_PATH1604
- format: qcow2
-
- nodes:
- - name: {{ HOSTNAME_CFG01 }}
- role: salt_master
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 4
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cfg01_day01_image
- format: qcow2
- - name: iso # The volume named 'iso' is used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_cfg01
-
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- mac_address: !os_env ETH1_MAC_ADDRESS_CFG01
- #- label: ens4
- # l2_network_device: private
- # interface_model: *interface_model
- network_config:
- ens3:
- networks:
- - admin
- #ens4:
- # networks:
- # - private
-
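
The 'iso' cdrom volume carries the rendered meta-data and user-data as a cloud-init NoCloud seed. The framework builds that image itself, but the format is easy to reproduce by hand; a sketch assuming genisoimage is installed, with placeholder file contents:

```python
# Illustrative only: building a cloud-init NoCloud seed ISO of the kind
# the 'iso' volume carries. The framework does this internally; the file
# contents here are placeholders.
import pathlib
import subprocess
import tempfile

with tempfile.TemporaryDirectory() as tmp:
    tmp_path = pathlib.Path(tmp)
    (tmp_path / "meta-data").write_text(
        "instance-id: iid-local1\nhostname: cfg01\nlocal-hostname: cfg01\n")
    (tmp_path / "user-data").write_text("#cloud-config\nssh_pwauth: true\n")
    subprocess.check_call([
        "genisoimage", "-output", str(tmp_path / "seed.iso"),
        "-volid", "cidata",  # volume label cloud-init's NoCloud source expects
        "-joliet", "-rock",
        str(tmp_path / "user-data"), str(tmp_path / "meta-data"),
    ])
```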
-
- - name: default
- driver:
- name: devops_driver_ironic
- params:
- os_auth_token: fake-token
- ironic_url: !os_env IRONIC_URL # URL that will be used by fuel-devops
- # to access the Ironic API
- # Agent URLs that are accessible from the deploying node when nodes
- # are bootstrapped with PXE. Usually the PXE/provision network address is used.
- agent_kernel_url: !os_env IRONIC_AGENT_KERNEL_URL
- agent_ramdisk_url: !os_env IRONIC_AGENT_RAMDISK_URL
-
- network_pools:
- admin: admin-pool01
-
- nodes:
-
- # - name: {{ HOSTNAME_CFG01 }}
- # role: salt_master
- # params:
- # ipmi_user: !os_env IPMI_USER
- # ipmi_password: !os_env IPMI_PASSWORD
- # ipmi_previlegies: OPERATOR
- # ipmi_host: !os_env IPMI_HOST_CFG01 # hostname or IP address
- # ipmi_lan_interface: lanplus
- # ipmi_port: 623
-
- # root_volume_name: system # see 'volumes' below
- # cloud_init_volume_name: iso # see 'volumes' below
- # cloud_init_iface_up: enp3s0f1 # see 'interfaces' below.
- # volumes:
- # - name: system
- # capacity: !os_env NODE_VOLUME_SIZE, 200
-
- # # As with the agent URLs, this is a URL to the image that should be
- # # used to deploy the node. It should also be accessible from the deploying
- # # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
- # source_image: !os_env IRONIC_SOURCE_IMAGE_URL
- # source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
- # - name: iso # The volume named 'iso' is used
- # # to store the image with cloud-init metadata.
-
- # cloudinit_meta_data: *cloudinit_meta_data
- # cloudinit_user_data: *cloudinit_user_data_cfg01
-
- # interfaces:
- # - label: enp3s0f0 # Infra interface
- # mac_address: !os_env ETH0_MAC_ADDRESS_CFG01
- # - label: enp3s0f1
- # l2_network_device: admin
- # mac_address: !os_env ETH1_MAC_ADDRESS_CFG01
-
- # network_config:
- # enp3s0f0:
- # networks:
- # - infra
- # enp3s0f1:
- # networks:
- # - admin
-
- - name: {{ HOSTNAME_KVM01 }}
- role: salt_minion
- params:
- ipmi_user: !os_env IPMI_USER
- ipmi_password: !os_env IPMI_PASSWORD
- ipmi_previlegies: OPERATOR
- ipmi_host: !os_env IPMI_HOST_KVM01 # hostname or IP address
- ipmi_lan_interface: lanplus
- ipmi_port: 623
-
- root_volume_name: system # see 'volumes' below
- cloud_init_volume_name: iso # see 'volumes' below
- cloud_init_iface_up: enp2s0f0 # see 'interfaces' below.
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 200
-
- # As with the agent URLs, this is a URL to the image that should be
- # used to deploy the node. It should also be accessible from the deploying
- # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
- source_image: !os_env IRONIC_SOURCE_IMAGE_URL
- source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
- - name: iso # The volume named 'iso' is used
- # to store the image with cloud-init metadata.
-
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data
-
- interfaces:
- - label: enp2s0f0
- l2_network_device: admin
- mac_address: !os_env ETH0_MAC_ADDRESS_KVM01
- - label: enp2s0f1
- mac_address: !os_env ETH1_MAC_ADDRESS_KVM01
-
- network_config:
- enp2s0f0:
- networks:
- - admin
- bond0:
- networks:
- - control
- aggregation: active-backup
- parents:
- - enp2s0f1
-
- - name: {{ HOSTNAME_KVM02 }}
- role: salt_minion
- params:
- ipmi_user: !os_env IPMI_USER
- ipmi_password: !os_env IPMI_PASSWORD
- ipmi_previlegies: OPERATOR
- ipmi_host: !os_env IPMI_HOST_KVM02 # hostname or IP address
- ipmi_lan_interface: lanplus
- ipmi_port: 623
-
- root_volume_name: system # see 'volumes' below
- cloud_init_volume_name: iso # see 'volumes' below
- cloud_init_iface_up: enp2s0f0 # see 'interfaces' below.
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 200
-
- # As with the agent URLs, this is a URL to the image that should be
- # used to deploy the node. It should also be accessible from the deploying
- # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
- source_image: !os_env IRONIC_SOURCE_IMAGE_URL
- source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
- - name: iso # The volume named 'iso' is used
- # to store the image with cloud-init metadata.
-
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data
-
- interfaces:
- - label: enp2s0f0
- l2_network_device: admin
- mac_address: !os_env ETH0_MAC_ADDRESS_KVM02
- - label: enp2s0f1
- mac_address: !os_env ETH1_MAC_ADDRESS_KVM02
-
- network_config:
- enp2s0f0:
- networks:
- - admin
- bond0:
- networks:
- - control
- aggregation: active-backup
- parents:
- - enp2s0f1
-
- - name: {{ HOSTNAME_KVM03 }}
- role: salt_minion
- params:
- ipmi_user: !os_env IPMI_USER
- ipmi_password: !os_env IPMI_PASSWORD
- ipmi_previlegies: OPERATOR
- ipmi_host: !os_env IPMI_HOST_KVM03 # hostname or IP address
- ipmi_lan_interface: lanplus
- ipmi_port: 623
-
- root_volume_name: system # see 'volumes' below
- cloud_init_volume_name: iso # see 'volumes' below
- cloud_init_iface_up: eno1 # see 'interfaces' below.
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 200
-
- # As with the agent URLs, this is a URL to the image that should be
- # used to deploy the node. It should also be accessible from the deploying
- # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
- source_image: !os_env IRONIC_SOURCE_IMAGE_URL
- source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
- - name: iso # The volume named 'iso' is used
- # to store the image with cloud-init metadata.
-
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data
-
- interfaces:
- - label: eno1
- l2_network_device: admin
- mac_address: !os_env ETH0_MAC_ADDRESS_KVM03
- - label: eno2
- mac_address: !os_env ETH1_MAC_ADDRESS_KVM03
-
- network_config:
- eno1:
- networks:
- - admin
- bond0:
- networks:
- - control
- aggregation: active-backup
- parents:
- - eno2
-
-
- - name: {{ HOSTNAME_CMP001 }}
- role: salt_minion
- params:
- ipmi_user: !os_env IPMI_USER
- ipmi_password: !os_env IPMI_PASSWORD
- ipmi_previlegies: OPERATOR
- ipmi_host: !os_env IPMI_HOST_CMP001 # hostname or IP address
- ipmi_lan_interface: lanplus
- ipmi_port: 623
-
- root_volume_name: system # see 'volumes' below
- cloud_init_volume_name: iso # see 'volumes' below
- cloud_init_iface_up: enp3s0f0 # see 'interfaces' below.
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 200
-
- # As with the agent URLs, this is a URL to the image that should be
- # used to deploy the node. It should also be accessible from the deploying
- # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
- source_image: !os_env IRONIC_SOURCE_IMAGE_URL
- source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
- - name: iso # The volume named 'iso' is used
- # to store the image with cloud-init metadata.
-
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_hwe
-
- interfaces:
- - label: enp3s0f0
- l2_network_device: admin
- mac_address: !os_env ETH0_MAC_ADDRESS_CMP001
- - label: enp3s0f1
- mac_address: !os_env ETH1_MAC_ADDRESS_CMP001
- - label: enp5s0f0
- mac_address: !os_env ETH2_MAC_ADDRESS_CMP001
- - label: enp5s0f1
- mac_address: !os_env ETH3_MAC_ADDRESS_CMP001
- features: ['dpdk', 'dpdk_pci: 0000:05:00.1']
- - label: enp5s0f2
- mac_address: !os_env ETH4_MAC_ADDRESS_CMP001
- features: ['dpdk', 'dpdk_pci: 0000:05:00.2']
-
- network_config:
- enp3s0f0:
- networks:
- - admin
- bond0:
- networks:
- - control
- aggregation: active-backup
- parents:
- - enp3s0f1
- - enp5s0f0
-
-
-
- - name: {{ HOSTNAME_CMP002 }}
- role: salt_minion
- params:
- ipmi_user: !os_env IPMI_USER
- ipmi_password: !os_env IPMI_PASSWORD
- ipmi_previlegies: OPERATOR
- ipmi_host: !os_env IPMI_HOST_CMP002 # hostname or IP address
- ipmi_lan_interface: lanplus
- ipmi_port: 623
-
- root_volume_name: system # see 'volumes' below
- cloud_init_volume_name: iso # see 'volumes' below
- cloud_init_iface_up: eno1 # see 'interfaces' below.
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 200
-
- # As with the agent URLs, this is a URL to the image that should be
- # used to deploy the node. It should also be accessible from the deploying
- # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
- source_image: !os_env IRONIC_SOURCE_IMAGE_URL
- source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
- - name: iso # The volume named 'iso' is used
- # to store the image with cloud-init metadata.
-
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_hwe
-
- interfaces:
- - label: eno1
- l2_network_device: admin
- mac_address: !os_env ETH0_MAC_ADDRESS_CMP002
- - label: eth0
- mac_address: !os_env ETH1_MAC_ADDRESS_CMP002
- - label: eth3
- mac_address: !os_env ETH2_MAC_ADDRESS_CMP002
- - label: eth2
- mac_address: !os_env ETH3_MAC_ADDRESS_CMP002
- features: ['dpdk', 'dpdk_pci: 0000:05:00.1']
- - label: eth4
- mac_address: !os_env ETH4_MAC_ADDRESS_CMP002
- features: ['dpdk', 'dpdk_pci: 0000:0b:00.0']
-
- network_config:
- eno1:
- networks:
- - admin
- bond0:
- networks:
- - control
- aggregation: active-backup
- parents:
- - eth0
- - eth3
-
-
- - name: {{ HOSTNAME_GTW01 }}
- role: salt_minion
- params:
- ipmi_user: !os_env IPMI_USER
- ipmi_password: !os_env IPMI_PASSWORD
- ipmi_previlegies: OPERATOR
- ipmi_host: !os_env IPMI_HOST_GTW01 # hostname or IP address
- ipmi_lan_interface: lanplus
- ipmi_port: 623
-
- root_volume_name: system # see 'volumes' below
- cloud_init_volume_name: iso # see 'volumes' below
- cloud_init_iface_up: enp3s0f0 # see 'interfaces' below.
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 200
-
- # As with the agent URLs, this is a URL to the image that should be
- # used to deploy the node. It should also be accessible from the deploying
- # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
- source_image: !os_env IRONIC_SOURCE_IMAGE_URL
- source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
- - name: iso # The volume named 'iso' is used
- # to store the image with cloud-init metadata.
-
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_hwe
-
- interfaces:
- - label: enp3s0f0
- l2_network_device: admin
- mac_address: !os_env ETH0_MAC_ADDRESS_GTW01
- - label: enp3s0f1
- mac_address: !os_env ETH1_MAC_ADDRESS_GTW01
-
- network_config:
- enp3s0f0:
- networks:
- - admin
- bond0:
- networks:
- - control
- aggregation: active-backup
- parents:
- - enp3s0f1
-
- - name: {{ HOSTNAME_GTW02 }}
- role: salt_minion
- params:
- ipmi_user: !os_env IPMI_USER
- ipmi_password: !os_env IPMI_PASSWORD
- ipmi_previlegies: OPERATOR
- ipmi_host: !os_env IPMI_HOST_GTW02 # hostname or IP address
- ipmi_lan_interface: lanplus
- ipmi_port: 623
-
- root_volume_name: system # see 'volumes' below
- cloud_init_volume_name: iso # see 'volumes' below
- cloud_init_iface_up: eno1 # see 'interfaces' below.
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 200
-
- # As with the agent URLs, this is a URL to the image that should be
- # used to deploy the node. It should also be accessible from the deploying
- # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
- source_image: !os_env IRONIC_SOURCE_IMAGE_URL
- source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
- - name: iso # The volume named 'iso' is used
- # to store the image with cloud-init metadata.
-
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_hwe
-
- interfaces:
- - label: eno1
- l2_network_device: admin
- mac_address: !os_env ETH0_MAC_ADDRESS_GTW02
- - label: eno2
- mac_address: !os_env ETH1_MAC_ADDRESS_GTW02
-
- network_config:
- eno1:
- networks:
- - admin
- bond0:
- networks:
- - control
- aggregation: active-backup
- parents:
- - eno2
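
Each node's network_config attaches the admin network to one NIC and aggregates the remaining NICs into bond0 in active-backup mode. A rough sketch of rendering such an entry into an ifupdown bonding stanza; the real deployment tooling may emit a different format, so treat this as an illustration of the shape only:

```python
# Rendering a network_config-style bond entry (mirroring gtw02 above)
# into an ifupdown stanza. The output format is an assumption.
bond = {
    "name": "bond0",
    "aggregation": "active-backup",
    "parents": ["eno2"],
}

stanza = "\n".join([
    f"auto {bond['name']}",
    f"iface {bond['name']} inet manual",
    f"    bond-mode {bond['aggregation']}",
    f"    bond-slaves {' '.join(bond['parents'])}",
])
print(stanza)
```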
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs-l2gw-bgpvpn/salt.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs-l2gw-bgpvpn/salt.yaml
index 33b4c38..df07af8 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs-l2gw-bgpvpn/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs-l2gw-bgpvpn/salt.yaml
@@ -20,7 +20,7 @@
{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "fluentd" "backupninja"') }}
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "fluentd" "backupninja" "auditd"') }}
{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
diff --git a/tcp_tests/templates/virtual-mcp10-contrail/common-services.yaml b/tcp_tests/templates/virtual-mcp10-contrail/common-services.yaml
deleted file mode 100644
index 7b19b50..0000000
--- a/tcp_tests/templates/virtual-mcp10-contrail/common-services.yaml
+++ /dev/null
@@ -1,117 +0,0 @@
-{% from 'virtual-mcp10-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Check the VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
- echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install glusterfs
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Setup glusterfs on primary controller
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check the gluster status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
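
Every step in these scenario files shares one contract: run cmd on node_name, retry up to retry.count times with retry.delay seconds between attempts, and let skip_fail decide whether a persistent failure aborts the run or is merely tolerated. A minimal sketch of that loop, with run_on_node as a hypothetical stand-in for the framework's remote executor:

```python
# Minimal sketch of the retry/skip_fail contract used by the steps above.
import time

def run_on_node(node_name, cmd):
    # Hypothetical stand-in for the framework's SSH executor;
    # assumed to return the command's shell exit code.
    raise NotImplementedError

def run_step(step):
    retry = step.get("retry", {"count": 1, "delay": 0})
    for _ in range(retry["count"]):
        if run_on_node(step["node_name"], step["cmd"]) == 0:
            return True
        time.sleep(retry["delay"])
    if step.get("skip_fail", False):
        return False  # tolerated failure: continue with the next step
    raise RuntimeError("step failed: {}".format(step["description"]))
```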
diff --git a/tcp_tests/templates/virtual-mcp10-contrail/openstack.yaml b/tcp_tests/templates/virtual-mcp10-contrail/openstack.yaml
deleted file mode 100644
index 8732168..0000000
--- a/tcp_tests/templates/virtual-mcp10-contrail/openstack.yaml
+++ /dev/null
@@ -1,239 +0,0 @@
-{% from 'virtual-mcp10-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-# Install OpenStack control services
-- description: Downgrade libc for glance
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glance:server' cmd.run ' apt install libc6=2.19-0ubuntu6.11 -y --force-yes'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install glance on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glance:server' state.sls glance -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install keystone service (note that different fernet keys are created on different nodes)
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' state.sls keystone.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 15}
- skip_fail: false
-
-#- description: Restart apache due to PROD-10477
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl restart apache2"
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 15}
-# skip_fail: false
-#
-#- description: Check apache status due to PROD-10477
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl status apache2"
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 15}
-# skip_fail: false
-
-- description: Mount glusterfs.client volumes (requires the 'keystone' and 'glusterfs' system users to be created)
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glance:server' state.sls glusterfs.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Update fernet keys for keystone server on the mounted glusterfs volume
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' state.sls keystone.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Populate keystone services/tenants/admins
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:client' state.sls keystone.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check keystone service-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; openstack service list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check glance image-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; glance image-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Install nova on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nova:controller' state.sls nova -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check nova service-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; nova service-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 5}
- skip_fail: false
-
-
-- description: Install cinder
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@cinder:controller' state.sls cinder -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check cinder list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; cinder list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Install neutron service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@neutron:server' state.sls neutron -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-#
-#- description: Install neutron on gtw node
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-# -C 'I@neutron:gateway' state.sls neutron
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 5}
-# skip_fail: false
-
-
-#- description: Check neutron agent-list
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-# -C 'I@keystone:server' cmd.run '. /root/keystonerc; neutron agent-list'
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 5}
-# skip_fail: false
-
-# install contrail
-- description: Install contrail db
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:database' state.sls opencontrail.database
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 20}
- skip_fail: false
-
-- description: Install contrail on 1st node and skip client part
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:control and *01*' state.sls opencontrail exclude=opencontrail.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 5}
- skip_fail: false
-
-- description: Install contrail on all nodes still skipping client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:control' state.sls opencontrail exclude=opencontrail.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Install contrail and do client part as well
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:control' state.sls opencontrail
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Configure contrail
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:database:id:1' state.sls opencontrail.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check contrail status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:control' cmd.run contrail-status
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install heat service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@heat:server' state.sls heat -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check heat service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; heat resource-type-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Deploy horizon dashboard
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@horizon:server' state.sls horizon
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Deploy nginx proxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-# Install compute node
-
-- description: Apply formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Re-apply (as in the docs) formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check IP on computes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
- 'ip a'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 10, delay: 30}
- skip_fail: false
-
-- description: Provision Contrail-vRouter on Computes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:compute' state.sls 'opencontrail.client'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Opencontrail on computes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:compute' state.sls 'opencontrail'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Reboot Opencontrail compute nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:compute' system.reboot
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
\ No newline at end of file
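
The -C arguments above are Salt compound matchers: I@ selects minions by pillar data, and expressions such as 'I@opencontrail:control and *01*' intersect a pillar match with a hostname glob. The same targeting is available from Salt's Python API; a sketch that assumes it runs on the salt master with the salt package installed:

```python
# Compound targeting, as used by the CLI steps above, via Salt's Python
# client API. Illustrative; requires a running salt master.
import salt.client

local = salt.client.LocalClient()

# Equivalent of: salt -C 'I@keepalived:cluster and *01*' state.sls keepalived
result = local.cmd(
    "I@keepalived:cluster and *01*",  # pillar match intersected with a glob
    "state.sls",
    ["keepalived"],
    tgt_type="compound",
)
print(sorted(result))
```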
diff --git a/tcp_tests/templates/virtual-mcp10-contrail/salt.yaml b/tcp_tests/templates/virtual-mcp10-contrail/salt.yaml
deleted file mode 100644
index 79420c1..0000000
--- a/tcp_tests/templates/virtual-mcp10-contrail/salt.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
-{% from 'virtual-mcp10-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp10-contrail/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'virtual-mcp10-contrail/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
-# For other salt model repository parameters, see shared-salt.yaml
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_CLONE_RECLASS_MODELS(IS_CONTRAIL_LAB=true) }}
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch"') }}
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
diff --git a/tcp_tests/templates/virtual-mcp10-contrail/sl.yaml b/tcp_tests/templates/virtual-mcp10-contrail/sl.yaml
deleted file mode 100644
index 4cbd37a..0000000
--- a/tcp_tests/templates/virtual-mcp10-contrail/sl.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
-{% from 'virtual-mcp10-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-# dummy
-- description: dummy
- cmd: exit 0
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
diff --git a/tcp_tests/templates/virtual-mcp10-contrail/underlay--meta-data.yaml b/tcp_tests/templates/virtual-mcp10-contrail/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/virtual-mcp10-contrail/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
- instance-id: iid-local1
- hostname: {hostname}
- local-hostname: {hostname}
diff --git a/tcp_tests/templates/virtual-mcp10-contrail/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp10-contrail/underlay--user-data-cfg01.yaml
deleted file mode 100644
index 8f039fd..0000000
--- a/tcp_tests/templates/virtual-mcp10-contrail/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,82 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Block SSH access while the node is being prepared
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- ############## TCP Cloud cfg01 node ##################
- #- sleep 120
- - echo "Preparing base OS"
-
- - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
- - which wget >/dev/null || (apt-get update; apt-get install -y wget);
- - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
- - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
- - apt-get clean
- - apt-get update
-
- # Install common packages
- - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree
-
- ########################################################
- # Node is ready, allow SSH access
- - echo "Allow SSH access ..."
- - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/virtual-mcp10-contrail/underlay--user-data1404.yaml b/tcp_tests/templates/virtual-mcp10-contrail/underlay--user-data1404.yaml
deleted file mode 100644
index f93644d..0000000
--- a/tcp_tests/templates/virtual-mcp10-contrail/underlay--user-data1404.yaml
+++ /dev/null
@@ -1,101 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Block SSH access while the node is being prepared
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifup eth0
- #- sudo route add default gw {gateway} {interface_name}
- - sudo ifup eth1
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- ############## TCP Cloud cfg01 node ##################
- #- sleep 120
- - echo "Preparing base OS"
- - which wget >/dev/null || (apt-get update; apt-get install -y wget)
- - echo "deb [arch=amd64] http://apt.tcpcloud.eu/nightly/ trusty main security extra tcp tcp-salt" > /etc/apt/sources.list
- - wget -O - http://apt.tcpcloud.eu/public.gpg | apt-key add -
- # the saltstack repo is for minions that have the same version on xenial and trusty (2016.3.3)
- #- echo "deb http://repo.saltstack.com/apt/ubuntu/14.04/amd64/latest trusty main" > /etc/apt/sources.list.d/saltstack.list
- #- wget -O - https://repo.saltstack.com/apt/ubuntu/14.04/amd64/latest/SALTSTACK-GPG-KEY.pub | apt-key add -
- - echo "deb http://repo.saltstack.com/apt/ubuntu/14.04/amd64/2016.3 trusty main" > /etc/apt/sources.list.d/saltstack.list
- - wget -O - https://repo.saltstack.com/apt/ubuntu/14.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -
-
- - eatmydata apt-get clean
- - eatmydata apt-get update
- - eatmydata apt-get -y upgrade
-
- # Install common packages
- - apt-get install -y python-pip git
- - apt-get install -y curl tmux byobu iputils-ping traceroute htop tree
-
- - apt-get install -y salt-minion
-
- # To be configured from inventory/fuel-devops by operator or autotests
- - 'echo "id: {hostname}" >> /etc/salt/minion'
- - 'echo "master: 192.168.10.100" >> /etc/salt/minion'
-
- - echo "Restarting minion service with workarounds..."
- - rm -f /etc/salt/pki/minion/minion_master.pub
- - service salt-minion restart
- - sleep 5
- - rm -f /etc/salt/pki/minion/minion_master.pub
- - service salt-minion restart
-
- #- echo "Showing node metadata..."
- #- salt-call pillar.data
-
- #- echo "Running complete state ..."
- #- salt-call state.sls linux,openssh,salt
-
- # Workaround for bug https://mirantis.jira.com/browse/PROD-8214
- - apt-get -y install --install-recommends linux-generic-lts-xenial
- - reboot
-
- ########################################################
- # Node is ready, allow SSH access
- ##- echo "Allow SSH access ..."
- ##- sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto eth0
- iface eth0 inet dhcp
- auto eth1
- iface eth1 inet dhcp
diff --git a/tcp_tests/templates/virtual-mcp10-contrail/underlay--user-data1604.yaml b/tcp_tests/templates/virtual-mcp10-contrail/underlay--user-data1604.yaml
deleted file mode 100644
index 6dc3309..0000000
--- a/tcp_tests/templates/virtual-mcp10-contrail/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,74 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Block SSH access while the node is being prepared
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
-
- ############## TCP Cloud cfg01 node ##################
- #- sleep 120
- - echo "Preparing base OS"
- - which wget >/dev/null || (apt-get update; apt-get install -y wget)
- - echo "deb [arch=amd64] http://apt.tcpcloud.eu/nightly/ xenial main security extra tcp tcp-salt" > /etc/apt/sources.list
- - wget -O - http://apt.tcpcloud.eu/public.gpg | apt-key add -
- - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list
- - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -
-
- - eatmydata apt-get clean
- - eatmydata apt-get update && eatmydata apt-get -y upgrade
-
- # Install common packages
- - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
- ########################################################
- # Node is ready, allow SSH access
- - echo "Allow SSH access ..."
- - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
diff --git a/tcp_tests/templates/virtual-mcp10-contrail/underlay.yaml b/tcp_tests/templates/virtual-mcp10-contrail/underlay.yaml
deleted file mode 100644
index eb64cf1..0000000
--- a/tcp_tests/templates/virtual-mcp10-contrail/underlay.yaml
+++ /dev/null
@@ -1,525 +0,0 @@
-# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-
-{% import 'virtual-mcp10-contrail/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'virtual-mcp10-contrail/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'virtual-mcp10-contrail/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
-{% import 'virtual-mcp10-contrail/underlay--user-data1404.yaml' as CLOUDINIT_USER_DATA_1404 with context %}
-
----
-aliases:
- - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
- - &cloudinit_user_data_1404 {{ CLOUDINIT_USER_DATA_1404 }}
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'virtual-mcp10-contrail') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_VSRX01 = os_env('HOSTNAME_VSRX01', 'vsrx01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
-
-template:
- devops_settings:
- env_name: {{ os_env('ENV_NAME', 'mcp10-cntrl_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
-
- address_pools:
- private-pool01:
- net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
- default_{{ HOSTNAME_VSRX01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+90, -10]
-
- admin-pool01:
- net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +90
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
- default_{{ HOSTNAME_VSRX01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+90, -10]
-
- tenant-pool01:
- net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
- default_{{ HOSTNAME_VSRX01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+10, -10]
-
- external-pool01:
- net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
- default_{{ HOSTNAME_VSRX01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+10, -10]
-
-
- groups:
- - name: default
- driver:
- name: devops.driver.libvirt
- params:
- connection_string: !os_env CONNECTION_STRING, qemu:///system
- storage_pool_name: !os_env STORAGE_POOL_NAME, default
- stp: False
- hpet: False
- enable_acpi: true
- use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
- use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
-
- network_pools:
- admin: admin-pool01
- private: private-pool01
- tenant: tenant-pool01
- external: external-pool01
-
- l2_network_devices:
- private:
- address_pool: private-pool01
- dhcp: false
- forward:
- mode: route
-
- admin:
- address_pool: admin-pool01
- dhcp: true
- forward:
- mode: nat
-
- tenant:
- address_pool: tenant-pool01
- dhcp: false
-
- external:
- address_pool: external-pool01
- dhcp: true
- forward:
- mode: nat
-
-
- group_volumes:
- - name: cloudimage1404 # This name is used for 'backing_store' option for node volumes.
- source_image: !os_env IMAGE_PATH1404 # https://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-disk1.img or
- # http://apt.tcpcloud.eu/images/ubuntu-14-04-x64-201608231134.qcow2
- format: qcow2
- - name: cloudimage1604 # This name is used for 'backing_store' option for node volumes.
- source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img or
- # http://apt.tcpcloud.eu/images/ubuntu-16-04-x64-201608231004.qcow2
- format: qcow2
-
- - name: vsrx_image # This name is used for 'backing_store' option for node volumes.
- source_image: !os_env VSRX_IMAGE # Path to the vSRX appliance image
- format: qcow2
-
- nodes:
- - name: {{ HOSTNAME_CFG01 }}
- role: salt_master
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_cfg01
-
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config:
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: eth0
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1404
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1404
-
- interfaces: &interfaces
- - label: eth0
- l2_network_device: admin
- interface_model: *interface_model
- - label: eth1
- l2_network_device: private
- interface_model: *interface_model
- network_config: &network_config
- eth0:
- networks:
- - admin
- eth1:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: eth0
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1404
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1404
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CTL03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: eth0
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1404
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1404
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_PRX01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: eth0
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1404
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1404
-
- interfaces: *interfaces
- network_config: *network_config
-
-
- - name: {{ HOSTNAME_CMP01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
-
- interfaces: &all_interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- - label: ens5
- l2_network_device: tenant
- interface_model: *interface_model
- - label: ens6
- l2_network_device: external
- interface_model: *interface_model
- network_config: &all_network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
- ens5:
- networks:
- - tenant
- ens6:
- networks:
- - external
-
- - name: {{ HOSTNAME_CMP02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_MON01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4098
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 120
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_MON02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4098
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 120
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_MON03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4098
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 120
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_VSRX01 }}
- role: vsrx_gtw
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 1048
- boot:
- - hd
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: vsrx_image
- format: qcow2
-
- interfaces:
- - label: ens5
- l2_network_device: tenant
- interface_model: *interface_model
- - label: ens6
- l2_network_device: external
- interface_model: *interface_model
- network_config:
- ens5:
- networks:
- - tenant
- ens6:
- networks:
- - external
-
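The address_pools entries in the underlay above use relative offsets: 'net: 10.60.0.0/16:24' asks for a /24 carved out of 10.60.0.0/16, positive offsets count from the network address of the allocated subnet, and negative ones from its broadcast address. A rough sketch of that resolution, assuming the usual fuel-devops semantics:

    import ipaddress

    # Sketch, assuming fuel-devops semantics: take the first /24 from the
    # 10.60.0.0/16 pool, then resolve '+N' from the network address and
    # '-N' from the broadcast address of that subnet.
    subnet = next(ipaddress.ip_network("10.60.0.0/16").subnets(new_prefix=24))

    def resolve(offset: int) -> ipaddress.IPv4Address:
        base = subnet.network_address if offset >= 0 else subnet.broadcast_address
        return base + offset

    print(resolve(+1))    # gateway / l2_network_device -> 10.60.0.1
    print(resolve(+100))  # default_cfg01               -> 10.60.0.100
    print(resolve(-10))   # upper end of the dhcp range -> 10.60.0.245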
diff --git a/tcp_tests/templates/virtual-mcp10-dvr/common-services.yaml b/tcp_tests/templates/virtual-mcp10-dvr/common-services.yaml
deleted file mode 100644
index 920286d..0000000
--- a/tcp_tests/templates/virtual-mcp10-dvr/common-services.yaml
+++ /dev/null
@@ -1,104 +0,0 @@
-{% from 'virtual-mcp10-dvr/map.jinja' import HOSTNAME_CFG01 with context %}
-
-# Install support services
-
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Check the VIP
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' cmd.run 'ip a | grep 172.16.10.2' | grep -B1 172.16.10.2
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Install glusterfs
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Setup glusterfs on primary controller
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check the gluster status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
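Every step in the file above shares the same shape: a salt command, the node to run it from, a retry policy, and a skip_fail flag. A hypothetical sketch of the executor loop this format implies (the framework's real runner lives elsewhere and runs cmd over SSH on node_name; here the command is just echoed through the local shell):

    import subprocess
    import time

    # Hypothetical executor for the step format above, honoring 'retry'
    # and 'skip_fail' the way the fields suggest.
    def run_step(step: dict) -> None:
        count = step.get("retry", {}).get("count", 1)
        delay = step.get("retry", {}).get("delay", 0)
        for attempt in range(1, count + 1):
            if subprocess.run(step["cmd"], shell=True).returncode == 0:
                return
            if attempt < count:
                time.sleep(delay)
        if not step.get("skip_fail", False):
            raise RuntimeError("step failed: %s" % step["description"])

    run_step({"description": "Check haproxy status",
              "cmd": "echo salt -C 'I@haproxy:proxy' service.status haproxy",
              "retry": {"count": 1, "delay": 5},
              "skip_fail": False})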
diff --git a/tcp_tests/templates/virtual-mcp10-dvr/map.jinja b/tcp_tests/templates/virtual-mcp10-dvr/map.jinja
deleted file mode 100644
index 138534c..0000000
--- a/tcp_tests/templates/virtual-mcp10-dvr/map.jinja
+++ /dev/null
@@ -1,2 +0,0 @@
-{% set DOMAIN_NAME = os_env('LAB_CONFIG_NAME', 'virtual-mcp10-dvr') + '.local' %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
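The os_env() helper these two lines rely on is, presumably, nothing more than an environment lookup with a default, exposed as a Jinja global; the '!os_env VAR, default' YAML tag used throughout the underlay files expresses the same lookup as a custom tag. A sketch under that assumption:

    import os

    # Presumed equivalent of the os_env() Jinja global used in map.jinja;
    # the real helper lives in the test framework.
    def os_env(name: str, default: str = "") -> str:
        return os.environ.get(name, default)

    domain = os_env("LAB_CONFIG_NAME", "virtual-mcp10-dvr") + ".local"
    print(os_env("HOSTNAME_CFG01", "cfg01." + domain))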
diff --git a/tcp_tests/templates/virtual-mcp10-dvr/openstack.yaml b/tcp_tests/templates/virtual-mcp10-dvr/openstack.yaml
deleted file mode 100644
index 05d07be..0000000
--- a/tcp_tests/templates/virtual-mcp10-dvr/openstack.yaml
+++ /dev/null
@@ -1,158 +0,0 @@
-{% from 'virtual-mcp10-dvr/map.jinja' import HOSTNAME_CFG01 with context %}
-
-# Install OpenStack control services
-
-- description: Install keystone service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' state.sls keystone.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Populate keystone services/tenants/admins
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:client' state.sls keystone.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check keystone service-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; keystone service-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Install glance on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glance:server' state.sls glance -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Configure glusterfs.client on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glance:server' state.sls glusterfs.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Update fernet tokens for keystone server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' state.sls keystone.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check glance image-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; glance image-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Install nova on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nova:controller' state.sls nova -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check nova service-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; nova service-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Install cinder
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@cinder:controller' state.sls cinder -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check cinder list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; cinder list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Install neutron service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@neutron:server' state.sls neutron -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install neutron on gtw node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@neutron:gateway' state.sls neutron
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Check neutron agent-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; neutron agent-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Install heat service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@heat:server' state.sls heat -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check heat service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; heat resource-type-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Deploy horizon dashboard
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@horizon:server' state.sls horizon
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Deploy nginx proxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-# Install compute node
-
-- description: Apply formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Re-apply (as in the docs) formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check IP on computes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
- 'ip a'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 10, delay: 30}
- skip_fail: false
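The commands above target minions either with salt's compound matcher (-C 'I@pillar:path', which matches on pillar data) or with plain id globs like 'cmp*'. A toy evaluator showing both kinds of match (not Salt's real implementation; the pillar layout below is invented for illustration):

    import fnmatch

    # Toy sketch of 'I@keystone:server' pillar matching plus a 'cmp*' glob.
    minions = {
        "ctl01.virtual-mcp10-dvr.local": {"keystone": {"server": {"enabled": True}}},
        "ctl02.virtual-mcp10-dvr.local": {"keystone": {"server": {"enabled": True}}},
        "cmp01.virtual-mcp10-dvr.local": {},
    }

    def has_pillar(pillar: dict, path: str) -> bool:
        node = pillar
        for part in path.split(":"):
            if not isinstance(node, dict) or part not in node:
                return False
            node = node[part]
        return True

    print([m for m, p in minions.items() if has_pillar(p, "keystone:server")])
    print([m for m in minions if fnmatch.fnmatch(m, "cmp*")])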
diff --git a/tcp_tests/templates/virtual-mcp10-dvr/salt.yaml b/tcp_tests/templates/virtual-mcp10-dvr/salt.yaml
deleted file mode 100644
index 81fb04a..0000000
--- a/tcp_tests/templates/virtual-mcp10-dvr/salt.yaml
+++ /dev/null
@@ -1,94 +0,0 @@
-{% from 'virtual-mcp10-dvr/map.jinja' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp10-dvr/map.jinja' import LAB_CONFIG_NAME with context %}
-{% from 'virtual-mcp10-dvr/map.jinja' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
-# Other salt model repository parameters see in shared-salt.yaml
-{% set SALT_VERSION = os_env('SALT_VERSION', '2016.3') %}
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-# Install salt to the config node
-
-- description: Configure repository on the cfg01 node
- cmd:
- echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
- which wget >/dev/null || (apt-get update; apt-get install -y wget);
- echo "deb [arch=amd64] http://apt.mirantis.com/xenial nightly salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/{{ SALT_VERSION }} xenial main" > /etc/apt/sources.list.d/saltstack.list;
- wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/" + SALT_VERSION+ "/SALTSTACK-GPG-KEY.pub | apt-key add -;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 1}
- skip_fail: false
-
-- description: Update packages on cfg01
- cmd: apt-get clean; apt-get update
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 1}
- skip_fail: false
-
-- description: Install common packages on cfg01
- cmd: apt-get install -y python-pip wget curl tmux byobu iputils-ping traceroute htop tree
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 1}
- skip_fail: false
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon"') }}
-
-- description: Configure salt-minion on cfg01
- cmd: |
- [ ! -d /etc/salt/minion.d ] && mkdir -p /etc/salt/minion.d;
- cat << "EOF" >> /etc/salt/minion.d/minion.conf
- id: {{ HOSTNAME_CFG01 }}
- master: 127.0.0.1
- EOF
- apt-get install -y salt-minion;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 1}
- skip_fail: false
-
-
-- description: Restart services
- cmd: |
- systemctl restart salt-master;
- systemctl restart salt-minion;
- echo "Showing system info and metadata ...";
- salt-call --no-color grains.items;
- salt-call --no-color pillar.data;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-{{ SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG() }}
-
-{{ SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES() }}
-
-#- description: Hack gtw node
-# cmd: salt 'gtw*' cmd.run "ip addr del 172.16.10.110/24 dev ens4; ip addr flush dev ens4";
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-#
-#- description: Hack cmp01 node
-# cmd: salt 'cmp01*' cmd.run "ip addr del 172.16.10.105/24 dev ens4; ip addr flush dev ens4";
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-#
-#- description: Hack cmp02 node
-# cmd: salt 'cmp02*' cmd.run "ip addr del 172.16.10.106/24 dev ens4; ip addr flush dev ens4";
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-#
diff --git a/tcp_tests/templates/virtual-mcp10-dvr/underlay--meta-data.yaml b/tcp_tests/templates/virtual-mcp10-dvr/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/virtual-mcp10-dvr/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
- instance-id: iid-local1
- hostname: {hostname}
- local-hostname: {hostname}
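Note the two template syntaxes in play here: '{{ ... }}' blocks are Jinja and render first, while single-brace fields like {hostname} survive into the stored string and are, by all appearances, filled in per node later with Python's str.format(). A sketch of that second substitution pass (the call site is an assumption, not the framework's actual code):

    # Presumed second pass over the stored meta-data string.
    meta_data = (
        "instance-id: iid-local1\n"
        "hostname: {hostname}\n"
        "local-hostname: {hostname}\n"
    )
    print(meta_data.format(hostname="cfg01.virtual-mcp10-dvr.local"))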
diff --git a/tcp_tests/templates/virtual-mcp10-dvr/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp10-dvr/underlay--user-data-cfg01.yaml
deleted file mode 100644
index 75fef7c..0000000
--- a/tcp_tests/templates/virtual-mcp10-dvr/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,65 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- ########################################################
- # Node is ready, allow SSH access
- - echo "Allow SSH access ..."
- - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
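The bootcmd/runcmd pair above implements a readiness gate: port 22 is DROPped before provisioning starts and the rule is deleted only at the end, so "SSH accepts connections" doubles as "node finished cloud-init". A minimal sketch of the poller this enables on the test-runner side (a hypothetical helper, not the framework's actual code):

    import socket
    import time

    # Hypothetical poller: succeeds only once the node has removed its
    # iptables DROP rule, i.e. once cloud-init reached the end of runcmd.
    def wait_for_ssh(host: str, port: int = 22, timeout: float = 600.0) -> None:
        deadline = time.monotonic() + timeout
        while time.monotonic() < deadline:
            try:
                with socket.create_connection((host, port), timeout=5):
                    return
            except OSError:
                time.sleep(10)
        raise TimeoutError("SSH on %s:%d not reachable within %ss" % (host, port, timeout))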
diff --git a/tcp_tests/templates/virtual-mcp10-dvr/underlay--user-data1404.yaml b/tcp_tests/templates/virtual-mcp10-dvr/underlay--user-data1404.yaml
deleted file mode 100644
index 487ccfa..0000000
--- a/tcp_tests/templates/virtual-mcp10-dvr/underlay--user-data1404.yaml
+++ /dev/null
@@ -1,101 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifup eth0
- #- sudo route add default gw {gateway} {interface_name}
- - sudo ifup eth1
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- ############## TCP Cloud cfg01 node ##################
- #- sleep 120
- - echo "Preparing base OS"
- - which wget >/dev/null || (apt-get update; apt-get install -y wget)
- - echo "deb [arch=amd64] http://apt.tcpcloud.eu/nightly/ trusty main security extra tcp tcp-salt" > /etc/apt/sources.list
- - wget -O - http://apt.tcpcloud.eu/public.gpg | apt-key add -
- # saltstack repo is for minions that have the same version in the xenial and trusty (2016.3.3)
- #- echo "deb http://repo.saltstack.com/apt/ubuntu/14.04/amd64/latest trusty main" > /etc/apt/sources.list.d/saltstack.list
- #- wget -O - https://repo.saltstack.com/apt/ubuntu/14.04/amd64/latest/SALTSTACK-GPG-KEY.pub | apt-key add -
- - echo "deb http://repo.saltstack.com/apt/ubuntu/14.04/amd64/{{ SALT_VERSION }} trusty main" > /etc/apt/sources.list.d/saltstack.list
- - wget -O - https://repo.saltstack.com/apt/ubuntu/14.04/amd64/{{ SALT_VERSION }}/SALTSTACK-GPG-KEY.pub | apt-key add -
-
- - apt-get clean
- - apt-get update
- - apt-get -y upgrade
-
- # Install common packages
- - apt-get install -y python-pip git
- - apt-get install -y curl tmux byobu iputils-ping traceroute htop tree
-
- - apt-get install -y salt-minion
-
- # To be configured from inventory/fuel-devops by operator or autotests
- - 'echo "id: {hostname}" >> /etc/salt/minion'
- - 'echo "master: 192.168.10.100" >> /etc/salt/minion'
-
- - echo "Restarting minion service with workarounds..."
- - rm -f /etc/salt/pki/minion/minion_master.pub
- - service salt-minion restart
- - sleep 5
- - rm -f /etc/salt/pki/minion/minion_master.pub
- - service salt-minion restart
-
- #- echo "Showing node metadata..."
- #- salt-call pillar.data
-
- #- echo "Running complete state ..."
- #- salt-call state.sls linux,openssh,salt
-
- # Workaround for bug https://mirantis.jira.com/browse/PROD-8214
- - apt-get -y install --install-recommends linux-generic-lts-xenial
- - reboot
-
- ########################################################
- # Node is ready, allow SSH access
- ##- echo "Allow SSH access ..."
- ##- sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto eth0
- iface eth0 inet dhcp
- auto eth1
- iface eth1 inet dhcp
diff --git a/tcp_tests/templates/virtual-mcp10-dvr/underlay--user-data1604.yaml b/tcp_tests/templates/virtual-mcp10-dvr/underlay--user-data1604.yaml
deleted file mode 100644
index 3e0c1fd..0000000
--- a/tcp_tests/templates/virtual-mcp10-dvr/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,97 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- ########################################################
- # Node is ready, allow SSH access
- - echo "Allow SSH access ..."
- - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
-
- ############## TCP Cloud cfg01 node ##################
- #- sleep 120
- - echo "Preparing base OS"
- - which wget >/dev/null || (apt-get update; apt-get install -y wget)
- - echo "deb [arch=amd64] http://apt.tcpcloud.eu/nightly/ xenial main security extra tcp tcp-salt" > /etc/apt/sources.list
- - wget -O - http://apt.tcpcloud.eu/public.gpg | apt-key add -
- # saltstack repo is for minions that have the same version in the xenial and trusty (2016.3.3)
- #- echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/latest xenial main" > /etc/apt/sources.list.d/saltstack.list
- #- wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/latest/SALTSTACK-GPG-KEY.pub | apt-key add -
- - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/{{ SALT_VERSION }} xenial main" > /etc/apt/sources.list.d/saltstack.list
- - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/{{ SALT_VERSION }}/SALTSTACK-GPG-KEY.pub | apt-key add -
-
- - apt-get clean
- - apt-get update
- - apt-get -y upgrade
-
- # Install common packages
- - apt-get install -y python-pip git
- - apt-get install -y curl tmux byobu iputils-ping traceroute htop tree
-
- - apt-get install -y salt-minion
-
- # To be configured from inventory/fuel-devops by operator or autotests
- - 'echo "id: {hostname}" >> /etc/salt/minion'
- - 'echo "master: 192.168.10.100" >> /etc/salt/minion'
-
- - echo "Restarting minion service with workarounds..."
- - rm -f /etc/salt/pki/minion/minion_master.pub
- - service salt-minion restart
- - sleep 5
- - rm -f /etc/salt/pki/minion/minion_master.pub
- - service salt-minion restart
-
- #- echo "Showing node metadata..."
- #- salt-call pillar.data
-
- #- echo "Running complete state ..."
- #- salt-call state.sls linux,openssh,salt
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
diff --git a/tcp_tests/templates/virtual-mcp10-dvr/underlay.yaml b/tcp_tests/templates/virtual-mcp10-dvr/underlay.yaml
deleted file mode 100644
index e163d23..0000000
--- a/tcp_tests/templates/virtual-mcp10-dvr/underlay.yaml
+++ /dev/null
@@ -1,413 +0,0 @@
----
-aliases:
- default_interface_model:
- - &interface_model !os_env INTERFACE_MODEL, virtio
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'virtual-mcp10-dvr') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
-
-template:
- devops_settings:
- env_name: {{ os_env('ENV_NAME', 'virtual-mcp10-dvr') }}
-
- address_pools:
- private-pool01:
- net: 172.16.10.0/24:24
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+90, -10]
-
- admin-pool01:
- net: 192.168.10.0/24:24
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +90
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+90, -10]
-
- tenant-pool01:
- net: 10.1.0.0/24:24
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+10, -10]
-
- external-pool01:
- net: 10.16.0.0/24:24
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+10, -10]
-
-
- groups:
- - name: default
- driver:
- name: devops.driver.libvirt
- params:
- connection_string: !os_env CONNECTION_STRING, qemu:///system
- storage_pool_name: !os_env STORAGE_POOL_NAME, default
- stp: False
- hpet: False
- enable_acpi: true
- use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
- use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
-
- network_pools:
- admin: admin-pool01
- private: private-pool01
- tenant: tenant-pool01
- external: external-pool01
-
- l2_network_devices:
- private:
- address_pool: private-pool01
- dhcp: false
- forward:
- mode: route
-
- admin:
- address_pool: admin-pool01
- dhcp: true
- forward:
- mode: nat
-
- tenant:
- address_pool: tenant-pool01
- dhcp: false
-
- external:
- address_pool: external-pool01
- dhcp: true
- forward:
- mode: nat
-
-
- group_volumes:
- - name: cloudimage1404 # This name is used for 'backing_store' option for node volumes.
- source_image: !os_env IMAGE_PATH1404 # https://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-disk1.img or
- # http://apt.tcpcloud.eu/images/ubuntu-14-04-x64-201608231134.qcow2
- format: qcow2
- - name: cloudimage1604 # This name is used for 'backing_store' option for node volumes.
- source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img or
- # http://apt.tcpcloud.eu/images/ubuntu-16-04-x64-201608231004.qcow2
- format: qcow2
-
- nodes:
- - name: {{ HOSTNAME_CFG01 }}
- role: salt_master
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: !include underlay--meta-data.yaml
- cloudinit_user_data: !include underlay--user-data-cfg01.yaml
-
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config:
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: eth0
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1404
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: !include underlay--meta-data.yaml
- cloudinit_user_data: !include underlay--user-data1404.yaml
-
- interfaces: &interfaces
- - label: eth0
- l2_network_device: admin
- interface_model: *interface_model
- - label: eth1
- l2_network_device: private
- interface_model: *interface_model
- network_config: &network_config
- eth0:
- networks:
- - admin
- eth1:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: eth0
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1404
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: !include underlay--meta-data.yaml
- cloudinit_user_data: !include underlay--user-data1404.yaml
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CTL03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: eth0
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1404
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: !include underlay--meta-data.yaml
- cloudinit_user_data: !include underlay--user-data1404.yaml
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_PRX01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: eth0
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1404
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: !include underlay--meta-data.yaml
- cloudinit_user_data: !include underlay--user-data1404.yaml
-
- interfaces: *interfaces
- network_config: *network_config
-
-
- - name: {{ HOSTNAME_CMP01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: !include underlay--meta-data.yaml
- cloudinit_user_data: !include underlay--user-data1604.yaml
-
-
- interfaces: &all_interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- - label: ens5
- l2_network_device: tenant
- interface_model: *interface_model
- - label: ens6
- l2_network_device: external
- interface_model: *interface_model
- network_config: &all_network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
- ens5:
- networks:
- - tenant
- ens6:
- networks:
- - external
-
- - name: {{ HOSTNAME_CMP02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: !include underlay--meta-data.yaml
- cloudinit_user_data: !include underlay--user-data1604.yaml
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_GTW01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: !include underlay--meta-data.yaml
- cloudinit_user_data: !include underlay--user-data1604.yaml
-
- interfaces: *all_interfaces
- network_config: *all_network_config
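Unlike the contrail underlay, which pulls the user-data files in as Jinja imports bound to YAML anchors, this underlay references them with a custom '!include' tag. One common way such a tag is wired up with PyYAML (a sketch; the framework's actual loader may differ):

    import os
    import yaml

    class IncludeLoader(yaml.SafeLoader):
        """SafeLoader extended with an '!include <relative-path>' tag."""

    def _include(loader: IncludeLoader, node: yaml.nodes.ScalarNode):
        # loader.name is the path of the stream being parsed (for file
        # streams), so includes resolve relative to the including file.
        base = os.path.dirname(loader.name)
        with open(os.path.join(base, loader.construct_scalar(node))) as f:
            return f.read()  # user-data templates come back as raw strings

    IncludeLoader.add_constructor("!include", _include)
    # usage: yaml.load(open("underlay.yaml"), Loader=IncludeLoader)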
diff --git a/tcp_tests/templates/virtual-mcp10-ovs.new/common-services.yaml b/tcp_tests/templates/virtual-mcp10-ovs.new/common-services.yaml
deleted file mode 100644
index 276d495..0000000
--- a/tcp_tests/templates/virtual-mcp10-ovs.new/common-services.yaml
+++ /dev/null
@@ -1,147 +0,0 @@
-{% from 'virtual-mcp10-ovs/map.jinja' import HOSTNAME_CFG01 with context %}
-
-################### Install OpenStack infra ##########################
-
-# salt.enforceState(saltMaster, 'I@glusterfs:server', 'glusterfs.server.service', true)
-- description: Install and run GlusterFS
- do: enforceState
- target: I@glusterfs:server
- state: glusterfs.server.service
-
-# // Install keepaliveds
-# //runSaltProcessStep(master, 'I@keepalived:cluster', 'state.sls', ['keepalived'], 1)
-# salt.enforceState(saltMaster, 'I@keepalived:cluster and *01*', 'keepalived', true)
-# salt.enforceState(saltMaster, 'I@keepalived:cluster', 'keepalived', true)
-- description: Install keepalived
- do: enforceState
- target: I@keepalived:cluster and *01*
- state: keepalived
-
-- description: Re-run installation of keepalived
- do: enforceState
- target: I@keepalived:cluster
- state: keepalived
-
-# // Check the keepalived VIPs
-# salt.runSaltProcessStep(saltMaster, 'I@keepalived:cluster', 'cmd.run', ['ip a | grep 172.16.10.2'])
-# salt.enforceState(saltMaster, 'I@glusterfs:server and *01*', 'glusterfs.server.setup', true)
-# salt.runSaltProcessStep(saltMaster, 'I@glusterfs:server', 'cmd.run', ['gluster peer status'], null, true)
-# salt.runSaltProcessStep(saltMaster, 'I@glusterfs:server', 'cmd.run', ['gluster volume status'], null, true)
-
-- description: Show VIPs
- do: runState
- target: I@keepalived:cluster
- state: cmd.run
- args: ['ip a | grep 172.16.10.2']
-
-- description: Re-run Gluster server setup
- do: enforceState
- target: I@glusterfs:server and *01*
- state: glusterfs.server.setup
-
-- description: Show Gluster peer status
- do: runState
- target: I@glusterfs:server
- state: cmd.run
- args: ['gluster peer status']
-
-- description: Show Gluster volumes status
- do: runState
- target: I@glusterfs:server
- state: cmd.run
- args: ['gluster volume status']
-
-
- # // Install rabbitmq
- # withEnv(['ASK_ON_ERROR=false']){
- # retry(2) {
- # salt.enforceState(saltMaster, 'I@rabbitmq:server', 'rabbitmq', true)
- # }
- # }
- # // Check the rabbitmq status
- # salt.runSaltProcessStep(saltMaster, 'I@rabbitmq:server', 'cmd.run', ['rabbitmqctl cluster_status'])
-
-- description: Install rabbitmq
- do: enforceState
- target: I@rabbitmq:server
- state: rabbitmq
- retry: {count: 2, delay: 5}
-
-- description: Show rabbitmq status
- do: runState
- target: I@rabbitmq:server
- state: cmd.run
- args: ['rabbitmqctl cluster_status']
-
- # // Install galera
- # withEnv(['ASK_ON_ERROR=false']){
- # retry(2) {
- # salt.enforceState(saltMaster, 'I@galera:master', 'galera', true)
- # }
- # }
- # salt.enforceState(saltMaster, 'I@galera:slave', 'galera', true)
-
- # // Check galera status
- # salt.runSaltProcessStep(saltMaster, 'I@galera:master', 'mysql.status')
- # salt.runSaltProcessStep(saltMaster, 'I@galera:slave', 'mysql.status')
-
- # // // Setup mysql client
- # // salt.enforceState(saltMaster, 'I@mysql:client', 'mysql.client', true)
-
-
-- description: Install Galera (master)
- do: enforceState
- target: I@galera:master
- state: galera
- retry: {count: 2, delay: 5}
-
-- description: Install Galera (slaves)
- do: enforceState
- target: I@galera:slave
- state: galera
-
-- description: Show master galera status
- do: runState
- target: I@galera:master
- state: mysql.status
-
-- description: Show slave galera status
- do: runState
- target: I@galera:slave
- state: mysql.status
-
-# - description: Install mysql client
-# do: enforceState
-# target: I@mysql:client
-# state: mysql.client
-
-
-# // Install haproxy
-# salt.enforceState(saltMaster, 'I@haproxy:proxy', 'haproxy', true)
-# salt.runSaltProcessStep(saltMaster, 'I@haproxy:proxy', 'service.status', ['haproxy'])
-# salt.runSaltProcessStep(saltMaster, 'I@haproxy:proxy', 'service.restart', ['rsyslog'])
-
-
-- description: Install HAProxy
- do: enforceState
- target: I@haproxy:proxy
- state: haproxy
-
-- description: Show HAProxy service status
- do: runState
- target: I@haproxy:proxy
- state: service.status
- args: ['haproxy']
-
-- description: Restart HAProxy service
- do: runState
- target: I@haproxy:proxy
- state: service.restart
- args: ['haproxy']
-
-# // Install memcached
-# salt.enforceState(saltMaster, 'I@memcached:server', 'memcached', true)
-- description: Install Memcached
- do: enforceState
- target: I@memcached:server
- state: memcached
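This newer step format ('do:' / 'target:' / 'state:') is a direct transliteration of the salt.enforceState and runSaltProcessStep calls kept in the comments above. A hypothetical mapping back to salt CLI invocations, to make the correspondence concrete (the framework's real executor may use the salt Python API instead):

    # Hypothetical translation of the 'do:' step format into salt CLI calls.
    def to_salt_cli(step: dict) -> str:
        if step["do"] == "enforceState":
            return "salt -C '%s' state.sls %s" % (step["target"], step["state"])
        if step["do"] == "runState":
            extra = " ".join(repr(a) for a in step.get("args", []))
            return ("salt -C '%s' %s %s" % (step["target"], step["state"], extra)).strip()
        raise ValueError("unknown step type: %r" % step["do"])

    print(to_salt_cli({"do": "enforceState",
                       "target": "I@keepalived:cluster and *01*",
                       "state": "keepalived"}))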
diff --git a/tcp_tests/templates/virtual-mcp10-ovs.new/map.jinja b/tcp_tests/templates/virtual-mcp10-ovs.new/map.jinja
deleted file mode 100644
index 424909f..0000000
--- a/tcp_tests/templates/virtual-mcp10-ovs.new/map.jinja
+++ /dev/null
@@ -1,2 +0,0 @@
-{% set DOMAIN_NAME = os_env('LAB_CONFIG_NAME', 'virtual-mcp10-ovs') + '.local' %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
diff --git a/tcp_tests/templates/virtual-mcp10-ovs.new/openstack.yaml b/tcp_tests/templates/virtual-mcp10-ovs.new/openstack.yaml
deleted file mode 100644
index 88b1ff8..0000000
--- a/tcp_tests/templates/virtual-mcp10-ovs.new/openstack.yaml
+++ /dev/null
@@ -1,255 +0,0 @@
-{% from 'virtual-mcp10-ovs/map.jinja' import DOMAIN_NAME with context %}
-{% from 'virtual-mcp10-ovs/map.jinja' import HOSTNAME_CFG01 with context %}
-
-
-################### Install OpenStack control ##########################
-
-# // Install horizon dashboard
-# salt.enforceState(saltMaster, 'I@horizon:server', 'horizon', true)
-# salt.enforceState(saltMaster, 'I@nginx:server', 'nginx', true)
-
-- description: Install Horizon
- do: enforceState
- target: I@horizon:server
- state: horizon
-
-- description: Install nginx
- do: enforceState
- target: I@nginx:server
- state: nginx
-
-# // setup keystone service
-# //runSaltProcessStep(saltMaster, 'I@keystone:server', 'state.sls', ['keystone.server'], 1)
-# salt.enforceState(saltMaster, 'I@keystone:server and *01*', 'keystone.server', true)
-# salt.enforceState(saltMaster, 'I@keystone:server', 'keystone.server', true)
-# // populate keystone services/tenants/roles/users
-
-- description: Install Keystone on 01
- do: enforceState
- target: I@keystone:server and *01*
- state: keystone.server
-
-- description: Install Keystone
- do: enforceState
- target: I@keystone:server
- state: keystone.server
-
-# // keystone:client must be called locally
-# //salt.runSaltProcessStep(saltMaster, 'I@keystone:client', 'cmd.run', ['salt-call state.sls keystone.client'], null, true)
-# salt.runSaltProcessStep(saltMaster, 'I@keystone:server', 'service.restart', ['apache2'])
-# salt.enforceState(saltMaster, 'I@keystone:client', 'keystone.client', true)
-# salt.enforceState(saltMaster, 'I@keystone:client', 'keystone.client', true)
-# salt.runSaltProcessStep(saltMaster, 'I@keystone:server', 'cmd.run', ['. /root/keystonerc; keystone service-list'], null, true)
-
-# - description: Install Keystone client
-# do: runState
-# target: I@keystone:client
-# state: cmd.run
-# args: ['salt-call state.sls keystone.client']
-
-- description: Restart apache on Keystone servers
- do: runState
- target: I@keystone:server
- state: service.restart
- args: ['apache2']
-
-- description: Install Keystone Client
- do: enforceState
- target: I@keystone:client
- state: keystone.client
-
-- description: Install Keystone Client
- do: enforceState
- target: I@keystone:client
- state: keystone.client
-
-- description: Show Keystone service list
- do: runState
- target: I@keystone:server
- state: cmd.run
- args: ['. /root/keystonerc; keystone service-list']
-
-
-# // Install glance and ensure glusterfs clusters
-# //runSaltProcessStep(saltMaster, 'I@glance:server', 'state.sls', ['glance.server'], 1)
-# salt.enforceState(saltMaster, 'I@glance:server and *01*', 'glance.server', true)
-# salt.enforceState(saltMaster, 'I@glance:server', 'glance.server', true)
-# salt.enforceState(saltMaster, 'I@glance:server', 'glusterfs.client', true)
-
-
-- description: Install glance on 01
- do: enforceState
- target: I@glance:server and *01*
- state: glance.server
-
-- description: Install glance
- do: enforceState
- target: I@glance:server
- state: glance.server
-
-- description: Install gluster client on glance servers
- do: enforceState
- target: I@glance:server
- state: glusterfs.client
-
-# // Update fernet tokens before doing request on keystone server
-# salt.enforceState(saltMaster, 'I@keystone:server', 'keystone.server', true)
-
-- description: Update fernet tokens
- do: enforceState
- target: I@keystone:server
- state: keystone.server
-
-# // Check glance service
-# salt.runSaltProcessStep(saltMaster, 'I@keystone:server', 'cmd.run', ['. /root/keystonerc; glance image-list'], null, true)
-
-- description: Show glance images via keystone node
- do: runState
- target: I@keystone:server
- state: cmd.run
- args: ['. /root/keystonerc; glance image-list']
-
-# // Install and check nova service
-# //runSaltProcessStep(saltMaster, 'I@nova:controller', 'state.sls', ['nova'], 1)
-# salt.enforceState(saltMaster, 'I@nova:controller and *01*', 'nova.controller', true)
-# salt.enforceState(saltMaster, 'I@nova:controller', 'nova.controller', true)
-# salt.runSaltProcessStep(saltMaster, 'I@keystone:server', 'cmd.run', ['. /root/keystonerc; nova service-list'], null, true)
-
-- description: Install nova on controllers on 01
- do: enforceState
- target: I@nova:controller and *01*
- state: nova.controller
-
-- description: Install nova on controllers
- do: enforceState
- target: I@nova:controller
- state: nova.controller
-
-- description: Show nova services via keystone node
- do: runState
- target: I@keystone:server
- state: cmd.run
- args: ['. /root/keystonerc; nova service-list']
-
-
-
-# // Install and check cinder service
-# //runSaltProcessStep(saltMaster, 'I@cinder:controller', 'state.sls', ['cinder'], 1)
-# salt.enforceState(saltMaster, 'I@cinder:controller and *01*', 'cinder', true)
-# salt.enforceState(saltMaster, 'I@cinder:controller', 'cinder', true)
-# salt.runSaltProcessStep(saltMaster, 'I@keystone:server', 'cmd.run', ['. /root/keystonerc; cinder list'], null, true)
-
-
-- description: Install cinder on controllers on 01
- do: enforceState
- target: I@cinder:controller and *01*
- state: cinder
-
-- description: Install cinder on controllers
- do: enforceState
- target: I@cinder:controller
- state: cinder
-
-- description: Show cinder list via keystone node
- do: runState
- target: I@keystone:server
- state: cmd.run
- args: ['. /root/keystonerc; cinder list']
-
-
-# // Install neutron service
-# //runSaltProcessStep(saltMaster, 'I@neutron:server', 'state.sls', ['neutron'], 1)
-
-# salt.enforceState(saltMaster, 'I@neutron:server and *01*', 'neutron.server', true)
-# salt.enforceState(saltMaster, 'I@neutron:server', 'neutron.server', true)
-# salt.runSaltProcessStep(saltMaster, 'I@keystone:server', 'cmd.run', ['. /root/keystonerc; neutron agent-list'], null, true)
-
-- description: Install neutron on controllers on 01
- do: enforceState
- target: I@neutron:server and *01*
- state: neutron.server
-
-- description: Install neutron on controllers
- do: enforceState
- target: I@neutron:server
- state: neutron.server
-
-- description: Show neutron agent list via keystone node
- do: runState
- target: I@keystone:server
- state: cmd.run
- args: ['. /root/keystonerc; neutron agent-list']
-
-# // Install heat service
-# //runSaltProcessStep(saltMaster, 'I@heat:server', 'state.sls', ['heat'], 1)
-# salt.enforceState(saltMaster, 'I@heat:server and *01*', 'heat', true)
-# salt.enforceState(saltMaster, 'I@heat:server', 'heat', true)
-# salt.runSaltProcessStep(saltMaster, 'I@keystone:server', 'cmd.run', ['. /root/keystonerc; heat resource-type-list'], null, true)
-
-- description: Install heat on controllers on 01
- do: enforceState
- target: I@heat:server and *01*
- state: heat
-
-- description: Install heat on controllers
- do: enforceState
- target: I@heat:server
- state: heat
-
-- description: Show heat resource type list via keystone node
- do: runState
- target: I@keystone:server
- state: cmd.run
- args: ['. /root/keystonerc; heat resource-type-list']
-
-# // Restart nova api
-# salt.runSaltProcessStep(saltMaster, 'I@nova:controller', 'service.restart', ['nova-api'])
-
-- description: Restart nova-api
- do: runState
- target: I@nova:controller
- state: service.restart
- args: ['nova-api']
-
-################### Install OpenStack network ##########################
-
-# // Apply gateway
-# salt.runSaltProcessStep(saltMaster, 'I@neutron:gateway', 'state.apply', [], null, true)
-
-- description: Apply gateway
- do: runState
- target: I@neutron:gateway
- state: state.apply
-
-# // Print information
-# salt.runSaltProcessStep(saltMaster, 'I@keystone:server', 'cmd.run', ['. /root/keystonerc; neutron net-list'], null, true)
-# salt.runSaltProcessStep(saltMaster, 'I@keystone:server', 'cmd.run', ['. /root/keystonerc; nova net-list'], null, true)
-
-- description: Show neutron networks via keystone node
- do: runState
- target: I@keystone:server
- state: cmd.run
- args: ['. /root/keystonerc; neutron net-list']
-
-- description: Show nova networks via keystone node
- do: runState
- target: I@keystone:server
- state: cmd.run
- args: ['. /root/keystonerc; nova net-list']
-
-
-################### Install OpenStack compute ##########################
-
-# //orchestrate.installOpenstackMkCompute(saltMaster, physical)
-# // Configure compute nodes
-# retry(2) {
-# salt.runSaltProcessStep(saltMaster, 'I@nova:compute', 'state.apply', [], null, true)
-# }
-
-- description: Install Nova compute
- do: runState
- target: I@nova:compute
- state: state.apply
- retry: {count: 2, delay: 5}
-
-
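
Aside: the `do: enforceState` / `do: runState` steps in the deleted pipeline above are a YAML rendering of the Jenkins `salt.enforceState()` / `salt.runSaltProcessStep()` calls preserved in its comments. As a rough sketch of what each step kind boils down to on the salt CLI (the exact flags the step runner adds are an assumption, not taken from this change):

    # do: enforceState, target: I@horizon:server, state: horizon
    salt -C 'I@horizon:server' state.sls horizon
    # do: runState, target: I@keystone:server, state: cmd.run, args: [...]
    salt -C 'I@keystone:server' cmd.run '. /root/keystonerc; keystone service-list'
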
diff --git a/tcp_tests/templates/virtual-mcp10-ovs.new/salt.yaml b/tcp_tests/templates/virtual-mcp10-ovs.new/salt.yaml
deleted file mode 100644
index 8d5a9e3..0000000
--- a/tcp_tests/templates/virtual-mcp10-ovs.new/salt.yaml
+++ /dev/null
@@ -1,93 +0,0 @@
-{% from 'virtual-mcp10-ovs/map.jinja' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp10-ovs/map.jinja' import LAB_CONFIG_NAME with context %}
-{% from 'virtual-mcp10-ovs/map.jinja' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
-# For other salt model repository parameters, see shared-salt.yaml
-{% set SALT_VERSION = os_env('SALT_VERSION', '2016.3') %}
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-# Install salt to the config node
-
-- description: Configure repository on the cfg01 node
- cmd:
- echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
- which wget >/dev/null || (apt-get update; apt-get install -y wget);
- echo "deb [arch=amd64] http://apt.mirantis.com/xenial nightly salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/{{ SALT_VERSION }} xenial main" > /etc/apt/sources.list.d/saltstack.list;
- wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/{{ SALT_VERSION }}/SALTSTACK-GPG-KEY.pub | apt-key add -;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 1}
- skip_fail: false
-
-- description: Update packages on cfg01
- cmd: apt-get clean; apt-get update
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 1}
- skip_fail: false
-
-- description: Install common packages on cfg01
- cmd: apt-get install -y python-pip wget curl tmux byobu iputils-ping traceroute htop tree
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 1}
- skip_fail: false
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon"') }}
-
-- description: Configure salt-minion on cfg01
- cmd: |
- [ ! -d /etc/salt/minion.d ] && mkdir -p /etc/salt/minion.d;
- cat << "EOF" >> /etc/salt/minion.d/minion.conf
- id: {{ HOSTNAME_CFG01 }}
- master: 127.0.0.1
- EOF
- apt-get install -y salt-minion;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 1}
- skip_fail: false
-
-
-- description: Restart services
- cmd: |
- systemctl restart salt-master;
- systemctl restart salt-minion;
- echo "Showing system info and metadata ...";
- salt-call --no-color grains.items;
- salt-call --no-color pillar.data;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
-
-#- description: Hack gtw node
-# cmd: salt 'gtw*' cmd.run "ip addr del 172.16.10.110/24 dev ens4; ip addr flush dev ens4";
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-#
-#- description: Hack cmp01 node
-# cmd: salt 'cmp01*' cmd.run "ip addr del 172.16.10.105/24 dev ens4; ip addr flush dev ens4";
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-#
-#- description: Hack cmp02 node
-# cmd: salt 'cmp02*' cmd.run "ip addr del 172.16.10.106/24 dev ens4; ip addr flush dev ens4";
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-
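
After the "Restart services" step above, two stock salt commands are enough to confirm that the cfg01 master/minion pairing came back up; a minimal sketch (standard salt CLI, nothing specific to this repo):

    # the cfg01 minion key should appear under 'Accepted Keys'
    salt-key -L
    # the local minion should reach the master it points at (127.0.0.1)
    salt-call --no-color test.ping
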
diff --git a/tcp_tests/templates/virtual-mcp10-ovs.new/underlay--meta-data.yaml b/tcp_tests/templates/virtual-mcp10-ovs.new/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/virtual-mcp10-ovs.new/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
- instance-id: iid-local1
- hostname: {hostname}
- local-hostname: {hostname}
diff --git a/tcp_tests/templates/virtual-mcp10-ovs.new/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp10-ovs.new/underlay--user-data-cfg01.yaml
deleted file mode 100644
index 75fef7c..0000000
--- a/tcp_tests/templates/virtual-mcp10-ovs.new/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,65 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- ########################################################
- # Node is ready, allow SSH access
- - echo "Allow SSH access ..."
- - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
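
The bootcmd/runcmd pair in this user-data implements an SSH gate: an iptables DROP rule is inserted before provisioning starts and deleted once the node is ready. A small sketch for checking the gate from the node itself, using the standard `iptables -C` rule-check flag:

    # returns 0 while the DROP rule exists, non-zero once it has been deleted
    if iptables -C INPUT -p tcp --dport 22 -j DROP 2>/dev/null; then
        echo "still provisioning: SSH blocked"
    else
        echo "provisioning done: SSH open"
    fi
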
diff --git a/tcp_tests/templates/virtual-mcp10-ovs.new/underlay--user-data1404.yaml b/tcp_tests/templates/virtual-mcp10-ovs.new/underlay--user-data1404.yaml
deleted file mode 100644
index 487ccfa..0000000
--- a/tcp_tests/templates/virtual-mcp10-ovs.new/underlay--user-data1404.yaml
+++ /dev/null
@@ -1,101 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifup eth0
- #- sudo route add default gw {gateway} {interface_name}
- - sudo ifup eth1
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- ############## TCP Cloud cfg01 node ##################
- #- sleep 120
- - echo "Preparing base OS"
- - which wget >/dev/null || (apt-get update; apt-get install -y wget)
- - echo "deb [arch=amd64] http://apt.tcpcloud.eu/nightly/ trusty main security extra tcp tcp-salt" > /etc/apt/sources.list
- - wget -O - http://apt.tcpcloud.eu/public.gpg | apt-key add -
- # the saltstack repo pins the same minion version (2016.3.3) for both xenial and trusty
- #- echo "deb http://repo.saltstack.com/apt/ubuntu/14.04/amd64/latest trusty main" > /etc/apt/sources.list.d/saltstack.list
- #- wget -O - https://repo.saltstack.com/apt/ubuntu/14.04/amd64/latest/SALTSTACK-GPG-KEY.pub | apt-key add -
- - echo "deb http://repo.saltstack.com/apt/ubuntu/14.04/amd64/{{ SALT_VERSION }} trusty main" > /etc/apt/sources.list.d/saltstack.list
- - wget -O - https://repo.saltstack.com/apt/ubuntu/14.04/amd64/{{ SALT_VERSION }}/SALTSTACK-GPG-KEY.pub | apt-key add -
-
- - apt-get clean
- - apt-get update
- - apt-get -y upgrade
-
- # Install common packages
- - apt-get install -y python-pip git
- - apt-get install -y curl tmux byobu iputils-ping traceroute htop tree
-
- - apt-get install -y salt-minion
-
- # To be configured from inventory/fuel-devops by operator or autotests
- - 'echo "id: {hostname}" >> /etc/salt/minion'
- - 'echo "master: 192.168.10.100" >> /etc/salt/minion'
-
- - echo "Restarting minion service with workarounds..."
- - rm -f /etc/salt/pki/minion/minion_master.pub
- - service salt-minion restart
- - sleep 5
- - rm -f /etc/salt/pki/minion/minion_master.pub
- - service salt-minion restart
-
- #- echo "Showing node metadata..."
- #- salt-call pillar.data
-
- #- echo "Running complete state ..."
- #- salt-call state.sls linux,openssh,salt
-
- # Workaround for bug https://mirantis.jira.com/browse/PROD-8214
- - apt-get -y install --install-recommends linux-generic-lts-xenial
- - reboot
-
- ########################################################
- # Node is ready, allow SSH access
- ##- echo "Allow SSH access ..."
- ##- sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto eth0
- iface eth0 inet dhcp
- auto eth1
- iface eth1 inet dhcp
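
The double delete-and-restart around /etc/salt/pki/minion/minion_master.pub works around a stale master key cached from the image build. The same workaround written as a loop, purely for illustration (not how the template phrases it):

    # drop any cached master key and restart until the minion re-pins
    # the key of the master configured in /etc/salt/minion
    for attempt in 1 2; do
        rm -f /etc/salt/pki/minion/minion_master.pub
        service salt-minion restart
        sleep 5
    done
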
diff --git a/tcp_tests/templates/virtual-mcp10-ovs.new/underlay--user-data1604.yaml b/tcp_tests/templates/virtual-mcp10-ovs.new/underlay--user-data1604.yaml
deleted file mode 100644
index f65587d..0000000
--- a/tcp_tests/templates/virtual-mcp10-ovs.new/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,97 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- ########################################################
- # Node is ready, allow SSH access
- - echo "Allow SSH access ..."
- - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
-
- ############## TCP Cloud cfg01 node ##################
- #- sleep 120
- - echo "Preparing base OS"
- - which wget >/dev/null || (apt-get update; apt-get install -y wget)
- - echo "deb [arch=amd64] http://apt.tcpcloud.eu/nightly/ xenial main security extra tcp tcp-salt" > /etc/apt/sources.list
- - wget -O - http://apt.tcpcloud.eu/public.gpg | apt-key add -
- # the saltstack repo pins the same minion version (2016.3.3) for both xenial and trusty
- #- echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/latest xenial main" > /etc/apt/sources.list.d/saltstack.list
- #- wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/latest/SALTSTACK-GPG-KEY.pub | apt-key add -
- - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/{{ SALT_VERSION}} xenial main" > /etc/apt/sources.list.d/saltstack.list
- - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/{{ SALT_VERSION }}/SALTSTACK-GPG-KEY.pub | apt-key add -
-
- - apt-get clean
- - apt-get update
- - apt-get -y upgrade
-
- # Install common packages
- - apt-get install -y python-pip git
- - apt-get install -y curl tmux byobu iputils-ping traceroute htop tree
-
- - apt-get install -y salt-minion
-
- # To be configured from inventory/fuel-devops by operator or autotests
- - 'echo "id: {hostname}" >> /etc/salt/minion'
- - 'echo "master: 192.168.10.100" >> /etc/salt/minion'
-
- - echo "Restarting minion service with workarounds..."
- - rm -f /etc/salt/pki/minion/minion_master.pub
- - service salt-minion restart
- - sleep 5
- - rm -f /etc/salt/pki/minion/minion_master.pub
- - service salt-minion restart
-
- #- echo "Showing node metadata..."
- #- salt-call pillar.data
-
- #- echo "Running complete state ..."
- #- salt-call state.sls linux,openssh,salt
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
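
Every user-data above creates the same 4G swap file; a quick verification sketch that works on both trusty and xenial:

    swapon -s                    # /swapfile should be listed as active
    grep '/swapfile' /etc/fstab  # persists across reboots
    free -m | grep -i swap       # total should be ~4096
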
diff --git a/tcp_tests/templates/virtual-mcp10-ovs.new/underlay.yaml b/tcp_tests/templates/virtual-mcp10-ovs.new/underlay.yaml
deleted file mode 100644
index 0a6b9f4..0000000
--- a/tcp_tests/templates/virtual-mcp10-ovs.new/underlay.yaml
+++ /dev/null
@@ -1,413 +0,0 @@
----
-aliases:
- default_interface_model:
- - &interface_model !os_env INTERFACE_MODEL, virtio
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'virtual-mcp10-ovs') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
-
-template:
- devops_settings:
- env_name: {{ os_env('ENV_NAME', 'virtual-mcp10-ovs') }}
-
- address_pools:
- private-pool01:
- net: 172.16.10.0/24:24
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+90, -10]
-
- admin-pool01:
- net: 192.168.10.0/24:24
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +90
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+90, -10]
-
- tenant-pool01:
- net: 10.1.0.0/24:24
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+10, -10]
-
- external-pool01:
- net: 10.16.0.0/24:24
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+10, -10]
-
-
- groups:
- - name: default
- driver:
- name: devops.driver.libvirt
- params:
- connection_string: !os_env CONNECTION_STRING, qemu:///system
- storage_pool_name: !os_env STORAGE_POOL_NAME, default
- stp: False
- hpet: False
- enable_acpi: true
- use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
- use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
-
- network_pools:
- admin: admin-pool01
- private: private-pool01
- tenant: tenant-pool01
- external: external-pool01
-
- l2_network_devices:
- private:
- address_pool: private-pool01
- dhcp: false
- forward:
- mode: route
-
- admin:
- address_pool: admin-pool01
- dhcp: true
- forward:
- mode: nat
-
- tenant:
- address_pool: tenant-pool01
- dhcp: false
-
- external:
- address_pool: external-pool01
- dhcp: true
- forward:
- mode: nat
-
-
- group_volumes:
- - name: cloudimage1404 # This name is used for 'backing_store' option for node volumes.
- source_image: !os_env IMAGE_PATH1404 # https://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-disk1.img or
- # http://apt.tcpcloud.eu/images/ubuntu-14-04-x64-201608231134.qcow2
- format: qcow2
- - name: cloudimage1604 # This name is used for 'backing_store' option for node volumes.
- source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img or
- # http://apt.tcpcloud.eu/images/ubuntu-16-04-x64-201608231004.qcow2
- format: qcow2
-
- nodes:
- - name: {{ HOSTNAME_CFG01 }}
- role: salt_master
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: !include underlay--meta-data.yaml
- cloudinit_user_data: !include underlay--user-data-cfg01.yaml
-
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config:
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: eth0
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1404
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: !include underlay--meta-data.yaml
- cloudinit_user_data: !include underlay--user-data1404.yaml
-
- interfaces: &interfaces
- - label: eth0
- l2_network_device: admin
- interface_model: *interface_model
- - label: eth1
- l2_network_device: private
- interface_model: *interface_model
- network_config: &network_config
- eth0:
- networks:
- - admin
- eth1:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: eth0
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1404
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: !include underlay--meta-data.yaml
- cloudinit_user_data: !include underlay--user-data1404.yaml
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CTL03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: eth0
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1404
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: !include underlay--meta-data.yaml
- cloudinit_user_data: !include underlay--user-data1404.yaml
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_PRX01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: eth0
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1404
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: !include underlay--meta-data.yaml
- cloudinit_user_data: !include underlay--user-data1404.yaml
-
- interfaces: *interfaces
- network_config: *network_config
-
-
- - name: {{ HOSTNAME_CMP01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: !include underlay--meta-data.yaml
- cloudinit_user_data: !include underlay--user-data1604.yaml
-
-
- interfaces: &all_interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- - label: ens5
- l2_network_device: tenant
- interface_model: *interface_model
- - label: ens6
- l2_network_device: external
- interface_model: *interface_model
- network_config: &all_network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
- ens5:
- networks:
- - tenant
- ens6:
- networks:
- - external
-
- - name: {{ HOSTNAME_CMP02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: !include underlay--meta-data.yaml
- cloudinit_user_data: !include underlay--user-data1604.yaml
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_GTW01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: !include underlay--meta-data.yaml
- cloudinit_user_data: !include underlay--user-data1604.yaml
-
- interfaces: *all_interfaces
- network_config: *all_network_config
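
The `+100`-style values in the address_pools above are offsets added to the pool's network address, so `default_cfg01: +100` on `172.16.10.0/24` resolves to 172.16.10.100. A sketch of the arithmetic, assuming the offset stays within the last octet:

    # resolve a devops-style '+100' offset against 172.16.10.0/24
    NET=172.16.10.0; OFFSET=100
    IFS=. read -r o1 o2 o3 o4 <<< "$NET"
    echo "${o1}.${o2}.${o3}.$((o4 + OFFSET))"   # -> 172.16.10.100
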
diff --git a/tcp_tests/templates/virtual-mcp10-ovs/common-services.yaml b/tcp_tests/templates/virtual-mcp10-ovs/common-services.yaml
deleted file mode 100644
index c42213f..0000000
--- a/tcp_tests/templates/virtual-mcp10-ovs/common-services.yaml
+++ /dev/null
@@ -1,104 +0,0 @@
-{% from 'virtual-mcp10-ovs/map.jinja' import HOSTNAME_CFG01 with context %}
-
-# Install support services
-
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Check the VIP
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' cmd.run 'ip a | grep 172.16.10.2' | grep -B1 172.16.10.2
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Install glusterfs
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Setup glusterfs on primary controller
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check the gluster status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
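
Two salt idioms do the heavy lifting in this file: pillar-based compound targets (`-C 'I@galera:slave'`) and `-b 1`, which batches the run one minion at a time so clustered services (keepalived, glusterfs setup, galera) are touched in rolling fashion rather than in parallel. A sketch for inspecting both:

    # which minions does a pillar-based compound target resolve to?
    salt -C 'I@galera:slave' test.ping
    # same state, rolled out one minion at a time instead of in parallel
    salt -C 'I@galera:slave' state.sls galera -b 1
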
diff --git a/tcp_tests/templates/virtual-mcp10-ovs/map.jinja b/tcp_tests/templates/virtual-mcp10-ovs/map.jinja
deleted file mode 100644
index 424909f..0000000
--- a/tcp_tests/templates/virtual-mcp10-ovs/map.jinja
+++ /dev/null
@@ -1,2 +0,0 @@
-{% set DOMAIN_NAME = os_env('LAB_CONFIG_NAME', 'virtual-mcp10-ovs') + '.local' %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
diff --git a/tcp_tests/templates/virtual-mcp10-ovs/openstack.yaml b/tcp_tests/templates/virtual-mcp10-ovs/openstack.yaml
deleted file mode 100644
index b8982b9..0000000
--- a/tcp_tests/templates/virtual-mcp10-ovs/openstack.yaml
+++ /dev/null
@@ -1,281 +0,0 @@
-{% from 'virtual-mcp10-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp10-ovs/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'virtual-mcp10-ovs/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'virtual-mcp10-ovs/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'virtual-mcp10-ovs/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-
-# Install OpenStack control services
-
-- description: Install keystone service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' state.sls keystone.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Populate keystone services/tenants/admins
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:client' state.sls keystone.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check keystone service-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; keystone service-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Install glance on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glance:server' state.sls glance -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Configure glusterfs.client on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glance:server' state.sls glusterfs.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Update fernet tokens for keystone server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' state.sls keystone.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check glance image-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; glance image-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Install nova on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nova:controller' state.sls nova -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check nova service-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; nova service-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Install cinder
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@cinder:controller' state.sls cinder -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check cinder list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; cinder list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Install neutron service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@neutron:server' state.sls neutron -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install neutron on gtw node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@neutron:gateway' state.sls neutron
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Check neutron agent-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; neutron agent-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Install heat service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@heat:server' state.sls heat -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check heat service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; heat resource-type-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Deploy horizon dashboard
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@horizon:server' state.sls horizon
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Deploy nginx proxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-# Install compute node
-
-- description: Apply formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Re-apply (as in the docs) formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check IP on computes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
- 'ip a'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 10, delay: 30}
- skip_fail: false
-
-
- # Upload cirros image
-
-- description: Upload cirros image on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- 'wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-i386-disk.img'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Register image in glance
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; glance --timeout 120 image-create --name cirros --visibility public --disk-format qcow2 --container-format bare --progress < /root/cirros-0.3.4-i386-disk.img'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create net04_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_external
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create subnet_net04
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create router
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-create net04_router01'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set gateway
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Add interface
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Allow all tcp
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; nova secgroup-add-rule default tcp 1 65535 0.0.0.0/0'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Allow all icmp
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-# Configure cinder-volume salt-call
-- description: Set disks 01
- cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set disks 02
- cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
- node_name: {{ HOSTNAME_CTL02 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set disks 03
- cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
- node_name: {{ HOSTNAME_CTL03 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create partitions 01
- cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create partitions 02
- cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL02 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create partitions 03
- cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL03 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
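
The cinder-volume section stops after `pvcreate`; cinder's LVM backend normally expects a volume group on top of that physical volume. A hypothetical follow-up, where the VG name `cinder-volumes` is the conventional default rather than anything taken from this template:

    # assumed follow-up to the pvcreate steps above (run on each ctl node)
    vgcreate cinder-volumes /dev/vdb1
    vgs cinder-volumes   # verify the VG before starting cinder-volume
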
diff --git a/tcp_tests/templates/virtual-mcp10-ovs/salt.yaml b/tcp_tests/templates/virtual-mcp10-ovs/salt.yaml
deleted file mode 100644
index 3b6c798..0000000
--- a/tcp_tests/templates/virtual-mcp10-ovs/salt.yaml
+++ /dev/null
@@ -1,93 +0,0 @@
-{% from 'virtual-mcp10-ovs/map.jinja' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp10-ovs/map.jinja' import LAB_CONFIG_NAME with context %}
-{% from 'virtual-mcp10-ovs/map.jinja' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
-# For other salt model repository parameters, see shared-salt.yaml
-{% set SALT_VERSION = os_env('SALT_VERSION', '2016.3') %}
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-# Install salt to the config node
-
-- description: Configure repository on the cfg01 node
- cmd:
- echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
- which wget >/dev/null || (apt-get update; apt-get install -y wget);
- echo "deb [arch=amd64] http://apt.mirantis.com/xenial nightly salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/{{ SALT_VERSION }} xenial main" > /etc/apt/sources.list.d/saltstack.list;
- wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/{{ SALT_VERSION }}/SALTSTACK-GPG-KEY.pub | apt-key add -;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 1}
- skip_fail: false
-
-- description: Update packages on cfg01
- cmd: apt-get clean; apt-get update
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 1}
- skip_fail: false
-
-- description: Install common packages on cfg01
- cmd: apt-get install -y python-pip wget curl tmux byobu iputils-ping traceroute htop tree
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 1}
- skip_fail: false
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon"') }}
-
-- description: Configure salt-minion on cfg01
- cmd: |
- [ ! -d /etc/salt/minion.d ] && mkdir -p /etc/salt/minion.d;
- cat << "EOF" >> /etc/salt/minion.d/minion.conf
- id: {{ HOSTNAME_CFG01 }}
- master: 127.0.0.1
- EOF
- apt-get install -y salt-minion;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 1}
- skip_fail: false
-
-
-- description: Restart services
- cmd: |
- systemctl restart salt-master;
- systemctl restart salt-minion;
- echo "Showing system info and metadata ...";
- salt-call --no-color grains.items;
- salt-call --no-color pillar.data;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
-
-#- description: Hack gtw node
-# cmd: salt 'gtw*' cmd.run "ip addr del 172.16.10.110/24 dev ens4; ip addr flush dev ens4";
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-#
-#- description: Hack cmp01 node
-# cmd: salt 'cmp01*' cmd.run "ip addr del 172.16.10.105/24 dev ens4; ip addr flush dev ens4";
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-#
-#- description: Hack cmp02 node
-# cmd: salt 'cmp02*' cmd.run "ip addr del 172.16.10.106/24 dev ens4; ip addr flush dev ens4";
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-#
diff --git a/tcp_tests/templates/virtual-mcp10-ovs/underlay--meta-data.yaml b/tcp_tests/templates/virtual-mcp10-ovs/underlay--meta-data.yaml
deleted file mode 100644
index 3699401..0000000
--- a/tcp_tests/templates/virtual-mcp10-ovs/underlay--meta-data.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-| # All the data below will be stored as a string object
- instance-id: iid-local1
- hostname: {hostname}
- local-hostname: {hostname}
diff --git a/tcp_tests/templates/virtual-mcp10-ovs/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp10-ovs/underlay--user-data-cfg01.yaml
deleted file mode 100644
index 75fef7c..0000000
--- a/tcp_tests/templates/virtual-mcp10-ovs/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,65 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- ########################################################
- # Node is ready, allow SSH access
- - echo "Allow SSH access ..."
- - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/virtual-mcp10-ovs/underlay--user-data1404.yaml b/tcp_tests/templates/virtual-mcp10-ovs/underlay--user-data1404.yaml
deleted file mode 100644
index 487ccfa..0000000
--- a/tcp_tests/templates/virtual-mcp10-ovs/underlay--user-data1404.yaml
+++ /dev/null
@@ -1,101 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifup eth0
- #- sudo route add default gw {gateway} {interface_name}
- - sudo ifup eth1
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- ############## TCP Cloud cfg01 node ##################
- #- sleep 120
- - echo "Preparing base OS"
- - which wget >/dev/null || (apt-get update; apt-get install -y wget)
- - echo "deb [arch=amd64] http://apt.tcpcloud.eu/nightly/ trusty main security extra tcp tcp-salt" > /etc/apt/sources.list
- - wget -O - http://apt.tcpcloud.eu/public.gpg | apt-key add -
- # the saltstack repo pins the same minion version (2016.3.3) for both xenial and trusty
- #- echo "deb http://repo.saltstack.com/apt/ubuntu/14.04/amd64/latest trusty main" > /etc/apt/sources.list.d/saltstack.list
- #- wget -O - https://repo.saltstack.com/apt/ubuntu/14.04/amd64/latest/SALTSTACK-GPG-KEY.pub | apt-key add -
- - echo "deb http://repo.saltstack.com/apt/ubuntu/14.04/amd64/{{ SALT_VERSION }} trusty main" > /etc/apt/sources.list.d/saltstack.list
- - wget -O - https://repo.saltstack.com/apt/ubuntu/14.04/amd64/{{ SALT_VERSION }}/SALTSTACK-GPG-KEY.pub | apt-key add -
-
- - apt-get clean
- - apt-get update
- - apt-get -y upgrade
-
- # Install common packages
- - apt-get install -y python-pip git
- - apt-get install -y curl tmux byobu iputils-ping traceroute htop tree
-
- - apt-get install -y salt-minion
-
- # To be configured from inventory/fuel-devops by operator or autotests
- - 'echo "id: {hostname}" >> /etc/salt/minion'
- - 'echo "master: 192.168.10.100" >> /etc/salt/minion'
-
- - echo "Restarting minion service with workarounds..."
- - rm -f /etc/salt/pki/minion/minion_master.pub
- - service salt-minion restart
- - sleep 5
- - rm -f /etc/salt/pki/minion/minion_master.pub
- - service salt-minion restart
-
- #- echo "Showing node metadata..."
- #- salt-call pillar.data
-
- #- echo "Running complete state ..."
- #- salt-call state.sls linux,openssh,salt
-
- # Workaround for bug https://mirantis.jira.com/browse/PROD-8214
- - apt-get -y install --install-recommends linux-generic-lts-xenial
- - reboot
-
- ########################################################
- # Node is ready, allow SSH access
- ##- echo "Allow SSH access ..."
- ##- sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto eth0
- iface eth0 inet dhcp
- auto eth1
- iface eth1 inet dhcp
diff --git a/tcp_tests/templates/virtual-mcp10-ovs/underlay--user-data1604.yaml b/tcp_tests/templates/virtual-mcp10-ovs/underlay--user-data1604.yaml
deleted file mode 100644
index 3e0c1fd..0000000
--- a/tcp_tests/templates/virtual-mcp10-ovs/underlay--user-data1604.yaml
+++ /dev/null
@@ -1,97 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- ########################################################
- # Node is ready, allow SSH access
- - echo "Allow SSH access ..."
- - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
-
- ############## TCP Cloud cfg01 node ##################
- #- sleep 120
- - echo "Preparing base OS"
- - which wget >/dev/null || (apt-get update; apt-get install -y wget)
- - echo "deb [arch=amd64] http://apt.tcpcloud.eu/nightly/ xenial main security extra tcp tcp-salt" > /etc/apt/sources.list
- - wget -O - http://apt.tcpcloud.eu/public.gpg | apt-key add -
- # the saltstack repo pins the same minion version (2016.3.3) for both xenial and trusty
- #- echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/latest xenial main" > /etc/apt/sources.list.d/saltstack.list
- #- wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/latest/SALTSTACK-GPG-KEY.pub | apt-key add -
- - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/{{ SALT_VERSION }} xenial main" > /etc/apt/sources.list.d/saltstack.list
- - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/{{ SALT_VERSION }}/SALTSTACK-GPG-KEY.pub | apt-key add -
-
- - apt-get clean
- - apt-get update
- - apt-get -y upgrade
-
- # Install common packages
- - apt-get install -y python-pip git
- - apt-get install -y curl tmux byobu iputils-ping traceroute htop tree
-
- - apt-get install -y salt-minion
-
- # To be configured from inventory/fuel-devops by operator or autotests
- - 'echo "id: {hostname}" >> /etc/salt/minion'
- - 'echo "master: 192.168.10.100" >> /etc/salt/minion'
-
- - echo "Restarting minion service with workarounds..."
- - rm -f /etc/salt/pki/minion/minion_master.pub
- - service salt-minion restart
- - sleep 5
- - rm -f /etc/salt/pki/minion/minion_master.pub
- - service salt-minion restart
-
- #- echo "Showing node metadata..."
- #- salt-call pillar.data
-
- #- echo "Running complete state ..."
- #- salt-call state.sls linux,openssh,salt
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
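The 1604 user-data removed above provisions a 4G swap file (fallocate, mkswap, swapon, plus an /etc/fstab entry). An illustrative check, not part of the deleted template, that such a swap file is actually registered with the kernel (assumes Linux and the /proc/swaps layout):

    # Sketch only: parse /proc/swaps to confirm /swapfile is active.
    def swap_active(path='/swapfile'):
        with open('/proc/swaps') as swaps:
            rows = list(swaps)[1:]  # first row is the column header
            return any(row.split()[0] == path for row in rows)

    print(swap_active())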
diff --git a/tcp_tests/templates/virtual-mcp10-ovs/underlay.yaml b/tcp_tests/templates/virtual-mcp10-ovs/underlay.yaml
deleted file mode 100644
index 0a6b9f4..0000000
--- a/tcp_tests/templates/virtual-mcp10-ovs/underlay.yaml
+++ /dev/null
@@ -1,413 +0,0 @@
----
-aliases:
- default_interface_model:
- - &interface_model !os_env INTERFACE_MODEL, virtio
-
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'virtual-mcp10-ovs') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
-
-template:
- devops_settings:
- env_name: {{ os_env('ENV_NAME', 'virtual-mcp10-ovs') }}
-
- address_pools:
- private-pool01:
- net: 172.16.10.0/24:24
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+90, -10]
-
- admin-pool01:
- net: 192.168.10.0/24:24
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +90
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+90, -10]
-
- tenant-pool01:
- net: 10.1.0.0/24:24
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+10, -10]
-
- external-pool01:
- net: 10.16.0.0/24:24
- params:
- ip_reserved:
- gateway: +1
- l2_network_device: +1
- default_{{ HOSTNAME_CFG01 }}: +100
- default_{{ HOSTNAME_CTL01 }}: +101
- default_{{ HOSTNAME_CTL02 }}: +102
- default_{{ HOSTNAME_CTL03 }}: +103
- default_{{ HOSTNAME_CMP01 }}: +105
- default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_GTW01 }}: +110
- default_{{ HOSTNAME_PRX01 }}: +121
- ip_ranges:
- dhcp: [+10, -10]
-
-
- groups:
- - name: default
- driver:
- name: devops.driver.libvirt
- params:
- connection_string: !os_env CONNECTION_STRING, qemu:///system
- storage_pool_name: !os_env STORAGE_POOL_NAME, default
- stp: False
- hpet: False
- enable_acpi: true
- use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
- use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
-
- network_pools:
- admin: admin-pool01
- private: private-pool01
- tenant: tenant-pool01
- external: external-pool01
-
- l2_network_devices:
- private:
- address_pool: private-pool01
- dhcp: false
- forward:
- mode: route
-
- admin:
- address_pool: admin-pool01
- dhcp: true
- forward:
- mode: nat
-
- tenant:
- address_pool: tenant-pool01
- dhcp: false
-
- external:
- address_pool: external-pool01
- dhcp: true
- forward:
- mode: nat
-
-
- group_volumes:
- - name: cloudimage1404 # This name is used as the 'backing_store' option for node volumes.
- source_image: !os_env IMAGE_PATH1404 # https://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-disk1.img or
- # http://apt.tcpcloud.eu/images/ubuntu-14-04-x64-201608231134.qcow2
- format: qcow2
- - name: cloudimage1604 # This name is used as the 'backing_store' option for node volumes.
- source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img or
- # http://apt.tcpcloud.eu/images/ubuntu-16-04-x64-201608231004.qcow2
- format: qcow2
-
- nodes:
- - name: {{ HOSTNAME_CFG01 }}
- role: salt_master
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: !include underlay--meta-data.yaml
- cloudinit_user_data: !include underlay--user-data-cfg01.yaml
-
- interfaces:
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- network_config:
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: eth0
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1404
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: !include underlay--meta-data.yaml
- cloudinit_user_data: !include underlay--user-data1404.yaml
-
- interfaces: &interfaces
- - label: eth0
- l2_network_device: admin
- interface_model: *interface_model
- - label: eth1
- l2_network_device: private
- interface_model: *interface_model
- network_config: &network_config
- eth0:
- networks:
- - admin
- eth1:
- networks:
- - private
-
- - name: {{ HOSTNAME_CTL02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: eth0
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1404
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: !include underlay--meta-data.yaml
- cloudinit_user_data: !include underlay--user-data1404.yaml
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_CTL03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: eth0
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1404
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: !include underlay--meta-data.yaml
- cloudinit_user_data: !include underlay--user-data1404.yaml
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_PRX01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: eth0
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1404
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: !include underlay--meta-data.yaml
- cloudinit_user_data: !include underlay--user-data1404.yaml
-
- interfaces: *interfaces
- network_config: *network_config
-
-
- - name: {{ HOSTNAME_CMP01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: !include underlay--meta-data.yaml
- cloudinit_user_data: !include underlay--user-data1604.yaml
-
-
- interfaces: &all_interfaces
- - label: ens3
- l2_network_device: admin
- interface_model: *interface_model
- - label: ens4
- l2_network_device: private
- interface_model: *interface_model
- - label: ens5
- l2_network_device: tenant
- interface_model: *interface_model
- - label: ens6
- l2_network_device: external
- interface_model: *interface_model
- network_config: &all_network_config
- ens3:
- networks:
- - admin
- ens4:
- networks:
- - private
- ens5:
- networks:
- - tenant
- ens6:
- networks:
- - external
-
- - name: {{ HOSTNAME_CMP02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: !include underlay--meta-data.yaml
- cloudinit_user_data: !include underlay--user-data1604.yaml
-
- interfaces: *all_interfaces
- network_config: *all_network_config
-
- - name: {{ HOSTNAME_GTW01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for storing the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: !include underlay--meta-data.yaml
- cloudinit_user_data: !include underlay--user-data1604.yaml
-
- interfaces: *all_interfaces
- network_config: *all_network_config
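The underlay template removed above assigns node addresses by relative offsets within each pool (gateway: +1, per-node entries such as +100, dhcp ranges such as [+90, -10]). A sketch of how those offsets could resolve to concrete addresses, under the assumption that positive offsets count forward from the network address and negative offsets count back from the broadcast address (these semantics are assumed for illustration, not stated in the template):

    import ipaddress

    # Assumed offset semantics for the address_pools entries above.
    def resolve(net, offset):
        network = ipaddress.ip_network(net)
        base = (network.network_address if offset >= 0
                else network.broadcast_address)
        return base + offset

    print(resolve('172.16.10.0/24', 100))   # 172.16.10.100
    print(resolve('172.16.10.0/24', -10))   # 172.16.10.245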
diff --git a/tcp_tests/tests/system/test_mcp10_ovs_vxlan_install.py b/tcp_tests/tests/system/test_mcp10_ovs_vxlan_install.py
deleted file mode 100644
index 570af89..0000000
--- a/tcp_tests/tests/system/test_mcp10_ovs_vxlan_install.py
+++ /dev/null
@@ -1,37 +0,0 @@
-# Copyright 2017 Mirantis, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import pytest
-
-from tcp_tests import logger
-
-LOG = logger.logger
-
-
-@pytest.mark.deploy
-class TestMCP10OvsVxlanInstall(object):
- """Test class for testing mcp10 vxlan deploy"""
-
- @pytest.mark.fail_snapshot
- def test_mcp10_ovs_vxlan_install(self, underlay, openstack_deployed,
- show_step):
- """Test for deploying an mcp environment and check it
-
- Scenario:
- 1. Prepare salt on hosts
- 2. Setup controller nodes
- 3. Setup compute nodes
-
- """
- LOG.info("*************** DONE **************")
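The test removed above has an effectively empty body: the scenario steps in its docstring are evidently carried out by the requested fixtures, so asking for openstack_deployed (which depends on underlay) is what drives the deployment. A hypothetical sketch of that fixture-driven pattern; the fixture bodies here are stand-ins, not the real tcp_tests fixtures:

    import pytest

    @pytest.fixture
    def underlay():
        # Stand-in: the real fixture provisions VMs and returns a manager.
        return 'underlay-manager'

    @pytest.fixture
    def openstack_deployed(underlay):
        # Stand-in: the real fixture deploys OpenStack on the underlay.
        return 'openstack-manager'

    def test_deploy(underlay, openstack_deployed):
        # Requesting the fixtures is what performs the deployment;
        # the body only needs to verify or log the outcome.
        assert openstack_deployed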