Merge "Add testrail reporter for cicd deployments"
diff --git a/tcp_tests/managers/sl_manager.py b/tcp_tests/managers/sl_manager.py
index 0f81596..2c1ba2d 100644
--- a/tcp_tests/managers/sl_manager.py
+++ b/tcp_tests/managers/sl_manager.py
@@ -52,6 +52,7 @@
for item in sl_vip_address_pillars
for node, ip in item.items() if ip])
if not sl_vip_ip:
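+            # Fall back to the keepalived VIP pillar on Prometheus server nodes (mon*)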
+ tgt = 'I@prometheus:server:enabled:True and mon*'
pillar = 'keepalived:cluster:instance:VIP:address'
sl_vip_address_pillars = self._salt.get_pillar(tgt=tgt,
pillar=pillar)
diff --git a/tcp_tests/templates/cookied-bm-contrail40-nfv/openstack.yaml b/tcp_tests/templates/cookied-bm-contrail40-nfv/openstack.yaml
index 8675650..325ba87 100644
--- a/tcp_tests/templates/cookied-bm-contrail40-nfv/openstack.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail40-nfv/openstack.yaml
@@ -140,7 +140,7 @@
-C 'I@opencontrail:database and *01*' state.sls opencontrail.client
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 3, delay: 30}
- skip_fail: true
+ skip_fail: false
- description: Check contrail status
cmd: sleep 15; salt --hard-crash --state-output=mixed --state-verbose=False
diff --git a/tcp_tests/templates/cookied-bm-contrail40-nfv/salt.yaml b/tcp_tests/templates/cookied-bm-contrail40-nfv/salt.yaml
index 6e6cfaa..32afbfa 100644
--- a/tcp_tests/templates/cookied-bm-contrail40-nfv/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail40-nfv/salt.yaml
@@ -40,12 +40,12 @@
retry: {count: 1, delay: 10}
skip_fail: false
-- description: "WR for changing image to proposed"
+- description: "Change path to internal storage for salt.control images"
cmd: |
set -e;
- # Add message_queu host for opencontrail
. /root/venv-reclass-tools/bin/activate;
- reclass-tools add-key parameters._param.salt_control_xenial_image 'http://ci.mcp.mirantis.net:8085/images/ubuntu-16-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
+ reclass-tools add-key parameters._param.salt_control_xenial_image 'https://apt.mcp.mirantis.net/images/ubuntu-16-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
+ reclass-tools add-key parameters._param.salt_control_trusty_image 'https://apt.mcp.mirantis.net/images/ubuntu-14-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
@@ -102,6 +102,15 @@
retry: {count: 2, delay: 10}
skip_fail: false
+- description: WR for mounting 1G hugepages before linux.state
+ cmd: |
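+    # Apply the hugepages state, then mount a 1G hugetlbfs and reserve 16 x 1G pages (16 GiB) for KVM guests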
+ salt 'cmp*' state.sls linux.system.hugepages;
+ salt 'cmp*' cmd.run "mount -o mode=775,pagesize=1G -t hugetlbfs Hugetlbfs-kvm /mnt/hugepages_1G";
+ salt 'cmp*' cmd.run "echo 16 | sudo tee /sys/kernel/mm/hugepages/hugepages-1048576kB/nr_hugepages";
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
########################################
diff --git a/tcp_tests/templates/cookied-bm-contrail40/openstack.yaml b/tcp_tests/templates/cookied-bm-contrail40/openstack.yaml
index c8a054e..471fc68 100644
--- a/tcp_tests/templates/cookied-bm-contrail40/openstack.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail40/openstack.yaml
@@ -142,6 +142,13 @@
retry: {count: 3, delay: 30}
skip_fail: true
+- description: Highstate analytics
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'nal*' state.highstate && sleep 15;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
- description: Check contrail status
cmd: sleep 15; salt --hard-crash --state-output=mixed --state-verbose=False
-C 'I@opencontrail:database' cmd.run 'doctrail all contrail-status'
diff --git a/tcp_tests/templates/cookied-bm-contrail40/salt-context-cookiecutter-contrail.yaml b/tcp_tests/templates/cookied-bm-contrail40/salt-context-cookiecutter-contrail.yaml
index 85182a6..1125a8f 100644
--- a/tcp_tests/templates/cookied-bm-contrail40/salt-context-cookiecutter-contrail.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail40/salt-context-cookiecutter-contrail.yaml
@@ -217,7 +217,7 @@
stacklight_log_node02_hostname: log02
stacklight_log_node03_address: 10.167.8.63
stacklight_log_node03_hostname: log03
- stacklight_long_term_storage_type: influxdb
+ stacklight_long_term_storage_type: prometheus
stacklight_monitor_address: 10.167.8.70
stacklight_monitor_hostname: mon
stacklight_monitor_node01_address: 10.167.8.71
diff --git a/tcp_tests/templates/cookied-bm-contrail40/salt.yaml b/tcp_tests/templates/cookied-bm-contrail40/salt.yaml
index 7e54fd4..494333a 100644
--- a/tcp_tests/templates/cookied-bm-contrail40/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail40/salt.yaml
@@ -40,12 +40,12 @@
retry: {count: 1, delay: 10}
skip_fail: false
-- description: "WR for changing image to proposed"
+- description: "Change path to internal storage for salt.control images"
cmd: |
set -e;
- # Add message_queu host for opencontrail
. /root/venv-reclass-tools/bin/activate;
- reclass-tools add-key parameters._param.salt_control_xenial_image 'http://ci.mcp.mirantis.net:8085/images/ubuntu-16-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
+ reclass-tools add-key parameters._param.salt_control_xenial_image 'https://apt.mcp.mirantis.net/images/ubuntu-16-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
+ reclass-tools add-key parameters._param.salt_control_trusty_image 'https://apt.mcp.mirantis.net/images/ubuntu-14-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
@@ -72,7 +72,7 @@
- description: Update minion information
cmd: |
- salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_grains &&
+ salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all &&
salt --hard-crash --state-output=mixed --state-verbose=False '*' mine.update &&
salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar && sleep 10
node_name: {{ HOSTNAME_CFG01 }}
diff --git a/tcp_tests/templates/cookied-bm-contrail40/sl.yaml b/tcp_tests/templates/cookied-bm-contrail40/sl.yaml
index cdb0daf..49cf6a8 100644
--- a/tcp_tests/templates/cookied-bm-contrail40/sl.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail40/sl.yaml
@@ -260,7 +260,7 @@
skip_fail: false
- description: Run salt minion to create cert files
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion -b 1
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 3, delay: 15}
skip_fail: false
diff --git a/tcp_tests/templates/cookied-bm-dpdk-pipeline/post.yaml b/tcp_tests/templates/cookied-bm-dpdk-pipeline/post.yaml
index 61b1bd4..fd07061 100644
--- a/tcp_tests/templates/cookied-bm-dpdk-pipeline/post.yaml
+++ b/tcp_tests/templates/cookied-bm-dpdk-pipeline/post.yaml
@@ -35,7 +35,7 @@
- description: Create net04
cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04 --provider:network_type vxlan'
+ '. /root/keystonercv3; neutron net-create net04 --provider:network_type flat'
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 30}
skip_fail: false
diff --git a/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt-context-cookiecutter-openstack_ovs_dpdk.yaml b/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt-context-cookiecutter-openstack_ovs_dpdk.yaml
index 72c8bf4..0d9e84c 100644
--- a/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt-context-cookiecutter-openstack_ovs_dpdk.yaml
+++ b/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt-context-cookiecutter-openstack_ovs_dpdk.yaml
@@ -77,7 +77,7 @@
control_network_netmask: 255.255.255.0
control_network_subnet: 10.167.11.0/24
control_vlan: '2416'
- cookiecutter_template_branch: master
+ cookiecutter_template_branch: proposed
cookiecutter_template_credentials: gerrit
cookiecutter_template_url: https://gerrit.mcp.mirantis.net/mk/cookiecutter-templates.git
deploy_network_gateway: 172.16.49.62
@@ -161,12 +161,14 @@
openstack_nfv_sriov_enabled: 'True'
openstack_nova_compute_hugepages_count: '16'
openstack_nova_compute_nfv_req_enabled: 'True'
- openstack_nfv_sriov_network: physnet1
+ openstack_nfv_sriov_network: physnet2
openstack_nfv_sriov_numvfs: '7'
openstack_nfv_sriov_pf_nic: enp5s0f1
openstack_nova_cpu_pinning: 6,7,8,9,10,11
+ openstack_nova_compute_reserved_host_memory_mb: '900'
openstack_ovs_dvr_enabled: 'False'
- openstack_ovs_encapsulation_type: vxlan
+ openstack_ovs_encapsulation_type: vlan
+ openstack_ovs_encapsulation_vlan_range: 2418:2420
openstack_proxy_address: 10.167.11.80
openstack_proxy_hostname: prx
openstack_proxy_node01_address: 10.167.11.81
@@ -187,7 +189,7 @@
salt_master_address: 10.167.11.2
salt_master_hostname: cfg01
salt_master_management_address: 172.16.49.2
- shared_reclass_branch: master
+ shared_reclass_branch: proposed
shared_reclass_url: https://gerrit.mcp.mirantis.net/salt-models/reclass-system.git
stacklight_enabled: 'False'
stacklight_version: '2'
@@ -197,4 +199,5 @@
tenant_network_subnet: 10.167.12.0/24
tenant_vlan: '2417'
upstream_proxy_enabled: 'False'
- use_default_network_scheme: 'True'
\ No newline at end of file
+ use_default_network_scheme: 'True'
+ sriov_network_subnet: 192.168.10.0/24
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt.yaml b/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt.yaml
index a25e0d2..8a6d342 100644
--- a/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt.yaml
@@ -33,6 +33,18 @@
{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
+- description: "WR for changing VCP images path to internal storage"
+ cmd: |
+ set -e;
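+    # Bootstrap reclass-tools in an isolated virtualenv, then point the VCP image URL at the internal mirror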
+ apt-get -y install python-virtualenv python-pip build-essential python-dev libssl-dev;
+ [[ -d /root/venv-reclass-tools ]] || virtualenv /root/venv-reclass-tools;
+ . /root/venv-reclass-tools/bin/activate;
+ pip install git+https://github.com/dis-xcom/reclass-tools;
+ reclass-tools add-key parameters._param.salt_control_xenial_image 'https://apt.mcp.mirantis.net/images/ubuntu-16-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
{{ SHARED.MACRO_GENERATE_INVENTORY() }}
{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
node_name: {{ HOSTNAME_CFG01 }}
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/openstack.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/openstack.yaml
index 44badb5..61d693b 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/openstack.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/openstack.yaml
@@ -181,14 +181,14 @@
- description: Create net04_external
cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
+ '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type vlan --provider:segmentation_id 2403'
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 30}
skip_fail: false
- description: Create subnet_external
cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04_ext 172.17.42.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start=172.17.42.10,end=172.17.42.60 --gateway 172.17.42.1'
+ '. /root/keystonercv3; neutron subnet-create net04_ext 172.17.42.0/26 --name net04_ext__subnet --disable-dhcp --allocation-pool start=172.17.42.10,end=172.17.42.60 --gateway 172.17.42.1'
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 30}
skip_fail: false
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml
index f1416d5..06f9f1f 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml
@@ -20,8 +20,8 @@
context_seed: Psupdi5ne1kCk31iDWV7fhbHnBALIr3SWhce7Z01jCaMwlAhGKxeLPFPQ9CgYzJD
control_network_netmask: 255.255.255.0
control_network_subnet: 10.167.4.0/24
- control_vlan: '2403'
- cookiecutter_template_branch: master
+ control_vlan: '2405'
+ cookiecutter_template_branch: proposed
cookiecutter_template_credentials: gerrit
cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/mk/cookiecutter-templates.git
deploy_network_gateway: 172.16.164.1
@@ -193,3 +193,5 @@
upstream_proxy_enabled: 'False'
use_default_network_scheme: 'True'
vnf_onboarding_enabled: 'False'
+ ex_vlan: '2403'
+ ex_network_netmask: 255.255.255.192
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-lab03-environment.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-lab03-environment.yaml
index a70b9f3..c92c9b9 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-lab03-environment.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-lab03-environment.yaml
@@ -70,8 +70,8 @@
role: single_mgm_dhcp
enp9s0f1:
role: bond0_ab_dvr_vxlan_ctl_mesh_floating
- single_address: 10.167.4.105
- tenant_address: 10.167.6.105
+ single_address: 10.167.4.106
+ tenant_address: 10.167.6.106
gtw01.cookied-bm-mcp-dvr-vxlan.local:
reclass_storage_name: openstack_gateway_node01
@@ -82,7 +82,8 @@
enp2s0f0:
role: single_mgm_dhcp
enp2s0f1:
- role: bond0_ab_dvr_vxlan_ctl_mesh_floating
+ role: bond0_ab_dvr_vxlan_ctl_mesh_external
+ ex_address: 172.17.42.4
gtw02.cookied-bm-mcp-dvr-vxlan.local:
reclass_storage_name: openstack_gateway_node02
@@ -93,4 +94,5 @@
enp2s0f0:
role: single_mgm_dhcp
enp2s0f1:
- role: bond0_ab_dvr_vxlan_ctl_mesh_floating
+ role: bond0_ab_dvr_vxlan_ctl_mesh_external
+ ex_address: 172.17.42.5
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt.yaml
index 381b780..eb7e213 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt.yaml
@@ -21,14 +21,14 @@
{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-- description: "WR for changing image to proposed"
+- description: "WR for changing VCP images path to internal storage"
cmd: |
set -e;
apt-get -y install python-virtualenv python-pip build-essential python-dev libssl-dev;
[[ -d /root/venv-reclass-tools ]] || virtualenv /root/venv-reclass-tools;
. /root/venv-reclass-tools/bin/activate;
pip install git+https://github.com/dis-xcom/reclass-tools;
- reclass-tools add-key parameters._param.salt_control_xenial_image 'http://ci.mcp.mirantis.net:8085/images/ubuntu-16-04-x64-mcpproposed.qcow2' /srv/salt/reclass/classes/cluster/cookied-bm-mcp-dvr-vxlan/infra/init.yml;
+ reclass-tools add-key parameters._param.salt_control_xenial_image 'https://apt.mcp.mirantis.net/images/ubuntu-16-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
@@ -127,14 +127,6 @@
{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-- description: Add cpm nodes to /etc/hosts
- cmd: |
- salt --hard-crash --state-output=mixed --state-verbose=False -C '*' cmd.run "echo '10.167.4.105 cmp01.cookied-bm-mcp-dvr-vxlan.local cmp01' >> /etc/hosts";
- salt --hard-crash --state-output=mixed --state-verbose=False -C '*' cmd.run "echo '10.167.4.106 cmp02.cookied-bm-mcp-dvr-vxlan.local cmp02' >> /etc/hosts";
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/salt.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/salt.yaml
index 6ebf0db..f30331e 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/salt.yaml
@@ -41,6 +41,16 @@
retry: {count: 1, delay: 10}
skip_fail: false
+- description: "Change path to internal storage for salt.control images"
+ cmd: |
+ set -e;
+ . /root/venv-reclass-tools/bin/activate;
+ reclass-tools add-key parameters._param.salt_control_xenial_image 'https://apt.mcp.mirantis.net/images/ubuntu-16-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
+ reclass-tools add-key parameters._param.salt_control_trusty_image 'https://apt.mcp.mirantis.net/images/ubuntu-14-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
{{ SHARED.MACRO_GENERATE_INVENTORY() }}
- description: "WR for dpdk pci to be in correct quotes"
@@ -76,6 +86,15 @@
retry: {count: 1, delay: 5}
skip_fail: true
+- description: WR for mounting 1G hugepages before linux.state
+ cmd: |
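+    # Apply the hugepages state, then mount a 1G hugetlbfs and reserve 16 x 1G pages for KVM guests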
+ salt 'cmp*' state.sls linux.system.hugepages;
+ salt 'cmp*' cmd.run "mount -o mode=775,pagesize=1G -t hugetlbfs Hugetlbfs-kvm /mnt/hugepages_1G";
+ salt 'cmp*' cmd.run "echo 16 | sudo tee /sys/kernel/mm/hugepages/hugepages-1048576kB/nr_hugepages";
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
- description: Temporary WR for correct bridge name according to environment templates
cmd: |
sed -i 's/br\-ctl/br\_ctl/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt-context-cookiecutter-contrail.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt-context-cookiecutter-contrail.yaml
index bcccb66..e57820e 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt-context-cookiecutter-contrail.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt-context-cookiecutter-contrail.yaml
@@ -168,7 +168,7 @@
stacklight_log_node02_hostname: log02
stacklight_log_node03_address: 10.167.8.63
stacklight_log_node03_hostname: log03
- stacklight_long_term_storage_type: influxdb
+ stacklight_long_term_storage_type: prometheus
stacklight_monitor_address: 10.167.8.70
stacklight_monitor_hostname: mon
stacklight_monitor_node01_address: 10.167.8.71
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt.yaml
index 926b149..a214666 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt.yaml
@@ -42,6 +42,16 @@
retry: {count: 1, delay: 10}
skip_fail: false
+- description: "Change path to internal storage for salt.control images"
+ cmd: |
+ set -e;
+ . /root/venv-reclass-tools/bin/activate;
+ reclass-tools add-key parameters._param.salt_control_xenial_image 'https://apt.mcp.mirantis.net/images/ubuntu-16-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
+ reclass-tools add-key parameters._param.salt_control_trusty_image 'https://apt.mcp.mirantis.net/images/ubuntu-14-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
{%- if CUSTOM_VCP_TRUSTY_IMAGE_URL != '' %}
- description: "Change trusty image to custom"
diff --git a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt-context-cookiecutter-openstack_ovs_dpdk.yaml b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt-context-cookiecutter-openstack_ovs_dpdk.yaml
index fba1474..653fc81 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt-context-cookiecutter-openstack_ovs_dpdk.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt-context-cookiecutter-openstack_ovs_dpdk.yaml
@@ -77,7 +77,7 @@
control_network_netmask: 255.255.255.0
control_network_subnet: 10.167.11.0/24
control_vlan: '2416'
- cookiecutter_template_branch: master
+ cookiecutter_template_branch: proposed
cookiecutter_template_credentials: gerrit
cookiecutter_template_url: https://gerrit.mcp.mirantis.net/mk/cookiecutter-templates.git
deploy_network_gateway: 172.16.49.62
@@ -161,12 +161,14 @@
openstack_nfv_sriov_enabled: 'True'
openstack_nova_compute_hugepages_count: '16'
openstack_nova_compute_nfv_req_enabled: 'True'
- openstack_nfv_sriov_network: physnet1
+ openstack_nfv_sriov_network: physnet2
openstack_nfv_sriov_numvfs: '7'
openstack_nfv_sriov_pf_nic: enp5s0f1
openstack_nova_cpu_pinning: 6,7,8,9,10,11
+ openstack_nova_compute_reserved_host_memory_mb: '900'
openstack_ovs_dvr_enabled: 'False'
- openstack_ovs_encapsulation_type: vxlan
+ openstack_ovs_encapsulation_type: vlan
+ openstack_ovs_encapsulation_vlan_range: 2418:2420
openstack_proxy_address: 10.167.11.80
openstack_proxy_hostname: prx
openstack_proxy_node01_address: 10.167.11.81
@@ -187,7 +189,7 @@
salt_master_address: 10.167.11.2
salt_master_hostname: cfg01
salt_master_management_address: 172.16.49.2
- shared_reclass_branch: master
+ shared_reclass_branch: proposed
shared_reclass_url: https://gerrit.mcp.mirantis.net/salt-models/reclass-system.git
stacklight_enabled: 'False'
stacklight_version: '2'
@@ -197,4 +199,5 @@
tenant_network_subnet: 10.167.12.0/24
tenant_vlan: '2417'
upstream_proxy_enabled: 'False'
- use_default_network_scheme: 'True'
\ No newline at end of file
+ use_default_network_scheme: 'True'
+ sriov_network_subnet: 192.168.10.0/24
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt-context-environment.yaml b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt-context-environment.yaml
index 0bdcf96..8aa8577 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt-context-environment.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt-context-environment.yaml
@@ -100,7 +100,7 @@
role: single_mgm
deploy_address: 172.16.49.5
enp3s0f1:
- role: bond0_ab_dvr_vxlan_ctl_mesh_floating
+ role: bond0_ab_dvr_vlan_ctl_prv_floating
gtw02.cookied-bm-mcp-ovs-dpdk.local:
reclass_storage_name: openstack_gateway_node02
@@ -112,4 +112,4 @@
role: single_mgm
deploy_address: 172.16.49.4
enp3s0f1:
- role: bond0_ab_dvr_vxlan_ctl_mesh_floating
+ role: bond0_ab_dvr_vlan_ctl_prv_floating
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/core.yaml b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/core.yaml
index 99b3aa7..4ab0f03 100644
--- a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/core.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/core.yaml
@@ -5,10 +5,16 @@
{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
# Install support services
-- description: Create and distribute SSL certificates for services using salt state
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' state.sls salt
+- description: Sync all
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Create and distribute SSL certificates for services using salt state
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' state.sls salt -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
skip_fail: true
- description: Install docker
@@ -39,20 +45,6 @@
retry: {count: 1, delay: 10}
skip_fail: true
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Check RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run "rabbitmqctl cluster_status"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
- description: Install haproxy
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-C 'I@haproxy:proxy' state.sls haproxy
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/k8s.yaml b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/k8s.yaml
index 45ad04f..d559d73 100644
--- a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/k8s.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/k8s.yaml
@@ -1,4 +1,39 @@
-{% from 'k8s-ha-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
+
+- description: Install keepalived on primary controller
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keepalived:cluster and *01*' state.sls keepalived
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install keepalived
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keepalived:cluster' state.sls keepalived
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install haproxy
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@haproxy:proxy' state.sls haproxy
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install etcd
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@etcd:server' state.sls etcd.server.service
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install certs
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@etcd:server' state.sls salt.minion -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
- description: Install etcd
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
@@ -19,7 +54,7 @@
-C 'I@kubernetes:master' state.sls kubernetes.master.kube-addons
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
- skip_fail: true
+ skip_fail: false
- description: Install Kubernetes components
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
@@ -37,13 +72,6 @@
retry: {count: 1, delay: 5}
skip_fail: false
-- description: Install Opencontrail db on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:database and *01*' state.sls opencontrail.database
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 20}
- skip_fail: false
-
- description: Configure OpenContrail as an add-on for Kubernetes
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-C 'I@kubernetes:master' state.sls kubernetes.master.kube-addons
@@ -87,13 +115,6 @@
retry: {count: 1, delay: 5}
skip_fail: false
-# - description: Reboot contrail computes
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-# -C 'I@opencontrail:compute' cmd.run 'reboot'
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 5}
-# skip_fail: false
-
- description: Reboot contrail computes
cmd: salt --timeout=600 -C 'I@opencontrail:compute' system.reboot
node_name: {{ HOSTNAME_CFG01 }}
@@ -140,125 +161,3 @@
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
-
-# - description: Install Opencontrail db on all nodes
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-# -C 'I@opencontrail:database' state.sls opencontrail.database
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 2, delay: 20}
-# skip_fail: false
-
-# - description: Install Opencontrail control on ctl01
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-# -C 'I@opencontrail:control and *01*' state.sls opencontrail exclude=opencontrail.client
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 5}
-# skip_fail: false
-
-# - description: Install Opencontrail control on all nodes
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-# -C 'I@opencontrail:control' state.sls opencontrail exclude=opencontrail.client
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 5}
-# skip_fail: false
-
-# - description: Install Opencontrail on collector
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-# -C 'I@opencontrail:collector' state.sls opencontrail exclude=opencontrail.client
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 5}
-# skip_fail: false
-
-# # OpenContrail vrouters
-# - description: Install Opencontrail client
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-# -C 'I@opencontrail:database:id:1' state.sls opencontrail.client
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 5}
-# skip_fail: false
-
-# - description: Install Opencontrail on computes
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-# -C 'I@opencontrail:compute' state.sls opencontrail exclude=opencontrail.client
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 2, delay: 5}
-# skip_fail: false
-
-# - description: Wake up vhost0
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-# -C 'I@nova:compute' cmd.run 'exec 0>&-; exec 1>&-; exec 2>&-;
-# nohup bash -c "ip link | grep vhost && echo no_reboot || sleep 5 && reboot & "'
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 5}
-# skip_fail: false
-
-# - description: Install Opencontrail client on computes
-# cmd: sleep 300 && salt --timeout=60 --hard-crash --state-output=mixed --state-verbose=False
-# -C 'I@opencontrail:compute' state.sls 'opencontrail.client'
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 5}
-# skip_fail: false
-
-# - description: Install Opencontrail on computes #2
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-# -C 'I@opencontrail:compute' state.sls opencontrail
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 2, delay: 5}
-# skip_fail: false
-
-# # Kubernetes
-# - description: Install Kubernetes Addons
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-# -C 'I@kubernetes:master' state.sls kubernetes.master.kube-addons
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 5}
-# skip_fail: true
-
-# - description: Check contrail status
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-# -C 'I@opencontrail:database' cmd.run contrail-status
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 5}
-# skip_fail: false
-
-# - description: Install Kubernetes components
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-# -C 'I@kubernetes:pool' state.sls kubernetes.pool
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 5, delay: 60}
-# skip_fail: false
-
-# # NOTE(vryzhenkin): There is nothing to setup at this model
-# #- description: Setup etcd server on primary controller
-# # cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-# # -C 'I@kubernetes:master and *01*' state.sls etcd.server.setup
-# # node_name: {{ HOSTNAME_CFG01 }}
-# # retry: {count: 1, delay: 5}
-# # skip_fail: false
-
-# - description: Run Kubernetes master without setup
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-# -C 'I@kubernetes:master' state.sls kubernetes exclude=kubernetes.master.setup
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 3, delay: 5}
-# skip_fail: true
-
-# - description: Run Kubernetes master setup
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-# -C 'I@kubernetes:master and *01*' state.sls kubernetes.master.setup
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 5}
-# skip_fail: true
-
-# - description: Restart Kubelet
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-# -C 'I@kubernetes:pool' service.restart 'kubelet'
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 5}
-# skip_fail: true
-
-# - description: Renew hosts file on a whole cluster
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C '*' state.sls linux.network.host;
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 5}
-# skip_fail: false
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/lab04-upgrade-physical-inventory.yaml b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/lab04-upgrade-physical-inventory.yaml
index eb9d76f..c5648a8 100644
--- a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/lab04-upgrade-physical-inventory.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/lab04-upgrade-physical-inventory.yaml
@@ -42,6 +42,45 @@
enp9s0f1:
role: single_ctl
+ ctl01.bm-mcp-pike-k8s-contrail.local:
+ reclass_storage_name: kubernetes_control_node01
+ roles:
+ - kubernetes_control_contrail
+ - linux_system_codename_xenial
+ interfaces:
+ enp2s0f0:
+ role: single_mgm
+ deploy_address: 172.17.41.9
+ enp2s0f1:
+ role: single_ctl
+ single_address: 10.167.8.239
+
+ ctl02.bm-mcp-pike-k8s-contrail.local:
+ reclass_storage_name: kubernetes_control_node02
+ roles:
+ - kubernetes_control_contrail
+ - linux_system_codename_xenial
+ interfaces:
+ enp2s0f0:
+ role: single_mgm
+ deploy_address: 172.17.41.10
+ enp2s0f1:
+ role: single_ctl
+ single_address: 10.167.8.238
+
+ ctl03.bm-mcp-pike-k8s-contrail.local:
+ reclass_storage_name: kubernetes_control_node03
+ roles:
+ - kubernetes_control_contrail
+ - linux_system_codename_xenial
+ interfaces:
+ enp2s0f0:
+ role: single_mgm
+ deploy_address: 172.17.41.11
+ enp2s0f1:
+ role: single_ctl
+ single_address: 10.167.8.237
+
# prx01.bm-mcp-pike-k8s-contrail.local:
# reclass_storage_name: kubernetes_proxy_node01
# roles:
@@ -75,6 +114,23 @@
role: single_ctl
single_address: 10.167.8.101
+ cmp002.bm-mcp-pike-k8s-contrail.local:
+ reclass_storage_name: kubernetes_compute_node002
+ roles:
+ - linux_system_codename_xenial
+ - kubernetes_compute_contrail
+ - salt_master_host
+ #- features_lvm_backend
+ interfaces:
+ enp9s0f0:
+ role: single_dhcp
+ ens11f0:
+ role: bond0_ab_contrail
+ tenant_address: 192.168.0.102
+ ens11f1:
+ role: single_ctl
+ single_address: 10.167.8.102
+
# cmp002.bm-mcp-pike-k8s-contrail.local:
# reclass_storage_name: kubernetes_compute_node02
# roles:
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt-context-cookiecutter-k8s-contrail.yaml b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt-context-cookiecutter-k8s-contrail.yaml
index b1a3be5..88aef93 100644
--- a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt-context-cookiecutter-k8s-contrail.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt-context-cookiecutter-k8s-contrail.yaml
@@ -1,3 +1,4 @@
+#https://docs.mirantis.com/mcp/master/mcp-ref-arch/opencontrail-plan/contrail-overview/contrail-4.html#
default_context:
backup_private_key: |-
-----BEGIN RSA PRIVATE KEY-----
@@ -30,6 +31,7 @@
backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCyRnKFPJJha8aun52W/6rYtryiIzxK/Ul9CiDz5RbbyEZRsKo+dZrtvNWoQ0Rj6D6ptBiHY+9vbFRHJj37TZNYuYJjOrv6is7V4bZIO3fzhjMNzoGdCiFRsbBpIfZixP9ADwpXV+9Rb0/SBUQeYlZHwmje91HLD60RSzNiSJNiRxVz3O07l5hP1rqBKMuGX62Yle7fyN1ZNwTzMk/44aklEMmlXYQBjC/4td171ES/goIYyo2W+WL0gD7Nqt8ywDNJM+kvmiiyUHbRqQozWHmWLcsbjs6kowb2wxUUYQSjv6WpMZrNQkRQ9slT3k+ot/Ggg6NgX8yFYpTlUAsutpXD
bmk_enabled: 'False'
ceph_enabled: 'False'
+ auditd_enabled: 'False'
cicd_control_node01_address: 10.167.8.91
cicd_control_node01_hostname: cid01
cicd_control_node02_address: 10.167.8.92
@@ -104,7 +106,7 @@
infra_primary_first_nic: eth1
infra_primary_second_nic: eth2
kubernetes_enabled: 'True'
- kubernetes_compute_count: 1
+ kubernetes_compute_count: 2
kubernetes_compute_rack01_single_subnet: 10.167.8
kubernetes_compute_rack01_tenant_subnet: 192.168.0
kubernetes_network_opencontrail_enabled: 'True'
@@ -118,23 +120,34 @@
mcp_common_scripts_branch: ''
mcp_version: proposed
offline_deployment: 'False'
+ kubernetes_control_address: 10.167.8.236
+ kubernetes_control_node01_deploy_address: 172.17.41.9
+ kubernetes_control_node01_address: 10.167.8.239
+ kubernetes_control_node01_hostname: ctl01
+ kubernetes_control_node02_deploy_address: 172.17.41.10
+ kubernetes_control_node02_address: 10.167.8.238
+ kubernetes_control_node02_hostname: ctl02
+ kubernetes_control_node03_deploy_address: 172.17.41.11
+ kubernetes_control_node03_address: 10.167.8.237
+ kubernetes_control_node03_hostname: ctl03
+ linux_repo_contrail_component: oc40
opencontrail_analytics_address: 10.167.8.30
- opencontrail_analytics_hostname: nal
+ opencontrail_analytics_hostname: ctl
opencontrail_analytics_node01_address: 10.167.8.31
- opencontrail_analytics_node01_hostname: nal01
+ opencontrail_analytics_node01_hostname: ctl01
opencontrail_analytics_node02_address: 10.167.8.32
- opencontrail_analytics_node02_hostname: nal02
+ opencontrail_analytics_node02_hostname: ctl02
opencontrail_analytics_node03_address: 10.167.8.33
- opencontrail_analytics_node03_hostname: nal03
+ opencontrail_analytics_node03_hostname: ctl03
opencontrail_compute_iface_mask: '24'
- opencontrail_control_address: 10.167.8.20
- opencontrail_control_hostname: ntw
- opencontrail_control_node01_address: 10.167.8.21
- opencontrail_control_node01_hostname: ntw01
- opencontrail_control_node02_address: 10.167.8.22
- opencontrail_control_node02_hostname: ntw02
- opencontrail_control_node03_address: 10.167.8.23
- opencontrail_control_node03_hostname: ntw03
+ opencontrail_control_address: 10.167.8.236
+ opencontrail_control_hostname: ctl
+ opencontrail_control_node01_address: 10.167.8.239
+ opencontrail_control_node01_hostname: ctl01
+ opencontrail_control_node02_address: 10.167.8.238
+ opencontrail_control_node02_hostname: ctl02
+ opencontrail_control_node03_address: 10.167.8.237
+ opencontrail_control_node03_hostname: ctl03
opencontrail_enabled: 'True'
opencontrail_router01_address: 10.167.8.100
opencontrail_router01_hostname: rtr01
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt-context-environment.yaml b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt-context-environment.yaml
index 63f07b5..18032a1 100644
--- a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt-context-environment.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt-context-environment.yaml
@@ -1,56 +1,57 @@
nodes:
# Virtual Control Plane nodes
+# Commented out: the ctl nodes are baremetal in this setup
+# ctl01.cookied-bm-mcp-ocata-contrail.local:
+# reclass_storage_name: kubernetes_control_node01
+# roles:
+# - kubernetes_control_contrail
+# - linux_system_codename_xenial
+# interfaces:
+# ens3:
+# role: single_ctl
+#
+# ctl02.cookied-bm-mcp-ocata-contrail.local:
+# reclass_storage_name: kubernetes_control_node02
+# roles:
+# - kubernetes_control_contrail
+# - linux_system_codename_xenial
+# interfaces:
+# ens3:
+# role: single_ctl
+#
+# ctl03.cookied-bm-mcp-ocata-contrail.local:
+# reclass_storage_name: kubernetes_control_node03
+# roles:
+# - kubernetes_control_contrail
+# - linux_system_codename_xenial
+# interfaces:
+# ens3:
+# role: single_ctl
- ctl01.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: kubernetes_control_node01
- roles:
- - kubernetes_control_contrail
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
+# Commented out: there are no k8s proxy nodes in this setup
+# prx01.cookied-bm-mcp-ocata-contrail.local:
+# reclass_storage_name: kubernetes_proxy_node01
+# roles:
+# - kubernetes_proxy
+# # - infra_proxy
+# # - stacklight_proxy
+# - salt_master_host
+# - linux_system_codename_xenial
+# interfaces:
+# ens3:
+# role: single_ctl
- ctl02.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: kubernetes_control_node02
- roles:
- - kubernetes_control_contrail
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- ctl03.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: kubernetes_control_node03
- roles:
- - kubernetes_control_contrail
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- prx01.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: kubernetes_proxy_node01
- roles:
- - kubernetes_proxy
- # - infra_proxy
- # - stacklight_proxy
- - salt_master_host
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
-
- prx02.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: kubernetes_proxy_node02
- roles:
- - kubernetes_proxy
- # - infra_proxy
- # - stacklight_proxy
- - salt_master_host
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
+# prx02.cookied-bm-mcp-ocata-contrail.local:
+# reclass_storage_name: kubernetes_proxy_node02
+# roles:
+# - kubernetes_proxy
+# # - infra_proxy
+# # - stacklight_proxy
+# - salt_master_host
+# - linux_system_codename_xenial
+# interfaces:
+# ens3:
+# role: single_ctl
mon01.cookied-bm-mcp-ocata-contrail.local:
reclass_storage_name: stacklight_server_node01
@@ -79,71 +80,72 @@
ens3:
role: single_ctl
- nal01.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: opencontrail_analytics_node01
- roles:
- - opencontrail_analytics
- - linux_system_codename_xenial
- - salt_master_host
- interfaces:
- ens3:
- role: single_ctl
- single_address: 10.167.8.31
-
- nal02.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: opencontrail_analytics_node02
- roles:
- - opencontrail_analytics
- - linux_system_codename_xenial
- - salt_master_host
- interfaces:
- ens3:
- role: single_ctl
- single_address: 10.167.8.32
-
- nal03.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: opencontrail_analytics_node03
- roles:
- - opencontrail_analytics
- - linux_system_codename_xenial
- - salt_master_host
- interfaces:
- ens3:
- role: single_ctl
- single_address: 10.167.8.33
-
- ntw01.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: opencontrail_control_node01
- roles:
- - opencontrail_control
- - linux_system_codename_xenial
- - salt_master_host
- interfaces:
- ens3:
- role: single_ctl
- single_address: 10.167.8.21
-
- ntw02.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: opencontrail_control_node02
- roles:
- - opencontrail_control
- - linux_system_codename_xenial
- - salt_master_host
- interfaces:
- ens3:
- role: single_ctl
- single_address: 10.167.8.22
-
- ntw03.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: opencontrail_control_node03
- roles:
- - opencontrail_control
- - linux_system_codename_xenial
- - salt_master_host
- interfaces:
- ens3:
- role: single_ctl
- single_address: 10.167.8.23
+# Commented out: these services should run in pods
+# nal01.cookied-bm-mcp-ocata-contrail.local:
+# reclass_storage_name: opencontrail_analytics_node01
+# roles:
+# - opencontrail_analytics
+# - linux_system_codename_xenial
+# - salt_master_host
+# interfaces:
+# ens3:
+# role: single_ctl
+# single_address: 10.167.8.31
+#
+# nal02.cookied-bm-mcp-ocata-contrail.local:
+# reclass_storage_name: opencontrail_analytics_node02
+# roles:
+# - opencontrail_analytics
+# - linux_system_codename_xenial
+# - salt_master_host
+# interfaces:
+# ens3:
+# role: single_ctl
+# single_address: 10.167.8.32
+#
+# nal03.cookied-bm-mcp-ocata-contrail.local:
+# reclass_storage_name: opencontrail_analytics_node03
+# roles:
+# - opencontrail_analytics
+# - linux_system_codename_xenial
+# - salt_master_host
+# interfaces:
+# ens3:
+# role: single_ctl
+# single_address: 10.167.8.33
+#
+# ntw01.cookied-bm-mcp-ocata-contrail.local:
+# reclass_storage_name: opencontrail_control_node01
+# roles:
+# - opencontrail_control
+# - linux_system_codename_xenial
+# - salt_master_host
+# interfaces:
+# ens3:
+# role: single_ctl
+# single_address: 10.167.8.21
+#
+# ntw02.cookied-bm-mcp-ocata-contrail.local:
+# reclass_storage_name: opencontrail_control_node02
+# roles:
+# - opencontrail_control
+# - linux_system_codename_xenial
+# - salt_master_host
+# interfaces:
+# ens3:
+# role: single_ctl
+# single_address: 10.167.8.22
+#
+# ntw03.cookied-bm-mcp-ocata-contrail.local:
+# reclass_storage_name: opencontrail_control_node03
+# roles:
+# - opencontrail_control
+# - linux_system_codename_xenial
+# - salt_master_host
+# interfaces:
+# ens3:
+# role: single_ctl
+# single_address: 10.167.8.23
mtr01.cookied-bm-mcp-ocata-contrail.local:
reclass_storage_name: stacklight_telemetry_node01
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt.yaml b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt.yaml
index a7f431a..d26a22a 100644
--- a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt.yaml
@@ -1,9 +1,6 @@
{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import HOSTNAME_CMP001 with context %}
{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import LAB_CONFIG_NAME with context %}
{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import DOMAIN_NAME with context %}
-{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import ETH0_IP_ADDRESS_CFG01 with context %}
-{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import ETH0_IP_ADDRESS_CMP001 with context %}
{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import HOSTNAME_KVM01 with context %}
{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import HOSTNAME_KVM02 with context %}
{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import HOSTNAME_KVM03 with context %}
@@ -34,24 +31,6 @@
{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-{{ SHARED.ADJUST_SL_OPTS(OVERRIDES_FILENAME='/srv/salt/reclass/classes/environment/' + ENVIRONMENT_MODEL_INVENTORY_NAME + '/overrides.yml') }}
-
-
-# - description: "Registration cmp001 node"
-# cmd: |
-# salt-call event.send "reclass/minion/classify" \
-# "{\"node_master_ip\": \"{{ ETH0_IP_ADDRESS_CFG01 }}\", \
-# \"node_deploy_ip\": \"{{ ETH0_IP_ADDRESS_CMP001 }}\", \
-# \"node_control_ip\": \"10.167.8.101\", \
-# \"node_tenant_ipcontrol_ip\": \"10.167.8.101\", \
-# \"node_os\": \"xenial\", \
-# \"node_domain\": \"{{ DOMAIN_NAME }}\", \
-# \"node_cluster\": \"{{ LAB_CONFIG_NAME }}\",
-# \"node_hostname\": \"$(hostname -s)\"}"
-# node_name: {{ HOSTNAME_CMP001 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-#
- description: "Workaround for rack01 compute generator"
cmd: |
set -e;
@@ -62,67 +41,16 @@
retry: {count: 1, delay: 10}
skip_fail: false
-# - description: "WR for changing image to proposed"
-# cmd: |
-# set -e;
-# # Add message_queu host for opencontrail
-# . /root/venv-reclass-tools/bin/activate;
-# reclass-tools add-key parameters._param.salt_control_xenial_image 'http://ci.mcp.mirantis.net:8085/images/ubuntu-16-04-x64-mcpproposed.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
-# reclass-tools add-key parameters._param.salt_control_trusty_image 'http://ci.mcp.mirantis.net:8085/images/ubuntu-14-04-x64-mcpproposed.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-
-- description: "Workaround for xenial images"
+- description: "Change path to internal storage for salt.control images"
cmd: |
set -e;
- # Add tenant and single addresses for computes
- set -e;
- # Add message_queu host for opencontrail
. /root/venv-reclass-tools/bin/activate;
- reclass-tools add-key parameters.salt.control.cluster.internal.node.nal01.image '${_param:salt_control_xenial_image}' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
- reclass-tools add-key parameters.salt.control.cluster.internal.node.nal02.image '${_param:salt_control_xenial_image}' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
- reclass-tools add-key parameters.salt.control.cluster.internal.node.nal03.image '${_param:salt_control_xenial_image}' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
- reclass-tools add-key parameters.salt.control.cluster.internal.node.ntw01.image '${_param:salt_control_xenial_image}' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
- reclass-tools add-key parameters.salt.control.cluster.internal.node.ntw02.image '${_param:salt_control_xenial_image}' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
- reclass-tools add-key parameters.salt.control.cluster.internal.node.ntw03.image '${_param:salt_control_xenial_image}' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
- reclass-tools add-key parameters.salt.control.cluster.internal.node.prx01.image '${_param:salt_control_xenial_image}' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
- reclass-tools add-key parameters.salt.control.cluster.internal.node.prx02.image '${_param:salt_control_xenial_image}' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+ reclass-tools add-key parameters._param.salt_control_xenial_image 'https://apt.mcp.mirantis.net/images/ubuntu-16-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
+ reclass-tools add-key parameters._param.salt_control_trusty_image 'https://apt.mcp.mirantis.net/images/ubuntu-14-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
-
-
-- description: "Workaround for PROD-14060"
- cmd: |
- set -e;
- # Add tenant and single addresses for computes
- salt-call reclass.cluster_meta_set deploy_address 172.17.41.7 /srv/salt/reclass/nodes/_generated/cmp001.cookied-bm-mcp-ocata-contrail.local.yml
- salt-call reclass.cluster_meta_set tenant_address 192.168.0.101 /srv/salt/reclass/nodes/_generated/cmp001.cookied-bm-mcp-ocata-contrail.local.yml
- salt-call reclass.cluster_meta_set single_address 10.167.8.101 /srv/salt/reclass/nodes/_generated/cmp001.cookied-bm-mcp-ocata-contrail.local.yml
-
- # salt-call reclass.cluster_meta_set deploy_address 172.16.49.74 /srv/salt/reclass/nodes/_generated/cmp002.cookied-bm-mcp-ocata-contrail.local.yml
- # salt-call reclass.cluster_meta_set tenant_address 192.168.0.102 /srv/salt/reclass/nodes/_generated/cmp002.cookied-bm-mcp-ocata-contrail.local.yml
- # salt-call reclass.cluster_meta_set single_address 10.167.8.102 /srv/salt/reclass/nodes/_generated/cmp002.cookied-bm-mcp-ocata-contrail.local.yml
-
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-
-- description: Temporary workaround for removing cinder-volume from CTL nodes
- cmd: |
- sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml;
- sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
- description: Temporary WR for correct bridge name according to environment templates
cmd: |
sed -i 's/br\-ctl/br\_ctl/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
@@ -131,6 +59,48 @@
retry: {count: 1, delay: 10}
skip_fail: false
+- description: Rerun openssh after env model is generated
+ cmd: |
+ salt-call state.sls openssh
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+{{ SHARED.MACRO_GENERATE_INVENTORY() }}
+
+{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
+
+- description: "Disable kubelet_fail_on_swap"
+ cmd: |
+ set -e;
+ . /root/venv-reclass-tools/bin/activate;
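+    # Let kubelet start on nodes that still have swap enabled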
+ reclass-tools add-key parameters._param.kubelet_fail_on_swap false /srv/salt/reclass/classes/system/kubernetes/common.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Update minion information
+ cmd: |
+ salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_grains &&
+ salt --hard-crash --state-output=mixed --state-verbose=False '*' mine.update &&
+ salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar && sleep 10
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Rerun openssh after env model is generated
+ cmd: |
+ salt-call state.sls openssh
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Execute linux.network.host one more time after salt.minion to apply dynamically registered hosts on the cluster nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@linux:system' state.sls linux.network.host
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 10}
+ skip_fail: false
+
{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
########################################
@@ -157,13 +127,15 @@
retry: {count: 20, delay: 30}
skip_fail: false
+{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
+
#########################################
# Configure all running salt minion nodes
#########################################
- description: Hack resolv.conf on VCP nodes for internal services access
cmd: |
- salt --hard-crash --state-output=mixed --state-verbose=False -C '* and not kvm* and not cmp* and not gtw* and not cfg*' cmd.run "echo 'nameserver 172.17.41.2' > /etc/resolv.conf;"
+ salt --hard-crash --state-output=mixed --state-verbose=False -C '* and not cfg*' cmd.run "echo 'nameserver 172.17.41.2' > /etc/resolv.conf;"
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
@@ -186,4 +158,28 @@
retry: {count: 1, delay: 5}
skip_fail: false
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
+{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
+
+{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
+
+- description: "Lab04 workaround: Give each node root acces with key from cfg01"
+ cmd: |
+ set -e;
+ set -x;
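+    # Derive cfg01's public key from its private key and push it to every node's authorized_keys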
+ key=$(ssh-keygen -y -f /root/.ssh/id_rsa);
+ salt '*' cmd.run "echo $key >> /root/.ssh/authorized_keys";
+ salt '*' cmd.run "service sshd restart"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+- description: "Lab04 workaround: Control network access from cfg01 node using sshuttle via kvm01"
+ cmd: |
+ set -e;
+ set -x;
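+    # Tunnel traffic for the 10.167.8.0/24 control network from cfg01 through kvm01 using sshuttle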
+ KVM01_DEPLOY_ADDRESS=$(salt-call --out=newline_values_only pillar.get _param:infra_kvm_node01_deploy_address);
+ apt-get install -y sshuttle;
+ sshuttle -r ${KVM01_DEPLOY_ADDRESS} 10.167.8.0/24 -D >/dev/null;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/sl.yaml b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/sl.yaml
index 4045fe8..0b559a8 100644
--- a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/sl.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/sl.yaml
@@ -1,4 +1,4 @@
-{% from 'cookied-bm-mcp-ocata-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
# Install docker swarm
- description: Configure docker service
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--user-data1604-hwe-compute.yaml b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--user-data1604-hwe-compute.yaml
index a8981c7..ddbd762 100644
--- a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--user-data1604-hwe-compute.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--user-data1604-hwe-compute.yaml
@@ -42,11 +42,11 @@
#- sudo route add default gw {gateway} {interface_name}
# Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+ # - fallocate -l 4G /swapfile
+ # - chmod 600 /swapfile
+ # - mkswap /swapfile
+ # - swapon /swapfile
+ # - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
############## TCP Cloud cfg01 node ##################
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--user-data1604.yaml
index bb7056a..89b0da7 100644
--- a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--user-data1604.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--user-data1604.yaml
@@ -41,11 +41,11 @@
#- sudo route add default gw {gateway} {interface_name}
# Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+ # - fallocate -l 4G /swapfile
+ # - chmod 600 /swapfile
+ # - mkswap /swapfile
+ # - swapon /swapfile
+ # - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
############## TCP Cloud cfg01 node ##################
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay.yaml b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay.yaml
index ef8c4f1..da6afea 100644
--- a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay.yaml
@@ -8,27 +8,20 @@
{% set HOSTNAME_KVM01 = os_env('HOSTNAME_KVM01', 'kvm01.' + DOMAIN_NAME) %}
{% set HOSTNAME_KVM02 = os_env('HOSTNAME_KVM02', 'kvm02.' + DOMAIN_NAME) %}
{% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM03', 'kvm03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
{% set HOSTNAME_CMP001 = os_env('HOSTNAME_CMP001', 'cmp001.' + DOMAIN_NAME) %}
-{#
{% set HOSTNAME_CMP002 = os_env('HOSTNAME_CMP002', 'cmp002.' + DOMAIN_NAME) %}
-{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW02 = os_env('HOSTNAME_GTW02', 'gtw02.' + DOMAIN_NAME) %}
-#}
{% set ETH0_IP_ADDRESS_CFG01 = os_env('ETH0_IP_ADDRESS_CFG01', '172.17.41.3') %}
{% set ETH0_IP_ADDRESS_KVM01 = os_env('ETH0_IP_ADDRESS_KVM01', '172.17.41.4') %}
{% set ETH0_IP_ADDRESS_KVM02 = os_env('ETH0_IP_ADDRESS_KVM02', '172.17.41.5') %}
{% set ETH0_IP_ADDRESS_KVM03 = os_env('ETH0_IP_ADDRESS_KVM03', '172.17.41.6') %}
{% set ETH0_IP_ADDRESS_CMP001 = os_env('ETH0_IP_ADDRESS_CMP001', '172.17.41.7') %}
-{#
-# {% set ETH0_IP_ADDRESS_CMP002 = os_env('ETH0_IP_ADDRESS_CMP002', '172.16.49.74') %}
-# {% set ETH0_IP_ADDRESS_CMP003 = os_env('ETH0_IP_ADDRESS_CMP003', '172.16.167.140') %}
-#}
-{#
-# {% set ETH0_IP_ADDRESS_PRX01 = os_env('ETH0_IP_ADDRESS_PRX01', '172.17.41.8') %}
-# {% set ETH0_IP_ADDRESS_GTW01 = os_env('ETH0_IP_ADDRESS_GTW01', '172.16.49.5') %}
-# {% set ETH0_IP_ADDRESS_GTW02 = os_env('ETH0_IP_ADDRESS_GTW02', '172.16.49.4') %}
-#}
+{% set ETH0_IP_ADDRESS_CMP002 = os_env('ETH0_IP_ADDRESS_CMP002', '172.17.41.8') %}
+{% set ETH0_IP_ADDRESS_CTL01 = os_env('ETH0_IP_ADDRESS_CTL01', '172.17.41.9') %}
+{% set ETH0_IP_ADDRESS_CTL02 = os_env('ETH0_IP_ADDRESS_CTL02', '172.17.41.10') %}
+{% set ETH0_IP_ADDRESS_CTL03 = os_env('ETH0_IP_ADDRESS_CTL03', '172.17.41.11') %}
{% import 'cookied-bm-mcp-pike-k8s-contrail/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
{% import 'cookied-bm-mcp-pike-k8s-contrail/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
{% import 'cookied-bm-mcp-pike-k8s-contrail/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA with context %}
@@ -59,14 +52,20 @@
default_{{ HOSTNAME_KVM02 }}: {{ ETH0_IP_ADDRESS_KVM02 }}
default_{{ HOSTNAME_KVM03 }}: {{ ETH0_IP_ADDRESS_KVM03 }}
default_{{ HOSTNAME_CMP001 }}: {{ ETH0_IP_ADDRESS_CMP001 }}
- #default_{{ HOSTNAME_PRX01 }}: {{ ETH0_IP_ADDRESS_PRX01 }}
+ default_{{ HOSTNAME_CMP002 }}: {{ ETH0_IP_ADDRESS_CMP002 }}
+ default_{{ HOSTNAME_CTL01 }}: {{ ETH0_IP_ADDRESS_CTL01 }}
+ default_{{ HOSTNAME_CTL02 }}: {{ ETH0_IP_ADDRESS_CTL02 }}
+ default_{{ HOSTNAME_CTL03 }}: {{ ETH0_IP_ADDRESS_CTL03 }}
virtual_{{ HOSTNAME_CFG01 }}: {{ ETH0_IP_ADDRESS_CFG01 }}
virtual_{{ HOSTNAME_KVM01 }}: {{ ETH0_IP_ADDRESS_KVM01 }}
virtual_{{ HOSTNAME_KVM02 }}: {{ ETH0_IP_ADDRESS_KVM02 }}
virtual_{{ HOSTNAME_KVM03 }}: {{ ETH0_IP_ADDRESS_KVM03 }}
virtual_{{ HOSTNAME_CMP001 }}: {{ ETH0_IP_ADDRESS_CMP001 }}
- #virtual_{{ HOSTNAME_PRX01 }}: {{ ETH0_IP_ADDRESS_PRX01 }}
+ virtual_{{ HOSTNAME_CMP002 }}: {{ ETH0_IP_ADDRESS_CMP002 }}
+ virtual_{{ HOSTNAME_CTL01 }}: {{ ETH0_IP_ADDRESS_CTL01 }}
+ virtual_{{ HOSTNAME_CTL02 }}: {{ ETH0_IP_ADDRESS_CTL02 }}
+ virtual_{{ HOSTNAME_CTL03 }}: {{ ETH0_IP_ADDRESS_CTL03 }}
#ip_ranges:
# dhcp: [+2, -4]
private-pool01:
@@ -415,6 +414,132 @@
parents:
- enp9s0f1
+ - name: {{ HOSTNAME_CTL01 }}
+ role: salt_minion
+ params:
+ ipmi_user: !os_env IPMI_USER
+ ipmi_password: !os_env IPMI_PASSWORD
+ ipmi_previlegies: OPERATOR
+ ipmi_host: !os_env IPMI_HOST_CTL01 # hostname or IP address
+ ipmi_lan_interface: lanplus
+ ipmi_port: 623
+
+ root_volume_name: system # see 'volumes' below
+ cloud_init_volume_name: iso # see 'volumes' below
+ cloud_init_iface_up: enp2s0f0 # see 'interfaces' below.
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 200
+
+ # As with the agent URL, this is the URL of the image used to deploy the
+ # node. It must also be reachable from the deploying node when nodes are
+ # provisioned by the agent; usually the PXE/provision network address is used.
+ source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+ source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data
+
+ interfaces:
+ - label: enp2s0f0
+ l2_network_device: admin
+ mac_address: !os_env ETH0_MAC_ADDRESS_CTL01
+ - label: enp2s0f1
+ mac_address: !os_env ETH1_MAC_ADDRESS_CTL01
+
+ network_config:
+ enp2s0f0:
+ networks:
+ - admin
+
+ - name: {{ HOSTNAME_CTL02 }}
+ role: salt_minion
+ params:
+ ipmi_user: !os_env IPMI_USER
+ ipmi_password: !os_env IPMI_PASSWORD_CTL
+ ipmi_previlegies: OPERATOR
+ ipmi_host: !os_env IPMI_HOST_CTL02 # hostname or IP address
+ ipmi_lan_interface: lanplus
+ ipmi_port: 623
+
+ root_volume_name: system # see 'volumes' below
+ cloud_init_volume_name: iso # see 'volumes' below
+ cloud_init_iface_up: enp2s0f0 # see 'interfaces' below.
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 200
+
+ # As with the agent URL, this is the URL of the image used to deploy the
+ # node. It must also be reachable from the deploying node when nodes are
+ # provisioned by the agent; usually the PXE/provision network address is used.
+ source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+ source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data
+
+ interfaces:
+ - label: enp2s0f0
+ l2_network_device: admin
+ mac_address: !os_env ETH0_MAC_ADDRESS_CTL02
+ - label: enp2s0f1
+ mac_address: !os_env ETH1_MAC_ADDRESS_CTL02
+
+ network_config:
+ enp2s0f0:
+ networks:
+ - admin
+
+ - name: {{ HOSTNAME_CTL03 }}
+ role: salt_minion
+ params:
+ ipmi_user: !os_env IPMI_USER
+ ipmi_password: !os_env IPMI_PASSWORD_CTL
+ ipmi_previlegies: OPERATOR
+ ipmi_host: !os_env IPMI_HOST_CTL03 # hostname or IP address
+ ipmi_lan_interface: lanplus
+ ipmi_port: 623
+
+ root_volume_name: system # see 'volumes' below
+ cloud_init_volume_name: iso # see 'volumes' below
+ # cloud_init_iface_up: eno1 # see 'interfaces' below.
+ cloud_init_iface_up: enp2s0f0 # see 'interfaces' below.
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 200
+
+ # As with the agent URL, this is the URL of the image used to deploy the
+ # node. It must also be reachable from the deploying node when nodes are
+ # provisioned by the agent; usually the PXE/provision network address is used.
+ source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+ source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data
+
+ interfaces:
+ # - label: eno1
+ - label: enp2s0f0
+ l2_network_device: admin
+ mac_address: !os_env ETH0_MAC_ADDRESS_CTL03
+ # - label: eno2
+ - label: enp2s0f1
+ mac_address: !os_env ETH1_MAC_ADDRESS_CTL03
+
+ network_config:
+ # eno1:
+ enp2s0f0:
+ networks:
+ - admin
- name: {{ HOSTNAME_CMP001 }}
role: salt_minion
@@ -473,66 +598,49 @@
parents:
- enp9s0f1
+ - name: {{ HOSTNAME_CMP002 }}
+ role: salt_minion
+ params:
+ ipmi_user: !os_env IPMI_USER
+ ipmi_password: !os_env IPMI_PASSWORD
+ ipmi_previlegies: OPERATOR
+ ipmi_host: !os_env IPMI_HOST_CMP002 # hostname or IP address
+ ipmi_lan_interface: lanplus
+ ipmi_port: 623
+ root_volume_name: system # see 'volumes' below
+ cloud_init_volume_name: iso # see 'volumes' below
+ # cloud_init_iface_up: enp3s0f0 # see 'interfaces' below.
+ cloud_init_iface_up: enp9s0f0 # see 'interfaces' below.
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 200
- # - name: {{ HOSTNAME_CMP002 }}
- # role: salt_minion
- # params:
- # ipmi_user: !os_env IPMI_USER
- # ipmi_password: !os_env IPMI_PASSWORD
- # ipmi_previlegies: OPERATOR
- # ipmi_host: !os_env IPMI_HOST_CMP002 # hostname or IP address
- # ipmi_lan_interface: lanplus
- # ipmi_port: 623
+ # As with the agent URL, this is the URL of the image used to deploy the
+ # node. It must also be reachable from the deploying node when nodes are
+ # provisioned by the agent; usually the PXE/provision network address is used.
+ source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+ source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
- # root_volume_name: system # see 'volumes' below
- # cloud_init_volume_name: iso # see 'volumes' below
- # # cloud_init_iface_up: eno1 # see 'interfaces' below.
- # cloud_init_iface_up: enp2s0f1 # see 'interfaces' below.
- # volumes:
- # - name: system
- # capacity: !os_env NODE_VOLUME_SIZE, 200
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
- # # The same as for agent URL, here is an URL to the image that should be
- # # used for deploy the node. It should also be accessible from deploying
- # # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
- # source_image: !os_env IRONIC_SOURCE_IMAGE_URL
- # source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_hwe_cmp
- # - name: iso # Volume with name 'iso' will be used
- # # for store image with cloud-init metadata.
-
- # cloudinit_meta_data: *cloudinit_meta_data
- # cloudinit_user_data: *cloudinit_user_data_hwe
-
- # interfaces:
- # # - label: eno1
- # - label: enp2s0f0
- # mac_address: !os_env ETH0_MAC_ADDRESS_CMP002
- # # - label: eth0
- # - label: enp2s0f1
- # l2_network_device: admin
- # mac_address: !os_env ETH1_MAC_ADDRESS_CMP002
- # # - label: eth3
- # - label: enp5s0f0
- # mac_address: !os_env ETH2_MAC_ADDRESS_CMP002
- # features: ['dpdk', 'dpdk_pci: 0000:05:00.0']
- # # - label: eth2
- # - label: enp5s0f1
- # mac_address: !os_env ETH3_MAC_ADDRESS_CMP002
- # features: ['dpdk', 'dpdk_pci: 0000:05:00.1']
- # # - label: eth4
- # # mac_address: !os_env ETH4_MAC_ADDRESS_CMP002
- # # features: ['dpdk', 'dpdk_pci: 0000:0b:00.0']
-
- # network_config:
- # enp2s0f1:
- # networks:
- # - admin
- # bond0:
- # networks:
- # - control
- # aggregation: active-backup
- # parents:
- # - enp5s0f0
- # - enp5s0f1
+ interfaces:
+ - label: enp9s0f0
+ l2_network_device: admin
+ mac_address: !os_env ETH0_MAC_ADDRESS_CMP002
+ - label: enp9s0f1
+ mac_address: !os_env ETH1_MAC_ADDRESS_CMP002
+ network_config:
+ enp9s0f0:
+ networks:
+ - admin
+ bond0:
+ networks:
+ - control
+ aggregation: active-backup
+ parents:
+ - enp9s0f1
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-dvr/core.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr/core.yaml
index eaf156f..8e463ce 100644
--- a/tcp_tests/templates/cookied-mcp-mitaka-dvr/core.yaml
+++ b/tcp_tests/templates/cookied-mcp-mitaka-dvr/core.yaml
@@ -1,3 +1,4 @@
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
{% from 'cookied-mcp-mitaka-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
# Install support services
@@ -15,26 +16,7 @@
retry: {count: 1, delay: 10}
skip_fail: true
-- description: Install glusterfs
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Setup glusterfs on primary controller
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check the gluster status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
- description: Install RabbitMQ on ctl01
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-dvr/openstack.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr/openstack.yaml
index 85e9821..fe23827 100644
--- a/tcp_tests/templates/cookied-mcp-mitaka-dvr/openstack.yaml
+++ b/tcp_tests/templates/cookied-mcp-mitaka-dvr/openstack.yaml
@@ -1,3 +1,5 @@
+{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
+
{% from 'cookied-mcp-mitaka-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
{% from 'cookied-mcp-mitaka-dvr/underlay.yaml' import HOSTNAME_CTL01 with context %}
{% from 'cookied-mcp-mitaka-dvr/underlay.yaml' import HOSTNAME_CTL02 with context %}
@@ -30,113 +32,19 @@
node_name: {{ HOSTNAME_CFG01 }}
{%- endif %}
-- description: Install glance on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glance:server' state.sls glance -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
-- description: Install keystone service (note that different fernet keys are created on different nodes)
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' state.sls keystone.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 15}
- skip_fail: false
+{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
-- description: Restart apache due to PROD-10477
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl restart apache2"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 15}
- skip_fail: false
+{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
-- description: Check apache status to PROD-10477
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl status apache2"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 15}
- skip_fail: false
+{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER() }}
-- description: Mount glusterfs.client volumes (resuires created 'keystone' and 'glusterfs' system users)
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glance:server' state.sls glusterfs.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON() }}
-- description: Update fernet keys for keystone server on the mounted glusterfs volume
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' state.sls keystone.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
-- description: Populate keystone services/tenants/admins
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:client' state.sls keystone.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check keystone service-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack service list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check glance image-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; glance image-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Install nova on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nova:controller' state.sls nova -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check nova service-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; nova --debug service-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 5}
- skip_fail: false
-
-
-- description: Install cinder
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@cinder:controller' state.sls cinder -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check cinder list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; cinder list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Install neutron service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@neutron:server' state.sls neutron -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install neutron on gtw node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@neutron:gateway' state.sls neutron
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-# install designate
+# install designate backend
- description: Install powerdns
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-C 'I@powerdns:server' state.sls powerdns.server
@@ -144,47 +52,9 @@
retry: {count: 1, delay: 5}
skip_fail: false
-- description: Install designate
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@designate:server' state.sls designate -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 10}
- skip_fail: false
+{{ SHARED_OPENSTACK.MACRO_INSTALL_DESIGNATE() }}
-- description: Check neutron agent-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; neutron agent-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install heat service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@heat:server' state.sls heat -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Deploy horizon dashboard
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@horizon:server' state.sls horizon
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Deploy nginx proxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Check heat service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack orchestration resource type list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 10}
- skip_fail: false
+{{ SHARED_OPENSTACK.MACRO_INSTALL_HORIZON() }}
# Install compute node
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-ovs/core.yaml b/tcp_tests/templates/cookied-mcp-mitaka-ovs/core.yaml
index 89dbf64..7c9c675 100644
--- a/tcp_tests/templates/cookied-mcp-mitaka-ovs/core.yaml
+++ b/tcp_tests/templates/cookied-mcp-mitaka-ovs/core.yaml
@@ -1,7 +1,6 @@
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
{% from 'cookied-mcp-mitaka-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% import 'shared-backup-restore.yaml' as BACKUP with context %}
-
# Install support services
- description: Install keepalived on ctl01
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
@@ -17,26 +16,7 @@
retry: {count: 1, delay: 10}
skip_fail: true
-- description: Install glusterfs
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Setup glusterfs on primary controller
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check the gluster status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
- description: Install RabbitMQ on ctl01
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-ovs/openstack.yaml b/tcp_tests/templates/cookied-mcp-mitaka-ovs/openstack.yaml
index 7d4673b..7927779 100644
--- a/tcp_tests/templates/cookied-mcp-mitaka-ovs/openstack.yaml
+++ b/tcp_tests/templates/cookied-mcp-mitaka-ovs/openstack.yaml
@@ -1,3 +1,5 @@
+{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
+
{% from 'cookied-mcp-mitaka-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
{% from 'cookied-mcp-mitaka-ovs/underlay.yaml' import HOSTNAME_CTL01 with context %}
{% from 'cookied-mcp-mitaka-ovs/underlay.yaml' import HOSTNAME_CTL02 with context %}
@@ -6,103 +8,26 @@
{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-# Install OpenStack control services
+{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
-- description: Install glance on all controllers
+{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER() }}
+
+#{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON() }}
+
+- description: Install neutron service on primary node
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glance:server' state.sls glance -b 1
+ -C "I@neutron:server and *01*" state.sls neutron.server
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
-- description: Install keystone service (note that different fernet keys are created on different nodes)
+- description: Install neutron service on other nodes
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' state.sls keystone.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 15}
- skip_fail: false
-
-- description: Restart apache due to PROD-10477
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl restart apache2"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 15}
- skip_fail: false
-
-- description: Check apache status to PROD-10477
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl status apache2"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 15}
- skip_fail: false
-
-- description: Mount glusterfs.client volumes (resuires created 'keystone' and 'glusterfs' system users)
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glance:server' state.sls glusterfs.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Update fernet keys for keystone server on the mounted glusterfs volume
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' state.sls keystone.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Populate keystone services/tenants/admins
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:client' state.sls keystone.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check keystone service-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack service list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check glance image-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; glance image-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Install nova on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nova:controller' state.sls nova -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check nova service-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; nova --debug service-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 5}
- skip_fail: false
-
-
-- description: Install cinder
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@cinder:controller' state.sls cinder -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check cinder list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; cinder list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Install neutron service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@neutron:server' state.sls neutron -b 1
+ -C "I@neutron:server" state.sls neutron.server
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
@@ -132,21 +57,6 @@
retry: {count: 1, delay: 5}
skip_fail: false
-# install designate
-- description: Install powerdns
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@powerdns:server' state.sls powerdns.server
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install designate
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@designate:server' state.sls designate -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 10}
- skip_fail: false
-
- description: Check neutron agent-list
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-C 'I@keystone:server' cmd.run '. /root/keystonerc; neutron agent-list'
@@ -154,33 +64,19 @@
retry: {count: 1, delay: 5}
skip_fail: false
-- description: Install heat service
+{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
+
+# install designate backend
+- description: Install powerdns
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@heat:server' state.sls heat -b 1
+ -C 'I@powerdns:server' state.sls powerdns.server
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
-- description: Deploy horizon dashboard
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@horizon:server' state.sls horizon
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
+{{ SHARED_OPENSTACK.MACRO_INSTALL_DESIGNATE() }}
-- description: Deploy nginx proxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Check heat service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack orchestration resource type list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 10}
- skip_fail: false
+{{ SHARED_OPENSTACK.MACRO_INSTALL_HORIZON() }}
# Install compute node
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr/openstack.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr/openstack.yaml
index 5b8bbf7..b2bbba8 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr/openstack.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr/openstack.yaml
@@ -269,84 +269,4 @@
retry: {count: 1, delay: 30}
skip_fail: false
-# Configure cinder-volume salt-call PROD-13167
-- description: Set disks 01
- cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set disks 02
- cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
- node_name: {{ HOSTNAME_CTL02 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set disks 03
- cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
- node_name: {{ HOSTNAME_CTL03 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create partitions 01
- cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create partitions 02
- cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL02 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create partitions 03
- cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL03 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: create volume_group
- cmd: salt "ctl*" cmd.run 'vgcreate cinder-volumes /dev/vdb1'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Install cinder-volume
- cmd: salt 'ctl*' cmd.run 'apt-get install cinder-volume -y'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Install crudini
- cmd: salt "ctl*" cmd.run 'apt-get install crudini -y'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Temporary WR set enabled backends value 01
- cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Temporary WR set enabled backends value 02
- cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
- node_name: {{ HOSTNAME_CTL02 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Temporary WR set enabled backends value 03
- cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
- node_name: {{ HOSTNAME_CTL03 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Restart cinder volume
- cmd: |
- salt -C 'I@cinder:controller' service.restart cinder-volume;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
{{ SHARED.INSTALL_DOCKER_ON_GTW() }}
diff --git a/tcp_tests/templates/cookied-mcp-pike-ovs/openstack.yaml b/tcp_tests/templates/cookied-mcp-pike-ovs/openstack.yaml
index b1642b2..d4e8938 100644
--- a/tcp_tests/templates/cookied-mcp-pike-ovs/openstack.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-ovs/openstack.yaml
@@ -248,84 +248,4 @@
retry: {count: 1, delay: 30}
skip_fail: false
-# Configure cinder-volume salt-call
-- description: Set disks 01
- cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set disks 02
- cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
- node_name: {{ HOSTNAME_CTL02 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set disks 03
- cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
- node_name: {{ HOSTNAME_CTL03 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create partitions 01
- cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create partitions 02
- cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL02 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create partitions 03
- cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL03 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: create volume_group
- cmd: salt "ctl*" cmd.run 'vgcreate cinder-volumes /dev/vdb1'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Install cinder-volume
- cmd: salt 'ctl*' cmd.run 'apt-get install cinder-volume -y'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Install crudini
- cmd: salt "ctl*" cmd.run 'apt-get install crudini -y'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Temporary WR set enabled backends value 01
- cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Temporary WR set enabled backends value 02
- cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
- node_name: {{ HOSTNAME_CTL02 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Temporary WR set enabled backends value 03
- cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
- node_name: {{ HOSTNAME_CTL03 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Restart cinder volume
- cmd: |
- salt -C 'I@cinder:controller' service.restart cinder-volume;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
{{ SHARED.INSTALL_DOCKER_ON_GTW() }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr.yaml
index 838bc31..4d672b3 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr.yaml
@@ -46,6 +46,11 @@
reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
# reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ # Add cinder volume on cmp nodes. PROD-20945
+ reclass-tools add-key 'classes' 'system.cinder.volume.single' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
+ reclass-tools add-key 'classes' 'system.cinder.volume.backend.lvm' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
+ reclass-tools add-key 'classes' 'system.linux.storage.loopback' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
+ reclass-tools add-key parameters._param.loopback_device_size '20' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml;
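+ # The loopback device (size presumably in GB) backs the LVM volume group
+ # used by the cinder-volume LVM backend on the compute nodes.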
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
@@ -58,6 +63,5 @@
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: true
-
{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-ovs.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-ovs.yaml
index dd6c711..7b052a8 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-ovs.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-ovs.yaml
@@ -46,6 +46,11 @@
reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
# reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ # Add cinder volume on cmp nodes. PROD-20945
+ reclass-tools add-key 'classes' 'system.cinder.volume.single' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
+ reclass-tools add-key 'classes' 'system.cinder.volume.backend.lvm' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
+ reclass-tools add-key 'classes' 'system.linux.storage.loopback' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
+ reclass-tools add-key parameters._param.loopback_device_size '20' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml;
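+ # The loopback device (size presumably in GB) backs the LVM volume group
+ # used by the cinder-volume LVM backend on the compute nodes.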
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
diff --git a/tcp_tests/templates/shared-core.yaml b/tcp_tests/templates/shared-core.yaml
index da3db16..480e852 100644
--- a/tcp_tests/templates/shared-core.yaml
+++ b/tcp_tests/templates/shared-core.yaml
@@ -47,10 +47,35 @@
{%- endmacro %}
+{%- macro MACRO_INSTALL_GLUSTERFS() %}
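+# Shared replacement for the glusterfs install/setup/status/mount steps that
+# were previously duplicated inline in each template's core.yaml.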
+- description: Install glusterfs
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glusterfs:server' state.sls glusterfs.server.service
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+- description: Setup glusterfs on primary controller
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glusterfs:server and *01*' state.sls glusterfs.server.setup -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+- description: Check the gluster status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glusterfs:server' cmd.run 'gluster peer status && gluster volume status' -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+- description: Mount glusterfs.client volumes (requires the 'keystone' and
+ 'glusterfs' system users to exist)
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glusterfs:client' state.sls glusterfs.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
-
-
+{%- endmacro %}
diff --git a/tcp_tests/templates/shared-openstack.yaml b/tcp_tests/templates/shared-openstack.yaml
new file mode 100644
index 0000000..37bf5f3
--- /dev/null
+++ b/tcp_tests/templates/shared-openstack.yaml
@@ -0,0 +1,254 @@
+{# Collection of common macros shared across openstack services #}
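+{# Most macros apply a service state to the primary node (matched by "*01*")
+   first, then to the remaining nodes of that role. Templates import this file
+   as SHARED_OPENSTACK and call the macros, for example:
+   {{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }} #}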
+
+{%- macro MACRO_INSTALL_KEYSTONE() %}
+- description: Execute salt orchestration state to configure all needed
+ prerequisites, such as creating the SSH public key
+ cmd: salt-run state.orchestrate keystone.orchestrate.deploy
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install keystone service on primary node
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server and *01*' state.sls keystone.server
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 15}
+ skip_fail: false
+
+- description: Install keystone service on other nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' state.sls keystone.server
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 15}
+ skip_fail: false
+
+- description: Restart apache due to PROD-10477
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*'
+ cmd.run "systemctl restart apache2"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 15}
+ skip_fail: false
+
+- description: Check apache status (PROD-10477)
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*'
+ cmd.run "systemctl status apache2"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 15}
+ skip_fail: false
+
+- description: Populate keystone services/tenants/admins
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:client' state.sls keystone.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
+- description: Check keystone service-list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack service list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+{%- endmacro %}
+
+{%- macro MACRO_INSTALL_GLANCE() %}
+- description: Install glance service on primary node
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glance:server and *01*' state.sls glance.server
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install glance service on other nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glance:server' state.sls glance.server
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
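+# Re-apply keystone.server so the fernet keys are regenerated on the
+# glusterfs volume mounted by the glance states (replaces the former inline
+# "Update fernet keys" step).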
+- description: Update Fernet tokens
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' state.sls keystone.server
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check glance image-list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonerc; glance image-list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+{%- endmacro %}
+
+{%- macro MACRO_INSTALL_NOVA() %}
+- description: Install nova service on primary node
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C "I@nova:controller and *01*" state.sls nova.controller
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install nova service on other nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C "I@nova:controller" state.sls nova.controller
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check nova service-list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C "I@keystone:server" cmd.run ". /root/keystonercv3; nova
+ --debug service-list"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check nova list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C "I@keystone:server" cmd.run ". /root/keystonercv3; nova --debug list"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+{%- endmacro %}
+
+{%- macro MACRO_INSTALL_CINDER() %}
+- description: Install cinder on primary node
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C "I@cinder:controller and *01*" state.sls cinder
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install cinder on other nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C "I@cinder:controller" state.sls cinder
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check cinder list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonerc; cinder list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+{%- endmacro %}
+
+{%- macro MACRO_INSTALL_NEUTRON() %}
+- description: Install neutron service on primary node
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C "I@neutron:server and *01*" state.sls neutron.server
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install neutron service on other nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C "I@neutron:server" state.sls neutron.server
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install neutron on gtw node
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@neutron:gateway' state.sls neutron
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check neutron agent-list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonerc; neutron agent-list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+{%- endmacro %}
+
+{%- macro MACRO_INSTALL_HEAT() %}
+- description: Install heat service on primary node
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C "I@heat:server and *01*" state.sls heat.server
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install heat service on other nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C "I@heat:server" state.sls heat.server
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check heat service
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack orchestration resource type list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 5, delay: 10}
+ skip_fail: false
+{%- endmacro %}
+
+{%- macro MACRO_INSTALL_HORIZON() %}
+- description: Deploy horizon dashboard
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@horizon:server' state.sls horizon
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+- description: Deploy nginx proxy
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@nginx:server' state.sls nginx
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+{%- endmacro %}
+
+{%- macro MACRO_INSTALL_DESIGNATE() %}
+# Note: the designate backend (e.g. powerdns) must be deployed first
+- description: Install designate on primary node
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C "I@designate:server and *01*" state.sls designate.server
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 5, delay: 10}
+ skip_fail: false
+
+- description: Install designate on other nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C "I@designate:server" state.sls designate
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 5, delay: 10}
+ skip_fail: false
+{%- endmacro %}
+
+{%- macro MACRO_INSTALL_CEILOMETER() %}
+# TO DO
+{%- endmacro %}
+
+{%- macro MACRO_INSTALL_BARBICAN() %}
+# TO DO
+{%- endmacro %}
+
+{%- macro MACRO_INSTALL_IRONIC() %}
+# TO DO
+{%- endmacro %}
+
+{%- macro MACRO_INSTALL_MANILA() %}
+# TO DO
+{%- endmacro %}
+
+{%- macro MACRO_INSTALL_OCTAVIA_API() %}
+# TO DO
+{%- endmacro %}
+
+{%- macro MACRO_INSTALL_DOGTAG() %}
+# TO DO
+{%- endmacro %}
+
+{%- macro MACRO_INSTALL_GNOCCHI() %}
+# TO DO
+{%- endmacro %}
+
+{%- macro MACRO_INSTALL_AODH() %}
+# TO DO
+{%- endmacro %}
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/core.yaml b/tcp_tests/templates/virtual-mcp-ocata-dvr/core.yaml
index 62e0fe1..2b36b54 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/core.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-dvr/core.yaml
@@ -1,3 +1,4 @@
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
{% from 'virtual-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
# Install support services
@@ -15,26 +16,7 @@
retry: {count: 1, delay: 10}
skip_fail: true
-- description: Install glusterfs
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Setup glusterfs on primary controller
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check the gluster status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
- description: Install RabbitMQ on ctl01
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/openstack.yaml b/tcp_tests/templates/virtual-mcp-ocata-dvr/openstack.yaml
index 564d7b7..6c5a733 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-dvr/openstack.yaml
@@ -1,3 +1,5 @@
+{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
+
{% from 'virtual-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
{% from 'virtual-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CTL01 with context %}
{% from 'virtual-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CTL02 with context %}
@@ -30,113 +32,19 @@
node_name: {{ HOSTNAME_CFG01 }}
{%- endif %}
-- description: Install glance on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glance:server' state.sls glance -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
-- description: Install keystone service (note that different fernet keys are created on different nodes)
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' state.sls keystone.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 15}
- skip_fail: false
+{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
-- description: Restart apache due to PROD-10477
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl restart apache2"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 15}
- skip_fail: false
+{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
-- description: Check apache status to PROD-10477
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl status apache2"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 15}
- skip_fail: false
+{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER() }}
-- description: Mount glusterfs.client volumes (resuires created 'keystone' and 'glusterfs' system users)
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glance:server' state.sls glusterfs.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON() }}
-- description: Update fernet keys for keystone server on the mounted glusterfs volume
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' state.sls keystone.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
-- description: Populate keystone services/tenants/admins
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:client' state.sls keystone.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check keystone service-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack service list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check glance image-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; glance image-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Install nova on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nova:controller' state.sls nova -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check nova service-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; nova --debug service-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 5}
- skip_fail: false
-
-
-- description: Install cinder
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@cinder:controller' state.sls cinder -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check cinder list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; cinder list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Install neutron service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@neutron:server' state.sls neutron -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install neutron on gtw node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@neutron:gateway' state.sls neutron
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-# isntall designate
+# install designate backend
- description: Install powerdns
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-C 'I@powerdns:server' state.sls powerdns.server
@@ -144,47 +52,9 @@
retry: {count: 1, delay: 5}
skip_fail: false
-- description: Install designate
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@designate:server' state.sls designate -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 10}
- skip_fail: false
+{{ SHARED_OPENSTACK.MACRO_INSTALL_DESIGNATE() }}
-- description: Check neutron agent-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; neutron agent-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install heat service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@heat:server' state.sls heat -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check heat service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack orchestration resource type list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 10}
- skip_fail: false
-
-- description: Deploy horizon dashboard
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@horizon:server' state.sls horizon
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Deploy nginx proxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
+{{ SHARED_OPENSTACK.MACRO_INSTALL_HORIZON() }}
# Install compute node
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/core.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs/core.yaml
index 132f8c5..d6e2dde 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs/core.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ovs/core.yaml
@@ -1,3 +1,4 @@
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
{% from 'virtual-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
# Install support services
@@ -15,26 +16,7 @@
retry: {count: 1, delay: 10}
skip_fail: true
-- description: Install glusterfs
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Setup glusterfs on primary controller
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check the gluster status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
- description: Install RabbitMQ on ctl01
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
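
Similarly, the three glusterfs steps removed above collapse into MACRO_INSTALL_GLUSTERFS from the newly imported shared-core.yaml. Its body is not part of this diff; a sketch of the likely shape, modeled on the removed inline steps:

{% macro MACRO_INSTALL_GLUSTERFS() %}
# Sketch only: reconstructed from the steps deleted in this hunk
- description: Install glusterfs
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@glusterfs:server' state.sls glusterfs.server.service
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 5}
  skip_fail: false
# ...followed by the setup pass (glusterfs.server.setup -b 1) and the
# 'gluster peer status; gluster volume status' check, as removed above.
{% endmacro %}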
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/openstack.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs/openstack.yaml
index 5dd4223..d8e1ed1 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ovs/openstack.yaml
@@ -1,3 +1,5 @@
+{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
+
{% from 'virtual-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
{% from 'virtual-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CTL01 with context %}
{% from 'virtual-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CTL02 with context %}
@@ -6,115 +8,19 @@
{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-# Install OpenStack control services
+{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
-- description: Install glance on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glance:server' state.sls glance -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
-- description: Install keystone service (note that different fernet keys are created on different nodes)
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' state.sls keystone.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 15}
- skip_fail: false
+{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
-- description: Restart apache due to PROD-10477
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl restart apache2"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 15}
- skip_fail: false
+{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER() }}
-- description: Check apache status to PROD-10477
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl status apache2"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 15}
- skip_fail: false
+{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON() }}
-- description: Mount glusterfs.client volumes (requires the 'keystone' and 'glusterfs' system users to be created)
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glance:server' state.sls glusterfs.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
-- description: Update fernet keys for keystone server on the mounted glusterfs volume
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' state.sls keystone.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Populate keystone services/tenants/admins
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:client' state.sls keystone.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check keystone service-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack service list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check glance image-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; glance image-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Install nova on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nova:controller' state.sls nova -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check nova service-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; nova --debug service-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 5}
- skip_fail: false
-
-
-- description: Install cinder
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@cinder:controller' state.sls cinder -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check cinder list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; cinder list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Install neutron service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@neutron:server' state.sls neutron -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install neutron on gtw node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@neutron:gateway' state.sls neutron
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-# install designate
+# install designate backend
- description: Install bind
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-C 'I@bind:server' state.sls bind
@@ -122,47 +28,9 @@
retry: {count: 1, delay: 5}
skip_fail: false
-- description: Install designate
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@designate:server' state.sls designate -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 10}
- skip_fail: false
+{{ SHARED_OPENSTACK.MACRO_INSTALL_DESIGNATE() }}
-- description: Check neutron agent-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; neutron agent-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install heat service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@heat:server' state.sls heat -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check heat service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack orchestration resource type list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 10}
- skip_fail: false
-
-- description: Deploy horizon dashboard
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@horizon:server' state.sls horizon
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Deploy nginx proxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
+{{ SHARED_OPENSTACK.MACRO_INSTALL_HORIZON() }}
# Install compute node
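
The refactored template also drops the inline verification steps (keystone service list, nova service-list, cinder list, neutron agent-list). If the shared macros do not carry equivalent checks — their bodies are not visible in this diff — the same verification can still be run by hand from cfg01; these commands are adapted directly from the removed steps (minus --debug):

salt -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack service list'
salt -C 'I@keystone:server' cmd.run '. /root/keystonerc; nova service-list'
salt -C 'I@keystone:server' cmd.run '. /root/keystonerc; cinder list'
salt -C 'I@keystone:server' cmd.run '. /root/keystonerc; neutron agent-list'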
diff --git a/tcp_tests/templates/virtual-mcp-sl-os/openstack.yaml b/tcp_tests/templates/virtual-mcp-sl-os/openstack.yaml
index 362804f..dc5c185 100644
--- a/tcp_tests/templates/virtual-mcp-sl-os/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-sl-os/openstack.yaml
@@ -8,9 +8,16 @@
# Install OpenStack control services
-- description: Install glance on all controllers
+- description: Install glance on primary controller
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glance:server' state.sls glance -b 1
+ -C 'I@glance:server:role:primary' state.sls glance -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install glance on other controllers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glance:server:role:secondary' state.sls glance -b 1
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
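
Splitting glance into primary and secondary passes serializes the rollout: 'I@glance:server:role:primary' is a pillar-based compound match, so the first step only targets nodes whose pillar carries that role — presumably so one-shot work such as database migration completes on the primary before the secondaries converge. The same split is applied to virtual-mcp-trusty below. A minimal pillar shape the matcher would hit (illustrative only, not taken from this repo):

glance:
  server:
    role: primary    # secondary controllers carry 'role: secondary'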
diff --git a/tcp_tests/templates/virtual-mcp-trusty/openstack.yaml b/tcp_tests/templates/virtual-mcp-trusty/openstack.yaml
index 1145610..ea53986 100644
--- a/tcp_tests/templates/virtual-mcp-trusty/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-trusty/openstack.yaml
@@ -13,9 +13,16 @@
retry: {count: 1, delay: 5}
skip_fail: false
-- description: Install glance on all controllers
+- description: Install glance on primary controller
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glance:server' state.sls glance -b 1
+ -C 'I@glance:server:role:primary' state.sls glance -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install glance on other controllers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glance:server:role:secondary' state.sls glance -b 1
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
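
Both glance hunks retain '-b 1', salt's --batch-size 1: the state is applied to one matching minion at a time instead of all in parallel, which protects clustered services from concurrent state runs. For example:

salt -C 'I@glance:server:role:secondary' state.sls glance -b 1    # converge one secondary at a time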
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/salt.yaml b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/salt.yaml
index dedbab8..921cd7b 100644
--- a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/salt.yaml
+++ b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/salt.yaml
@@ -88,6 +88,12 @@
retry: {count: 1, delay: 10}
skip_fail: false
+- description: Install watchdog
+ cmd: salt -C "I@watchdog:server" state.sls watchdog;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
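
The new watchdog step fires only where the pillar subtree exists, since -C "I@watchdog:server" is a pillar-based compound match; nodes without that pillar are simply not targeted. A minimal pillar shape that would make a node match (hypothetical values, not taken from this repo):

watchdog:
  server:
    enabled: true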
- description: Workaround to bring OVS interfaces UP without rebooting cmp nodes
cmd: |
salt 'cmp*' cmd.run "ifup br-mesh";