Merge "Move rebooting HW node before the booting cfg node"
diff --git a/jobs/pipelines/swarm-testrail-report.groovy b/jobs/pipelines/swarm-testrail-report.groovy
index 6ff52e6..dc9ed1f 100644
--- a/jobs/pipelines/swarm-testrail-report.groovy
+++ b/jobs/pipelines/swarm-testrail-report.groovy
@@ -105,7 +105,7 @@
if (tcpqa_report_name) {
stage("tcp-qa cases report") {
testSuiteName = "[MCP_X] integration cases"
- methodname = "{methodname}"
+ methodname = "{classname}.{methodname}"
testrail_name_template = "{title}"
reporter_extra_options = [
"--testrail-add-missing-cases",
diff --git a/jobs/templates/bm-e7-cicd-pike-ovs-maas.yml b/jobs/templates/bm-e7-cicd-pike-ovs-maas.yml
index 668b189..69285dd 100644
--- a/jobs/templates/bm-e7-cicd-pike-ovs-maas.yml
+++ b/jobs/templates/bm-e7-cicd-pike-ovs-maas.yml
@@ -11,7 +11,7 @@
name: LAB_CONFIG_NAME
trim: 'false'
- string:
- default: core,kvm,cicd
+ default: core,kvm,cicd,openstack,ovs,stacklight
description: Comma-separated list of stacks to deploy the drivetrain (salt
cluster and cicd nodes)
name: DRIVETRAIN_STACK_INSTALL
@@ -22,7 +22,7 @@
name: DRIVETRAIN_STACK_INSTALL_TIMEOUT
trim: 'false'
- string:
- default: openstack,ovs,stacklight
+ default: ""
description: Comma-separated list of stacks to deploy the target platform
(openstack and additional components)
name: PLATFORM_STACK_INSTALL
diff --git a/jobs/templates/test-scenarios.yml b/jobs/templates/test-scenarios.yml
index cffbe58..e6a8f06 100644
--- a/jobs/templates/test-scenarios.yml
+++ b/jobs/templates/test-scenarios.yml
@@ -84,10 +84,9 @@
- backup-zookeeper-queens-sl:
run-test-opts: '-k TestBackupRestoreZooKeeper'
- deployment: heat-cicd-queens-dvr-sl
+ deployment: heat-cicd-queens-contrail41-sl
display-name: Backup/restore Zookeeper
-
jobs:
- '{test_scenario}'
@@ -170,7 +169,7 @@
- ceph-update-luminous-to-nautilus:
deployment: heat-cicd-pike-dvr-sl
disabled: true
- run-test-opts: '-k TestCephUpdate'
+ run-test-opts: '-k TestCephLuminousUpgrade'
display-name: Update Ceph Luminous -> Nautilus
jobs:
diff --git a/tcp_tests/templates/bm-e7-cicd-pike-ovs-maas/openstack-compute.yml.src b/tcp_tests/templates/bm-e7-cicd-pike-ovs-maas/openstack-compute.yml.src
new file mode 100644
index 0000000..aa19b19
--- /dev/null
+++ b/tcp_tests/templates/bm-e7-cicd-pike-ovs-maas/openstack-compute.yml.src
@@ -0,0 +1,9 @@
+parameters:
+ cinder:
+ volume:
+ default_volume_type: lvm-driver
+ backend:
+ lvm-driver:
+ engine: lvm
+ type_name: lvm-driver
+ volume_group: cinder-vg
diff --git a/tcp_tests/templates/bm-e7-cicd-pike-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml b/tcp_tests/templates/bm-e7-cicd-pike-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml
index 3edf1f9..adac9b5 100644
--- a/tcp_tests/templates/bm-e7-cicd-pike-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml
+++ b/tcp_tests/templates/bm-e7-cicd-pike-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml
@@ -44,12 +44,6 @@
infra_kvm03_control_address: 10.167.11.243
infra_kvm03_deploy_address: 172.16.162.69
infra_kvm03_hostname: kvm03
- infra_kvm04_control_address: 10.167.11.244
- infra_kvm04_deploy_address: 172.16.162.70
- infra_kvm04_hostname: kvm04
- infra_kvm05_control_address: 10.167.11.245
- infra_kvm05_deploy_address: 172.16.162.71
- infra_kvm05_hostname: kvm05
infra_kvm_vip_address: 10.167.11.240
infra_primary_first_nic: eth1
infra_primary_second_nic: eth2
@@ -67,183 +61,7 @@
maas_hostname: cfg01
maas_manage_deploy_network: 'True'
maas_machines: |
- kvm01: # #cz7675
- distro_series: "xenial"
- # hwe_kernel: "hwe-16.04"
- # pxe_interface_mac:
- pxe_interface_mac: "0c:c4:7a:33:26:74"
- interfaces:
- one1:
- mac: "0c:c4:7a:33:26:74"
- mode: "static"
- ip: "172.16.162.67"
- subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
- gateway: ${_param:deploy_network_gateway}
- name: one1
- one2:
- mode: UNCONFIGURED
- mac: "0c:c4:7a:33:26:75"
- name: one2
- ten1:
- mode: UNCONFIGURED
- mac: "0c:c4:7a:1d:f3:3e"
- name: ten1
- ten2:
- mode: UNCONFIGURED
- mac: "0c:c4:7a:1d:f3:3f"
- name: ten2
- power_parameters:
- power_address: "176.74.217.64"
- power_pass: ==IPMI_PASS==
- power_type: ipmi
- power_user: ==IPMI_USER==
- kvm02: # #cz7626
- distro_series: "xenial"
- # hwe_kernel: "hwe-16.04"
- pxe_interface_mac: "0c:c4:7a:33:25:c2"
- interfaces:
- one1:
- mac: "0c:c4:7a:33:25:c2"
- mode: "static"
- ip: "172.16.162.68"
- subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
- gateway: ${_param:deploy_network_gateway}
- name: one1
- one2:
- mode: UNCONFIGURED
- mac: "0c:c4:7a:33:25:c3"
- name: one2
- ten1:
- mode: UNCONFIGURED
- mac: "00:25:90:61:c9:e0"
- name: ten1
- ten2:
- mode: UNCONFIGURED
- mac: "00:25:90:61:c9:e1"
- name: ten2
- power_parameters:
- power_address: "185.8.59.228"
- power_pass: ==IPMI_PASS==
- power_type: ipmi
- power_user: ==IPMI_USER==
- kvm03: # #cz7757
- distro_series: "xenial"
- # hwe_kernel: "hwe-16.04"
- pxe_interface_mac: "0c:c4:7a:69:a0:50"
- interfaces:
- one1:
- mac: "0c:c4:7a:69:a0:50"
- mode: "static"
- ip: "172.16.162.69"
- subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
- gateway: ${_param:deploy_network_gateway}
- name: one1
- one2:
- mode: UNCONFIGURED
- mac: "0c:c4:7a:69:a0:51"
- name: one2
- ten1:
- mode: UNCONFIGURED
- mac: "00:25:90:3a:f0:38"
- name: ten1
- ten2:
- mode: UNCONFIGURED
- mac: "00:25:90:3a:f0:39"
- name: ten2
- power_parameters:
- power_address: "5.43.225.89"
- power_pass: ==IPMI_PASS==
- power_type: ipmi
- power_user: ==IPMI_USER==
- kvm04: # #cz7899
- distro_series: "xenial"
- # hwe_kernel: "hwe-16.04"
- # pxe_interface_mac:
- pxe_interface_mac: "0c:c4:7a:6d:3d:fa"
- interfaces:
- one1:
- mac: "0c:c4:7a:6d:3d:fa"
- mode: "static"
- ip: "172.16.162.70"
- subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
- gateway: ${_param:deploy_network_gateway}
- name: one1
- one2:
- mode: UNCONFIGURED
- mac: "0c:c4:7a:6d:3d:fb"
- name: one2
- ten1:
- mode: UNCONFIGURED
- mac: "0c:c4:7a:1e:41:88"
- name: ten1
- ten2:
- mode: UNCONFIGURED
- mac: "0c:c4:7a:1e:41:89"
- name: ten2
- power_parameters:
- power_address: "5.43.227.11"
- power_pass: ==IPMI_PASS==
- power_type: ipmi
- power_user: ==IPMI_USER==
- kvm05: # #cz7909
- distro_series: "xenial"
- # hwe_kernel: "hwe-16.04"
- pxe_interface_mac: "0c:c4:7a:34:53:2a"
- interfaces:
- one1:
- mac: "0c:c4:7a:34:53:2a"
- mode: "static"
- ip: "172.16.162.71"
- subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
- gateway: ${_param:deploy_network_gateway}
- name: one1
- one2:
- mode: UNCONFIGURED
- mac: "0c:c4:7a:34:53:2b"
- name: one2
- ten1:
- mode: UNCONFIGURED
- mac: "0c:c4:7a:1d:92:c8"
- name: ten1
- ten2:
- mode: UNCONFIGURED
- mac: "0c:c4:7a:1d:92:c9"
- name: ten2
- power_parameters:
- power_address: "5.43.227.19"
- power_pass: ==IPMI_PASS==
- power_type: ipmi
- power_user: ==IPMI_USER==
- gtw01: # #cz7739
- distro_series: "xenial"
- # hwe_kernel: "hwe-16.04"
- pxe_interface_mac: "0c:c4:7a:34:52:0c"
- interfaces:
- one1:
- mac: "0c:c4:7a:34:52:0c"
- mode: "static"
- ip: "172.16.162.72"
- subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
- gateway: ${_param:deploy_network_gateway}
- name: one1
- one2:
- mode: UNCONFIGURED
- mac: "0c:c4:7a:34:52:0d"
- name: one2
- ten1:
- mode: UNCONFIGURED
- mac: "0c:c4:7a:58:e9:66"
- name: ten1
- ten2:
- mode: UNCONFIGURED
- mac: "0c:c4:7a:58:e9:67"
- name: ten2
- power_parameters:
- power_address: "5.43.225.74"
- power_pass: ==IPMI_PASS==
- power_type: ipmi
- power_user: ==IPMI_USER==
- cmp001: # #cz7694
+ kvm01: # #cz7694
distro_series: "xenial"
# hwe_kernel: "hwe-16.04"
pxe_interface_mac: "0c:c4:7a:34:66:fe"
@@ -251,7 +69,7 @@
one1:
mac: "0c:c4:7a:34:66:fe"
mode: "static"
- ip: "172.16.162.73"
+ ip: "172.16.162.67"
subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
gateway: ${_param:deploy_network_gateway}
name: one1
@@ -272,7 +90,7 @@
power_pass: ==IPMI_PASS==
power_type: ipmi
power_user: ==IPMI_USER==
- cmp002: # #cz7950
+ kvm02: # #cz7950
distro_series: "xenial"
# hwe_kernel: "hwe-16.04"
pxe_interface_mac: "0c:c4:7a:6c:83:60"
@@ -280,7 +98,7 @@
one1:
mac: "0c:c4:7a:6c:83:60"
mode: "static"
- ip: "172.16.162.74"
+ ip: "172.16.162.68"
subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
gateway: ${_param:deploy_network_gateway}
name: one1
@@ -301,6 +119,231 @@
power_pass: ==IPMI_PASS==
power_type: ipmi
power_user: ==IPMI_USER==
+ kvm03: # #cz7739
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ pxe_interface_mac: "0c:c4:7a:34:52:0c"
+ interfaces:
+ one1:
+ mac: "0c:c4:7a:34:52:0c"
+ mode: "static"
+ ip: "172.16.162.69"
+ subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+ gateway: ${_param:deploy_network_gateway}
+ name: one1
+ one2:
+ mode: UNCONFIGURED
+ mac: "0c:c4:7a:34:52:0d"
+ name: one2
+ ten1:
+ mode: UNCONFIGURED
+ mac: "0c:c4:7a:58:e9:66"
+ name: ten1
+ ten2:
+ mode: UNCONFIGURED
+ mac: "0c:c4:7a:58:e9:67"
+ name: ten2
+ power_parameters:
+ power_address: "5.43.225.74"
+ power_pass: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+ gtw01: # #cz7675
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ # pxe_interface_mac:
+ pxe_interface_mac: "0c:c4:7a:33:26:74"
+ interfaces:
+ one1:
+ mac: "0c:c4:7a:33:26:74"
+ mode: "static"
+ ip: "172.16.162.70"
+ subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+ gateway: ${_param:deploy_network_gateway}
+ name: one1
+ one2:
+ mode: UNCONFIGURED
+ mac: "0c:c4:7a:33:26:75"
+ name: one2
+ ten1:
+ mode: UNCONFIGURED
+ mac: "0c:c4:7a:1d:f3:3e"
+ name: ten1
+ ten2:
+ mode: UNCONFIGURED
+ mac: "0c:c4:7a:1d:f3:3f"
+ name: ten2
+ power_parameters:
+ power_address: "176.74.217.64"
+ power_pass: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+ gtw02: # #cz7626
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ pxe_interface_mac: "0c:c4:7a:33:25:c2"
+ interfaces:
+ one1:
+ mac: "0c:c4:7a:33:25:c2"
+ mode: "static"
+ ip: "172.16.162.71"
+ subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+ gateway: ${_param:deploy_network_gateway}
+ name: one1
+ one2:
+ mode: UNCONFIGURED
+ mac: "0c:c4:7a:33:25:c3"
+ name: one2
+ ten1:
+ mode: UNCONFIGURED
+ mac: "00:25:90:61:c9:e0"
+ name: ten1
+ ten2:
+ mode: UNCONFIGURED
+ mac: "00:25:90:61:c9:e1"
+ name: ten2
+ power_parameters:
+ power_address: "185.8.59.228"
+ power_pass: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+ gtw03: # #cz7757
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ pxe_interface_mac: "0c:c4:7a:69:a0:50"
+ interfaces:
+ one1:
+ mac: "0c:c4:7a:69:a0:50"
+ mode: "static"
+ ip: "172.16.162.72"
+ subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+ gateway: ${_param:deploy_network_gateway}
+ name: one1
+ one2:
+ mode: UNCONFIGURED
+ mac: "0c:c4:7a:69:a0:51"
+ name: one2
+ ten1:
+ mode: UNCONFIGURED
+ mac: "00:25:90:3a:f0:38"
+ name: ten1
+ ten2:
+ mode: UNCONFIGURED
+ mac: "00:25:90:3a:f0:39"
+ name: ten2
+ power_parameters:
+ power_address: "5.43.225.89"
+ power_pass: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+ cmp001: # #cz7899
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ # pxe_interface_mac:
+ pxe_interface_mac: "0c:c4:7a:6d:3d:fa"
+ interfaces:
+ one1:
+ mac: "0c:c4:7a:6d:3d:fa"
+ mode: "static"
+ ip: "172.16.162.73"
+ subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+ gateway: ${_param:deploy_network_gateway}
+ name: one1
+ one2:
+ mode: UNCONFIGURED
+ mac: "0c:c4:7a:6d:3d:fb"
+ name: one2
+ ten1:
+ mode: UNCONFIGURED
+ mac: "0c:c4:7a:1e:41:88"
+ name: ten1
+ ten2:
+ mode: UNCONFIGURED
+ mac: "0c:c4:7a:1e:41:89"
+ name: ten2
+ disk_layout:
+ type: custom
+ bootable_device: sda
+ disk:
+ sda:
+ type: physical
+ vgroot:
+ type: lvm
+ devices:
+ - sda
+ volume:
+ lvroot:
+ size: 800G
+ type: ext4
+ mount: '/'
+ sdb:
+ type: physical
+ cinder-vg:
+ type: lvm
+ devices:
+ - sdb
+ volume:
+ cinder-volumes-pool:
+ size: 800G
+ power_parameters:
+ power_address: "5.43.227.11"
+ power_pass: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+ cmp002: # #cz7909
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ pxe_interface_mac: "0c:c4:7a:34:53:2a"
+ interfaces:
+ one1:
+ mac: "0c:c4:7a:34:53:2a"
+ mode: "static"
+ ip: "172.16.162.74"
+ subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
+ gateway: ${_param:deploy_network_gateway}
+ name: one1
+ one2:
+ mode: UNCONFIGURED
+ mac: "0c:c4:7a:34:53:2b"
+ name: one2
+ ten1:
+ mode: UNCONFIGURED
+ mac: "0c:c4:7a:1d:92:c8"
+ name: ten1
+ ten2:
+ mode: UNCONFIGURED
+ mac: "0c:c4:7a:1d:92:c9"
+ name: ten2
+ disk_layout:
+ type: custom
+ bootable_device: sda
+ disk:
+ sda:
+ type: physical
+ vgroot:
+ type: lvm
+ devices:
+ - sda
+ volume:
+ lvroot:
+ type: ext4
+ mount: '/'
+ size: 800G
+ sdb:
+ type: physical
+ cinder-vg:
+ type: lvm
+ devices:
+ - sdb
+ volume:
+ cinder-volumes-pool:
+ size: 800G
+ power_parameters:
+ power_address: "5.43.227.19"
+ power_pass: ==IPMI_PASS==
+ power_type: ipmi
+ power_user: ==IPMI_USER==
+
mcp_version: proposed
mcp_docker_registry: docker-prod-local.docker.mirantis.net
mcp_common_scripts_branch: ''
@@ -311,13 +354,13 @@
openldap_organisation: ${_param:cluster_name}
openstack_benchmark_node01_address: 10.167.11.95
openstack_benchmark_node01_hostname: bmk01
- openstack_cluster_size: golden
+ openstack_cluster_size: compact
openstack_compute_count: '2'
openstack_compute_rack01_hostname: cmp
openstack_compute_single_address_ranges: 10.167.11.15-10.167.11.16
openstack_compute_deploy_address_ranges: 172.16.162.73-172.16.162.74
- openstack_compute_tenant_address_ranges: 10.167.13.15-10.167.13.17
- openstack_compute_backend_address_ranges: 10.167.13.15-10.167.13.17
+ openstack_compute_tenant_address_ranges: 10.167.13.15-10.167.13.16
+ openstack_compute_backend_address_ranges: 10.167.13.15-10.167.13.16
openstack_control_address: 10.167.11.10
openstack_control_hostname: ctl
openstack_control_node01_address: 10.167.11.11
@@ -335,10 +378,18 @@
openstack_database_node03_address: 10.167.11.53
openstack_database_node03_hostname: dbs03
openstack_enabled: 'True'
- openstack_gateway_node01_deploy_address: 172.16.162.72
- openstack_gateway_node01_address: 10.167.11.224
openstack_gateway_node01_hostname: gtw01
- openstack_gateway_node01_tenant_address: 10.167.13.9
+ openstack_gateway_node01_deploy_address: 172.16.162.70
+ openstack_gateway_node01_address: 10.167.11.224
+ openstack_gateway_node01_tenant_address: 10.167.12.9
+ openstack_gateway_node02_hostname: gtw02
+ openstack_gateway_node02_deploy_address: 172.16.162.71
+ openstack_gateway_node02_address: 10.167.11.225
+ openstack_gateway_node02_tenant_address: 10.167.12.10
+ openstack_gateway_node03_hostname: gtw03
+ openstack_gateway_node03_deploy_address: 172.16.162.72
+ openstack_gateway_node03_address: 10.167.11.226
+ openstack_gateway_node03_tenant_address: 10.167.12.11
openstack_message_queue_address: 10.167.11.40
openstack_message_queue_hostname: msg
openstack_message_queue_node01_address: 10.167.11.41
diff --git a/tcp_tests/templates/bm-e7-cicd-pike-ovs-maas/salt-context-environment.yaml b/tcp_tests/templates/bm-e7-cicd-pike-ovs-maas/salt-context-environment.yaml
index 1e2069f..b3849e4 100644
--- a/tcp_tests/templates/bm-e7-cicd-pike-ovs-maas/salt-context-environment.yaml
+++ b/tcp_tests/templates/bm-e7-cicd-pike-ovs-maas/salt-context-environment.yaml
@@ -49,34 +49,6 @@
ten2:
role: bond_ctl_lacp
- kvm04.bm-e7-cicd-pike-ovs-maas.local:
- reclass_storage_name: infra_kvm_node04
- roles:
- - infra_kvm_wo_gluster
- - linux_system_codename_xenial
- - salt_master_host
- interfaces:
- one1:
- role: single_mgm_dhcp
- ten1:
- role: bond_ctl_lacp
- ten2:
- role: bond_ctl_lacp
-
- kvm05.bm-e7-cicd-pike-ovs-maas.local:
- reclass_storage_name: infra_kvm_node05
- roles:
- - infra_kvm_wo_gluster
- - linux_system_codename_xenial
- - salt_master_host
- interfaces:
- one1:
- role: single_mgm_dhcp
- ten1:
- role: bond_ctl_lacp
- ten2:
- role: bond_ctl_lacp
-
cmp<<count>>:
reclass_storage_name: openstack_compute_rack01
roles:
@@ -90,7 +62,6 @@
ten2:
role: bond_ctl_lacp
-
gtw01.bm-e7-cicd-pike-ovs-maas.local:
reclass_storage_name: openstack_gateway_node01
roles:
@@ -104,3 +75,28 @@
ten2:
role: bond_ctl_lacp
+ gtw02.bm-e7-cicd-pike-ovs-maas.local:
+ reclass_storage_name: openstack_gateway_node02
+ roles:
+ - openstack_gateway
+ - linux_system_codename_xenial
+ interfaces:
+ one1:
+ role: single_mgm_dhcp
+ ten1:
+ role: bond_ctl_lacp
+ ten2:
+ role: bond_ctl_lacp
+
+ gtw03.bm-e7-cicd-pike-ovs-maas.local:
+ reclass_storage_name: openstack_gateway_node03
+ roles:
+ - openstack_gateway
+ - linux_system_codename_xenial
+ interfaces:
+ one1:
+ role: single_mgm_dhcp
+ ten1:
+ role: bond_ctl_lacp
+ ten2:
+ role: bond_ctl_lacp
diff --git a/tcp_tests/templates/bm-e7-cicd-pike-ovs-maas/salt.yaml b/tcp_tests/templates/bm-e7-cicd-pike-ovs-maas/salt.yaml
index a9617f1..4e7069c 100644
--- a/tcp_tests/templates/bm-e7-cicd-pike-ovs-maas/salt.yaml
+++ b/tcp_tests/templates/bm-e7-cicd-pike-ovs-maas/salt.yaml
@@ -84,16 +84,13 @@
{{ SHARED_WORKAROUNDS.MACRO_CEPH_SET_PGNUM() }}
{{ SHARED_WORKAROUNDS.CLEAR_CEPH_OSD_DRIVES() }}
-- description: Disable known_hosts_autopopulation
+- description: Add route cfg01 to kvm01, remove rp_filter (CIS-3-2-7)
cmd: |
- set -ex;
- . /root/venv-reclass-tools/bin/activate;
- reclass-tools add-bool-key parameters.openssh.client.known_hosts_autopopulation false /srv/salt/reclass/nodes/_generated/cfg01.{{ LAB_CONFIG_NAME }}.local.yml
- cd /srv/salt/reclass
- git add /srv/salt/reclass/nodes/_generated
- git commit -m "[from tcp-qa] known_hosts_autopopulation disabled"
- salt -C 'I@salt:master' saltutil.refresh_pillar
- salt -C 'I@salt:master' saltutil.sync_all
+ set -x;
+ set -e;
+ salt 'kvm01*' cmd.run 'sysctl -w net.ipv4.ip_forward=1';
+ ip route add 10.167.11.0/24 via 172.16.162.67;
+ sed -i 's/er: 1/er: 0/g' /srv/salt/reclass/classes/service/linux/system/cis/cis-3-2-7.yml;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
- skip_fail: false
\ No newline at end of file
+ skip_fail: false
diff --git a/tcp_tests/templates/cookied-model-generator/salt_bm-e7-cicd-pike-ovs-maas.yaml b/tcp_tests/templates/cookied-model-generator/salt_bm-e7-cicd-pike-ovs-maas.yaml
index a8f4a8d..4cefd30 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_bm-e7-cicd-pike-ovs-maas.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_bm-e7-cicd-pike-ovs-maas.yaml
@@ -4,7 +4,7 @@
# Other salt model repository parameters see in shared-salt.yaml
{% set LAB_CONFIG_NAME = 'bm-e7-cicd-pike-ovs-maas' %}
# Name of the context file (without extension, that is fixed .yaml) used to render the Environment model
-{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','bm-cicd-pike-ovs-maas') %}
+{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','bm-e7-cicd-pike-ovs-maas') %}
# Path to the context files used to render Cluster and Environment models
{%- set CLUSTER_CONTEXT_NAME = 'salt-context-cookiecutter-openstack_ovs.yaml' %}
{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-vcp-environment.yaml', 'salt-context-environment.yaml','salt-context-cookiecutter-openstack_ovs.yaml'] %}
@@ -27,7 +27,7 @@
{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL(CONTROL_VLAN=CONTROL_VLAN, TENANT_VLAN=TENANT_VLAN) }}
-{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
+{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL(KNOWN_HOST_AUTOPOPULATION=false) }}
- description: Temporary WR for correct bridge name according to envoronment templates
@@ -59,4 +59,21 @@
reclass-tools add-key parameters._param.power_password {{ IPMI_PASS }} /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas_machines.yml;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
- skip_fail: false
\ No newline at end of file
+ skip_fail: false
+
+- description: "Setting workarounds for non-ceph deployments"
+ cmd: |
+ set -e;
+ set -x;
+ . /root/venv-reclass-tools/bin/activate;
+ reclass-tools add-key --merge classes system.cinder.volume.single /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/compute/init.yml
+ reclass-tools add-key --merge classes system.cinder.volume.notification.messagingv2 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/compute/init.yml
+ reclass-tools add-key parameters.cinder.volume.default_volume_type lvm-driver /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/compute/init.yml
+ reclass-tools add-key parameters.cinder.volume.backend.lvm-driver.engine lvm /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/compute/init.yml
+ reclass-tools add-key parameters.cinder.volume.backend.lvm-driver.type_name lvm-driver /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/compute/init.yml
+ reclass-tools add-key parameters.cinder.volume.backend.lvm-driver.volume_group cinder-vg /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/compute/init.yml
+ sed -i '/system.cinder.volume.single/d' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml
+ sed -i '/system.cinder.volume.notification.messagingv2/d' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
diff --git a/tcp_tests/templates/heat-cicd-queens-contrail41-sl/salt-context-environment.yaml b/tcp_tests/templates/heat-cicd-queens-contrail41-sl/salt-context-environment.yaml
index 59f67ba..a644c0d 100644
--- a/tcp_tests/templates/heat-cicd-queens-contrail41-sl/salt-context-environment.yaml
+++ b/tcp_tests/templates/heat-cicd-queens-contrail41-sl/salt-context-environment.yaml
@@ -247,6 +247,8 @@
role: single_dhcp
ens4:
role: single_ctl
+ ens6:
+ role: single_dhcp
ntw02:
reclass_storage_name: opencontrail_control_node02
@@ -258,6 +260,8 @@
role: single_dhcp
ens4:
role: single_ctl
+ ens6:
+ role: single_dhcp
ntw03:
reclass_storage_name: opencontrail_control_node03
@@ -269,6 +273,8 @@
role: single_dhcp
ens4:
role: single_ctl
+ ens6:
+ role: single_dhcp
mtr01:
reclass_storage_name: stacklight_telemetry_node01
diff --git a/tcp_tests/templates/shared-salt.yaml b/tcp_tests/templates/shared-salt.yaml
index 412c7ca..5621b02 100644
--- a/tcp_tests/templates/shared-salt.yaml
+++ b/tcp_tests/templates/shared-salt.yaml
@@ -583,7 +583,7 @@
{%- endmacro %}
-{%- macro MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() %}
+{%- macro MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL(KNOWN_HOST_AUTOPOPULATION=true) %}
{########################################################}
- description: "[EXPERIMENTAL] Clone 'environment-template' repository to cfg01.{{ DOMAIN_NAME }}"
@@ -681,6 +681,18 @@
retry: {count: 1, delay: 5}
skip_fail: false
+- description: "[EXPERIMENTAL] Disable known_host_autopopulation if its not enabled"
+ cmd: |
+ {%- if not KNOWN_HOST_AUTOPOPULATION %}
+ set -e;
+ set -x;
+ . /root/venv-reclass-tools/bin/activate;
+ reclass-tools add-bool-key parameters.reclass.storage.node.infra_config_node01.parameters.openssh.client.known_hosts_autopopulation false /srv/salt/reclass/classes/environment/{{ ENVIRONMENT_MODEL_INVENTORY_NAME }}/init.yml
+ {%- endif %}
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
- description: Modify generated model and reclass-system
cmd: |
export REPLACE_DIRS="/srv/salt/reclass/classes/ /srv/salt/reclass/nodes/"
diff --git a/tcp_tests/tests/system/test_backup_restore.py b/tcp_tests/tests/system/test_backup_restore.py
index c47f8aa..7d29e4f 100644
--- a/tcp_tests/tests/system/test_backup_restore.py
+++ b/tcp_tests/tests/system/test_backup_restore.py
@@ -165,15 +165,17 @@
:param reclass: ReclassManager, tcp-qa Reclass-tools manager
:param path: str, path to YAML file to update
"""
- reclass.add_bool_key("parameters.backupninja.enabled", "True", path)
- reclass.add_key(
- "parameters.backupninja.client.backup_times.hour",
- "\"'*'\"",
- path)
- reclass.add_key(
- "parameters.backupninja.client.backup_times.minute",
- "\"'*/10'\"",
- path)
+ backup_params = """
+ parameters:
+ backupninja:
+ enabled: True
+ client:
+         backup_times:
+           hour: '*'
+           minute: '*/10'
+ """
+
+ reclass.merge_context(backup_params, path)
def _precreate_test_files(self, salt, ssh, base_dirs, test_files):
"""Prepare test files for scenarios
diff --git a/tcp_tests/tests/system/test_ceph_luminous_upgrade.py b/tcp_tests/tests/system/test_ceph_luminous_upgrade.py
index 8e06888..bf7346c 100644
--- a/tcp_tests/tests/system/test_ceph_luminous_upgrade.py
+++ b/tcp_tests/tests/system/test_ceph_luminous_upgrade.py
@@ -19,9 +19,9 @@
Scenario:
1. Chenge parameters in reclass
2. Run Pipeline Ceph - upgrade
- https://docs.mirantis.com/mcp/master/mcp-operations-guide/
- update-upgrade/major-upgrade/ceph-upgrade/upgrade-ceph.html
- """
+        https://docs.mirantis.com/mcp/master/mcp-operations-guide/update-upgrade/major-upgrade/ceph-upgrade/upgrade-ceph.html
+ """ # noqa: E501
+
salt = salt_actions
reclass = reclass_actions
dt = drivetrain_actions
@@ -60,7 +60,8 @@
"WAIT_FOR_HEALTHY": True,
"ASK_CONFIRMATION": False
}
- upgrade_ceph = dt.start_job_on_jenkins(
+ job_result, job_description = dt.start_job_on_jenkins(
job_name='ceph-upgrade',
job_parameters=job_parameters)
- assert upgrade_ceph == 'SUCCESS'
+
+ assert job_result == 'SUCCESS', job_description
diff --git a/tcp_tests/tests/system/test_failover_ceph.py b/tcp_tests/tests/system/test_failover_ceph.py
index 44658ea..a89d711 100644
--- a/tcp_tests/tests/system/test_failover_ceph.py
+++ b/tcp_tests/tests/system/test_failover_ceph.py
@@ -126,31 +126,31 @@
# Run Tempest smoke test suite
show_step(5)
- status = dt.start_job_on_jenkins(
+ job_result, job_description = dt.start_job_on_jenkins(
job_name=self.TEMPEST_JOB_NAME,
job_parameters=self.TEMPEST_JOB_PARAMETERS,
start_timeout=self.JENKINS_START_TIMEOUT,
build_timeout=self.JENKINS_BUILD_TIMEOUT
)
- assert status == 'SUCCESS', (
+ assert job_result == 'SUCCESS', (
"'{0}' job run status is {1} after executing Tempest smoke "
"tests".format(
- self.TEMPEST_JOB_NAME, status)
+ self.TEMPEST_JOB_NAME, job_description)
)
# Run Sanity test
show_step(6)
- status = dt.start_job_on_jenkins(
+ job_result, job_description = dt.start_job_on_jenkins(
job_name=self.SANITY_JOB_NAME,
job_parameters=self.SANITY_JOB_PARAMETERS,
start_timeout=self.JENKINS_START_TIMEOUT,
build_timeout=self.JENKINS_BUILD_TIMEOUT
)
- assert status == 'SUCCESS', (
+ assert job_result == 'SUCCESS', (
"'{0}' job run status is {1} after executing selected sanity "
"tests".format(
- self.SANITY_JOB_NAME, status)
+ self.SANITY_JOB_NAME, job_description)
)
@pytest.mark.grab_versions
@@ -222,31 +222,31 @@
# Run Tempest smoke test suite
show_step(5)
- status = dt.start_job_on_jenkins(
+ job_result, job_description = dt.start_job_on_jenkins(
job_name=self.TEMPEST_JOB_NAME,
job_parameters=self.TEMPEST_JOB_PARAMETERS,
start_timeout=self.JENKINS_START_TIMEOUT,
build_timeout=self.JENKINS_BUILD_TIMEOUT
)
- assert status == 'SUCCESS', (
+ assert job_result == 'SUCCESS', (
"'{0}' job run status is {1} after executing Tempest smoke "
"tests".format(
- self.TEMPEST_JOB_NAME, status)
+ self.TEMPEST_JOB_NAME, job_description)
)
# Run Sanity test
show_step(6)
- status = dt.start_job_on_jenkins(
+ job_result, job_description = dt.start_job_on_jenkins(
job_name=self.SANITY_JOB_NAME,
job_parameters=self.SANITY_JOB_PARAMETERS,
start_timeout=self.JENKINS_START_TIMEOUT,
build_timeout=self.JENKINS_BUILD_TIMEOUT
)
- assert status == 'SUCCESS', (
+ assert job_result == 'SUCCESS', (
"'{0}' job run status is {1} after executing selected sanity "
"tests".format(
- self.SANITY_JOB_NAME, status)
+ self.SANITY_JOB_NAME, job_description)
)
@pytest.mark.grab_versions
@@ -317,31 +317,31 @@
# Run Tempest smoke test suite
show_step(5)
- status = dt.start_job_on_jenkins(
+ job_result, job_description = dt.start_job_on_jenkins(
job_name=self.TEMPEST_JOB_NAME,
job_parameters=self.TEMPEST_JOB_PARAMETERS,
start_timeout=self.JENKINS_START_TIMEOUT,
build_timeout=self.JENKINS_BUILD_TIMEOUT
)
- assert status == 'SUCCESS', (
+ assert job_result == 'SUCCESS', (
"'{0}' job run status is {1} after executing Tempest smoke "
"tests".format(
- self.TEMPEST_JOB_NAME, status)
+ self.TEMPEST_JOB_NAME, job_description)
)
# Run Sanity test
show_step(6)
- status = dt.start_job_on_jenkins(
+ job_result, job_description = dt.start_job_on_jenkins(
job_name=self.SANITY_JOB_NAME,
job_parameters=self.SANITY_JOB_PARAMETERS,
start_timeout=self.JENKINS_START_TIMEOUT,
build_timeout=self.JENKINS_BUILD_TIMEOUT
)
- assert status == 'SUCCESS', (
+ assert job_result == 'SUCCESS', (
"'{0}' job run status is {1} after executing selected sanity "
"tests".format(
- self.SANITY_JOB_NAME, status)
+ self.SANITY_JOB_NAME, job_description)
)
# #######################################################################