Merge "Rename virtual-mcp-mitaka-dvr template"
diff --git a/tcp_tests/managers/execute_commands.py b/tcp_tests/managers/execute_commands.py
index a8cea3d..991fb58 100644
--- a/tcp_tests/managers/execute_commands.py
+++ b/tcp_tests/managers/execute_commands.py
@@ -101,7 +101,7 @@
LOG.info("\n\n{0}\n{1}".format(
msg + retry_msg, '=' * len(msg + retry_msg)))
- result = remote.execute('set -ex; ' + cmd, verbose=True)
+ result = remote.execute(cmd, verbose=True)
# Workaround of exit code 0 from salt in case of failures
failed = 0
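Note: with the global 'set -ex; ' prefix dropped from remote.execute(), fail-fast and tracing become opt-in per command; templates that still want it prepend it themselves (as the PROD-17975 workaround below does). A minimal sketch of that opt-in, using a hypothetical helper name rather than the framework's API:

    def fail_fast(cmd):
        # Opt back in to bash fail-fast (-e) and tracing (-x) for one command,
        # now that execute_commands no longer prepends it globally.
        return 'set -ex; ' + cmd

    # e.g. result = remote.execute(fail_fast(cmd), verbose=True)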
diff --git a/tcp_tests/managers/underlay_ssh_manager.py b/tcp_tests/managers/underlay_ssh_manager.py
index 55ccbbe..fce80cc 100644
--- a/tcp_tests/managers/underlay_ssh_manager.py
+++ b/tcp_tests/managers/underlay_ssh_manager.py
@@ -494,7 +494,8 @@
.format(node['node_name']))
master.check_call("rsync -aruv {0}:/root/*.tar.gz "
"/root/dump/".format(node['node_name']),
- raise_on_err=False)
+ raise_on_err=False,
+ timeout=120)
destination_name = '/root/{0}_dump.tar.gz'.format(artifact_name)
# Archive the artifacts from all nodes
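The added timeout bounds how long the artifact rsync may block on an unreachable node. A rough equivalent with plain subprocess (Python 3), assuming nothing about the framework's check_call internals:

    import subprocess

    def rsync_dump(node_name, timeout=120):
        # Pull /root/*.tar.gz from the node into /root/dump/, but give up
        # after `timeout` seconds instead of hanging artifact collection.
        cmd = "rsync -aruv {0}:/root/*.tar.gz /root/dump/".format(node_name)
        try:
            subprocess.check_call(cmd, shell=True, timeout=timeout)
        except (subprocess.CalledProcessError, subprocess.TimeoutExpired):
            pass  # mirrors raise_on_err=False: continue instead of failing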
diff --git a/tcp_tests/templates/cookied-mcp-pike-dpdk/_context-cookiecutter-pike-ovs-dpdk.yaml b/tcp_tests/templates/cookied-mcp-pike-dpdk/_context-cookiecutter-pike-ovs-dpdk.yaml
index 04390ff..e17ac5b 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dpdk/_context-cookiecutter-pike-ovs-dpdk.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dpdk/_context-cookiecutter-pike-ovs-dpdk.yaml
@@ -90,7 +90,7 @@
openstack_neutron_vlan_aware_vms: 'False'
openstack_nfv_dpdk_enabled: 'True'
openstack_nfv_sriov_enabled: 'False'
- openstack_nova_compute_hugepages_count: '600'
+ openstack_nova_compute_hugepages_count: '2048'
openstack_nova_compute_nfv_req_enabled: 'False'
openstack_nova_cpu_pinning: '3'
openstack_ovs_dvr_enabled: 'False'
diff --git a/tcp_tests/templates/cookied-mcp-pike-dpdk/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-pike-dpdk/_context-environment.yaml
index 913636b..5a68626 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dpdk/_context-environment.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dpdk/_context-environment.yaml
@@ -134,12 +134,12 @@
role: single_dhcp
ens4:
role: single_ctl
- ens5:
+ dpdkport0:
role: bond2_dpdk_prv
- dpdk_pci: "00:05.0"
- ens6:
+ dpdk_pci: "0000:00:05.0"
+ dpdkport1:
role: bond2_dpdk_prv
- dpdk_pci: "00:06.0"
+ dpdk_pci: "0000:00:06.0"
ens7:
role: bond1_ab_ovs_floating
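The dpdk_pci values now use the fully qualified <domain>:<bus>:<device>.<function> form; the default PCI domain is 0000. An illustrative normaliser (not part of the templates):

    def normalize_pci(addr, domain="0000"):
        # "00:05.0" -> "0000:00:05.0"; already-qualified addresses pass through.
        return addr if addr.count(":") == 2 else "{0}:{1}".format(domain, addr)

    assert normalize_pci("00:05.0") == "0000:00:05.0"
    assert normalize_pci("0000:00:06.0") == "0000:00:06.0"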
diff --git a/tcp_tests/templates/cookied-mcp-pike-dpdk/salt.yaml b/tcp_tests/templates/cookied-mcp-pike-dpdk/salt.yaml
index d424970..ce48613 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dpdk/salt.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dpdk/salt.yaml
@@ -30,8 +30,35 @@
retry: {count: 1, delay: 10}
skip_fail: false
+- description: "Workaround for PROD-18834: Pre-install linux-headers package"
+ cmd: salt 'cmp*' cmd.run "apt-get install -y linux-headers-$(uname -r)";
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: "Workaround for PROD-17975: Pre-install ovs packages to update alternatives to DPDK"
+ cmd: |
+ set -ex;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@linux:system' state.sls linux.system.repo
+ salt 'cmp*' cmd.run "apt-get install -y openvswitch-switch openvswitch-switch-dpdk";
+ salt 'cmp*' cmd.run "service openvswitch-switch stop";
+ salt 'cmp*' cmd.run "rm -f /var/lib/openvswitch/*";
+ salt 'cmp*' cmd.run "update-alternatives --remove ovs-vswitchd /usr/lib/openvswitch-switch/ovs-vswitchd";
+ salt 'cmp*' cmd.run "service openvswitch-switch start";
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
+- description: "Workaround to avoid reboot cmp nodes: bring OVS interfaces UP"
+ cmd: |
+ salt 'cmp*' cmd.run "ifup br-prv";
+ salt 'cmp*' cmd.run "ip l set up br-floating";
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
- description: Hack gtw node
cmd: salt '{{ HOSTNAME_GTW01 }}' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.110/24 dev ens4; ip addr flush dev ens4";
node_name: {{ HOSTNAME_CFG01 }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dpdk.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dpdk.yaml
index c195854..bdea5bc 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dpdk.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dpdk.yaml
@@ -39,6 +39,11 @@
reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ # Set a wider CPU mask and 2M hugepages for DPDK
+ salt-call reclass.cluster_meta_set name='compute_ovs_dpdk_lcore_mask' value='"0xF"' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
+ salt-call reclass.cluster_meta_set name='compute_hugepages_size' value='"2M"' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
+ salt-call reclass.cluster_meta_set name='compute_hugepages_mount' value='"/mnt/hugepages_2M"' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
+
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
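Together with the hugepages_count bump to 2048 above, these settings reserve 2048 pages of 2 MiB each, i.e. 4 GiB of hugepage memory per compute, mounted at /mnt/hugepages_2M. A quick check of the arithmetic:

    hugepage_count = 2048        # openstack_nova_compute_hugepages_count
    hugepage_size_mib = 2        # compute_hugepages_size = "2M"
    total_gib = hugepage_count * hugepage_size_mib / 1024.0
    assert total_gib == 4.0      # 4 GiB of hugepages per compute node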
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs/openstack.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs/openstack.yaml
index fa6aa9c..6bf534a 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs/openstack.yaml
@@ -426,3 +426,72 @@
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 30}
skip_fail: false
+
+- description: Install manila-api on first node
+ cmd: |
+ salt -C 'I@manila:api and *01*' state.sls manila.api;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install manila-api on other nodes
+ cmd: |
+ salt -C 'I@manila:api and not *01*' state.sls manila.api;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install manila-scheduler
+ cmd: |
+ salt -C 'I@manila:scheduler' state.sls manila.scheduler;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install manila-share
+ cmd: |
+ salt -C 'I@manila:share' state.sls manila.share;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Set the default share type in manila.conf (fixed in https://gerrit.mcp.mirantis.net/#/c/17893)
+ cmd: |
+ salt -C 'ctl* or share*' cmd.run "sed -i 's/\[DEFAULT\]/\[DEFAULT\]\ndefault_share_type = default/g' /etc/manila/manila.conf";
+ salt -C 'I@apache:server' cmd.run "service apache2 restart";
+ salt -C 'I@manila:scheduler' cmd.run "service manila-scheduler restart";
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Temporary workaround for the CIFS share type (fixed in https://gerrit.mcp.mirantis.net/#/c/17727/)
+ cmd: |
+ salt -C 'I@manila:share' cmd.run 'apt-get install samba -y';
+ salt -C 'I@manila:share' cmd.run 'service manila-share restart';
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+- description: Check manila-services
+ cmd: |
+ salt 'ctl01*' cmd.run '. /root/keystonercv3; manila service-list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Create manila type
+ cmd: |
+ salt 'ctl01*' cmd.run '. /root/keystonercv3; manila type-create default False --is_public True'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Create CIFS and NFS shares and check their status
+ cmd: |
+ salt 'ctl01*' cmd.run '. /root/keystonercv3; manila create CIFS 1 --share-type=default';
+ salt 'ctl01*' cmd.run '. /root/keystonercv3; manila create NFS 1 --share-type=default';
+ sleep 5;
+ salt 'ctl01*' cmd.run '. /root/keystonercv3; manila list';
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
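The fixed 5-second sleep is only a best-effort pause, so the final manila list may still show shares in 'creating'. A hedged sketch of a polling alternative to run on ctl01 ('creating'/'available'/'error' are standard manila share statuses; the helper itself is illustrative):

    import subprocess
    import time

    def wait_for_shares(retries=30, delay=10):
        # Poll `manila list` until no share is still 'creating', or give up.
        for _ in range(retries):
            out = subprocess.check_output(
                ". /root/keystonercv3; manila list", shell=True).decode()
            if "creating" not in out:
                return out
            time.sleep(delay)
        raise RuntimeError("manila shares did not leave 'creating' state in time")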
diff --git a/tcp_tests/tests/system/conftest.py b/tcp_tests/tests/system/conftest.py
index 754e0d7..080cb4d 100644
--- a/tcp_tests/tests/system/conftest.py
+++ b/tcp_tests/tests/system/conftest.py
@@ -54,8 +54,8 @@
'oss_deployed',
'oss_sl_os_deployed',
# drivetrain_fixtures
- 'drivetrain_actions'
- 'drivetrain_deployed'
+ 'drivetrain_actions',
+ 'drivetrain_deployed',
# decapod_fixtures
'decapod_actions',
'decapod_deployed',
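The missing commas made Python concatenate the adjacent string literals at compile time, so the list held one bogus fixture name ('drivetrain_actionsdrivetrain_deployed') instead of two. Implicit string concatenation makes the bug easy to reproduce:

    broken = ['drivetrain_actions'
              'drivetrain_deployed']
    assert broken == ['drivetrain_actionsdrivetrain_deployed']

    fixed = ['drivetrain_actions',
             'drivetrain_deployed']
    assert len(fixed) == 2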
diff --git a/tcp_tests/tests/system/test_failover_openstack_services.py b/tcp_tests/tests/system/test_failover_openstack_services.py
index 37cff72..08a928b 100644
--- a/tcp_tests/tests/system/test_failover_openstack_services.py
+++ b/tcp_tests/tests/system/test_failover_openstack_services.py
@@ -95,7 +95,7 @@
@pytest.mark.fail_snapshot
@pytest.mark.with_rally(rally_node="gtw01.", prepare_openstack=True)
def test_restart_keepalived(self, func_name, underlay, config,
- openstack_deployed, sl_os_deployed,
+ openstack_deployed,
common_services_actions,
salt_actions, openstack_actions,
rally, show_step):
@@ -164,7 +164,7 @@
@pytest.mark.fail_snapshot
@pytest.mark.with_rally(rally_node="gtw01.", prepare_openstack=True)
def test_stop_keepalived(self, func_name, underlay, config,
- openstack_deployed, sl_os_deployed,
+ openstack_deployed,
common_services_actions,
salt_actions, openstack_actions,
rally, show_step):
@@ -251,7 +251,7 @@
@pytest.mark.fail_snapshot
@pytest.mark.with_rally(rally_node="gtw01.", prepare_openstack=True)
def test_kill_keepalived(self, func_name, underlay, config,
- openstack_deployed, sl_os_deployed,
+ openstack_deployed,
common_services_actions,
salt_actions, openstack_actions,
rally, show_step):
@@ -430,7 +430,7 @@
@pytest.mark.fail_snapshot
@pytest.mark.with_rally(rally_node="gtw01.", prepare_openstack=True)
def test_kill_rabbit_galera(self, func_name, underlay, config,
- openstack_deployed, sl_os_deployed,
+ openstack_deployed,
common_services_actions,
salt_actions, openstack_actions,
rally, show_step):