Merge "Revert "Add tests to run cvp-func and cvp-sanity jobs""
diff --git a/tcp_tests/fixtures/k8s_fixtures.py b/tcp_tests/fixtures/k8s_fixtures.py
index 409034e..8cf5bc0 100644
--- a/tcp_tests/fixtures/k8s_fixtures.py
+++ b/tcp_tests/fixtures/k8s_fixtures.py
@@ -171,6 +171,30 @@
@pytest.fixture(scope='function')
+def conformance_helper(request, func_name, k8s_actions):
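+ """Finalizer that collects conformance artifacts (log file, merged
+ xunit report, downloaded files) according to the test's pytest marks"""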
+ prepare_log = request.keywords.get('prepare_log', None)
+ merge_xunit = request.keywords.get('merge_xunit', None)
+ download_target = request.keywords.get('download', None)
+
+ def test_fin():
+ if hasattr(request.node, 'rep_call') and \
+ (request.node.rep_call.passed or request.node.rep_call.failed)\
+ and download_target:
+ files = utils.extract_name_from_mark(download_target) \
+ or "{}".format(func_name)
+ if prepare_log:
+ logfile = utils.extract_name_from_mark(prepare_log, 'filepath')
+ k8s_actions.move_file_to_root_folder(logfile)
+ if merge_xunit:
+ path = utils.extract_name_from_mark(merge_xunit, 'path')
+ output = utils.extract_name_from_mark(merge_xunit, 'output')
+ k8s_actions.combine_xunit(path, output)
+ k8s_actions.download_k8s_logs(files)
+
+ request.addfinalizer(test_fin)
+
+
+@pytest.fixture(scope='function')
def k8s_cncf_log_helper(request, func_name, underlay, k8s_deployed):
"""Finalizer to prepare cncf tar.gz and save results from archive"""
diff --git a/tcp_tests/managers/k8smanager.py b/tcp_tests/managers/k8smanager.py
index 79974d3..3ae1a1b 100644
--- a/tcp_tests/managers/k8smanager.py
+++ b/tcp_tests/managers/k8smanager.py
@@ -41,6 +41,7 @@
self._api = None
self.kubectl = K8SKubectlCli(self)
self.virtlet = K8SVirtlet(self)
+ self.conformance_node = None
super(K8SManager, self).__init__(config=config, underlay=underlay)
def install(self, commands):
@@ -299,6 +300,61 @@
timeout_msg="Timeout for CNCF reached."
)
+ def start_conformance_inside_pod(self, cnf_type='k8s', timeout=60 * 60):
+ """
+ Create a conformance pod and wait for it to finish
+ :param cnf_type: conformance suite to run, either 'k8s' or 'virtlet'
+ :param timeout: seconds to wait for the pod to reach a final phase
+ :return: None; raises RuntimeError if the conformance run fails
+ """
+ if cnf_type == 'k8s':
+ pod_mark = 'conformance'
+ elif cnf_type == 'virtlet':
+ pod_mark = 'virtlet-conformance'
+ else:
+ LOG.error("Unknown conformance type or it even not set")
+ raise RuntimeError("Unknown conformance type")
+ conformance_cmd = "kubectl apply -f /srv/kubernetes/{}.yml" \
+ "".format(pod_mark)
+ self.controller_check_call(conformance_cmd, timeout=900)
+
+ cnf_pod = self.api.pods.get(pod_mark, pod_mark)
+ cnf_pod.wait_running()
+
+ pod = cnf_pod.read()
+ target = "{}.".format(pod.spec.node_name)
+ self.conformance_node = self.__underlay.get_target_node_names(
+ target)[0]
+
+ def cnf_status():
+ pod = cnf_pod.read()
+ status = pod.status.phase
+ LOG.info("Conformance status: {}".format(status))
+ return status
+
+ LOG.info("Waiting for Conformance to complete")
+ helpers.wait(
+ lambda: cnf_status() in ('Succeeded', 'Failed'),
+ interval=120, timeout=timeout,
+ timeout_msg="Timeout for Conformance reached."
+ )
+
+ pod = cnf_pod.read()
+ status = pod.status.phase
+ if status == 'Failed':
+ describe = "kubectl describe po {0} -n {0}".format(pod_mark)
+ LOG.info(self.controller_check_call(describe, timeout=30))
+ raise RuntimeError("Conformance failed")
+
+ def move_file_to_root_folder(self, filepath):
+ cmd = "mv {0} /root/".format(filepath)
+ if self.conformance_node:
+ self.__underlay.check_call(
+ cmd=cmd, node_name=self.conformance_node,
+ raise_on_err=False)
+ else:
+ LOG.info("Node is not properly set")
+
def extract_file_to_node(self, system='docker',
container='virtlet',
file_path='report.xml',
@@ -343,11 +399,16 @@
:param files: artifact file names to rsync from the node and download
:return:
"""
+ if self.conformance_node:
+ node = self.conformance_node
+ else:
+ node = self.controller_name
+ LOG.info("Trying to get logs at {}".format(node))
master_host = self.__config.salt.salt_master_host
with self.__underlay.remote(host=master_host) as r:
for log_file in files:
cmd = "rsync -r \"{0}:/root/{1}\" /root/".format(
- self.controller_name, log_file)
+ node, log_file)
r.check_call(cmd, raise_on_err=False)
LOG.info("Downloading the artifact {0}".format(log_file))
r.download(destination=log_file, target=os.getcwd())
@@ -362,7 +423,12 @@
:param output: Path to the xml file where the output will be stored
:return:
"""
- with self.__underlay.remote(node_name=self.controller_name) as r:
+ if self.conformance_node:
+ node = self.conformance_node
+ else:
+ node = self.controller_name
+ LOG.info("Trying to combine xunit at {}".format(node))
+ with self.__underlay.remote(node_name=node) as r:
cmd = ("apt-get install python-setuptools -y; "
"pip install "
"https://github.com/mogaika/xunitmerge/archive/master.zip")
diff --git a/tcp_tests/settings.py b/tcp_tests/settings.py
index fca6a6d..e1fa137 100644
--- a/tcp_tests/settings.py
+++ b/tcp_tests/settings.py
@@ -64,6 +64,9 @@
DOCKER_REGISTRY = os.environ.get('DOCKER_REGISTRY',
'docker-prod-local.artifactory.mirantis.com')
+BINARY_REGISTRY = os.environ.get('BINARY_REGISTRY', 'https://'
+ 'docker-prod-local.artifactory.mirantis.com/'
+ 'artifactory/binary-prod-local')
DOCKER_NAME = os.environ.get('DOCKER_NAME',
'mirantis/oscore/rally-tempest:latest')
DOCKER_IMAGES_SL_TAG = os.environ.get('DOCKER_IMAGES_SL_TAG', 'latest')
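Like the other registry settings, BINARY_REGISTRY can be overridden from the environment; a sketch with a hypothetical mirror URL:

    import os

    # the override must happen before tcp_tests.settings is first imported
    os.environ['BINARY_REGISTRY'] = ('https://mirror.example.com/'
                                     'binary-prod-local')

    from tcp_tests import settings
    assert settings.BINARY_REGISTRY.startswith('https://mirror.example.com')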
diff --git a/tcp_tests/settings_oslo.py b/tcp_tests/settings_oslo.py
index 0a447d6..a689b07 100644
--- a/tcp_tests/settings_oslo.py
+++ b/tcp_tests/settings_oslo.py
@@ -325,11 +325,14 @@
ct.Cfg('kubernetes_admin_password', ct.String(),
default='sbPfel23ZigJF3Bm'),
ct.Cfg('kubernetes_docker_package', ct.String(), default=''),
- ct.Cfg('kubernetes_hyperkube_image', ct.String(),
- default='{}/mirantis/kubernetes/hyperkube-amd64:v1.11.3-2'.format(
- settings.DOCKER_REGISTRY)),
+ ct.Cfg('kubernetes_hyperkube_source', ct.String(),
+ default='{}/mirantis/kubernetes/hyperkube-binaries/'
+ 'hyperkube_v1.12.3-2_1544133573591'.format(
+ settings.BINARY_REGISTRY)),
+ ct.Cfg('kubernetes_hyperkube_source_hash', ct.String(),
+ default='md5=fc23eaf3ba63d9ed9d141f465f584012'),
ct.Cfg('kubernetes_pause_image', ct.String(),
- default='{}/mirantis/kubernetes/pause-amd64:v1.11.3-2'.format(
+ default='{}/mirantis/kubernetes/pause-amd64:v1.12.3-2'.format(
settings.DOCKER_REGISTRY)),
ct.Cfg('kubernetes_calico_image', ct.String(),
default='{}/mirantis/projectcalico/calico/node:v3.1.3'.format(
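The new kubernetes_hyperkube_source_hash value uses salt's `<algo>=<hexdigest>` source_hash convention; an illustrative check of a downloaded hyperkube binary against it, using only the standard library:

    import hashlib

    def matches_source_hash(path, source_hash):
        # source_hash like 'md5=fc23eaf3ba63d9ed9d141f465f584012'
        algo, _, expected = source_hash.partition('=')
        digest = hashlib.new(algo)
        with open(path, 'rb') as binary:
            for chunk in iter(lambda: binary.read(1 << 20), b''):
                digest.update(chunk)
        return digest.hexdigest() == expected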
diff --git a/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas/lab04-physical-inventory.yaml b/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas/lab04-physical-inventory.yaml
index 4d3ffd6..0cec934 100644
--- a/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas/lab04-physical-inventory.yaml
+++ b/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas/lab04-physical-inventory.yaml
@@ -1,77 +1,76 @@
-nodes:
- cfg01.cookied-cicd-bm-os-contrail40-maas.local:
- reclass_storage_name: infra_config_node01
- roles:
- - infra_config
- - linux_system_codename_xenial
+nodes:
+ cfg01.cookied-cicd-bm-os-contrail40-maas.local:
+ reclass_storage_name: infra_config_node01
+ roles:
+ - infra_config
+ - linux_system_codename_xenial
interfaces:
ens3:
role: single_static_mgm
- ens4:
- role: single_static_ctl
- # Physical nodes
-
- kvm01.cookied-cicd-bm-os-contrail40-maas.local:
- reclass_storage_name: infra_kvm_node01
- roles:
- - infra_kvm
- - linux_system_codename_xenial
- interfaces:
- one1:
+ ens4:
+ role: single_static_ctl
+ # Physical nodes
+
+ kvm01.cookied-cicd-bm-os-contrail40-maas.local:
+ reclass_storage_name: infra_kvm_node01
+ roles:
+ - infra_kvm
+ - linux_system_codename_xenial
+ interfaces:
+ enp9s0f0:
role: single_dhcp
- one2:
- role: bond0_ab_ovs_vlan_ctl
- ten1:
+ enp9s0f1:
+ role: bond0_ab_ovs_vlan_ctl
+ ens11f0:
role: single_mgm_manual
-
- kvm02.cookied-cicd-bm-os-contrail40-maas.local:
- reclass_storage_name: infra_kvm_node02
- roles:
- - infra_kvm
- - linux_system_codename_xenial
- interfaces:
- one1:
+
+ kvm02.cookied-cicd-bm-os-contrail40-maas.local:
+ reclass_storage_name: infra_kvm_node02
+ roles:
+ - infra_kvm
+ - linux_system_codename_xenial
+ interfaces:
+ enp9s0f0:
role: single_dhcp
- one2:
- role: bond0_ab_ovs_vlan_ctl
- ten1:
+ enp9s0f1:
+ role: bond0_ab_ovs_vlan_ctl
+ ens11f0:
role: single_mgm_manual
-
- kvm03.cookied-cicd-bm-os-contrail40-maas.local:
- reclass_storage_name: infra_kvm_node03
- roles:
- - infra_kvm
- - linux_system_codename_xenial
- interfaces:
- one1:
+
+ kvm03.cookied-cicd-bm-os-contrail40-maas.local:
+ reclass_storage_name: infra_kvm_node03
+ roles:
+ - infra_kvm
+ - linux_system_codename_xenial
+ interfaces:
+ enp9s0f0:
role: single_dhcp
- one2:
- role: bond0_ab_ovs_vlan_ctl
- ten1:
+ enp9s0f1:
+ role: bond0_ab_ovs_vlan_ctl
+ ens11f0:
role: single_mgm_manual
-
- osd<<count>>:
- reclass_storage_name: ceph_osd_rack01
- roles:
- - ceph_osd
- - linux_system_codename_xenial
- interfaces:
- one1:
+
+ osd<<count>>:
+ reclass_storage_name: ceph_osd_rack01
+ roles:
+ - ceph_osd
+ - linux_system_codename_xenial
+ interfaces:
+ enp2s0f0:
role: single_dhcp
- one2:
- role: single_vlan_ctl
+ enp2s0f1:
+ role: single_vlan_ctl
# role: bond0_ab_vlan_ceph_storage_backend
-
- cmp<<count>>:
- reclass_storage_name: openstack_compute_rack01
- roles:
- - openstack_compute
- - linux_system_codename_xenial
- interfaces:
- #one1: unused
- one2:
+
+ cmp<<count>>:
+ reclass_storage_name: openstack_compute_rack01
+ roles:
+ - openstack_compute
+ - linux_system_codename_xenial
+ interfaces:
+ enp2s0f1:
role: single_dhcp
- ten1:
- role: bond0_ab_contrail
- ten2:
- role: single_vlan_ctl
+ enp5s0f0:
+ role: bond0_ab_contrail
+ enp5s0f1:
+ role: single_vlan_ctl
diff --git a/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas/salt-context-cookiecutter-contrail.yaml b/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas/salt-context-cookiecutter-contrail.yaml
index cb30db6..8446f86 100644
--- a/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas/salt-context-cookiecutter-contrail.yaml
+++ b/tcp_tests/templates/cookied-cicd-bm-os-contrail40-maas/salt-context-cookiecutter-contrail.yaml
@@ -129,7 +129,7 @@
# pxe_interface_mac:
pxe_interface_mac: "0c:c4:7a:6c:83:56"
interfaces:
- one1:
+ enp9s0f0:
mac: "0c:c4:7a:6c:83:56"
mode: "static"
ip: "172.16.49.67"
@@ -145,7 +145,7 @@
# hwe_kernel: "hwe-16.04"
pxe_interface_mac: "0c:c4:7a:6c:84:2c"
interfaces:
- one1:
+ enp9s0f0:
mac: "0c:c4:7a:6c:84:2c"
mode: "static"
ip: "172.16.49.68"
@@ -161,7 +161,7 @@
# hwe_kernel: "hwe-16.04"
pxe_interface_mac: "0c:c4:7a:6c:83:54"
interfaces:
- one1:
+ enp9s0f0:
mac: "0c:c4:7a:6c:83:54"
mode: "static"
ip: "172.16.49.69"
@@ -177,7 +177,7 @@
# hwe_kernel: "hwe-16.04"
pxe_interface_mac: "0c:c4:7a:55:6a:d4"
interfaces:
- one1:
+ enp2s0f0:
mac: "0c:c4:7a:55:6a:d4"
mode: "static"
ip: "172.16.49.70"
@@ -191,10 +191,10 @@
osd002: # #cz7343-kvm.host-telecom.com
distro_series: "xenial"
# hwe_kernel: "hwe-16.04"
- pxe_interface_mac: "0c:c4:7a:55:6a:57"
+ pxe_interface_mac: "0c:c4:7a:55:6a:56"
interfaces:
- one1:
- mac: "0c:c4:7a:55:6a:57"
+ enp2s0f0:
+ mac: "0c:c4:7a:55:6a:56"
mode: "static"
ip: "172.16.49.71"
subnet: ${maas:region:subnets:deploy_network:cidr} # create it manually... in UI
@@ -209,7 +209,7 @@
# hwe_kernel: "hwe-16.04"
pxe_interface_mac: "0c:c4:7a:55:6a:2a"
interfaces:
- one1:
+ enp2s0f0:
mac: "0c:c4:7a:55:6a:2a"
mode: "static"
ip: "172.16.49.72"
@@ -225,7 +225,7 @@
# hwe_kernel: "hwe-16.04"
pxe_interface_mac: "0c:c4:7a:54:a2:5f"
interfaces:
- one2:
+ enp2s0f1:
mac: "0c:c4:7a:54:a2:5f"
mode: "static"
ip: "172.16.49.73"
@@ -241,7 +241,7 @@
# hwe_kernel: "hwe-16.04"
pxe_interface_mac: "0c:c4:7a:54:a0:51"
interfaces:
- one2:
+ enp2s0f1:
mac: "0c:c4:7a:54:a0:51"
mode: "static"
ip: "172.16.49.74"
diff --git a/tcp_tests/templates/cookied-cicd-pike-dpdk/cookiecutter-context-pike-ovs-dpdk.yaml b/tcp_tests/templates/cookied-cicd-pike-dpdk/cookiecutter-context-pike-ovs-dpdk.yaml
index 53c9687..150e001 100644
--- a/tcp_tests/templates/cookied-cicd-pike-dpdk/cookiecutter-context-pike-ovs-dpdk.yaml
+++ b/tcp_tests/templates/cookied-cicd-pike-dpdk/cookiecutter-context-pike-ovs-dpdk.yaml
@@ -204,3 +204,8 @@
tenant_vlan: '20'
upstream_proxy_enabled: 'False'
use_default_network_scheme: 'False'
+ openstack_create_public_network: 'False'
+ openstack_public_neutron_subnet_gateway: 172.17.16.1
+ openstack_public_neutron_subnet_cidr: 172.17.16.0/24
+ openstack_public_neutron_subnet_allocation_start: 172.17.16.201
+ openstack_public_neutron_subnet_allocation_end: 172.17.16.245
diff --git a/tcp_tests/templates/cookied-cicd-pike-dvr-ceph/cookiecutter-context-dvr-ceph.yaml b/tcp_tests/templates/cookied-cicd-pike-dvr-ceph/cookiecutter-context-dvr-ceph.yaml
index b26a0dd..dcf4654 100644
--- a/tcp_tests/templates/cookied-cicd-pike-dvr-ceph/cookiecutter-context-dvr-ceph.yaml
+++ b/tcp_tests/templates/cookied-cicd-pike-dvr-ceph/cookiecutter-context-dvr-ceph.yaml
@@ -294,4 +294,9 @@
openstack_mysql_x509_enabled: 'True'
rabbitmq_ssl_enabled: 'True'
openstack_rabbitmq_x509_enabled: 'True'
- openstack_internal_protocol: 'https'
\ No newline at end of file
+ openstack_internal_protocol: 'https'
+ openstack_create_public_network: 'False'
+ openstack_public_neutron_subnet_gateway: 172.17.16.1
+ openstack_public_neutron_subnet_cidr: 172.17.16.0/24
+ openstack_public_neutron_subnet_allocation_start: 172.17.16.201
+ openstack_public_neutron_subnet_allocation_end: 172.17.16.245
diff --git a/tcp_tests/templates/cookied-cicd-pike-dvr-sl/cookiecutter-context-pike-dvr-sl.yaml b/tcp_tests/templates/cookied-cicd-pike-dvr-sl/cookiecutter-context-pike-dvr-sl.yaml
index b0c69e8..3f5da39 100644
--- a/tcp_tests/templates/cookied-cicd-pike-dvr-sl/cookiecutter-context-pike-dvr-sl.yaml
+++ b/tcp_tests/templates/cookied-cicd-pike-dvr-sl/cookiecutter-context-pike-dvr-sl.yaml
@@ -232,3 +232,8 @@
tenant_vlan: '20'
upstream_proxy_enabled: 'False'
use_default_network_scheme: 'False'
+ openstack_create_public_network: 'False'
+ openstack_public_neutron_subnet_gateway: 172.17.16.1
+ openstack_public_neutron_subnet_cidr: 172.17.16.0/24
+ openstack_public_neutron_subnet_allocation_start: 172.17.16.201
+ openstack_public_neutron_subnet_allocation_end: 172.17.16.245
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-cicd-pike-ovs-sl/cookiecutter-context-pike-ovs-sl.yaml b/tcp_tests/templates/cookied-cicd-pike-ovs-sl/cookiecutter-context-pike-ovs-sl.yaml
index a5a862b..1ced1bd 100644
--- a/tcp_tests/templates/cookied-cicd-pike-ovs-sl/cookiecutter-context-pike-ovs-sl.yaml
+++ b/tcp_tests/templates/cookied-cicd-pike-ovs-sl/cookiecutter-context-pike-ovs-sl.yaml
@@ -266,4 +266,8 @@
octavia_lb_mgmt_cidr: 192.168.1.0/24
octavia_lb_mgmt_allocation_pool_start: 192.168.1.2
octavia_lb_mgmt_allocation_pool_end: 192.168.1.200
-
+ openstack_create_public_network: 'False'
+ openstack_public_neutron_subnet_gateway: 172.17.16.1
+ openstack_public_neutron_subnet_cidr: 172.17.16.0/24
+ openstack_public_neutron_subnet_allocation_start: 172.17.16.201
+ openstack_public_neutron_subnet_allocation_end: 172.17.16.245
diff --git a/tcp_tests/templates/cookied-cicd-queens-dvr-sl/cookiecutter-context-queens-dvr-sl.yaml b/tcp_tests/templates/cookied-cicd-queens-dvr-sl/cookiecutter-context-queens-dvr-sl.yaml
index 0fa8367..8c02c8e 100644
--- a/tcp_tests/templates/cookied-cicd-queens-dvr-sl/cookiecutter-context-queens-dvr-sl.yaml
+++ b/tcp_tests/templates/cookied-cicd-queens-dvr-sl/cookiecutter-context-queens-dvr-sl.yaml
@@ -266,5 +266,9 @@
octavia_lb_mgmt_cidr: 192.168.1.0/24
octavia_lb_mgmt_allocation_pool_start: 192.168.1.2
octavia_lb_mgmt_allocation_pool_end: 192.168.1.200
-
+ openstack_create_public_network: 'False'
+ openstack_public_neutron_subnet_gateway: 172.17.16.1
+ openstack_public_neutron_subnet_cidr: 172.17.16.0/24
+ openstack_public_neutron_subnet_allocation_start: 172.17.16.201
+ openstack_public_neutron_subnet_allocation_end: 172.17.16.245
diff --git a/tcp_tests/templates/k8s-ha-calico/core.yaml b/tcp_tests/templates/k8s-ha-calico/core.yaml
index 1d0a8d2..e5ff52e 100644
--- a/tcp_tests/templates/k8s-ha-calico/core.yaml
+++ b/tcp_tests/templates/k8s-ha-calico/core.yaml
@@ -13,14 +13,14 @@
-C 'I@docker:host' state.sls docker.host
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 3, delay: 5}
- skip_fail: false
+ skip_fail: true
- description: Check docker
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-C 'I@docker:host' cmd.run 'docker ps'
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
- skip_fail: false
+ skip_fail: true
- description: Install keepalived on first node
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
diff --git a/tcp_tests/templates/shared-salt.yaml b/tcp_tests/templates/shared-salt.yaml
index 2746d2f..c3bc0f3 100644
--- a/tcp_tests/templates/shared-salt.yaml
+++ b/tcp_tests/templates/shared-salt.yaml
@@ -288,6 +288,7 @@
find ${REPLACE_DIRS} -type f -exec sed -i 's/==IPV4_NET_EXTERNAL_PREFIX==/{{ IPV4_NET_EXTERNAL_PREFIX }}/g' {} +
find ${REPLACE_DIRS} -type f -exec sed -i 's/apt_mk_version:.*/apt_mk_version: {{ REPOSITORY_SUITE }}/g' {} +
+ find ${REPLACE_DIRS} -type f -exec sed -i 's/mcp_version:.*/mcp_version: {{ REPOSITORY_SUITE }}/g' {} +
{%- if IS_CONTRAIL_LAB %}
# vSRX IPs for tcp-qa images have 172.16.10.90 hardcoded
@@ -615,6 +616,7 @@
cmd: |
export REPLACE_DIRS="/srv/salt/reclass/classes/ /srv/salt/reclass/nodes/"
find ${REPLACE_DIRS} -type f -exec sed -i 's/apt_mk_version:.*/apt_mk_version: {{ REPOSITORY_SUITE }}/g' {} +
+ find ${REPLACE_DIRS} -type f -exec sed -i 's/mcp_version:.*/mcp_version: {{ REPOSITORY_SUITE }}/g' {} +
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 1}
skip_fail: false
diff --git a/tcp_tests/tests/system/test_install_k8s.py b/tcp_tests/tests/system/test_install_k8s.py
index 67d3635..bc57e38 100644
--- a/tcp_tests/tests/system/test_install_k8s.py
+++ b/tcp_tests/tests/system/test_install_k8s.py
@@ -186,18 +186,17 @@
k8s_deployed.run_conformance()
LOG.info("*************** DONE **************")
- @pytest.mark.extract(container_system='docker', extract_from='conformance',
- files_to_extract=['report'])
- @pytest.mark.merge_xunit(path='/root/report',
+ @pytest.mark.prepare_log(filepath='/tmp/conformance/conformance.log')
+ @pytest.mark.merge_xunit(path='/tmp/conformance',
output='/root/conformance_result.xml')
- @pytest.mark.grab_k8s_results(name=['k8s_conformance.log',
- 'conformance_result.xml'])
+ @pytest.mark.download(name=['conformance.log',
+ 'conformance_result.xml'])
@pytest.mark.grab_versions
@pytest.mark.fail_snapshot
@pytest.mark.cz8116
@pytest.mark.k8s_calico
def test_only_k8s_install(self, config, show_step,
- k8s_deployed, k8s_logs):
+ k8s_deployed, conformance_helper):
"""Test for deploying MCP environment with k8s and check it
Scenario:
@@ -211,5 +210,5 @@
if config.k8s.k8s_conformance_run:
show_step(5)
- k8s_deployed.run_conformance()
+ k8s_deployed.start_conformance_inside_pod()
LOG.info("*************** DONE **************")
diff --git a/tcp_tests/tests/system/test_virtlet_actions.py b/tcp_tests/tests/system/test_virtlet_actions.py
index d3b6c27..9ca2fe6 100644
--- a/tcp_tests/tests/system/test_virtlet_actions.py
+++ b/tcp_tests/tests/system/test_virtlet_actions.py
@@ -88,12 +88,16 @@
show_step(4)
vm_pod.delete()
+ @pytest.mark.prepare_log(filepath='/tmp/virtlet-conformance/'
+ 'virtlet_conformance.log')
+ @pytest.mark.merge_xunit(path='/tmp/virtlet-conformance',
+ output='/root/report.xml')
+ @pytest.mark.download(name=['virtlet_conformance.log',
+ 'report.xml'])
@pytest.mark.grab_versions
- @pytest.mark.grab_k8s_results(name=['virtlet_conformance.log',
- 'report.xml'])
@pytest.mark.fail_snapshot
def test_virtlet_conformance(self, show_step, config, k8s_deployed,
- k8s_logs):
+ conformance_helper):
"""Test run of virtlet conformance tests
Scenario:
@@ -102,4 +106,4 @@
"""
show_step(1)
- k8s_deployed.run_virtlet_conformance()
+ k8s_deployed.start_conformance_inside_pod(cnf_type='virtlet')