Merge "[ocata/mitaka] Fix ocata/mitaka templates"
diff --git a/jobs/pipelines/swarm-testrail-report.groovy b/jobs/pipelines/swarm-testrail-report.groovy
index df437b3..7d2ce53 100644
--- a/jobs/pipelines/swarm-testrail-report.groovy
+++ b/jobs/pipelines/swarm-testrail-report.groovy
@@ -37,6 +37,10 @@
def testrail_name_template = ''
def reporter_extra_options = []
+ //stage("Archive all xml reports") {
+ // archiveArtifacts artifacts: "${PARENT_WORKSPACE}/*.xml"
+ //}
+
stage("Deployment report") {
report_name = "deployment_${ENV_NAME}.xml"
testSuiteName = "[MCP] Integration automation"
diff --git a/src/com/mirantis/system_qa/SharedPipeline.groovy b/src/com/mirantis/system_qa/SharedPipeline.groovy
index 7aee6fd..775b901 100644
--- a/src/com/mirantis/system_qa/SharedPipeline.groovy
+++ b/src/com/mirantis/system_qa/SharedPipeline.groovy
@@ -81,8 +81,10 @@
def String junit_report_xml = readFile("${junit_report_filename}")
def String junit_report_xml_pretty = new XmlUtil().serialize(junit_report_xml)
+ // Replace '<' and '>' with '&lt;' and '&gt;' to avoid conflicts between xml tags in the message and JUnit report
+ def String junit_report_xml_filtered = junit_report_xml_pretty.replaceAll("<","&lt;").replaceAll(">", "&gt;")
def String msg = "Job '${job_url}' failed with status ${build_status}, JUnit report:\n"
- throw new Exception(msg + junit_report_xml_pretty)
+ throw new Exception(msg + junit_report_xml_filtered)
} else {
throw new Exception("Job '${job_url}' failed with status ${build_status}, please check the console output.")
}
@@ -365,8 +367,7 @@
// <filename> is name of the XML report file that will be created
// <status> is one of the 'success', 'skipped', 'failure' or 'error'
// 'error' status is assumed as 'Blocker' in TestRail reporter
- run_cmd("""\
-cat << \'EOF\' > ${filename}
+ def script = """\
<?xml version=\"1.0\" encoding=\"utf-8\"?>
<testsuite>
<testcase classname=\"${classname}\" name=\"${name}\" time=\"0\">
@@ -375,8 +376,8 @@
<system-err>${stderr}</system-err>
</testcase>
</testsuite>
-EOF
-""")
+"""
+ writeFile(file: filename, text: script, encoding: "UTF-8")
}
def upload_results_to_testrail(report_name, testSuiteName, methodname, testrail_name_template, reporter_extra_options=[]) {
@@ -407,7 +408,7 @@
def script = """
. ${venvPath}/bin/activate
set -ex
- report_xml=\$(find \$(pwd) -name "${report_name}")
+ report_xml=\$(find ${PARENT_WORKSPACE} -name "${report_name}")
if [ -n "\${report_xml}" ]; then
report ${reporterOptions.join(' ')} \${report_xml}
fi
@@ -430,7 +431,7 @@
def STATUS_MAP = ['SUCCESS': 'success', 'FAILURE': 'failure', 'UNSTABLE': 'failure', 'ABORTED': 'error']
def classname = "Deploy"
def name = "deployment_${ENV_NAME}"
- def filename = "\$(pwd)/${name}.xml"
+ def filename = "${name}.xml"
def status = STATUS_MAP[result ?: 'FAILURE'] // currentBuild.result *must* be set at the finish of the try/catch
create_xml_report(filename, classname, name, status, "Deploy components: ${deploy_expected_stacks}", text, '', '')
}
diff --git a/tcp_tests/managers/rallymanager.py b/tcp_tests/managers/rallymanager.py
index 156e226..5b7fd4c 100644
--- a/tcp_tests/managers/rallymanager.py
+++ b/tcp_tests/managers/rallymanager.py
@@ -46,6 +46,7 @@
cmd = ("docker images | grep {0}| grep {1}| awk '{{print $3}}'"
.format(self.image_name, self.image_version))
res = self._underlay.check_call(cmd, node_name=self._node_name)
+ LOG.debug(res['stdout'])
image_id = res['stdout'][0].strip()
LOG.info("Image ID is {}".format(image_id))
return image_id
diff --git a/tcp_tests/managers/runtestmanager.py b/tcp_tests/managers/runtestmanager.py
index 5411881..b24cba2 100644
--- a/tcp_tests/managers/runtestmanager.py
+++ b/tcp_tests/managers/runtestmanager.py
@@ -134,6 +134,14 @@
def create_flavors(self):
return self.salt_api.local('cfg01*', 'state.sls', 'nova.client')
+ def set_property(self):
+ return self.salt_api.local(
+ tgt='ctl01*',
+ fun='cmd.run',
+ args='. /root/keystonercv3; openstack '
+ 'flavor set m1.tiny_test '
+ '--property hw:mem_page_size=small')
+
def create_cirros(self):
return self.salt_api.local('cfg01*', 'state.sls', 'glance.client')
@@ -198,7 +206,7 @@
indent=4, sort_keys=True)
f.write(container_inspect)
- def prepare(self):
+ def prepare(self, dpdk=None):
self.store_runtest_model()
res = self.install_python_lib()
@@ -215,6 +223,11 @@
res = self.create_flavors()
LOG.info(json.dumps(res, indent=4))
time.sleep(20)
+ if dpdk:
+ res = self.set_property()
+ LOG.info('Update flavor property')
+ LOG.info(json.dumps(res, indent=4))
+ time.sleep(20)
res = self.create_cirros()
LOG.info(json.dumps(res, indent=4))
@@ -304,12 +317,12 @@
return {'inspect': inspect,
'logs': logs}
- def prepare_and_run_tempest(self, username='root'):
+ def prepare_and_run_tempest(self, username='root', dpdk=None):
"""
Run tempest tests
"""
tempest_timeout = settings.TEMPEST_TIMEOUT
- self.prepare()
+ self.prepare(dpdk=dpdk)
test_res = self.run_tempest(tempest_timeout)
self.fetch_arficats(username=username)
self.save_runtime_logs(**test_res)
diff --git a/tcp_tests/settings_oslo.py b/tcp_tests/settings_oslo.py
index ba3fb64..7613a2c 100644
--- a/tcp_tests/settings_oslo.py
+++ b/tcp_tests/settings_oslo.py
@@ -311,14 +311,17 @@
default='{}/mirantis/kubernetes/pause-amd64:v1.10.4-4'.format(
settings.DOCKER_REGISTRY)),
ct.Cfg('kubernetes_calico_image', ct.String(),
- default='{}/mirantis/projectcalico/calico/node:v2.6.10'.format(
+ default='{}/mirantis/projectcalico/calico/node:v3.1.3'.format(
settings.DOCKER_REGISTRY)),
ct.Cfg('kubernetes_calico_calicoctl_image', ct.String(),
- default='{}/mirantis/projectcalico/calico/ctl:v1.6.4'.format(
+ default='{}/mirantis/projectcalico/calico/ctl:v3.1.3'.format(
settings.DOCKER_REGISTRY)),
ct.Cfg('kubernetes_calico_cni_image', ct.String(),
- default='{}/mirantis/projectcalico/calico/cni:v1.11.6'.format(
+ default='{}/mirantis/projectcalico/calico/cni:v3.1.3'.format(
settings.DOCKER_REGISTRY)),
+ ct.Cfg('kubernetes_calico_kube_controllers_image', ct.String(),
+ default='{}/mirantis/projectcalico/calico/kube-controllers:'
+ 'v3.1.3'.format(settings.DOCKER_REGISTRY)),
ct.Cfg('kubernetes_netchecker_enabled', ct.Boolean(),
help="", default=True),
ct.Cfg('kubernetes_netchecker_agent_image', ct.String(),
@@ -327,9 +330,6 @@
default='mirantis/k8s-netchecker-server:v1.2.2'),
ct.Cfg('kubernetes_calico_policy_enabled', ct.Boolean(),
help="", default=False),
- ct.Cfg('kubernetes_calico_policy_image', ct.String(),
- default='{}/mirantis/projectcalico/calico/kube-controllers:'
- 'v1.0.4'.format(settings.DOCKER_REGISTRY)),
ct.Cfg('kubernetes_helm_enabled', ct.Boolean(),
help="", default=False),
ct.Cfg('kubernetes_virtlet_enabled', ct.Boolean(),
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/cookiecutter-context-k8s-sl.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/cookiecutter-context-k8s-sl.yaml
index f70d915..898017e 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/cookiecutter-context-k8s-sl.yaml
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/cookiecutter-context-k8s-sl.yaml
@@ -109,11 +109,6 @@
internal_proxy_enabled: 'False'
kqueen_custom_mail_enabled: 'False'
kqueen_enabled: 'False'
- kubernetes_compute_count: 2
- kubernetes_compute_rack01_deploy_subnet: 10.167.5
- kubernetes_compute_rack01_single_subnet: 10.167.4
- kubernetes_compute_rack01_tenant_subnet: 10.167.6
- kubernetes_compute_rack01_hostname: cmp
kubernetes_control_address: 10.167.4.10
kubernetes_control_node01_address: 10.167.4.11
kubernetes_control_node01_deploy_address: 10.167.5.11
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/environment-context-k8s-sl.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/environment-context-k8s-sl.yaml
index 2b89364..0a07a81 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/environment-context-k8s-sl.yaml
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/environment-context-k8s-sl.yaml
@@ -109,9 +109,8 @@
ens4:
role: single_ctl
- # Generator-based computes. For compatibility only
- cmp<<count>>:
- reclass_storage_name: kubernetes_compute_rack01
+ cmp001:
+ reclass_storage_name: kubernetes_compute_node01
roles:
- kubernetes_compute
- linux_system_codename_xenial
@@ -121,6 +120,20 @@
role: single_dhcp
ens4:
role: single_ctl
+ single_address: ${_param:kubernetes_compute_node01_address}
+
+ cmp002:
+ reclass_storage_name: kubernetes_compute_node02
+ roles:
+ - kubernetes_compute
+ - linux_system_codename_xenial
+ - salt_master_host
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+ single_address: ${_param:kubernetes_compute_node02_address}
mon01:
reclass_storage_name: stacklight_server_node01
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico/cookiecutter-context-k8s.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico/cookiecutter-context-k8s.yaml
index 5eae395..2f0e52e 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-calico/cookiecutter-context-k8s.yaml
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico/cookiecutter-context-k8s.yaml
@@ -109,11 +109,6 @@
internal_proxy_enabled: 'False'
kqueen_custom_mail_enabled: 'False'
kqueen_enabled: 'False'
- kubernetes_compute_count: 2
- kubernetes_compute_rack01_deploy_subnet: 10.167.5
- kubernetes_compute_rack01_single_subnet: 10.167.4
- kubernetes_compute_rack01_tenant_subnet: 10.167.6
- kubernetes_compute_rack01_hostname: cmp
kubernetes_control_address: 10.167.4.10
kubernetes_control_node01_address: 10.167.4.11
kubernetes_control_node01_deploy_address: 10.167.5.11
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico/environment-context-k8s.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico/environment-context-k8s.yaml
index 66d7cec..4c01f4f 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-calico/environment-context-k8s.yaml
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico/environment-context-k8s.yaml
@@ -109,9 +109,8 @@
ens4:
role: single_ctl
- # Generator-based computes. For compatibility only
- cmp<<count>>:
- reclass_storage_name: kubernetes_compute_rack01
+ cmp001:
+ reclass_storage_name: kubernetes_compute_node01
roles:
- kubernetes_compute
- linux_system_codename_xenial
@@ -121,3 +120,17 @@
role: single_dhcp
ens4:
role: single_ctl
+ single_address: ${_param:kubernetes_compute_node01_address}
+
+ cmp002:
+ reclass_storage_name: kubernetes_compute_node02
+ roles:
+ - kubernetes_compute
+ - linux_system_codename_xenial
+ - salt_master_host
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+ single_address: ${_param:kubernetes_compute_node02_address}
diff --git a/tcp_tests/templates/cookied-cicd-k8s-genie/cookiecutter-context-k8s-genie.yaml b/tcp_tests/templates/cookied-cicd-k8s-genie/cookiecutter-context-k8s-genie.yaml
index 27eb637..48e91fd 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-genie/cookiecutter-context-k8s-genie.yaml
+++ b/tcp_tests/templates/cookied-cicd-k8s-genie/cookiecutter-context-k8s-genie.yaml
@@ -109,11 +109,6 @@
internal_proxy_enabled: 'False'
kqueen_custom_mail_enabled: 'False'
kqueen_enabled: 'False'
- kubernetes_compute_count: 2
- kubernetes_compute_rack01_deploy_subnet: 10.167.5
- kubernetes_compute_rack01_single_subnet: 10.167.4
- kubernetes_compute_rack01_tenant_subnet: 10.167.6
- kubernetes_compute_rack01_hostname: cmp
kubernetes_control_address: 10.167.4.10
kubernetes_control_node01_address: 10.167.4.11
kubernetes_control_node01_deploy_address: 10.167.5.11
diff --git a/tcp_tests/templates/cookied-cicd-k8s-genie/environment-context-k8s-genie.yaml b/tcp_tests/templates/cookied-cicd-k8s-genie/environment-context-k8s-genie.yaml
index 66d7cec..4c01f4f 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-genie/environment-context-k8s-genie.yaml
+++ b/tcp_tests/templates/cookied-cicd-k8s-genie/environment-context-k8s-genie.yaml
@@ -109,9 +109,8 @@
ens4:
role: single_ctl
- # Generator-based computes. For compatibility only
- cmp<<count>>:
- reclass_storage_name: kubernetes_compute_rack01
+ cmp001:
+ reclass_storage_name: kubernetes_compute_node01
roles:
- kubernetes_compute
- linux_system_codename_xenial
@@ -121,3 +120,17 @@
role: single_dhcp
ens4:
role: single_ctl
+ single_address: ${_param:kubernetes_compute_node01_address}
+
+ cmp002:
+ reclass_storage_name: kubernetes_compute_node02
+ roles:
+ - kubernetes_compute
+ - linux_system_codename_xenial
+ - salt_master_host
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+ single_address: ${_param:kubernetes_compute_node02_address}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-genie.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-genie.yaml
index eb9a3dc..130d95a 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-genie.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-genie.yaml
@@ -18,16 +18,16 @@
{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
-#- description: "Workaround for computes"
-# cmd: |
-# set -e;
-# . /root/venv-reclass-tools/bin/activate;
-# # Workaround for compute nodes. Auto-registration for compute nodes cannot be used without external address inventory
-# reclass-tools add-key parameters._param.kubernetes_compute_node01_address {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.101 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/kubernetes/init.yml;
-# reclass-tools add-key parameters._param.kubernetes_compute_node02_address {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.102 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/kubernetes/init.yml;
-#
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 5}
-# skip_fail: false
+- description: "Workaround for computes"
+ cmd: |
+ set -e;
+ . /root/venv-reclass-tools/bin/activate;
+ # Workaround for compute nodes. Auto-registration for compute nodes cannot be used without external address inventory
+ reclass-tools add-key parameters._param.kubernetes_compute_node01_address {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.101 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/kubernetes/init.yml;
+ reclass-tools add-key parameters._param.kubernetes_compute_node02_address {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.102 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/kubernetes/init.yml;
+
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
diff --git a/tcp_tests/tests/system/test_failover_openstack_services.py b/tcp_tests/tests/system/test_failover_openstack_services.py
index d06c2af..eaa3377 100644
--- a/tcp_tests/tests/system/test_failover_openstack_services.py
+++ b/tcp_tests/tests/system/test_failover_openstack_services.py
@@ -234,7 +234,7 @@
else:
# Check that keepalived on other ctl nodes was not restarted
assert ps == ps_after[node_name], (
- "Keepalived was restarted while it shouldn't!")
+ "Keepalived was restarted while it shouldn't!")
# STEP #5
show_step(5)
@@ -337,7 +337,7 @@
else:
# Check that keepalived on other ctl nodes was not restarted
assert ps == ps_after[node_name], (
- "Keepalived was restarted while it shouldn't!")
+ "Keepalived was restarted while it shouldn't!")
# STEP #5
show_step(5)
# TODO(ddmitriev):
@@ -412,8 +412,8 @@
else:
# Check that haproxy on other ctl nodes was not restarted
assert ps == ps_after[node_name], (
- "Haproxy was restarted while it shouldn't on node {0}"
- .format(node_name))
+ "Haproxy was restarted while it shouldn't on node {0}"
+ .format(node_name))
# STEP #11
show_step(11)
@@ -488,7 +488,9 @@
LOG.info("Scheduling to kill rabbitmq on the minion {0}"
.format(ctl_minion))
underlay.delayed_call(
- "salt '{0}' cmd.run 'killall -9 -u rabbitmq'".format(ctl_minion),
+ "salt '{0}' cmd.run 'chmod -x "
+ "/usr/lib/rabbitmq/bin/rabbitmq-server "
+ "&& killall -9 -u rabbitmq'".format(ctl_minion),
host=config.salt.salt_master_host,
delay_min=2,
delay_max=3)
@@ -499,6 +501,8 @@
# STEP #3
show_step(3)
# Run rally task with created task file
+ rally.prepare_rally_task()
+ rally.run_container()
self.create_and_run_rally_load_task(
rally, times=60, concurrency=4, timeout=900)
@@ -519,7 +523,7 @@
# Check that rabbitmq_server on other ctl nodes
# was not restarted
assert ps == ps_after[node_name], (
- "'rabbitmq_server' was restarted while it shouldn't!")
+ "'rabbitmq_server' was restarted while it shouldn't!")
# Mysql case
# STEP #5
@@ -582,8 +586,8 @@
else:
# Check that Mysql on other ctl nodes was not restarted
assert ps == ps_after[node_name], (
- "Mysql was restarted while it shouldn't on node {0}"
- .format(node_name))
+ "Mysql was restarted while it shouldn't on node {0}"
+ .format(node_name))
# STEP #9
show_step(9)
diff --git a/tcp_tests/tests/system/test_install_mcp_ovs_pike.py b/tcp_tests/tests/system/test_install_mcp_ovs_pike.py
index eca7a20..69d8324 100644
--- a/tcp_tests/tests/system/test_install_mcp_ovs_pike.py
+++ b/tcp_tests/tests/system/test_install_mcp_ovs_pike.py
@@ -277,3 +277,25 @@
if settings.RUN_TEMPEST:
tempest_actions.prepare_and_run_tempest()
LOG.info("*************** DONE **************")
+
+ @pytest.mark.grab_versions
+ @pytest.mark.fail_snapshot
+ def test_mcp_dpdk_ovs_install(self, underlay,
+ openstack_deployed,
+ openstack_actions,
+ tempest_actions):
+ """Test for deploying an mcp environment and check it
+ Scenario:
+ 1. Prepare salt on hosts
+ 2. Setup controller nodes
+ 3. Setup compute nodes
+ 4. Run tempest
+
+ """
+ openstack_actions._salt.local(
+ tgt='*', fun='cmd.run',
+ args='service ntp stop; ntpd -gq; service ntp start')
+ if settings.RUN_TEMPEST:
+ tempest_actions.prepare_and_run_tempest(dpdk=True)
+
+ LOG.info("*************** DONE **************")