The reason for a failed Jenkins job in the cluster-under-test will be shown in the test result
PROD-36050
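
Both call sites below now unpack a (result, description) tuple from
dt.start_job_on_jenkins() and surface the description in the assertion
message. A minimal sketch of the assumed return contract (the body is
hypothetical; only the tuple shape is confirmed by the callers in the
diff):

    def start_job_on_jenkins(self, job_name, job_parameters,
                             start_timeout, build_timeout):
        # Trigger the Jenkins job, wait for the build to finish, and
        # return its status together with the build description, e.g.
        # ("FAILURE", "<reason collected from the failed build>").
        return job_result, job_description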
Change-Id: I1160d46e99751f4e714c459e3d07110958c913e3
diff --git a/tcp_tests/tests/system/test_security_updates.py b/tcp_tests/tests/system/test_security_updates.py
index 0e83990..b8242e9 100644
--- a/tcp_tests/tests/system/test_security_updates.py
+++ b/tcp_tests/tests/system/test_security_updates.py
@@ -72,12 +72,13 @@
:param dt: DrivetrainManager, tcp-qa Drivetrain manager instance
- :return: str, build execution status of cvp-sanity pipeline
+ :return: None; asserts that the cvp-sanity build succeeded
"""
- return dt.start_job_on_jenkins(
+ job_result, job_description = dt.start_job_on_jenkins(
job_name=self.SANITY_JOB_NAME,
job_parameters=self.SANITY_JOB_PARAMETERS,
start_timeout=self.JENKINS_START_TIMEOUT,
build_timeout=60 * 15
)
+ assert job_result == 'SUCCESS', job_description

def reboot_hw_node(self, ssh, salt, node):
"""Reboot the given node and wait for it to start back
@@ -128,16 +129,16 @@
# Execute 'deploy-update-package' pipeline to upgrade packages on nodes
show_step(2)
self.UPDATE_JOB_PARAMETERS["TARGET_SERVERS"] = role
- status = dt.start_job_on_jenkins(
+ job_result, job_description = dt.start_job_on_jenkins(
job_name=self.UPDATE_JOB_NAME,
job_parameters=self.UPDATE_JOB_PARAMETERS,
start_timeout=self.JENKINS_START_TIMEOUT,
build_timeout=60 * 15
)
- assert status == 'SUCCESS', (
+ assert job_result == 'SUCCESS', (
"'{}' job run status is {} after upgrading packages on {} nodes. "
- "Please check the build and executed stages.".format(
- self.UPDATE_JOB_NAME, status, role)
+ "Please check the build and executed stages {}".format(
+ self.UPDATE_JOB_NAME, job_result, role, job_description)
)

# Collect available package upgrades for nodes again