The reason for a failed Jenkins job in the cluster-under-test will be shown in the test result
PROD-36050
Change-Id: I1160d46e99751f4e714c459e3d07110958c913e3
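
In short, start_job_on_jenkins() now returns a (result, description) tuple
instead of a bare status string, so tests can put the build description and
the executed stages straight into the assertion message. A minimal sketch of
the new calling convention (names follow the tests below; the job name and
parameters are illustrative only):

    job_result, job_description = dt.start_job_on_jenkins(
        job_name='cvp-sanity',       # any Drivetrain job name
        job_parameters={},           # job-specific parameters, if any
        build_timeout=60 * 15)
    # On failure, pytest prints job_description: the Jenkins build
    # description followed by the list of executed stages.
    assert job_result == 'SUCCESS', job_description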
diff --git a/tcp_tests/managers/drivetrain_manager.py b/tcp_tests/managers/drivetrain_manager.py
index 3851e8d..f65892e 100644
--- a/tcp_tests/managers/drivetrain_manager.py
+++ b/tcp_tests/managers/drivetrain_manager.py
@@ -49,11 +49,14 @@
-        Method accepts any of the following params:
+        Method accepts any of the following params:
job_parameters=None,
job_output_prefix='',
+            jenkins_tgt='I@salt:master' (or another salt target, e.g.
+                'I@docker:client:stack:jenkins and cid01*')
start_timeout=1800,
build_timeout=3600 * 4,
verbose=False
:param job_name: string
+    :param jenkins_tgt: string, salt-style target of the node that runs
+        Jenkins
-    :return: string, Result of passed job, "SUCCESS"| "FAILED" | "UNSTABLE"
+    :return: (string, string), result of the job, "SUCCESS" | "FAILED" |
+        "UNSTABLE", and its description (build summary and executed stages)
"""
jenkins_url, jenkins_user, jenkins_pass = self.get_jenkins_creds(
@@ -76,12 +79,13 @@
LOG.info(description)
LOG.info('\n'.join(stages))
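+        # Combine the Jenkins build description and the list of executed
+        # stages into one summary that tests can attach to a failed assert.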
+ job_description = "{description}. \n\n{stages}"\
+ .format(description=description,
+ stages='\n'.join(stages))
+
if job_result != 'SUCCESS':
LOG.warning("{0}\n{1}".format(description, '\n'.join(stages)))
- return job_result
-
- def start_job_on_cfg_jenkins(self):
- pass
+ return job_result, job_description
def get_jenkins_creds(self, tgt):
"""
diff --git a/tcp_tests/tests/system/test_backup_restore.py b/tcp_tests/tests/system/test_backup_restore.py
index dddfdcc..c47f8aa 100644
--- a/tcp_tests/tests/system/test_backup_restore.py
+++ b/tcp_tests/tests/system/test_backup_restore.py
@@ -388,16 +388,16 @@
# Execute 'backupninja_backup' pipeline to create a backup
show_step(1)
- status = dt.start_job_on_jenkins(
+ job_result, job_description = dt.start_job_on_jenkins(
job_name=self.BACKUP_JOB_NAME,
job_parameters=self.BACKUP_JOB_PARAMETERS,
start_timeout=self.JENKINS_START_TIMEOUT,
build_timeout=self.JENKINS_BUILD_TIMEOUT
)
-        assert status == 'SUCCESS', (
-            "'{}' job run status is {} after creating Salt master backup. "
-            "Please check the build and executed stages.".format(
-                self.BACKUP_JOB_NAME, status)
+        assert job_result == 'SUCCESS', (
+            "'{}' job run status is {} after creating Salt master backup. "
+            "Please check the build and executed stages: {}".format(
+                self.BACKUP_JOB_NAME, job_result, job_description)
)
# Verify that backup is created and all pieces of data are rsynced
@@ -414,16 +414,16 @@
# Restore the backup
show_step(4)
- status = dt.start_job_on_jenkins(
+ job_result, job_description = dt.start_job_on_jenkins(
job_name=self.RESTORE_JOB_NAME,
job_parameters=self.RESTORE_JOB_PARAMETERS,
start_timeout=self.JENKINS_START_TIMEOUT,
build_timeout=self.JENKINS_BUILD_TIMEOUT
)
-        assert status == 'SUCCESS', (
-            "'{}' job run status is {} after restoring from Salt master "
-            "backup. Please check the build and executed stages.".format(
-                self.RESTORE_JOB_NAME, status)
+        assert job_result == 'SUCCESS', (
+            "'{}' job run status is {} after restoring from Salt master "
+            "backup. Please check the build and executed stages: {}".format(
+                self.RESTORE_JOB_NAME, job_result, job_description)
)
# Verify that all pieces of lost/changed data are restored
@@ -659,16 +659,16 @@
# Execute 'backupninja_backup' pipeline to create a backup
show_step(1)
- status = dt.start_job_on_jenkins(
+ job_result, job_description = dt.start_job_on_jenkins(
job_name=self.BACKUP_JOB_NAME,
job_parameters=self.BACKUP_JOB_PARAMETERS,
start_timeout=self.JENKINS_START_TIMEOUT,
build_timeout=self.JENKINS_BUILD_TIMEOUT
)
-        assert status == 'SUCCESS', (
-            "'{}' job run status is {} after creating MAAS data backup. "
-            "Please check the build and executed stages.".format(
-                self.BACKUP_JOB_NAME, status)
+        assert job_result == 'SUCCESS', (
+            "'{}' job run status is {} after creating MAAS data backup. "
+            "Please check the build and executed stages: {}".format(
+                self.BACKUP_JOB_NAME, job_result, job_description)
)
# Verify that backup is created and all pieces of data are rsynced
@@ -685,16 +685,16 @@
# Restore the backup
show_step(4)
- status = dt.start_job_on_jenkins(
+ job_result, job_description = dt.start_job_on_jenkins(
job_name=self.RESTORE_JOB_NAME,
job_parameters=self.RESTORE_JOB_PARAMETERS,
start_timeout=self.JENKINS_START_TIMEOUT,
build_timeout=self.JENKINS_BUILD_TIMEOUT
)
-        assert status == 'SUCCESS', (
-            "'{}' job run status is {} after restoring from MAAS "
-            "backup. Please check the build and executed stages.".format(
-                self.RESTORE_JOB_NAME, status)
+        assert job_result == 'SUCCESS', (
+            "'{}' job run status is {} after restoring from MAAS "
+            "backup. Please check the build and executed stages: {}".format(
+                self.RESTORE_JOB_NAME, job_result, job_description)
)
# Verify that all pieces of lost/changed data are restored
diff --git a/tcp_tests/tests/system/test_backup_restore_cassandra.py b/tcp_tests/tests/system/test_backup_restore_cassandra.py
index bbbc6f3..5dd695c 100644
--- a/tcp_tests/tests/system/test_backup_restore_cassandra.py
+++ b/tcp_tests/tests/system/test_backup_restore_cassandra.py
@@ -164,12 +164,12 @@
salt.run_state("I@jenkins:client", "jenkins.client")
show_step(4)
job_name = "deploy-cassandra-db-restore"
- run_cassandra_restore = dt.start_job_on_jenkins(
+ job_result, job_description = dt.start_job_on_jenkins(
start_timeout=jenkins_start_timeout,
build_timeout=jenkins_build_timeout,
job_name=job_name)
- assert run_cassandra_restore == "SUCCESS"
+ assert job_result == "SUCCESS", job_description
network_presented = self.is_network_restored(
underlay_actions,
fixture_network_name,
diff --git a/tcp_tests/tests/system/test_backup_restore_galera.py b/tcp_tests/tests/system/test_backup_restore_galera.py
index 4480f88..49f6234 100644
--- a/tcp_tests/tests/system/test_backup_restore_galera.py
+++ b/tcp_tests/tests/system/test_backup_restore_galera.py
@@ -98,11 +98,11 @@
job_parameters = {
'ASK_CONFIRMATION': False
}
- backup_galera_pipeline = dt.start_job_on_jenkins(
+ job_result, job_description = dt.start_job_on_jenkins(
job_name=job_name,
job_parameters=job_parameters)
- assert backup_galera_pipeline == 'SUCCESS'
+ assert job_result == 'SUCCESS', job_description
# ######################## Run CPV ###########################
show_step(3)
@@ -122,11 +122,11 @@
'test_ceph_status', 'test_prometheus_alert_count',
'test_uncommited_changes')
}
- run_cvp_sanity = dt.start_job_on_jenkins(
+ job_result, job_description = dt.start_job_on_jenkins(
job_name=job_name,
job_parameters=job_cvp_sanity_parameters)
- assert run_cvp_sanity == 'SUCCESS'
+ assert job_result == 'SUCCESS', job_description
# ######################## Run Tempest ###########################
show_step(4)
@@ -134,11 +134,11 @@
job_parameters = {
'TEMPEST_ENDPOINT_TYPE': 'internalURL'
}
- run_cvp_tempest = dt.start_job_on_jenkins(
+ job_result, job_description = dt.start_job_on_jenkins(
job_name=job_name,
job_parameters=job_parameters)
- assert run_cvp_tempest == 'SUCCESS'
+ assert job_result == 'SUCCESS', job_description
show_step(5)
self.create_flavor(underlay_actions, fixture_flavor2, cfg_node)
# ######################## Run Restore ###########################
@@ -148,11 +148,11 @@
'RESTORE_TYPE': 'ONLY_RESTORE',
'ASK_CONFIRMATION': False
}
- run_galera_verify_restore = dt.start_job_on_jenkins(
+ job_result, job_description = dt.start_job_on_jenkins(
job_name=job_name,
job_parameters=job_parameters)
- assert run_galera_verify_restore == 'SUCCESS'
+ assert job_result == 'SUCCESS', job_description
assert self.is_flavor_restored(underlay_actions,
fixture_flavor1,
@@ -164,20 +164,20 @@
show_step(7)
job_name = 'cvp-sanity'
- run_cvp_sanity = dt.start_job_on_jenkins(
+ job_result, job_description = dt.start_job_on_jenkins(
job_name=job_name,
job_parameters=job_cvp_sanity_parameters)
- assert run_cvp_sanity == 'SUCCESS'
+ assert job_result == 'SUCCESS', job_description
# ######################## Run Tempest ###########################
show_step(8)
job_name = 'cvp-tempest'
job_parameters = {
'TEMPEST_ENDPOINT_TYPE': 'internalURL'
}
- run_cvp_tempest = dt.start_job_on_jenkins(
+ job_result, job_description = dt.start_job_on_jenkins(
job_name=job_name,
job_parameters=job_parameters)
- assert run_cvp_tempest == 'SUCCESS'
+ assert job_result == 'SUCCESS', job_description
self.delete_flavor(underlay_actions, fixture_flavor1, cfg_node)
diff --git a/tcp_tests/tests/system/test_backup_restore_zookeeper.py b/tcp_tests/tests/system/test_backup_restore_zookeeper.py
index b06f1f8..cd04eb9 100644
--- a/tcp_tests/tests/system/test_backup_restore_zookeeper.py
+++ b/tcp_tests/tests/system/test_backup_restore_zookeeper.py
@@ -175,11 +175,11 @@
show_step(4)
job_name = 'deploy-zookeeper-restore'
- run_zookeeper_restore = dt.start_job_on_jenkins(
+ job_result, job_description = dt.start_job_on_jenkins(
start_timeout=jenkins_start_timeout,
build_timeout=jenkins_build_timeout,
job_name=job_name)
- assert run_zookeeper_restore == 'SUCCESS'
+ assert job_result == 'SUCCESS', job_description
network_presented = self.is_network_restored(
underlay_actions,
fixture_network_name,
diff --git a/tcp_tests/tests/system/test_ceph_operations.py b/tcp_tests/tests/system/test_ceph_operations.py
index cfff2b5..30fdd8d 100644
--- a/tcp_tests/tests/system/test_ceph_operations.py
+++ b/tcp_tests/tests/system/test_ceph_operations.py
@@ -106,11 +106,11 @@
'HOST': 'xtra*',
'HOST_TYPE': 'osd'
}
- add_node_pipeline = dt.start_job_on_jenkins(
+ job_result, job_description = dt.start_job_on_jenkins(
job_name=job_name,
job_parameters=job_parameters,
verbose=True)
- assert add_node_pipeline == 'SUCCESS'
+ assert job_result == 'SUCCESS', job_description
def test_added_node(self):
# root@osd001:~# ceph osd tree in
@@ -142,11 +142,11 @@
'HOST': 'xtra*',
'HOST_TYPE': 'osd'
}
- remove_node_pipeline = dt.start_job_on_jenkins(
+ job_result, job_description = dt.start_job_on_jenkins(
job_name=job_name,
job_parameters=job_parameters,
verbose=True)
- assert remove_node_pipeline == 'SUCCESS'
+ assert job_result == 'SUCCESS', job_description
class TestCephMon(object):
diff --git a/tcp_tests/tests/system/test_cvp_pipelines.py b/tcp_tests/tests/system/test_cvp_pipelines.py
index 3f89586..41c95fe 100644
--- a/tcp_tests/tests/system/test_cvp_pipelines.py
+++ b/tcp_tests/tests/system/test_cvp_pipelines.py
@@ -152,7 +152,7 @@
'TEMPEST_ENDPOINT_TYPE': 'internalURL',
'TEMPEST_TEST_PATTERN': tempest_pattern,
}
- cvp_tempest_result = dt.start_job_on_jenkins(
+ job_result, job_description = dt.start_job_on_jenkins(
job_name,
jenkins_tgt='I@docker:client:stack:jenkins and I@salt:master',
start_timeout=jenkins_start_timeout,
@@ -160,7 +160,7 @@
verbose=True,
job_parameters=job_parameters,
job_output_prefix='[ {job_name}/{build_number}:platform {time} ] ')
-    LOG.info('Job {0} result: {1}'.format(job_name, cvp_tempest_result))
+    LOG.info('Job {0} result: {1}. {2}'.format(
+        job_name, job_result, job_description))
show_step(4)
tempest_actions.fetch_arficats(
diff --git a/tcp_tests/tests/system/test_mcp_update.py b/tcp_tests/tests/system/test_mcp_update.py
index 23cd61f..72ed442 100644
--- a/tcp_tests/tests/system/test_mcp_update.py
+++ b/tcp_tests/tests/system/test_mcp_update.py
@@ -189,12 +189,12 @@
job_parameters = {
'BRANCHES': 'release/proposed/2019.2.0'
}
- update_pipelines = dt.start_job_on_jenkins(
+ job_result, job_description = dt.start_job_on_jenkins(
job_name=job_name,
job_parameters=job_parameters,
verbose=True)
- assert update_pipelines == 'SUCCESS'
+ assert job_result == 'SUCCESS', job_description
# ################### Downstream pipeline-library ####################
show_step(3)
@@ -202,12 +202,12 @@
job_parameters = {
'BRANCHES': 'release/proposed/2019.2.0'
}
- update_pipeline_library = dt.start_job_on_jenkins(
+ job_result, job_description = dt.start_job_on_jenkins(
job_name=job_name,
job_parameters=job_parameters,
verbose=True)
- assert update_pipeline_library == 'SUCCESS'
+ assert job_result == 'SUCCESS', job_description
# ################### Start 'Deploy - upgrade MCP Drivetrain' job #####
show_step(4)
@@ -218,13 +218,13 @@
'MK_PIPELINES_REFSPEC': 'release/proposed/2019.2.0',
'TARGET_MCP_VERSION': '2019.2.0'
}
- update_drivetrain = dt.start_job_on_jenkins(
+ job_result, job_description = dt.start_job_on_jenkins(
job_name=job_name,
job_parameters=job_parameters,
verbose=True,
build_timeout=3 * 60 * 60)
- assert update_drivetrain == 'SUCCESS'
+ assert job_result == 'SUCCESS', job_description
@pytest.mark.grab_versions
@pytest.mark.parametrize("_", [settings.ENV_NAME])
@@ -259,11 +259,11 @@
show_step(3)
job_name = 'update-glusterfs'
- update_glusterfs = dt.start_job_on_jenkins(
+ job_result, job_description = dt.start_job_on_jenkins(
job_name=job_name,
build_timeout=40 * 60)
- assert update_glusterfs == 'SUCCESS'
+ assert job_result == 'SUCCESS', job_description
# ################ Check GlusterFS version for servers ##############
show_step(4)
@@ -331,12 +331,12 @@
'INTERACTIVE': 'false'
}
- update_galera = dt.start_job_on_jenkins(
+ job_result, job_description = dt.start_job_on_jenkins(
job_name=job_name,
job_parameters=job_parameters,
build_timeout=40 * 60)
- assert update_galera == 'SUCCESS'
+ assert job_result == 'SUCCESS', job_description
@pytest.fixture
def disable_automatic_failover_neutron_for_test(self, salt_actions):
@@ -465,12 +465,12 @@
'INTERACTIVE': 'false'
}
- update_rabbit = dt.start_job_on_jenkins(
+ job_result, job_description = dt.start_job_on_jenkins(
job_name='deploy-upgrade-rabbitmq',
job_parameters=job_parameters,
build_timeout=40 * 60
)
- assert update_rabbit == 'SUCCESS'
+ assert job_result == 'SUCCESS', job_description
@pytest.mark.grab_versions
@pytest.mark.parametrize("_", [settings.ENV_NAME])
@@ -496,11 +496,11 @@
show_step(2)
job_parameters = {}
- update_ceph = dt.start_job_on_jenkins(
+ job_result, job_description = dt.start_job_on_jenkins(
job_name='ceph-update',
job_parameters=job_parameters)
- assert update_ceph == 'SUCCESS'
+ assert job_result == 'SUCCESS', job_description
# ########## Verify Ceph version #####################################
show_step(3)
@@ -526,11 +526,11 @@
"STAGE_UPGRADE_ES_KIBANA": True,
"STAGE_UPGRADE_SYSTEM_PART": True
}
- upgrade_control_pipeline = drivetrain.start_job_on_jenkins(
+ job_result, job_description = drivetrain.start_job_on_jenkins(
job_name="stacklight-upgrade",
job_parameters=job_parameters)
- assert upgrade_control_pipeline == 'SUCCESS'
+ assert job_result == 'SUCCESS', job_description
@pytest.mark.usefixtures("switch_to_proposed_pipelines",
@@ -579,11 +579,11 @@
"UPGRADE_SALTSTACK": False,
"OS_UPGRADE": True,
"INTERACTIVE": False}
- upgrade_control_pipeline = drivetrain_actions.start_job_on_jenkins(
+ job_result, job_description = drivetrain_actions.start_job_on_jenkins(
job_name="deploy-upgrade-control",
job_parameters=job_parameters)
- assert upgrade_control_pipeline == 'SUCCESS'
+ assert job_result == 'SUCCESS', job_description
@pytest.mark.grab_versions
@pytest.mark.run_mcp_update
@@ -596,11 +596,11 @@
"OS_DIST_UPGRADE": True,
"OS_UPGRADE": True,
"INTERACTIVE": False}
- upgrade_data_pipeline = drivetrain_actions.start_job_on_jenkins(
+ job_result, job_description = drivetrain_actions.start_job_on_jenkins(
job_name="deploy-upgrade-ovs-gateway",
job_parameters=job_parameters)
- assert upgrade_data_pipeline == 'SUCCESS'
+ assert job_result == 'SUCCESS', job_description
@pytest.mark.grab_versions
@pytest.mark.run_mcp_update
@@ -611,8 +611,8 @@
"OS_DIST_UPGRADE": True,
"OS_UPGRADE": True,
"INTERACTIVE": False}
- upgrade_compute_pipeline = drivetrain_actions.start_job_on_jenkins(
+ job_result, job_description = drivetrain_actions.start_job_on_jenkins(
job_name="deploy-upgrade-compute",
job_parameters=job_parameters)
- assert upgrade_compute_pipeline == 'SUCCESS'
+ assert job_result == 'SUCCESS', job_description
diff --git a/tcp_tests/tests/system/test_security_updates.py b/tcp_tests/tests/system/test_security_updates.py
index 0e83990..b8242e9 100644
--- a/tcp_tests/tests/system/test_security_updates.py
+++ b/tcp_tests/tests/system/test_security_updates.py
@@ -72,12 +72,13 @@
:param dt: DrivetrainManager, tcp-qa Drivetrain manager instance
-        :return: str, build execution status of cvp-sanity pipeline
+        :return: None; asserts that the cvp-sanity job finished with SUCCESS
"""
- return dt.start_job_on_jenkins(
+ job_result, job_description = dt.start_job_on_jenkins(
job_name=self.SANITY_JOB_NAME,
job_parameters=self.SANITY_JOB_PARAMETERS,
start_timeout=self.JENKINS_START_TIMEOUT,
build_timeout=60 * 15
)
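+        # Fail here with the job description so the reason for a failed
+        # cvp-sanity run shows up directly in the test result.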
+ assert job_result == "SUCCESS", job_description
def reboot_hw_node(self, ssh, salt, node):
"""Reboot the given node and wait for it to start back
@@ -128,16 +129,16 @@
# Execute 'deploy-update-package' pipeline to upgrade packages on nodes
show_step(2)
self.UPDATE_JOB_PARAMETERS["TARGET_SERVERS"] = role
- status = dt.start_job_on_jenkins(
+ job_result, job_description = dt.start_job_on_jenkins(
job_name=self.UPDATE_JOB_NAME,
job_parameters=self.UPDATE_JOB_PARAMETERS,
start_timeout=self.JENKINS_START_TIMEOUT,
build_timeout=60 * 15
)
- assert status == 'SUCCESS', (
+ assert job_result == 'SUCCESS', (
"'{}' job run status is {} after upgrading packages on {} nodes. "
- "Please check the build and executed stages.".format(
- self.UPDATE_JOB_NAME, status, role)
+ "Please check the build and executed stages {}".format(
+ self.UPDATE_JOB_NAME, job_result, role, job_description)
)
# Collect available package upgrades for nodes again
diff --git a/tcp_tests/tests/system/test_upgrade_contrail.py b/tcp_tests/tests/system/test_upgrade_contrail.py
index dee3148..e853d04 100644
--- a/tcp_tests/tests/system/test_upgrade_contrail.py
+++ b/tcp_tests/tests/system/test_upgrade_contrail.py
@@ -44,7 +44,7 @@
job_parameters = {
'ASK_CONFIRMATION': False
}
- update_control_vms = dt.start_job_on_jenkins(
+ job_result, job_description = dt.start_job_on_jenkins(
job_name=job_name,
job_parameters=job_parameters)
- assert update_control_vms == 'SUCCESS'
+ assert job_result == 'SUCCESS', job_description
diff --git a/tcp_tests/tests/system/test_upgrade_pike_queens.py b/tcp_tests/tests/system/test_upgrade_pike_queens.py
index 3ea2d23..87616eb 100644
--- a/tcp_tests/tests/system/test_upgrade_pike_queens.py
+++ b/tcp_tests/tests/system/test_upgrade_pike_queens.py
@@ -200,33 +200,33 @@
}
# ####### Run job for ctl* ###
job_parameters["TARGET_SERVERS"] = "ctl*"
- update_control_vms = dt.start_job_on_jenkins(
+ job_result, job_description = dt.start_job_on_jenkins(
job_name=job_name,
job_parameters=job_parameters)
- assert update_control_vms == 'SUCCESS'
+ assert job_result == 'SUCCESS', job_description
if salt_actions.cmd_run("mdb*", "test.ping")[0].keys():
# ####### Run job for mdb* ###
job_parameters["TARGET_SERVERS"] = "mdb*"
- update_control_vms = dt.start_job_on_jenkins(
+ job_result, job_description = dt.start_job_on_jenkins(
job_name=job_name,
job_parameters=job_parameters)
- assert update_control_vms == 'SUCCESS'
+ assert job_result == 'SUCCESS', job_description
if salt_actions.cmd_run("kmn*", "test.ping")[0].keys():
# ####### Run job for kmn* ###
job_parameters["TARGET_SERVERS"] = "kmn*"
- update_control_vms = dt.start_job_on_jenkins(
+ job_result, job_description = dt.start_job_on_jenkins(
job_name=job_name,
job_parameters=job_parameters)
- assert update_control_vms == 'SUCCESS'
+ assert job_result == 'SUCCESS', job_description
# ####### Run job for prx* ###
job_parameters["TARGET_SERVERS"] = "prx*"
- update_control_vms = dt.start_job_on_jenkins(
+ job_result, job_description = dt.start_job_on_jenkins(
job_name=job_name,
job_parameters=job_parameters)
- assert update_control_vms == 'SUCCESS'
+ assert job_result == 'SUCCESS', job_description
# ########## Upgrade gateway nodes ###########
show_step(3)
@@ -239,10 +239,10 @@
'OS_UPGRADE': True,
'TARGET_SERVERS': "gtw*"
}
- update_gateway = dt.start_job_on_jenkins(
+ job_result, job_description = dt.start_job_on_jenkins(
job_name=job_name,
job_parameters=job_parameters)
- assert update_gateway == 'SUCCESS'
+ assert job_result == 'SUCCESS', job_description
else:
LOG.info("This deployment doesn't have gtw* nodes, \
so skip this step")
@@ -256,10 +256,10 @@
'OS_UPGRADE': True,
'TARGET_SERVERS': "cmp*"
}
- update_computes = dt.start_job_on_jenkins(
+ job_result, job_description = dt.start_job_on_jenkins(
job_name=job_name,
job_parameters=job_parameters)
- assert update_computes == 'SUCCESS'
+ assert job_result == 'SUCCESS', job_description
# ############ Perform the post-upgrade activities ##########
show_step(5)
@@ -296,17 +296,17 @@
'test_ceph_status', 'test_prometheus_alert_count',
'test_uncommited_changes')
}
- run_cvp_sanity = dt.start_job_on_jenkins(
+ job_result, job_description = dt.start_job_on_jenkins(
job_name=job_name,
job_parameters=job_parameters)
- assert run_cvp_sanity == 'SUCCESS'
+ assert job_result == 'SUCCESS', job_description
# ######################## Run Tempest #######################
show_step(7)
job_name = 'cvp-tempest'
job_parameters = {
'TEMPEST_ENDPOINT_TYPE': 'internalURL'
}
- run_cvp_tempest = dt.start_job_on_jenkins(
+ job_result, job_description = dt.start_job_on_jenkins(
job_name=job_name,
job_parameters=job_parameters)
- assert run_cvp_tempest == 'SUCCESS'
+ assert job_result == 'SUCCESS', job_description