Fix CI/CD jobs

- escape '<' and '>' as XML entities in all deployment reports
  (see the sketch below)
- resume the environment after pytest when
  SHUTDOWN_ENV_ON_TEARDOWN=false
- destroy the environment only in the parent job teardown, and only
  when SHUTDOWN_ENV_ON_TEARDOWN=true
- change the color of error messages to 'purple' to distinguish
  them from Groovy traceback messages
- add a helper message explaining how to revert devops snapshots
Related task: PROD-21660
Change-Id: Ic2c5b0b69b647a0c6607cf45f6a60de1ec28ee6a
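
For context, a minimal Groovy sketch of the escaping applied to report
text before it is embedded in the JUnit XML. The standalone helper name
'escapeForJunit' is illustrative only and not part of this change; the
actual patch inlines the replaceAll calls in create_deploy_result_report().

    // Assumed illustrative helper (not part of this change): escape the two
    // characters that would otherwise be parsed as XML markup when the
    // error message is placed inside the generated JUnit report.
    def escapeForJunit(String text) {
        return text.replaceAll("<", "&lt;").replaceAll(">", "&gt;")
    }

    // Example usage:
    // escapeForJunit("<failure> in stage 'salt'")
    // returns "&lt;failure&gt; in stage 'salt'"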
diff --git a/jobs/pipelines/deploy-cicd-and-run-tests.groovy b/jobs/pipelines/deploy-cicd-and-run-tests.groovy
index 3e96c84..55dda48 100644
--- a/jobs/pipelines/deploy-cicd-and-run-tests.groovy
+++ b/jobs/pipelines/deploy-cicd-and-run-tests.groovy
@@ -32,21 +32,19 @@
currentBuild.result = 'SUCCESS'
} catch (e) {
- common.printMsg("Deploy is failed: " + e.message , "red")
+ common.printMsg("Deploy is failed: " + e.message , "purple")
+ report_text = e.message
+ def snapshot_name = "deploy_failed"
shared.run_cmd("""\
dos.py suspend ${ENV_NAME} || true
- dos.py snapshot ${ENV_NAME} deploy_failed || true
+ dos.py snapshot ${ENV_NAME} ${snapshot_name} || true
""")
if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "false") {
shared.run_cmd("""\
dos.py resume ${ENV_NAME} || true
""")
- } else {
- shared.run_cmd("""\
- dos.py destroy ${ENV_NAME} || true
- """)
}
- report_text = e.message
+ shared.devops_snapshot_info(snapshot_name)
throw e
} finally {
shared.create_deploy_result_report(steps, currentBuild.result, report_text)
@@ -60,22 +58,19 @@
}
} catch (e) {
- common.printMsg("Tests are failed: " + e.message, "red")
+ common.printMsg("Tests are failed: " + e.message, "purple")
+ def snapshot_name = "tests_failed"
shared.run_cmd("""\
dos.py suspend ${ENV_NAME} || true
- dos.py snapshot ${ENV_NAME} tests_failed || true
+ dos.py snapshot ${ENV_NAME} ${snapshot_name} || true
""")
- throw e
- } finally {
if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "false") {
shared.run_cmd("""\
dos.py resume ${ENV_NAME} || true
""")
- } else {
- shared.run_cmd("""\
- dos.py destroy ${ENV_NAME} || true
- """)
}
+ shared.devops_snapshot_info(snapshot_name)
+ throw e
}
}
@@ -83,12 +78,21 @@
throttle(['fuel_devops_environment']) {
node ("${NODE_NAME}") {
try {
+ // run deploy stages
deploy(shared, common, steps)
+ // run test stages
test(shared, common, steps)
} catch (e) {
- common.printMsg("Job is failed: " + e.message, "red")
+ common.printMsg("Job is failed: " + e.message, "purple")
throw e
} finally {
+ // shutdown the environment if required
+ if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "true") {
+ shared.run_cmd("""\
+ dos.py destroy ${ENV_NAME} || true
+ """)
+ }
+ // report results to testrail
shared.swarm_testrail_report(steps)
}
}
diff --git a/jobs/pipelines/swarm-bootstrap-salt-cluster-devops.groovy b/jobs/pipelines/swarm-bootstrap-salt-cluster-devops.groovy
index ce32c24..64c8783 100644
--- a/jobs/pipelines/swarm-bootstrap-salt-cluster-devops.groovy
+++ b/jobs/pipelines/swarm-bootstrap-salt-cluster-devops.groovy
@@ -103,7 +103,7 @@
}
} catch (e) {
- common.printMsg("Job is failed", "red")
+ common.printMsg("Job is failed", "purple")
throw e
} finally {
// TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
diff --git a/jobs/pipelines/swarm-deploy-cicd.groovy b/jobs/pipelines/swarm-deploy-cicd.groovy
index 538f5ea..5ace2ca 100644
--- a/jobs/pipelines/swarm-deploy-cicd.groovy
+++ b/jobs/pipelines/swarm-deploy-cicd.groovy
@@ -66,7 +66,7 @@
}
} catch (e) {
- common.printMsg("Job is failed", "red")
+ common.printMsg("Job is failed", "purple")
throw e
} finally {
// TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
diff --git a/jobs/pipelines/swarm-deploy-platform.groovy b/jobs/pipelines/swarm-deploy-platform.groovy
index 78e363f..9a6b1d1 100644
--- a/jobs/pipelines/swarm-deploy-platform.groovy
+++ b/jobs/pipelines/swarm-deploy-platform.groovy
@@ -66,7 +66,7 @@
}
} catch (e) {
- common.printMsg("Job is failed", "red")
+ common.printMsg("Job is failed", "purple")
throw e
} finally {
// TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
diff --git a/jobs/pipelines/swarm-run-pytest.groovy b/jobs/pipelines/swarm-run-pytest.groovy
index 5d7bd8d..0dd2d7a 100644
--- a/jobs/pipelines/swarm-run-pytest.groovy
+++ b/jobs/pipelines/swarm-run-pytest.groovy
@@ -69,13 +69,23 @@
py.test --junit-xml=nosetests.xml ${RUN_TEST_OPTS}
- dos.py suspend ${ENV_NAME}
- dos.py snapshot ${ENV_NAME} test_completed
""")
+
+ def snapshot_name = "test_completed"
+ shared.run_cmd("""\
+ dos.py suspend ${ENV_NAME}
+ dos.py snapshot ${ENV_NAME} ${snapshot_name}
+ """)
+ if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "false") {
+ shared.run_cmd("""\
+ dos.py resume ${ENV_NAME}
+ """)
+ }
+ shared.devops_snapshot_info(snapshot_name)
}
} catch (e) {
- common.printMsg("Job is failed", "red")
+ common.printMsg("Job is failed", "purple")
throw e
} finally {
// TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
diff --git a/jobs/pipelines/swarm-testrail-report.groovy b/jobs/pipelines/swarm-testrail-report.groovy
index 3da7c04..c43b3bb 100644
--- a/jobs/pipelines/swarm-testrail-report.groovy
+++ b/jobs/pipelines/swarm-testrail-report.groovy
@@ -133,7 +133,7 @@
}
} catch (e) {
- common.printMsg("Job is failed", "red")
+ common.printMsg("Job is failed", "purple")
throw e
} finally {
// reporting is failed for some reason
diff --git a/src/com/mirantis/system_qa/SharedPipeline.groovy b/src/com/mirantis/system_qa/SharedPipeline.groovy
index 7f002ed..dcf05da 100644
--- a/src/com/mirantis/system_qa/SharedPipeline.groovy
+++ b/src/com/mirantis/system_qa/SharedPipeline.groovy
@@ -55,7 +55,7 @@
if (job_info.getResult() != "SUCCESS") {
currentBuild.result = job_info.getResult()
def build_number = job_info.getNumber()
- common.printMsg("Job '${job_name}' failed, getting details", "red")
+ common.printMsg("Job '${job_name}' failed, getting details", "purple")
def workflow_details=run_cmd_stdout("""\
export JOB_NAME=${job_name}
export BUILD_NUMBER=${build_number}
@@ -83,7 +83,7 @@
def job_url = "${build_url}"
currentBuild.result = build_status
if (junit_report_filename) {
- common.printMsg("Job '${job_url}' failed with status ${build_status}, getting details", "red")
+ common.printMsg("Job '${job_url}' failed with status ${build_status}, getting details", "purple")
step($class: 'hudson.plugins.copyartifact.CopyArtifact',
projectName: job_name,
selector: specific("${build_number}"),
@@ -94,10 +94,8 @@
def String junit_report_xml = readFile("${junit_report_filename}")
def String junit_report_xml_pretty = new XmlUtil().serialize(junit_report_xml)
- // Replace '<' and '>' to '&lt;' and '&gt;' to avoid conflicts between xml tags in the message and JUnit report
- def String junit_report_xml_filtered = junit_report_xml_pretty.replaceAll("<","&lt;").replaceAll(">", "&gt;")
def String msg = "Job '${job_url}' failed with status ${build_status}, JUnit report:\n"
- throw new Exception(msg + junit_report_xml_filtered)
+ throw new Exception(msg + junit_report_xml_pretty)
} else {
throw new Exception("Job '${job_url}' failed with status ${build_status}, please check the console output.")
}
@@ -340,7 +338,7 @@
""")
} catch (e) {
def common = new com.mirantis.mk.Common()
- common.printMsg("Product job 'deploy_openstack' failed, getting details", "red")
+ common.printMsg("Product job 'deploy_openstack' failed, getting details", "purple")
def workflow_details=run_cmd_stdout("""\
. ./tcp_tests/utils/env_salt
. ./tcp_tests/utils/env_jenkins_day01
@@ -371,7 +369,7 @@
""")
} catch (e) {
def common = new com.mirantis.mk.Common()
- common.printMsg("Product job 'deploy_openstack' failed, getting details", "red")
+ common.printMsg("Product job 'deploy_openstack' failed, getting details", "purple")
def workflow_details=run_cmd_stdout("""\
. ./tcp_tests/utils/env_salt
. ./tcp_tests/utils/env_jenkins_cicd
@@ -398,6 +396,31 @@
}
}
+def devops_snapshot_info(snapshot_name) {
+ // Print helper message after snapshot
+ def common = new com.mirantis.mk.Common()
+
+ def SALT_MASTER_IP=run_cmd_stdout("""\
+ . ./tcp_tests/utils/env_salt
+ echo \$SALT_MASTER_IP
+ """).trim().split().last()
+ def login = "root" // set fixed 'root' login for now
+ def password = "r00tme" // set fixed 'root' password for now
+ def key_file = "${env.WORKSPACE}/id_rsa" // set fixed path in the WORKSPACE
+ def VENV_PATH='/home/jenkins/fuel-devops30'
+
+ common.printMsg("""\
+#########################
+# To revert the snapshot:
+#########################
+. ${VENV_PATH}/bin/activate;
+dos.py revert ${ENV_NAME} ${snapshot_name};
+dos.py resume ${ENV_NAME};
+# dos.py time-sync ${ENV_NAME}; # Optional\n
+ssh -i ${key_file} ${login}@${SALT_MASTER_IP} # Optional password: ${password}
+""", "cyan")
+}
+
def devops_snapshot(stack) {
// Make the snapshot with name "${stack}_deployed"
// for all VMs in the environment.
@@ -418,6 +441,7 @@
cp \$(pwd)/${ENV_NAME}_salt_deployed.ini \$(pwd)/${ENV_NAME}_${stack}_deployed.ini
fi
""")
+ devops_snapshot_info("${stack}_deployed")
}
def get_steps_list(steps) {
@@ -429,11 +453,15 @@
// <filename> is name of the XML report file that will be created
// <status> is one of the 'success', 'skipped', 'failure' or 'error'
// 'error' status is assumed as 'Blocker' in TestRail reporter
+
+ // Replace '<' and '>' to '&lt;' and '&gt;' to avoid conflicts between xml tags in the message and JUnit report
+ def String text_filtered = text.replaceAll("<","&lt;").replaceAll(">", "&gt;")
+
def script = """\
<?xml version=\"1.0\" encoding=\"utf-8\"?>
<testsuite>
<testcase classname=\"${classname}\" name=\"${name}\" time=\"0\">
- <${status} message=\"${status_message}\">${text}</${status}>
+ <${status} message=\"${status_message}\">${text_filtered}</${status}>
<system-out>${stdout}</system-out>
<system-err>${stderr}</system-err>
</testcase>