Add possibility to use ENV_MANAGER=heat in test pipelines
- If ENV_MANAGER=heat, environment snapshots are unavailable.
Carefully select the test cases for such environments.
- Added a new job swarm-bootstrap-salt-cluster-heat.groovy
to create environment in OpenStack
- Added new parameters for parent jobs:
ENV_MANAGER (default=devops)
OS_AUTH_URL (for ENV_MANAGER=heat) - Keystone URL
OS_PROJECT_NAME (for ENV_MANAGER=heat) - OS project name
OS_USER_DOMAIN_NAME (for ENV_MANAGER=heat) - OS user domain name
OS_CREDENTIALS (for ENV_MANAGER=heat) - Jenkins credentials
with username and password to access OpenStack
LAB_PARAM_DEFAULTS (for ENV_MANAGER=heat) - environment file
for Heat template with 'parameter_defaults' dict.
- Added requirements 'python-openstackclient' and 'python-glanceclient'
to operate on images and Heat stacks from Jenkins pipeline scripts.
Related-task: #PROD-27687
Change-Id: I5b3a2fa3aac0bf3d592efa3617e25b8a965f377f
diff --git a/jobs/pipelines/deploy-cicd-and-run-tests.groovy b/jobs/pipelines/deploy-cicd-and-run-tests.groovy
index f5c5b9a..e0e43ab 100644
--- a/jobs/pipelines/deploy-cicd-and-run-tests.groovy
+++ b/jobs/pipelines/deploy-cicd-and-run-tests.groovy
@@ -3,30 +3,52 @@
def common = new com.mirantis.mk.Common()
def shared = new com.mirantis.system_qa.SharedPipeline()
def steps = "hardware,create_model,salt," + env.DRIVETRAIN_STACK_INSTALL + "," + env.PLATFORM_STACK_INSTALL
+def env_manager = env.ENV_MANAGER ?: 'devops'
+def jenkins_slave_node_name = "${NODE_NAME}"
currentBuild.description = "${NODE_NAME}:${ENV_NAME}"
-def deploy(shared, common, steps) {
+def deploy(shared, common, steps, env_manager, jenkins_slave_node_name) {
def report_text = ''
try {
stage("Clean the environment and clone tcp-qa") {
- shared.prepare_working_dir()
+ shared.prepare_working_dir(env_manager)
}
stage("Create environment, generate model, bootstrap the salt-cluster") {
// steps: "hardware,create_model,salt"
- shared.swarm_bootstrap_salt_cluster_devops()
+ if (env_manager == 'devops') {
+ jenkins_slave_node_name = "${NODE_NAME}"
+ shared.swarm_bootstrap_salt_cluster_devops()
+ } else if (env_manager == 'heat') {
+ def new_jenkins_slave_node_name = "openstack_slave_${JOB_NAME}"
+ // If shared.swarm_bootstrap_salt_cluster_heat() fails,
+ // do not schedule shared.swarm_testrail_report() on the non-existent Jenkins slave
+ shared.swarm_bootstrap_salt_cluster_heat(new_jenkins_slave_node_name)
+ // Once the Heat stack is created, set jenkins_slave_node_name to the new Jenkins slave
+ jenkins_slave_node_name = new_jenkins_slave_node_name
+ } else {
+ throw new Exception("Unknown env_manager: '${env_manager}'")
+ }
}
stage("Install core infrastructure and deploy CICD nodes") {
- // steps: env.DRIVETRAIN_STACK_INSTALL
- shared.swarm_deploy_cicd(env.DRIVETRAIN_STACK_INSTALL, env.DRIVETRAIN_STACK_INSTALL_TIMEOUT)
+ if (env.DRIVETRAIN_STACK_INSTALL) {
+ // steps: env.DRIVETRAIN_STACK_INSTALL
+ shared.swarm_deploy_cicd(env.DRIVETRAIN_STACK_INSTALL, env.DRIVETRAIN_STACK_INSTALL_TIMEOUT, jenkins_slave_node_name)
+ } else {
+ common.printMsg("DRIVETRAIN_STACK_INSTALL is empty, skipping 'swarm-deploy-cicd' job", "green")
+ }
}
stage("Deploy platform components") {
- // steps: env.PLATFORM_STACK_INSTALL
- shared.swarm_deploy_platform(env.PLATFORM_STACK_INSTALL, env.PLATFORM_STACK_INSTALL_TIMEOUT)
+ if (env.PLATFORM_STACK_INSTALL) {
+ // steps: env.PLATFORM_STACK_INSTALL
+ shared.swarm_deploy_platform(env.PLATFORM_STACK_INSTALL, env.PLATFORM_STACK_INSTALL_TIMEOUT, jenkins_slave_node_name)
+ } else {
+ common.printMsg("PLATFORM_STACK_INSTALL is empty, skipping 'swarm-deploy-platform' job", "green")
+ }
}
currentBuild.result = 'SUCCESS'
@@ -34,42 +56,50 @@
} catch (e) {
common.printMsg("Deploy is failed: " + e.message , "purple")
report_text = e.message
- def snapshot_name = "deploy_failed"
- shared.run_cmd("""\
- dos.py suspend ${ENV_NAME} || true
- dos.py snapshot ${ENV_NAME} ${snapshot_name} || true
- """)
- if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "false") {
+ if (env_manager == 'devops') {
+ def snapshot_name = "deploy_failed"
shared.run_cmd("""\
- dos.py resume ${ENV_NAME} || true
+ dos.py suspend ${ENV_NAME} || true
+ dos.py snapshot ${ENV_NAME} ${snapshot_name} || true
""")
+ if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "false") {
+ shared.run_cmd("""\
+ dos.py resume ${ENV_NAME} || true
+ """)
+ }
+ shared.devops_snapshot_info(snapshot_name)
}
- shared.devops_snapshot_info(snapshot_name)
throw e
} finally {
shared.create_deploy_result_report(steps, currentBuild.result, report_text)
}
}
-def test(shared, common, steps) {
+def test(shared, common, steps, env_manager, jenkins_slave_node_name) {
try {
stage("Run tests") {
- shared.swarm_run_pytest(steps)
+ if (env.RUN_TEST_OPTS) {
+ shared.swarm_run_pytest(steps, jenkins_slave_node_name)
+ } else {
+ common.printMsg("RUN_TEST_OPTS is empty, skipping 'swarm-run-pytest' job", "green")
+ }
}
} catch (e) {
common.printMsg("Tests are failed: " + e.message, "purple")
- def snapshot_name = "tests_failed"
- shared.run_cmd("""\
- dos.py suspend ${ENV_NAME} || true
- dos.py snapshot ${ENV_NAME} ${snapshot_name} || true
- """)
- if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "false") {
+ if (env_manager == 'devops') {
+ def snapshot_name = "tests_failed"
shared.run_cmd("""\
- dos.py resume ${ENV_NAME} || true
+ dos.py suspend ${ENV_NAME} || true
+ dos.py snapshot ${ENV_NAME} ${snapshot_name} || true
""")
+ if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "false") {
+ shared.run_cmd("""\
+ dos.py resume ${ENV_NAME} || true
+ """)
+ }
+ shared.devops_snapshot_info(snapshot_name)
}
- shared.devops_snapshot_info(snapshot_name)
throw e
}
}
@@ -80,18 +110,20 @@
node ("${NODE_NAME}") {
try {
// run deploy stages
- deploy(shared, common, steps)
+ deploy(shared, common, steps, env_manager, jenkins_slave_node_name)
// run test stages
- test(shared, common, steps)
+ test(shared, common, steps, env_manager, jenkins_slave_node_name)
} catch (e) {
common.printMsg("Job is failed: " + e.message, "purple")
throw e
} finally {
- // shutdown the environment if required
- if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "true") {
- shared.run_cmd("""\
- dos.py destroy ${ENV_NAME} || true
- """)
+ if (env_manager == 'devops') {
+ // shutdown the environment if required
+ if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "true") {
+ shared.run_cmd("""\
+ dos.py destroy ${ENV_NAME} || true
+ """)
+ }
}
stage("Archive all xml reports") {
@@ -99,7 +131,7 @@
}
if ("${env.REPORT_TO_TESTRAIL}" != "false") {
stage("report results to testrail") {
- shared.swarm_testrail_report(steps)
+ shared.swarm_testrail_report(steps, jenkins_slave_node_name)
}
stage("Store TestRail reports to job description") {
def String description = readFile("description.txt")