Add MAKE_SNAPSHOT_STAGES job parameter

- add MAKE_SNAPSHOT_STAGES to the parent and child jobs,
  true by default
- MAKE_SNAPSHOT_STAGES is always disabled for ENV_MANAGER='heat'
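
A condensed sketch of the resulting logic in the parent job (illustration only,
not part of the patch; the actual code handles 'devops' and 'heat' in separate
branches):

    // Jenkins job parameters arrive as strings, so compare against the literal "false"
    def env_manager = env.ENV_MANAGER ?: 'devops'
    make_snapshot_stages = (env_manager != 'heat') && ("${env.MAKE_SNAPSHOT_STAGES}" != "false")

The flag is then passed down to the child jobs (swarm-deploy-cicd,
swarm-deploy-platform, swarm-run-pytest), which re-evaluate the same string
comparison against their own MAKE_SNAPSHOT_STAGES parameter.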
Change-Id: I2f1a8ae6cdc2f1be7a0d133d764ed3446305507a
diff --git a/jobs/pipelines/deploy-cicd-and-run-tests.groovy b/jobs/pipelines/deploy-cicd-and-run-tests.groovy
index e0e43ab..f9273e1 100644
--- a/jobs/pipelines/deploy-cicd-and-run-tests.groovy
+++ b/jobs/pipelines/deploy-cicd-and-run-tests.groovy
@@ -4,11 +4,18 @@
def shared = new com.mirantis.system_qa.SharedPipeline()
def steps = "hardware,create_model,salt," + env.DRIVETRAIN_STACK_INSTALL + "," + env.PLATFORM_STACK_INSTALL
def env_manager = env.ENV_MANAGER ?: 'devops'
-def jenkins_slave_node_name = "${NODE_NAME}"
+
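+// NOTE: assigned without 'def' so the values live in the script binding and
+// remain visible inside the deploy() and test() methods below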
+if (env_manager == 'devops') {
+ jenkins_slave_node_name = "${NODE_NAME}"
+ make_snapshot_stages = "${env.MAKE_SNAPSHOT_STAGES}" != "false" ? true : false
+} else if (env_manager == 'heat') {
+ jenkins_slave_node_name = "openstack_slave_${JOB_NAME}"
+ make_snapshot_stages = false
+}
currentBuild.description = "${NODE_NAME}:${ENV_NAME}"
-def deploy(shared, common, steps, env_manager, jenkins_slave_node_name) {
+def deploy(shared, common, steps, env_manager) {
def report_text = ''
try {
@@ -19,15 +26,13 @@
stage("Create environment, generate model, bootstrap the salt-cluster") {
// steps: "hardware,create_model,salt"
if (env_manager == 'devops') {
- jenkins_slave_node_name = "${NODE_NAME}"
shared.swarm_bootstrap_salt_cluster_devops()
} else if (env_manager == 'heat') {
- def new_jenkins_slave_node_name = "openstack_slave_${JOB_NAME}"
// If shared.swarm_bootstrap_salt_cluster_heat() failed,
// do not schedule shared.swarm_testrail_report() on the non existing Jenkins slave
- shared.swarm_bootstrap_salt_cluster_heat(new_jenkins_slave_node_name)
+ shared.swarm_bootstrap_salt_cluster_heat(jenkins_slave_node_name)
// When the Heat stack created, set jenkins_slave_node_name to the new Jenkins slave
- jenkins_slave_node_name = new_jenkins_slave_node_name
+ // dos.py snapshots are disabled for the 'heat' manager (make_snapshot_stages is set to false above)
} else {
throw new Exception("Unknow env_manager: '${env_manager}'")
}
@@ -36,7 +41,7 @@
stage("Install core infrastructure and deploy CICD nodes") {
if (env.DRIVETRAIN_STACK_INSTALL) {
// steps: env.DRIVETRAIN_STACK_INSTALL
- shared.swarm_deploy_cicd(env.DRIVETRAIN_STACK_INSTALL, env.DRIVETRAIN_STACK_INSTALL_TIMEOUT, jenkins_slave_node_name)
+ shared.swarm_deploy_cicd(env.DRIVETRAIN_STACK_INSTALL, env.DRIVETRAIN_STACK_INSTALL_TIMEOUT, jenkins_slave_node_name, make_snapshot_stages)
} else {
common.printMsg("DRIVETRAIN_STACK_INSTALL is empty, skipping 'swarm-deploy-cicd' job", "green")
}
@@ -45,7 +50,7 @@
stage("Deploy platform components") {
if (env.PLATFORM_STACK_INSTALL) {
// steps: env.PLATFORM_STACK_INSTALL
- shared.swarm_deploy_platform(env.PLATFORM_STACK_INSTALL, env.PLATFORM_STACK_INSTALL_TIMEOUT, jenkins_slave_node_name)
+ shared.swarm_deploy_platform(env.PLATFORM_STACK_INSTALL, env.PLATFORM_STACK_INSTALL_TIMEOUT, jenkins_slave_node_name, make_snapshot_stages)
} else {
common.printMsg("PLATFORM_STACK_INSTALL is empty, skipping 'swarm-deploy-platform' job", "green")
}
@@ -56,7 +61,7 @@
} catch (e) {
common.printMsg("Deploy is failed: " + e.message , "purple")
report_text = e.message
- if (env_manager == 'devops') {
+ if (make_snapshot_stages) {
def snapshot_name = "deploy_failed"
shared.run_cmd("""\
dos.py suspend ${ENV_NAME} || true
@@ -75,11 +80,11 @@
}
}
-def test(shared, common, steps, env_manager, jenkins_slave_node_name) {
+def test(shared, common, steps, env_manager) {
try {
stage("Run tests") {
if (env.RUN_TEST_OPTS) {
- shared.swarm_run_pytest(steps, jenkins_slave_node_name)
+ shared.swarm_run_pytest(steps, jenkins_slave_node_name, make_snapshot_stages)
} else {
common.printMsg("RUN_TEST_OPTS is empty, skipping 'swarm-run-pytest' job", "green")
}
@@ -87,7 +92,7 @@
} catch (e) {
common.printMsg("Tests are failed: " + e.message, "purple")
- if (env_manager == 'devops') {
+ if (make_snapshot_stages) {
def snapshot_name = "tests_failed"
shared.run_cmd("""\
dos.py suspend ${ENV_NAME} || true
@@ -110,14 +115,14 @@
node ("${NODE_NAME}") {
try {
// run deploy stages
- deploy(shared, common, steps, env_manager, jenkins_slave_node_name)
+ deploy(shared, common, steps, env_manager)
// run test stages
- test(shared, common, steps, env_manager, jenkins_slave_node_name)
+ test(shared, common, steps, env_manager)
} catch (e) {
common.printMsg("Job is failed: " + e.message, "purple")
throw e
} finally {
- if (env_manager == 'devops') {
+ if (make_snapshot_stages) {
// shutdown the environment if required
if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "true") {
shared.run_cmd("""\
@@ -131,7 +136,7 @@
}
if ("${env.REPORT_TO_TESTRAIL}" != "false") {
stage("report results to testrail") {
- shared.swarm_testrail_report(steps, jenkins_slave_node_name)
+ shared.swarm_testrail_report(steps)
}
stage("Store TestRail reports to job description") {
def String description = readFile("description.txt")
diff --git a/jobs/pipelines/swarm-bootstrap-salt-cluster-heat.groovy b/jobs/pipelines/swarm-bootstrap-salt-cluster-heat.groovy
index fc12976..cfb080f 100644
--- a/jobs/pipelines/swarm-bootstrap-salt-cluster-heat.groovy
+++ b/jobs/pipelines/swarm-bootstrap-salt-cluster-heat.groovy
@@ -29,6 +29,7 @@
* OS_CREDENTIALS OpenStack username and password credentials ID in Jenkins
* LAB_PARAM_DEFAULTS Filename placed in tcp_tests/templates/_heat_environments, with default parameters for the heat template
*
+ * CREATE_JENKINS_NODE_CREDENTIALS Jenkins username and password with rights to add/delete Jenkins agents
*/
@Library('tcp-qa')_
@@ -246,6 +247,9 @@
stage("Clean the environment and clone tcp-qa") {
deleteDir()
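+ // Ensure the fuel-devops virtualenv exists on this node; create it on first use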
+ shared.verbose_sh("""\
+ [ -d /home/jenkins/fuel-devops30 ] || virtualenv /home/jenkins/fuel-devops30
+ """, true, false, true)
shared.run_cmd("""\
git clone https://github.com/Mirantis/tcp-qa.git ${PARENT_WORKSPACE}
""")
diff --git a/jobs/pipelines/swarm-deploy-cicd.groovy b/jobs/pipelines/swarm-deploy-cicd.groovy
index 1939b4d..b5e1ff7 100644
--- a/jobs/pipelines/swarm-deploy-cicd.groovy
+++ b/jobs/pipelines/swarm-deploy-cicd.groovy
@@ -11,6 +11,7 @@
* STACK_INSTALL_TIMEOUT Stacks installation timeout
* TCP_QA_REFS Reference to the tcp-qa change on review.gerrithub.io, like refs/changes/46/418546/41
* SHUTDOWN_ENV_ON_TEARDOWN optional, shutdown fuel-devops environment at the end of the job
+ * MAKE_SNAPSHOT_STAGES optional, use "dos.py snapshot" to snapshot the environment after each stage
*
*/
@@ -18,6 +19,7 @@
common = new com.mirantis.mk.Common()
shared = new com.mirantis.system_qa.SharedPipeline()
+make_snapshot_stages = "${env.MAKE_SNAPSHOT_STAGES}" != "false" ? true : false
if (! env.PARENT_NODE_NAME) {
error "'PARENT_NODE_NAME' must be set from the parent deployment job!"
@@ -55,8 +57,11 @@
stage("Sanity check the deployed component [${stack}]") {
shared.sanity_check_component(stack)
}
- stage("Make environment snapshot [${stack}_deployed]") {
- shared.devops_snapshot(stack)
+
+ if (make_snapshot_stages) {
+ stage("Make environment snapshot [${stack}_deployed]") {
+ shared.devops_snapshot(stack)
+ }
}
} // for
@@ -68,10 +73,12 @@
// TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
// and report appropriate data to TestRail
// TODO(ddmitriev): add checks for cicd cluster
- if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "true") {
- shared.run_cmd("""\
- dos.py destroy ${ENV_NAME}
- """)
+ if (make_snapshot_stages) {
+ if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "true") {
+ shared.run_cmd("""\
+ dos.py destroy ${ENV_NAME}
+ """)
+ }
}
}
diff --git a/jobs/pipelines/swarm-deploy-platform.groovy b/jobs/pipelines/swarm-deploy-platform.groovy
index cb26aae..061e555 100644
--- a/jobs/pipelines/swarm-deploy-platform.groovy
+++ b/jobs/pipelines/swarm-deploy-platform.groovy
@@ -11,6 +11,7 @@
* STACK_INSTALL_TIMEOUT Stacks installation timeout
* TCP_QA_REFS Reference to the tcp-qa change on review.gerrithub.io, like refs/changes/46/418546/41
* SHUTDOWN_ENV_ON_TEARDOWN optional, shutdown fuel-devops environment at the end of the job
+ * MAKE_SNAPSHOT_STAGES optional, use "dos.py snapshot" to snapshot the environment after each stage
*
*/
@@ -18,6 +19,7 @@
common = new com.mirantis.mk.Common()
shared = new com.mirantis.system_qa.SharedPipeline()
+make_snapshot_stages = "${env.MAKE_SNAPSHOT_STAGES}" != "false" ? true : false
if (! env.PARENT_NODE_NAME) {
error "'PARENT_NODE_NAME' must be set from the parent deployment job!"
@@ -55,8 +57,10 @@
stage("Sanity check the deployed component [${stack}]") {
shared.sanity_check_component(stack)
}
- stage("Make environment snapshot [${stack}_deployed]") {
- shared.devops_snapshot(stack)
+ if (make_snapshot_stages) {
+ stage("Make environment snapshot [${stack}_deployed]") {
+ shared.devops_snapshot(stack)
+ }
}
} // for
@@ -68,10 +72,12 @@
// TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
// and report appropriate data to TestRail
// TODO(ddmitriev): add checks for the installed stacks
- if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "true") {
- shared.run_cmd("""\
- dos.py destroy ${ENV_NAME}
- """)
+ if (make_snapshot_stages) {
+ if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "true") {
+ shared.run_cmd("""\
+ dos.py destroy ${ENV_NAME}
+ """)
+ }
}
}
diff --git a/jobs/pipelines/swarm-run-pytest.groovy b/jobs/pipelines/swarm-run-pytest.groovy
index 994cc70..1e4c849 100644
--- a/jobs/pipelines/swarm-run-pytest.groovy
+++ b/jobs/pipelines/swarm-run-pytest.groovy
@@ -18,12 +18,14 @@
* IMAGE_PATH_CFG01_DAY01 Not used (backward compatibility, for manual deployment steps only)
* TEMPEST_IMAGE_VERSION Tempest image version: pike by default, can be queens.
* TEMPEST_TARGET Node where tempest will be run
+ * MAKE_SNAPSHOT_STAGES optional, use "dos.py snapshot" to snapshot the environment after each stage
*/
@Library('tcp-qa')_
common = new com.mirantis.mk.Common()
shared = new com.mirantis.system_qa.SharedPipeline()
+make_snapshot_stages = "${env.MAKE_SNAPSHOT_STAGES}" != "false" ? true : false
if (! env.PARENT_NODE_NAME) {
error "'PARENT_NODE_NAME' must be set from the parent deployment job!"
@@ -77,16 +79,19 @@
def snapshot_name = "test_completed"
shared.download_logs("test_completed_${ENV_NAME}")
- shared.run_cmd("""\
- dos.py suspend ${ENV_NAME}
- dos.py snapshot ${ENV_NAME} ${snapshot_name}
- """)
- if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "false") {
+
+ if (make_snapshot_stages) {
shared.run_cmd("""\
- dos.py resume ${ENV_NAME}
+ dos.py suspend ${ENV_NAME}
+ dos.py snapshot ${ENV_NAME} ${snapshot_name}
""")
+ if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "false") {
+ shared.run_cmd("""\
+ dos.py resume ${ENV_NAME}
+ """)
+ }
+ shared.devops_snapshot_info(snapshot_name)
}
- shared.devops_snapshot_info(snapshot_name)
}
} catch (e) {
@@ -98,10 +103,12 @@
} finally {
// TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
// and report appropriate data to TestRail
- if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "true") {
- shared.run_cmd("""\
- dos.py destroy ${ENV_NAME}
- """)
+ if (make_snapshot_stages) {
+ if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "true") {
+ shared.run_cmd("""\
+ dos.py destroy ${ENV_NAME}
+ """)
+ }
}
}
}