/*
* DEPLOY_EMPTY_NODE: Add an extra node to the Heat stack. The node has no role assigned and only a salt-minion installed.
*/
@Library('tcp-qa')_
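// Shared helpers: Common provides logging utilities and SharedPipeline wraps the swarm-* child jobs;
// both are presumably resolved through the loaded shared libraries.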
def common = new com.mirantis.mk.Common()
def shared = new com.mirantis.system_qa.SharedPipeline()
def steps = "hardware,create_model,salt," + env.DRIVETRAIN_STACK_INSTALL + "," + env.PLATFORM_STACK_INSTALL
def env_manager = env.ENV_MANAGER ?: 'devops'
def batch_size = env.BATCH_SIZE ?: ''
def dist_upgrade_nodes = "${env.DIST_UPGRADE_NODES}" != "false"
def upgrade_saltstack = "${env.UPGRADE_SALTSTACK}" != "false"
def upgrade_version_tag = "${env.UPGRADE_VERSION_TAG}"
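// Environment-manager specific settings: 'devops' runs everything on the current Jenkins node
// and allows dos.py snapshots; 'heat' spawns a dedicated "openstack_slave_<ENV_NAME>" agent and disables snapshots.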
if (env_manager == 'devops') {
jenkins_slave_node_name = "${NODE_NAME}"
node_with_reports = "${NODE_NAME}"
make_snapshot_stages = "${env.MAKE_SNAPSHOT_STAGES}" != "false"
} else if (env_manager == 'heat') {
jenkins_slave_node_name = "openstack_slave_${env.ENV_NAME}"
make_snapshot_stages = false
node_with_reports = jenkins_slave_node_name
}
currentBuild.description = "${NODE_NAME}:${ENV_NAME}<br>"
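// deploy(): bootstraps the salt cluster (via devops or Heat), then deploys the CICD and platform stacks.
// On failure it optionally snapshots the devops environment; a deploy result report is always written.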
def deploy(shared, common, steps, env_manager, batch_size, dist_upgrade_nodes, upgrade_saltstack) {
def report_text = ''
try {
stage("Clean the environment and clone tcp-qa") {
shared.prepare_working_dir(env_manager)
}
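// For baremetal environments: if IPMI credentials are provided, reboot the hardware nodes before deployment.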
if (env.IPMI_CREDS) {
withCredentials([
[$class : 'UsernamePasswordMultiBinding',
credentialsId : "${IPMI_CREDS}",
passwordVariable: 'IPMI_PASS',
usernameVariable: 'IPMI_USER']
]) {
env.IPMI_PASS = IPMI_PASS
env.IPMI_USER = IPMI_USER
shared.reboot_hardware_nodes("${IPMI_CREDS}")
}
}
stage("Create environment, generate model, bootstrap the salt-cluster") {
// steps: "hardware,create_model,salt"
if (env_manager == 'devops') {
shared.swarm_bootstrap_salt_cluster_devops()
} else if (env_manager == 'heat') {
// If shared.swarm_bootstrap_salt_cluster_heat() fails,
// do not schedule shared.swarm_testrail_report() on the non-existent Jenkins slave
shared.swarm_bootstrap_salt_cluster_heat(jenkins_slave_node_name)
// Once the Heat stack is created, jenkins_slave_node_name points to the new Jenkins slave
// dos.py snapshots are disabled for the 'heat' manager
} else {
throw new Exception("Unknown env_manager: '${env_manager}'")
}
if (fileExists("jenkins_agent_description.txt")) {
String jenkins_agent_description = readFile("jenkins_agent_description.txt")
currentBuild.description += "${jenkins_agent_description}"
}
}
stage("Install core infrastructure and deploy CICD nodes") {
if (env.DRIVETRAIN_STACK_INSTALL) {
// steps: env.DRIVETRAIN_STACK_INSTALL
shared.swarm_deploy_cicd(env.DRIVETRAIN_STACK_INSTALL, env.DRIVETRAIN_STACK_INSTALL_TIMEOUT, jenkins_slave_node_name, make_snapshot_stages, batch_size, dist_upgrade_nodes, upgrade_saltstack)
} else {
common.printMsg("DRIVETRAIN_STACK_INSTALL is empty, skipping 'swarm-deploy-cicd' job", "green")
}
}
stage("Deploy platform components") {
if (env.PLATFORM_STACK_INSTALL) {
// steps: env.PLATFORM_STACK_INSTALL
shared.swarm_deploy_platform(env.PLATFORM_STACK_INSTALL, env.PLATFORM_STACK_INSTALL_TIMEOUT, jenkins_slave_node_name, make_snapshot_stages, batch_size, dist_upgrade_nodes, upgrade_saltstack)
} else {
common.printMsg("PLATFORM_STACK_INSTALL is empty, skipping 'swarm-deploy-platform' job", "green")
}
}
currentBuild.result = 'SUCCESS'
} catch (e) {
common.printMsg("Deploy is failed: " + e.message , "purple")
report_text = e.message
if (make_snapshot_stages) {
def snapshot_name = "deploy_failed"
shared.run_cmd("""\
dos.py suspend ${ENV_NAME} || true
dos.py snapshot ${ENV_NAME} ${snapshot_name} || true
""")
if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "false") {
shared.run_cmd("""\
dos.py resume ${ENV_NAME} || true
""")
}
shared.devops_snapshot_info(snapshot_name)
}
throw e
} finally {
shared.create_deploy_result_report(steps, currentBuild.result, report_text)
}
}
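// test(): runs the swarm-run-pytest job when RUN_TEST_OPTS is set.
// On failure it optionally snapshots the devops environment before re-raising the error.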
def test(shared, common, steps, env_manager) {
try {
stage("Run tests") {
if (env.RUN_TEST_OPTS) {
shared.swarm_run_pytest(steps, jenkins_slave_node_name, make_snapshot_stages)
} else {
common.printMsg("RUN_TEST_OPTS is empty, skipping 'swarm-run-pytest' job", "green")
}
}
} catch (e) {
common.printMsg("Tests are failed: " + e.message, "purple")
if (make_snapshot_stages) {
def snapshot_name = "tests_failed"
shared.run_cmd("""\
dos.py suspend ${ENV_NAME} || true
dos.py snapshot ${ENV_NAME} ${snapshot_name} || true
""")
if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "false") {
shared.run_cmd("""\
dos.py resume ${ENV_NAME} || true
""")
}
shared.devops_snapshot_info(snapshot_name)
}
throw e
}
}
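// Main flow: cap the whole run at 23 hours, run deploy() and test() on NODE_NAME,
// then archive artifacts, report to TestRail and optionally trigger follow-up upgrade jobs.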
timeout(time: 23, unit: 'HOURS') {
// main
// Temporarily disable throttling to check how the job runs without it
//throttle(['fuel_devops_environment']) {
node ("${NODE_NAME}") {
env.slave_workdir = pwd()
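// EXTRA_VARS is an optional newline-separated list of KEY=VALUE pairs exported into the job environment below.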
def extra_vars = EXTRA_VARS ?: ''
if (extra_vars != '') {
extra_vars = extra_vars.split('\n').collect{it as String}
} else {
extra_vars = []
}
withEnv(extra_vars) {
try {
// run deploy stages
deploy(shared, common, steps, env_manager, batch_size, dist_upgrade_nodes, upgrade_saltstack)
// run test stages
test(shared, common, steps, env_manager)
} catch (e) {
common.printMsg("Job is failed: " + e.message, "purple")
throw e
} finally {
if (make_snapshot_stages) {
// shut down the environment if required
if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "true") {
shared.run_cmd("""\
dos.py destroy ${ENV_NAME} || true
""")
}
}
stage("Archive all xml reports") {
dir("${env.slave_workdir }") {
archiveArtifacts artifacts: "**/*.xml,**/*.ini,**/*.log,**/*.tar.gz"
}
}
// delete the directory with artifacts from the swarm-run-pytest pipeline
// to avoid duplicate xml reports
if (fileExists("tmp")) {
shared.run_cmd("""\
rm -rf tmp/
""")
}
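// Report results to TestRail and append the report summary to the build description, unless reporting is disabled.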
if ("${env.REPORT_TO_TESTRAIL}" != "false") {
stage("report results to testrail from jenkins master") {
common.printMsg("Running on: " + node_with_reports, "blue")
common.printMsg("Running on: " + env.NODE_NAME, "blue")
shared.swarm_testrail_report(steps, env.NODE_NAME)
}
stage("Store TestRail reports to job description") {
if (fileExists("description.txt")) {
String description = readFile("description.txt")
currentBuild.description += "${description}"
}
}
}
if (fileExists("jenkins_agent_description.txt")) {
// if there is a separate foundation node on $jenkins_slave_node_name,
// then archive artifacts on that node as well
if (jenkins_slave_node_name != env.NODE_NAME) {
node ("${jenkins_slave_node_name}") {
dir("${env.slave_workdir }") {
stage("Archive all xml reports from node ${jenkins_slave_node_name}") {
archiveArtifacts artifacts: "**/*.xml,**/*.ini,**/*.log,**/*.tar.gz,*.xml,*.ini,*.log,*.tar.gz", allowEmptyArchive: true
}
if ("${env.REPORT_TO_TESTRAIL}" != "false") {
stage("report results to testrail") {
common.printMsg("Running on: " + node_with_reports, "blue")
shared.swarm_testrail_report(steps, node_with_reports)
}
stage("Store TestRail reports to job description from ${jenkins_slave_node_name}") {
if (fileExists("description.txt")) {
String description = readFile("description.txt")
currentBuild.description += "${description}"
}
}
}
} //dir
} // node
}
}
// Run the Pike-to-Queens upgrade only for Pike proposed deployments
if (env.AUTO_UPGRADE_TO_QUEENS == "true" && currentBuild.result == 'SUCCESS') {
def deploy = build job: "os-update-pike-to-queens",
parameters: [
string(name: 'PARENT_NODE_NAME', value: "openstack_slave_${env.ENV_NAME}"),
string(name: 'TCP_QA_REFS', value: env.TCP_QA_REFS),
string(name: 'PASSED_STEPS', value: steps),
string(name: 'TEMPEST_TEST_SUITE_NAME', value: env.TEMPEST_TEST_SUITE_NAME),
string(name: 'NODE', value: "openstack_slave_${env.ENV_NAME}"),
string(name: 'RUN_TEST_OPTS', value: '-k TestUpdatePikeToQueens')
],
wait: false,
propagate: false
}
// Run the environment upgrade to the proposed version
if (env.RUN_UPGRADE_AFTER_JOB == "true" && currentBuild.result == 'SUCCESS') {
network_backend = env.PLATFORM_STACK_INSTALL.contains("contrail") ? 'contrail' : 'dvr'
upgrade_job = "mcp-update-${env.TEMPEST_IMAGE_VERSION}-${network_backend}-sl"
upgrade_to_tag = ''
contrail_upgrade_line = ''
if(env.UPGRADE_TO_TAG == "true") {
upgrade_to_tag = "--update-to-tag ${upgrade_version_tag} "
}
if(env.PLATFORM_STACK_INSTALL.contains("contrail")) {
contrail_upgrade_line = "tcp_tests/tests/system/test_upgrade_contrail.py::TestUpdateContrail "
}
run_test_opts = """--keep-duplicates --maxfail=1 \
tcp_tests/tests/system/test_mcp_update.py::TestUpdateMcpCluster::test_update_drivetrain ${upgrade_to_tag} \
${contrail_upgrade_line}\
tcp_tests/tests/system/test_mcp_update.py::TestOpenstackUpdate \
tcp_tests/tests/system/test_mcp_update.py::TestUpdateMcpCluster::test_update_galera --update-mysql-version 5.7 \
tcp_tests/tests/system/test_mcp_update.py::TestUpdateMcpCluster::test_update_rabbit \
tcp_tests/tests/system/test_mcp_update.py::TestUpdateMcpCluster::test_update_stacklight \
tcp_tests/tests/system/test_mcp_update.py::TestUpdateMcpCluster::test_update_ceph \
\
tcp_tests/tests/system/test_cvp_pipelines.py::TestCvpPipelines::test_run_cvp_tempest \
tcp_tests/tests/system/test_cvp_pipelines.py::TestCvpPipelines::test_run_cvp_func_sanity \
tcp_tests/tests/system/test_cvp_pipelines.py::TestCvpPipelines::test_run_cvp_stacklight
"""
if (env.IPMI_CREDS) {
if (env.ENV_NAME == "released-bm-b300-cicd-queens-ovs-maas") {
upgrade_job = "mcp-update-bm-b300-queens-ovs-maas"
}
if (env.ENV_NAME == "released-bm-cicd-queens-ovs-cl-maas") {
upgrade_job = "mcp-update-bm-queens-ovs-cl-maas"
}
}
def deploy = build job: "${upgrade_job}",
parameters: [
string(name: 'PARENT_NODE_NAME', value: "openstack_slave_${env.ENV_NAME}"),
string(name: 'TCP_QA_REFS', value: env.TCP_QA_REFS),
string(name: 'PASSED_STEPS', value: steps),
string(name: 'TEMPEST_TEST_SUITE_NAME', value: env.TEMPEST_TEST_SUITE_NAME),
string(name: 'NODE', value: "openstack_slave_${env.ENV_NAME}"),
string(name: 'RUN_TEST_OPTS', value: run_test_opts)
],
wait: false,
propagate: false
}
} // try
}
} // node
//}
} // timeout