Allow using ENV_MANAGER=heat in test pipelines
- If ENV_MANAGER=heat, environment snapshots are unavailable,
so select the test cases for such environments carefully.
- Added a new job swarm-bootstrap-salt-cluster-heat.groovy
to create an environment in OpenStack
- Added new parameters for parent jobs (example values below):
ENV_MANAGER (default=devops)
OS_AUTH_URL (for ENV_MANAGER=heat) - Keystone URL
OS_PROJECT_NAME (for ENV_MANAGER=heat) - OS project name
OS_USER_DOMAIN_NAME (for ENV_MANAGER=heat) - OS user domain name
OS_CREDENTIALS (for ENV_MANAGER=heat) - ID of the Jenkins credentials
with the username and password used to access OpenStack
LAB_PARAM_DEFAULTS (for ENV_MANAGER=heat) - environment file
for the Heat template containing the 'parameter_defaults' dict.
- Added the requirements 'python-openstackclient' and 'python-glanceclient'
to manage images and the Heat stack from Jenkins pipeline scripts.
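As an illustration, a parent job switched to Heat could be configured
with values like the following (all hypothetical):
  ENV_MANAGER=heat
  OS_AUTH_URL=https://keystone.example.net:5000/v3
  OS_PROJECT_NAME=tcp-qa-ci
  OS_USER_DOMAIN_NAME=Default
  OS_CREDENTIALS=openstack-tcp-qa-creds
  LAB_PARAM_DEFAULTS=example-lab-params.env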
Related-task: #PROD-27687
Change-Id: I5b3a2fa3aac0bf3d592efa3617e25b8a965f377f
diff --git a/jobs/pipelines/deploy-cicd-and-run-tests.groovy b/jobs/pipelines/deploy-cicd-and-run-tests.groovy
index f5c5b9a..e0e43ab 100644
--- a/jobs/pipelines/deploy-cicd-and-run-tests.groovy
+++ b/jobs/pipelines/deploy-cicd-and-run-tests.groovy
@@ -3,30 +3,52 @@
def common = new com.mirantis.mk.Common()
def shared = new com.mirantis.system_qa.SharedPipeline()
def steps = "hardware,create_model,salt," + env.DRIVETRAIN_STACK_INSTALL + "," + env.PLATFORM_STACK_INSTALL
+def env_manager = env.ENV_MANAGER ?: 'devops'
+def jenkins_slave_node_name = "${NODE_NAME}"
currentBuild.description = "${NODE_NAME}:${ENV_NAME}"
-def deploy(shared, common, steps) {
+def deploy(shared, common, steps, env_manager, jenkins_slave_node_name) {
def report_text = ''
try {
stage("Clean the environment and clone tcp-qa") {
- shared.prepare_working_dir()
+ shared.prepare_working_dir(env_manager)
}
stage("Create environment, generate model, bootstrap the salt-cluster") {
// steps: "hardware,create_model,salt"
- shared.swarm_bootstrap_salt_cluster_devops()
+ if (env_manager == 'devops') {
+ jenkins_slave_node_name = "${NODE_NAME}"
+ shared.swarm_bootstrap_salt_cluster_devops()
+ } else if (env_manager == 'heat') {
+ def new_jenkins_slave_node_name = "openstack_slave_${JOB_NAME}"
+ // If shared.swarm_bootstrap_salt_cluster_heat() fails, do not schedule
+ // shared.swarm_testrail_report() on the non-existing Jenkins slave
+ shared.swarm_bootstrap_salt_cluster_heat(new_jenkins_slave_node_name)
+ // Once the Heat stack is created, point jenkins_slave_node_name at the new Jenkins slave
+ jenkins_slave_node_name = new_jenkins_slave_node_name
+ } else {
+ throw new Exception("Unknown env_manager: '${env_manager}'")
+ }
}
stage("Install core infrastructure and deploy CICD nodes") {
- // steps: env.DRIVETRAIN_STACK_INSTALL
- shared.swarm_deploy_cicd(env.DRIVETRAIN_STACK_INSTALL, env.DRIVETRAIN_STACK_INSTALL_TIMEOUT)
+ if (env.DRIVETRAIN_STACK_INSTALL) {
+ // steps: env.DRIVETRAIN_STACK_INSTALL
+ shared.swarm_deploy_cicd(env.DRIVETRAIN_STACK_INSTALL, env.DRIVETRAIN_STACK_INSTALL_TIMEOUT, jenkins_slave_node_name)
+ } else {
+ common.printMsg("DRIVETRAIN_STACK_INSTALL is empty, skipping 'swarm-deploy-cicd' job", "green")
+ }
}
stage("Deploy platform components") {
- // steps: env.PLATFORM_STACK_INSTALL
- shared.swarm_deploy_platform(env.PLATFORM_STACK_INSTALL, env.PLATFORM_STACK_INSTALL_TIMEOUT)
+ if (env.PLATFORM_STACK_INSTALL) {
+ // steps: env.PLATFORM_STACK_INSTALL
+ shared.swarm_deploy_platform(env.PLATFORM_STACK_INSTALL, env.PLATFORM_STACK_INSTALL_TIMEOUT, jenkins_slave_node_name)
+ } else {
+ common.printMsg("PLATFORM_STACK_INSTALL is empty, skipping 'swarm-deploy-platform' job", "green")
+ }
}
currentBuild.result = 'SUCCESS'
@@ -34,42 +56,50 @@
} catch (e) {
common.printMsg("Deploy is failed: " + e.message , "purple")
report_text = e.message
- def snapshot_name = "deploy_failed"
- shared.run_cmd("""\
- dos.py suspend ${ENV_NAME} || true
- dos.py snapshot ${ENV_NAME} ${snapshot_name} || true
- """)
- if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "false") {
+ if (env_manager == 'devops') {
+ def snapshot_name = "deploy_failed"
shared.run_cmd("""\
- dos.py resume ${ENV_NAME} || true
+ dos.py suspend ${ENV_NAME} || true
+ dos.py snapshot ${ENV_NAME} ${snapshot_name} || true
""")
+ if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "false") {
+ shared.run_cmd("""\
+ dos.py resume ${ENV_NAME} || true
+ """)
+ }
+ shared.devops_snapshot_info(snapshot_name)
}
- shared.devops_snapshot_info(snapshot_name)
throw e
} finally {
shared.create_deploy_result_report(steps, currentBuild.result, report_text)
}
}
-def test(shared, common, steps) {
+def test(shared, common, steps, env_manager, jenkins_slave_node_name) {
try {
stage("Run tests") {
- shared.swarm_run_pytest(steps)
+ if (env.RUN_TEST_OPTS) {
+ shared.swarm_run_pytest(steps, jenkins_slave_node_name)
+ } else {
+ common.printMsg("RUN_TEST_OPTS is empty, skipping 'swarm-run-pytest' job", "green")
+ }
}
} catch (e) {
common.printMsg("Tests are failed: " + e.message, "purple")
- def snapshot_name = "tests_failed"
- shared.run_cmd("""\
- dos.py suspend ${ENV_NAME} || true
- dos.py snapshot ${ENV_NAME} ${snapshot_name} || true
- """)
- if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "false") {
+ if (env_manager == 'devops') {
+ def snapshot_name = "tests_failed"
shared.run_cmd("""\
- dos.py resume ${ENV_NAME} || true
+ dos.py suspend ${ENV_NAME} || true
+ dos.py snapshot ${ENV_NAME} ${snapshot_name} || true
""")
+ if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "false") {
+ shared.run_cmd("""\
+ dos.py resume ${ENV_NAME} || true
+ """)
+ }
+ shared.devops_snapshot_info(snapshot_name)
}
- shared.devops_snapshot_info(snapshot_name)
throw e
}
}
@@ -80,18 +110,20 @@
node ("${NODE_NAME}") {
try {
// run deploy stages
- deploy(shared, common, steps)
+ deploy(shared, common, steps, env_manager, jenkins_slave_node_name)
// run test stages
- test(shared, common, steps)
+ test(shared, common, steps, env_manager, jenkins_slave_node_name)
} catch (e) {
common.printMsg("Job is failed: " + e.message, "purple")
throw e
} finally {
- // shutdown the environment if required
- if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "true") {
- shared.run_cmd("""\
- dos.py destroy ${ENV_NAME} || true
- """)
+ if (env_manager == 'devops') {
+ // shutdown the environment if required
+ if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "true") {
+ shared.run_cmd("""\
+ dos.py destroy ${ENV_NAME} || true
+ """)
+ }
}
stage("Archive all xml reports") {
@@ -99,7 +131,7 @@
}
if ("${env.REPORT_TO_TESTRAIL}" != "false") {
stage("report results to testrail") {
- shared.swarm_testrail_report(steps)
+ shared.swarm_testrail_report(steps, jenkins_slave_node_name)
}
stage("Store TestRail reports to job description") {
def String description = readFile("description.txt")
diff --git a/jobs/pipelines/deploy-without-drivetrain-and-run-tests.groovy b/jobs/pipelines/deploy-without-drivetrain-and-run-tests.groovy
index 96ddf76..33f8516 100644
--- a/jobs/pipelines/deploy-without-drivetrain-and-run-tests.groovy
+++ b/jobs/pipelines/deploy-without-drivetrain-and-run-tests.groovy
@@ -49,7 +49,7 @@
def test(shared, common, steps) {
try {
stage("Run tests") {
- shared.swarm_run_pytest(steps)
+ shared.swarm_run_pytest(steps, "${NODE_NAME}")
}
} catch (e) {
@@ -93,7 +93,7 @@
archiveArtifacts artifacts: "**/*.xml,**/*.ini,**/*.log,**/*.tar.gz"
}
stage("report results to testrail") {
- shared.swarm_testrail_report(steps)
+ shared.swarm_testrail_report(steps, "${NODE_NAME}")
}
stage("Store TestRail reports to job description") {
def String description = readFile("description.txt")
diff --git a/jobs/pipelines/swarm-bootstrap-salt-cluster-devops.groovy b/jobs/pipelines/swarm-bootstrap-salt-cluster-devops.groovy
index efeabba..392be7c 100644
--- a/jobs/pipelines/swarm-bootstrap-salt-cluster-devops.groovy
+++ b/jobs/pipelines/swarm-bootstrap-salt-cluster-devops.groovy
@@ -44,6 +44,12 @@
error "'PARENT_WORKSPACE' contains path to non-existing directory ${PARENT_WORKSPACE} on the node '${PARENT_NODE_NAME}'."
}
dir("${PARENT_WORKSPACE}") {
+ if (env.TCP_QA_REFS) {
+ stage("Update working dir to patch ${TCP_QA_REFS}") {
+ shared.update_working_dir()
+ }
+ }
+
stage("Cleanup: erase ${ENV_NAME} and remove config drive") {
println "Remove environment ${ENV_NAME}"
shared.run_cmd("""\
@@ -55,12 +61,6 @@
""")
}
- if (env.TCP_QA_REFS) {
- stage("Update working dir to patch ${TCP_QA_REFS}") {
- shared.update_working_dir()
- }
- }
-
stage("Create an environment ${ENV_NAME} in disabled state") {
// deploy_hardware.xml
shared.run_cmd("""\
@@ -75,18 +75,36 @@
}
stage("Generate the model") {
- shared.generate_cookied_model()
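+ // The pool CIDR is expected to be the last whitespace-separated token of each 'dos.py net-list' row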
+ def IPV4_NET_ADMIN=shared.run_cmd_stdout("dos.py net-list ${ENV_NAME} | grep admin-pool01").trim().split().last()
+ def IPV4_NET_CONTROL=shared.run_cmd_stdout("dos.py net-list ${ENV_NAME} | grep private-pool01").trim().split().last()
+ def IPV4_NET_TENANT=shared.run_cmd_stdout("dos.py net-list ${ENV_NAME} | grep tenant-pool01").trim().split().last()
+ def IPV4_NET_EXTERNAL=shared.run_cmd_stdout("dos.py net-list ${ENV_NAME} | grep external-pool01").trim().split().last()
+ shared.generate_cookied_model(IPV4_NET_ADMIN, IPV4_NET_CONTROL, IPV4_NET_TENANT, IPV4_NET_EXTERNAL)
}
stage("Generate config drive ISO") {
- shared.generate_configdrive_iso()
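+ // 'dos.py slave-ip-list' is expected to print '<hostname>,<ip>' per node; take the IP of the cfg01 node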
+ def SALT_MASTER_IP=shared.run_cmd_stdout("""\
+ SALT_MASTER_INFO=\$(for node in \$(dos.py slave-ip-list --address-pool-name admin-pool01 ${ENV_NAME}); do echo \$node; done|grep cfg01)
+ echo \$SALT_MASTER_INFO|cut -d',' -f2
+ """).trim().split("\n").last()
+ def dhcp_ranges_json=shared.run_cmd_stdout("""\
+ fgrep dhcp_ranges ${ENV_NAME}_hardware.ini |
+ fgrep "admin-pool01"|
+ cut -d"=" -f2
+ """).trim().split("\n").last()
+ def dhcp_ranges = new groovy.json.JsonSlurperClassic().parseText(dhcp_ranges_json)
+ def ADMIN_NETWORK_GW = dhcp_ranges['admin-pool01']['gateway']
+ shared.generate_configdrive_iso(SALT_MASTER_IP, ADMIN_NETWORK_GW)
}
stage("Upload generated config drive ISO into volume on cfg01 node") {
+ def SALT_MASTER_HOSTNAME=shared.run_cmd_stdout("""\
+ SALT_MASTER_INFO=\$(for node in \$(dos.py slave-ip-list --address-pool-name admin-pool01 ${ENV_NAME}); do echo \$node; done|grep cfg01)
+ echo \$SALT_MASTER_INFO|cut -d',' -f1
+ """).trim().split("\n").last()
shared.run_cmd("""\
# Get SALT_MASTER_HOSTNAME to determine the volume name
- . ./tcp_tests/utils/env_salt
- virsh vol-upload ${ENV_NAME}_\${SALT_MASTER_HOSTNAME}_config /home/jenkins/images/${CFG01_CONFIG_IMAGE_NAME} --pool default
+ virsh vol-upload ${ENV_NAME}_${SALT_MASTER_HOSTNAME}_config /home/jenkins/images/${CFG01_CONFIG_IMAGE_NAME} --pool default
virsh pool-refresh --pool default
""")
}
diff --git a/jobs/pipelines/swarm-bootstrap-salt-cluster-heat.groovy b/jobs/pipelines/swarm-bootstrap-salt-cluster-heat.groovy
new file mode 100644
index 0000000..fc12976
--- /dev/null
+++ b/jobs/pipelines/swarm-bootstrap-salt-cluster-heat.groovy
@@ -0,0 +1,298 @@
+/**
+ *
+ * Create an OpenStack Heat stack with the environment, generate a model for it
+ * and bootstrap a salt cluster on the environment nodes
+ *
+ * Expected parameters:
+ *
+ * PARENT_NODE_NAME Name of the Jenkins slave to create the environment on
+ * PARENT_WORKSPACE Path to the workspace of the parent job to use tcp-qa repo
+ * LAB_CONFIG_NAME Name of the tcp-qa deployment template
+ * ENV_NAME Environment name (also used as the Heat stack name)
+ * MCP_VERSION MCP version, like 2018.4 or proposed
+ * MCP_IMAGE_PATH1604 Local path to the image http://ci.mcp.mirantis.net:8085/images/ubuntu-16-04-x64-mcpproposed.qcow2
+ * IMAGE_PATH_CFG01_DAY01 Local path to the image http://ci.mcp.mirantis.net:8085/images/cfg01-day01-proposed.qcow2
+ * CFG01_CONFIG_IMAGE_NAME Name of the config drive image to create, like cfg01.${LAB_CONFIG_NAME}-config-drive.iso
+ * TCP_QA_REFS Reference to the tcp-qa change on review.gerrithub.io, like refs/changes/46/418546/41
+ * PIPELINE_LIBRARY_REF Reference to the pipeline-library change
+ * MK_PIPELINES_REF Reference to the mk-pipelines change
+ * COOKIECUTTER_TEMPLATE_COMMIT Commit/tag/branch for cookiecutter-templates repository. If empty, then takes ${MCP_VERSION} value
+ * SALT_MODELS_SYSTEM_COMMIT Commit/tag/branch for reclass-system repository. If empty, then takes ${MCP_VERSION} value
+ * SHUTDOWN_ENV_ON_TEARDOWN optional, shut down the environment at the end of the job
+ * MCP_SALT_REPO_URL Base URL for MCP repositories required to bootstrap cfg01 node. Leave blank to use default
+ * (http://mirror.mirantis.com/ from mcp-common-scripts)
+ * MCP_SALT_REPO_KEY URL of the key file. Leave blank to use default
+ * (${MCP_SALT_REPO_URL}/${MCP_VERSION}/salt-formulas/xenial/archive-salt-formulas.key from mcp-common-scripts)
+ * OS_AUTH_URL OpenStack Keystone auth URL
+ * OS_PROJECT_NAME OpenStack project (tenant) name
+ * OS_USER_DOMAIN_NAME OpenStack user domain name
+ * OS_CREDENTIALS ID of the Jenkins credentials with the OpenStack username and password
+ * LAB_PARAM_DEFAULTS Filename placed in tcp_tests/templates/_heat_environments, with the default parameters for the Heat template
+ *
+ */
+
+@Library('tcp-qa')_
+
+import groovy.xml.XmlUtil
+
+common = new com.mirantis.mk.Common()
+shared = new com.mirantis.system_qa.SharedPipeline()
+
+if (! env.PARENT_NODE_NAME) {
+ error "'PARENT_NODE_NAME' must be set from the parent deployment job!"
+}
+
+currentBuild.description = "${PARENT_NODE_NAME}:${ENV_NAME}"
+def cfg01_day01_image_name = "cfg01-day01-${MCP_VERSION}"
+def ubuntu_vcp_image_name = "ubuntu-vcp-${MCP_VERSION}"
+def ubuntu_foundation_image_name = "ubuntu-16.04-foundation-${MCP_VERSION}"
+
+node ("${PARENT_NODE_NAME}") {
+ if (! fileExists("${PARENT_WORKSPACE}")) {
+ error "'PARENT_WORKSPACE' contains path to non-existing directory ${PARENT_WORKSPACE} on the node '${PARENT_NODE_NAME}'."
+ }
+ dir("${PARENT_WORKSPACE}") {
+
+ if (env.TCP_QA_REFS) {
+ stage("Update working dir to patch ${TCP_QA_REFS}") {
+ shared.update_working_dir()
+ }
+ }
+
+ withCredentials([
+ [$class : 'UsernamePasswordMultiBinding',
+ credentialsId : env.OS_CREDENTIALS,
+ passwordVariable: 'OS_PASSWORD',
+ usernameVariable: 'OS_USERNAME']
+ ]) {
+ env.OS_IDENTITY_API_VERSION = 3
+
+ stage("Cleanup: erase ${ENV_NAME} and remove config drive") {
+
+ // delete heat stack
+ println "Remove heat stack '${ENV_NAME}'"
+ shared.run_cmd("""\
+ # export OS_IDENTITY_API_VERSION=3
+ # export OS_AUTH_URL=${OS_AUTH_URL}
+ # export OS_USERNAME=${OS_USERNAME}
+ # export OS_PASSWORD=${OS_PASSWORD}
+ # export OS_PROJECT_NAME=${OS_PROJECT_NAME}
+ openstack --insecure stack delete -y ${ENV_NAME} || true
+ while openstack --insecure stack show ${ENV_NAME} -f value -c stack_status; do sleep 10; done
+ """)
+
+ println "Remove config drive ISO"
+ shared.run_cmd("""\
+ rm /home/jenkins/images/${CFG01_CONFIG_IMAGE_NAME} || true
+ """)
+ }
+
+ stage("Generate the model") {
+ def IPV4_NET_ADMIN=shared.run_cmd_stdout("./tcp_tests/utils/get_param_heat_template.py management_subnet_cidr").trim().split().last()
+ def IPV4_NET_CONTROL=shared.run_cmd_stdout("./tcp_tests/utils/get_param_heat_template.py control_subnet_cidr").trim().split().last()
+ def IPV4_NET_TENANT=shared.run_cmd_stdout("./tcp_tests/utils/get_param_heat_template.py tenant_subnet_cidr").trim().split().last()
+ def IPV4_NET_EXTERNAL=shared.run_cmd_stdout("./tcp_tests/utils/get_param_heat_template.py external_subnet_cidr").trim().split().last()
+ shared.generate_cookied_model(IPV4_NET_ADMIN, IPV4_NET_CONTROL, IPV4_NET_TENANT, IPV4_NET_EXTERNAL)
+ }
+
+ stage("Generate config drive ISO") {
+ def SALT_MASTER_IP=shared.run_cmd_stdout("./tcp_tests/utils/get_param_heat_template.py management_subnet_cfg01_ip").trim().split().last()
+ def ADMIN_NETWORK_GW=shared.run_cmd_stdout("./tcp_tests/utils/get_param_heat_template.py management_subnet_gateway_ip").trim().split().last()
+ shared.generate_configdrive_iso(SALT_MASTER_IP, ADMIN_NETWORK_GW)
+ }
+
+ stage("Upload Ubuntu image for foundation node") {
+ shared.run_cmd("""\
+ if ! openstack --insecure image show ${ubuntu_foundation_image_name} -f value -c name; then
+ wget -O ./${ubuntu_foundation_image_name} https://cloud-images.ubuntu.com/releases/16.04/release/ubuntu-16.04-server-cloudimg-amd64-disk1.img
+ openstack --insecure image create ${ubuntu_foundation_image_name} --file ./${ubuntu_foundation_image_name} --disk-format qcow2 --container-format bare
+ rm ./${ubuntu_foundation_image_name}
+ else
+ echo Image ${ubuntu_foundation_image_name} already exists
+ fi
+ """)
+ }
+
+ stage("Upload cfg01-day01 and VCP images") {
+ shared.run_cmd("""\
+ # export OS_IDENTITY_API_VERSION=3
+ # export OS_AUTH_URL=${OS_AUTH_URL}
+ # export OS_USERNAME=${OS_USERNAME}
+ # export OS_PASSWORD=${OS_PASSWORD}
+ # export OS_PROJECT_NAME=${OS_PROJECT_NAME}
+
+ openstack --insecure image show ${cfg01_day01_image_name} -f value -c name || openstack --insecure image create ${cfg01_day01_image_name} --file ${IMAGE_PATH_CFG01_DAY01} --disk-format qcow2 --container-format bare
+ openstack --insecure image show ${ubuntu_vcp_image_name} -f value -c name || openstack --insecure image create ${ubuntu_vcp_image_name} --file ${MCP_IMAGE_PATH1604} --disk-format qcow2 --container-format bare
+ """)
+ }
+
+ stage("Upload generated config drive ISO into volume on cfg01 node") {
+ shared.run_cmd("""\
+ # export OS_IDENTITY_API_VERSION=3
+ # export OS_AUTH_URL=${OS_AUTH_URL}
+ # export OS_USERNAME=${OS_USERNAME}
+ # export OS_PASSWORD=${OS_PASSWORD}
+ # export OS_PROJECT_NAME=${OS_PROJECT_NAME}
+
+ openstack --insecure image delete cfg01.${ENV_NAME}-config-drive.iso || true
+ sleep 3
+ openstack --insecure image create cfg01.${ENV_NAME}-config-drive.iso --file /home/jenkins/images/${CFG01_CONFIG_IMAGE_NAME} --disk-format iso --container-format bare
+ """)
+ }
+
+ stage("Create Heat stack '${ENV_NAME}'") {
+ // Create the stack and wait for the CREATE_COMPLETE status; the manual equivalent:
+ // openstack --insecure stack create ${ENV_NAME} \
+ // --template ./tcp_tests/templates/${LAB_CONFIG_NAME}/underlay.hot \
+ // --environment ./tcp_tests/templates/_heat_environments/${LAB_PARAM_DEFAULTS} \
+ // --parameter env_name=${ENV_NAME} --parameter mcp_version=${MCP_VERSION}
+ shared.run_cmd("""\
+ export BOOTSTRAP_TIMEOUT=3600
+ export ENV_MANAGER=heat
+ export TEST_GROUP=test_create_environment
+ export SHUTDOWN_ENV_ON_TEARDOWN=false
+ export PYTHONIOENCODING=UTF-8
+ export REPOSITORY_SUITE=${MCP_VERSION}
+ export ENV_NAME=${ENV_NAME}
+ export LAB_CONFIG_NAME=${LAB_CONFIG_NAME}
+ export LAB_PARAM_DEFAULTS=${LAB_PARAM_DEFAULTS}
+ py.test --cache-clear -vvv -s -p no:django -p no:ipdb --junit-xml=deploy_hardware.xml -k \${TEST_GROUP}
+ """)
+ }
+
+ stage("Add the Jenkins slave node") {
+ def jenkins_slave_ip_value_name = "foundation_floating"
+ def jenkins_slave_ip = shared.run_cmd_stdout("openstack --insecure stack output show ${ENV_NAME} ${jenkins_slave_ip_value_name} -f value -c output_value").trim().split().last()
+ def jenkins_slave_executors = 2
+ common.printMsg("JENKINS_SLAVE_NODE_NAME=${JENKINS_SLAVE_NODE_NAME}", "green")
+ common.printMsg("JENKINS_SLAVE_IP=${jenkins_slave_ip}", "green")
+
+ withCredentials([
+ [$class : 'UsernamePasswordMultiBinding',
+ credentialsId : "${CREATE_JENKINS_NODE_CREDENTIALS}",
+ passwordVariable: 'JENKINS_PASS',
+ usernameVariable: 'JENKINS_USER']
+ ]) {
+
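+ // Fetch a CSRF crumb and remove a possibly existing agent with the same name via the Jenkins REST API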
+ script_delete_agent = ("""\
+ CRUMB=\$(curl --fail -0 -u \"\${JENKINS_USER}:\${JENKINS_PASS}\" \${JENKINS_URL}\'/crumbIssuer/api/xml?xpath=concat(//crumbRequestField,":",//crumb)\' 2>/dev/null)
+ curl -w '%{http_code}' -o /dev/null \
+ -u \"\${JENKINS_USER}:\${JENKINS_PASS}\" \
+ -H \"Content-Type:application/x-www-form-urlencoded\" \
+ -H \"\$CRUMB\" \
+ \"\${JENKINS_URL}/computer/\${JENKINS_SLAVE_NODE_NAME}/doDelete\" \
+ --request \'POST\' --data \'\'
+ sleep 10
+ """)
+
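+ // Register a permanent ('DumbSlave') agent that connects over SSH to the foundation node floating IP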
+ script_create_agent = ("""\
+ CRUMB=\$(curl --fail -0 -u \"\${JENKINS_USER}:\${JENKINS_PASS}\" \${JENKINS_URL}\'/crumbIssuer/api/xml?xpath=concat(//crumbRequestField,":",//crumb)\' 2>/dev/null)
+
+ curl -L -sS -w '%{http_code}' -o /dev/null \
+ -u \"\${JENKINS_USER}:\${JENKINS_PASS}\" \
+ -H \"Content-Type:application/x-www-form-urlencoded\" \
+ -H \"\$CRUMB\" \
+ -X POST -d 'json={\
+ \"name\": \"'\"\$JENKINS_SLAVE_NODE_NAME\"'\", \
+ \"nodeDescription\": \"'\"\$ENV_NAME\"'\", \
+ \"numExecutors\": \"'\"${jenkins_slave_executors}\"'\", \
+ \"remoteFS\": \"'\"/home/jenkins/workspace\"'\", \
+ \"labelString\": \"'\"\$ENV_NAME\"'\", \
+ \"mode\": \"EXCLUSIVE\", \
+ \"\": [\"hudson.plugins.sshslaves.SSHLauncher\", \"hudson.slaves.RetentionStrategy\$Always\"], \
+ \"launcher\": {\
+ \"stapler-class\": \"hudson.plugins.sshslaves.SSHLauncher\", \
+ \"\$class\": \"hudson.plugins.sshslaves.SSHLauncher\", \
+ \"host\": \"'\"${jenkins_slave_ip}\"'\", \
+ \"credentialsId\": \"'\"\$ACCESS_JENKINS_NODE_CREDENTIALS\"'\", \
+ \"port\": \"'\"22\"'\", \
+ \"javaPath\": \"\", \
+ \"jvmOptions\": \"\", \
+ \"prefixStartSlaveCmd\": \"\", \
+ \"suffixStartSlaveCmd\": \"\", \
+ \"launchTimeoutSeconds\": \"\", \
+ \"maxNumRetries\": \"\", \
+ \"retryWaitTime\": \"\", \
+ \"sshHostKeyVerificationStrategy\": {\
+ \"\$class\": \"hudson.plugins.sshslaves.verifiers.NonVerifyingKeyVerificationStrategy\" \
+ }, \
+ \"tcpNoDelay\": \"true\"\
+ }, \
+ \"retentionStrategy\": {\
+ \"stapler-class\": \"hudson.slaves.RetentionStrategy\$Always\", \
+ \"\$class\": \"hudson.slaves.RetentionStrategy\$Always\"\
+ }, \
+ \"nodeProperties\": {\
+ \"stapler-class-bag\": \"true\"\
+ }, \
+ \"type\": \"hudson.slaves.DumbSlave\", \
+ \"crumb\": \"'\"\$CRUMB\"'\"}' \
+ \"\${JENKINS_URL}/computer/doCreateItem?name=\${JENKINS_SLAVE_NODE_NAME}&type=hudson.slaves.DumbSlave\"
+ """)
+ shared.verbose_sh(script_delete_agent, true, false, true)
+ shared.verbose_sh(script_create_agent, true, false, true)
+
+ } // withCredentials
+
+ }// stage
+
+ } // withCredentials
+ } // dir
+} // node
+
+
+node ("${JENKINS_SLAVE_NODE_NAME}") {
+ dir("${PARENT_WORKSPACE}") {
+
+ stage("Clean the environment and clone tcp-qa") {
+ deleteDir()
+ shared.run_cmd("""\
+ git clone https://github.com/Mirantis/tcp-qa.git ${PARENT_WORKSPACE}
+ """)
+ shared.update_working_dir()
+ }
+
+ withCredentials([
+ [$class : 'UsernamePasswordMultiBinding',
+ credentialsId : env.OS_CREDENTIALS,
+ passwordVariable: 'OS_PASSWORD',
+ usernameVariable: 'OS_USERNAME']
+ ]) {
+
+
+ stage("Run the 'underlay' and 'salt-deployed' fixtures to bootstrap salt cluster") {
+ def xml_report_name = "deploy_salt.xml"
+ try {
+ // deploy_salt.xml
+ shared.run_sh("""\
+ export ENV_NAME=${ENV_NAME}
+ export LAB_CONFIG_NAME=${LAB_CONFIG_NAME}
+ export LAB_PARAM_DEFAULTS=${LAB_PARAM_DEFAULTS}
+ export ENV_MANAGER=heat
+ export SHUTDOWN_ENV_ON_TEARDOWN=false
+ export BOOTSTRAP_TIMEOUT=3600
+ export PYTHONIOENCODING=UTF-8
+ export REPOSITORY_SUITE=${MCP_VERSION}
+ export TEST_GROUP=test_bootstrap_salt
+ py.test -vvv -s -p no:django -p no:ipdb --junit-xml=${xml_report_name} -k \${TEST_GROUP}
+ """)
+ // Wait for Jenkins to start and for the IO load to calm down
+ sleep(60)
+
+ } catch (e) {
+ common.printMsg("Saltstack cluster deploy is failed", "purple")
+ if (fileExists(xml_report_name)) {
+ shared.download_logs("deploy_salt_${ENV_NAME}")
+ def String junit_report_xml = readFile(xml_report_name)
+ def String junit_report_xml_pretty = new XmlUtil().serialize(junit_report_xml)
+ throw new Exception(junit_report_xml_pretty)
+ } else {
+ throw e
+ }
+ } finally {
+ // TODO(ddmitriev): add checks for salt cluster
+ }
+ } // stage
+ } // withCredentials
+ } // dir
+} // node
diff --git a/src/com/mirantis/system_qa/SharedPipeline.groovy b/src/com/mirantis/system_qa/SharedPipeline.groovy
index c4927ea..c636e71 100644
--- a/src/com/mirantis/system_qa/SharedPipeline.groovy
+++ b/src/com/mirantis/system_qa/SharedPipeline.groovy
@@ -6,6 +6,16 @@
return Character.isDigit(version.charAt(0))
}
+def verbose_sh(String script, Boolean returnStatus=false, Boolean returnStdout=false, Boolean verboseStdout=false) {
+ def common = new com.mirantis.mk.Common()
+ common.printMsg("Run shell command:\n" + script, "blue")
+ def result = sh(script: script, returnStatus: returnStatus, returnStdout: returnStdout)
+ if (verboseStdout) {
+ common.printMsg("Output:\n" + result, "cyan")
+ }
+ return result
+}
+
def run_sh(String cmd) {
// run shell script without catching any output
def common = new com.mirantis.mk.Common()
@@ -125,15 +135,41 @@
}
}
-def prepare_working_dir() {
+def prepare_working_dir(env_manager) {
println "Clean the working directory ${env.WORKSPACE}"
deleteDir()
- // do not fail if environment doesn't exists
- println "Remove environment ${ENV_NAME}"
- run_cmd("""\
- dos.py erase ${ENV_NAME} || true
- """)
+ if (env_manager == 'devops') {
+ // do not fail if the environment doesn't exist
+ println "Remove fuel-devops environment '${ENV_NAME}'"
+ run_cmd("""\
+ dos.py erase ${ENV_NAME} || true
+ """)
+ } else if (env_manager == 'heat') {
+ // delete heat stack
+ println "Remove heat stack '${ENV_NAME}'"
+ withCredentials([
+ [$class : 'UsernamePasswordMultiBinding',
+ credentialsId : env.OS_CREDENTIALS,
+ passwordVariable: 'OS_PASSWORD',
+ usernameVariable: 'OS_USERNAME']
+ ]) {
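+ // Export Keystone v3 credentials for the openstack CLI, delete the stack and wait until it is gone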
+ run_cmd("""\
+ export OS_IDENTITY_API_VERSION=3
+ export OS_AUTH_URL=${OS_AUTH_URL}
+ export OS_USERNAME=${OS_USERNAME}
+ export OS_PASSWORD=${OS_PASSWORD}
+ export OS_PROJECT_NAME=${OS_PROJECT_NAME}
+ export OS_USER_DOMAIN_NAME=${OS_USER_DOMAIN_NAME}
+ openstack --insecure stack delete -y ${ENV_NAME} || true
+ while openstack --insecure stack show ${ENV_NAME} -f value -c stack_status; do sleep 10; done
+ """)
+ }
+
+ } else {
+ throw new Exception("Unknown env_manager: '${env_manager}'")
+ }
+
println "Remove config drive ISO"
run_cmd("""\
rm /home/jenkins/images/${CFG01_CONFIG_IMAGE_NAME} || true
@@ -207,12 +243,64 @@
build_pipeline_job('swarm-bootstrap-salt-cluster-devops', parameters)
}
-def swarm_deploy_cicd(String stack_to_install, String install_timeout) {
+def swarm_bootstrap_salt_cluster_heat(String jenkins_slave_node_name) {
+ // Trigger the swarm-bootstrap-salt-cluster-heat job, which creates the Heat stack
+ // and registers 'jenkins_slave_node_name' as a new Jenkins slave
+ def common = new com.mirantis.mk.Common()
+ def cookiecutter_template_commit = env.COOKIECUTTER_TEMPLATE_COMMIT ?: "release/${env.MCP_VERSION}"
+ def salt_models_system_commit = env.SALT_MODELS_SYSTEM_COMMIT ?: "release/${env.MCP_VERSION}"
+ def tcp_qa_refs = env.TCP_QA_REFS ?: ''
+ def mk_pipelines_ref = env.MK_PIPELINES_REF ?: ''
+ def pipeline_library_ref = env.PIPELINE_LIBRARY_REF ?: ''
+ def cookiecutter_ref_change = env.COOKIECUTTER_REF_CHANGE ?: ''
+ def environment_template_ref_change = env.ENVIRONMENT_TEMPLATE_REF_CHANGE ?: ''
+ def mcp_salt_repo_url = env.MCP_SALT_REPO_URL ?: ''
+ def mcp_salt_repo_key = env.MCP_SALT_REPO_KEY ?: ''
+ def env_ipmi_user = env.IPMI_USER ?: ''
+ def env_ipmi_pass = env.IPMI_PASS ?: ''
+ def env_lab_mgm_iface = env.LAB_MANAGEMENT_IFACE ?: ''
+ def env_lab_ctl_iface = env.LAB_CONTROL_IFACE ?: ''
+ def update_repo_custom_tag = env.UPDATE_REPO_CUSTOM_TAG ?: ''
+ def parameters = [
+ string(name: 'PARENT_NODE_NAME', value: "${NODE_NAME}"),
+ string(name: 'JENKINS_SLAVE_NODE_NAME', value: jenkins_slave_node_name),
+ string(name: 'PARENT_WORKSPACE', value: pwd()),
+ string(name: 'LAB_CONFIG_NAME', value: "${LAB_CONFIG_NAME}"),
+ string(name: 'ENV_NAME', value: "${ENV_NAME}"),
+ string(name: 'MCP_VERSION', value: "${MCP_VERSION}"),
+ string(name: 'MCP_IMAGE_PATH1604', value: "${MCP_IMAGE_PATH1604}"),
+ string(name: 'IMAGE_PATH_CFG01_DAY01', value: "${IMAGE_PATH_CFG01_DAY01}"),
+ string(name: 'CFG01_CONFIG_IMAGE_NAME', value: "${CFG01_CONFIG_IMAGE_NAME}"),
+ string(name: 'TCP_QA_REFS', value: "${tcp_qa_refs}"),
+ string(name: 'PIPELINE_LIBRARY_REF', value: "${pipeline_library_ref}"),
+ string(name: 'MK_PIPELINES_REF', value: "${mk_pipelines_ref}"),
+ string(name: 'COOKIECUTTER_TEMPLATE_COMMIT', value: "${cookiecutter_template_commit}"),
+ string(name: 'SALT_MODELS_SYSTEM_COMMIT', value: "${salt_models_system_commit}"),
+ string(name: 'COOKIECUTTER_REF_CHANGE', value: "${cookiecutter_ref_change}"),
+ string(name: 'ENVIRONMENT_TEMPLATE_REF_CHANGE', value: "${environment_template_ref_change}"),
+ string(name: 'MCP_SALT_REPO_URL', value: "${mcp_salt_repo_url}"),
+ string(name: 'MCP_SALT_REPO_KEY', value: "${mcp_salt_repo_key}"),
+ string(name: 'IPMI_USER', value: env_ipmi_user),
+ string(name: 'IPMI_PASS', value: env_ipmi_pass),
+ string(name: 'LAB_MANAGEMENT_IFACE', value: env_lab_mgm_iface),
+ string(name: 'LAB_CONTROL_IFACE', value: env_lab_ctl_iface),
+ string(name: 'UPDATE_REPO_CUSTOM_TAG', value: "${update_repo_custom_tag}"),
+ string(name: 'OS_AUTH_URL', value: "${OS_AUTH_URL}"),
+ string(name: 'OS_PROJECT_NAME', value: "${OS_PROJECT_NAME}"),
+ string(name: 'OS_USER_DOMAIN_NAME', value: "${OS_USER_DOMAIN_NAME}"),
+ string(name: 'OS_CREDENTIALS', value: "${OS_CREDENTIALS}"),
+ string(name: 'LAB_PARAM_DEFAULTS', value: "${LAB_PARAM_DEFAULTS}"),
+ booleanParam(name: 'SHUTDOWN_ENV_ON_TEARDOWN', value: false),
+ ]
+
+ build_pipeline_job('swarm-bootstrap-salt-cluster-heat', parameters)
+}
+
+def swarm_deploy_cicd(String stack_to_install, String install_timeout, String jenkins_slave_node_name) {
// Run openstack_deploy job on cfg01 Jenkins for specified stacks
def common = new com.mirantis.mk.Common()
def tcp_qa_refs = env.TCP_QA_REFS ?: ''
def parameters = [
- string(name: 'PARENT_NODE_NAME', value: "${NODE_NAME}"),
+ string(name: 'PARENT_NODE_NAME', value: jenkins_slave_node_name),
string(name: 'PARENT_WORKSPACE', value: pwd()),
string(name: 'ENV_NAME', value: "${ENV_NAME}"),
string(name: 'STACK_INSTALL', value: stack_to_install),
@@ -223,12 +311,12 @@
build_pipeline_job('swarm-deploy-cicd', parameters)
}
-def swarm_deploy_platform(String stack_to_install, String install_timeout) {
+def swarm_deploy_platform(String stack_to_install, String install_timeout, String jenkins_slave_node_name) {
// Run openstack_deploy job on CICD Jenkins for specified stacks
def common = new com.mirantis.mk.Common()
def tcp_qa_refs = env.TCP_QA_REFS ?: ''
def parameters = [
- string(name: 'PARENT_NODE_NAME', value: "${NODE_NAME}"),
+ string(name: 'PARENT_NODE_NAME', value: jenkins_slave_node_name),
string(name: 'PARENT_WORKSPACE', value: pwd()),
string(name: 'ENV_NAME', value: "${ENV_NAME}"),
string(name: 'STACK_INSTALL', value: stack_to_install),
@@ -255,7 +343,7 @@
build_pipeline_job('swarm-deploy-platform-without-cicd', parameters)
}
-def swarm_run_pytest(String passed_steps) {
+def swarm_run_pytest(String passed_steps, String jenkins_slave_node_name) {
// Run pytest tests
def common = new com.mirantis.mk.Common()
def tcp_qa_refs = env.TCP_QA_REFS ?: ''
@@ -266,7 +354,7 @@
string(name: 'ENV_NAME', value: "${ENV_NAME}"),
string(name: 'PASSED_STEPS', value: passed_steps),
string(name: 'RUN_TEST_OPTS', value: "${RUN_TEST_OPTS}"),
- string(name: 'PARENT_NODE_NAME', value: "${NODE_NAME}"),
+ string(name: 'PARENT_NODE_NAME', value: jenkins_slave_node_name),
string(name: 'PARENT_WORKSPACE', value: pwd()),
string(name: 'TCP_QA_REFS', value: "${tcp_qa_refs}"),
booleanParam(name: 'SHUTDOWN_ENV_ON_TEARDOWN', value: false),
@@ -286,7 +374,7 @@
parameters: parameters
}
-def swarm_testrail_report(String passed_steps) {
+def swarm_testrail_report(String passed_steps, String jenkins_slave_node_name) {
// Run pytest tests
def common = new com.mirantis.mk.Common()
def tcp_qa_refs = env.TCP_QA_REFS ?: ''
@@ -297,7 +385,7 @@
string(name: 'LAB_CONFIG_NAME', value: "${LAB_CONFIG_NAME}"),
string(name: 'MCP_VERSION', value: "${MCP_VERSION}"),
string(name: 'PASSED_STEPS', value: passed_steps),
- string(name: 'PARENT_NODE_NAME', value: "${NODE_NAME}"),
+ string(name: 'PARENT_NODE_NAME', value: jenkins_slave_node_name),
string(name: 'PARENT_WORKSPACE', value: pwd()),
string(name: 'TCP_QA_REFS', value: "${tcp_qa_refs}"),
string(name: 'TEMPEST_TEST_SUITE_NAME', value: "${tempest_test_suite_name}"),
@@ -309,13 +397,8 @@
parameters: parameters
}
-def generate_cookied_model() {
+def generate_cookied_model(IPV4_NET_ADMIN, IPV4_NET_CONTROL, IPV4_NET_TENANT, IPV4_NET_EXTERNAL) {
def common = new com.mirantis.mk.Common()
- // do not fail if environment doesn't exists
- def IPV4_NET_ADMIN=run_cmd_stdout("dos.py net-list ${ENV_NAME} | grep admin-pool01").trim().split().last()
- def IPV4_NET_CONTROL=run_cmd_stdout("dos.py net-list ${ENV_NAME} | grep private-pool01").trim().split().last()
- def IPV4_NET_TENANT=run_cmd_stdout("dos.py net-list ${ENV_NAME} | grep tenant-pool01").trim().split().last()
- def IPV4_NET_EXTERNAL=run_cmd_stdout("dos.py net-list ${ENV_NAME} | grep external-pool01").trim().split().last()
println("IPV4_NET_ADMIN=" + IPV4_NET_ADMIN)
println("IPV4_NET_CONTROL=" + IPV4_NET_CONTROL)
println("IPV4_NET_TENANT=" + IPV4_NET_TENANT)
@@ -351,22 +434,9 @@
build_shell_job('swarm-cookied-model-generator', parameters, "deploy_generate_model.xml")
}
-def generate_configdrive_iso() {
+def generate_configdrive_iso(SALT_MASTER_IP, ADMIN_NETWORK_GW) {
def common = new com.mirantis.mk.Common()
- def SALT_MASTER_IP=run_cmd_stdout("""\
- export ENV_NAME=${ENV_NAME}
- . ./tcp_tests/utils/env_salt
- echo \$SALT_MASTER_IP
- """).trim().split().last()
println("SALT_MASTER_IP=" + SALT_MASTER_IP)
-
- def dhcp_ranges_json=run_cmd_stdout("""\
- fgrep dhcp_ranges ${ENV_NAME}_hardware.ini |
- fgrep "admin-pool01"|
- cut -d"=" -f2
- """).trim().split("\n").last()
- def dhcp_ranges = new groovy.json.JsonSlurperClassic().parseText(dhcp_ranges_json)
- def ADMIN_NETWORK_GW = dhcp_ranges['admin-pool01']['gateway']
println("ADMIN_NETWORK_GW=" + ADMIN_NETWORK_GW)
def mk_pipelines_ref = env.MK_PIPELINES_REF ?: ''
diff --git a/tcp_tests/requirements.txt b/tcp_tests/requirements.txt
index d9eae28..759a449 100644
--- a/tcp_tests/requirements.txt
+++ b/tcp_tests/requirements.txt
@@ -26,4 +26,6 @@
# For Queens: https://github.com/openstack/requirements/blob/stable/queens/global-requirements.txt
python-heatclient>=1.10.0
-keystoneauth1>=3.3.0
\ No newline at end of file
+python-glanceclient>=2.8.0
+python-openstackclient>=3.12.0
+keystoneauth1>=3.3.0