Add support for ENV_MANAGER=heat in test pipelines
- If ENV_MANAGER=heat, environment snapshots are unavailable,
so choose the test cases for such environments carefully.
- Added a new job swarm-bootstrap-salt-cluster-heat.groovy
to create an environment in OpenStack
- Added new parameters for parent jobs (see the dispatch sketch below):
ENV_MANAGER (default=devops)
OS_AUTH_URL (for ENV_MANAGER=heat) - Keystone URL
OS_PROJECT_NAME (for ENV_MANAGER=heat) - OS project name
OS_USER_DOMAIN_NAME (for ENV_MANAGER=heat) - OS user domain name
OS_CREDENTIALS (for ENV_MANAGER=heat) - Jenkins credentials
with username and password to access OpenStack
LAB_PARAM_DEFAULTS (for ENV_MANAGER=heat) - environment file
for the Heat template with the 'parameter_defaults' dict.
- Added the requirements 'python-openstackclient' and 'python-glanceclient'
to manage images and the Heat stack from Jenkins pipeline scripts
(see the stack-create sketch below).
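
A minimal sketch of how a parent job might dispatch to the new
bootstrap job; only the job and parameter names come from this
change, the surrounding logic is illustrative:

    // Illustrative dispatch: pick the heat- or devops-based bootstrap
    // job depending on ENV_MANAGER and forward the new parameters.
    def bootstrap_job = (env.ENV_MANAGER == 'heat')
        ? 'swarm-bootstrap-salt-cluster-heat'
        : 'swarm-bootstrap-salt-cluster-devops'
    def job_parameters = [
        string(name: 'ENV_NAME', value: env.ENV_NAME),
        string(name: 'ENV_MANAGER', value: env.ENV_MANAGER ?: 'devops'),
    ]
    if (env.ENV_MANAGER == 'heat') {
        job_parameters += [
            string(name: 'OS_AUTH_URL', value: env.OS_AUTH_URL),
            string(name: 'OS_PROJECT_NAME', value: env.OS_PROJECT_NAME),
            string(name: 'OS_USER_DOMAIN_NAME', value: env.OS_USER_DOMAIN_NAME),
            string(name: 'OS_CREDENTIALS', value: env.OS_CREDENTIALS),
            string(name: 'LAB_PARAM_DEFAULTS', value: env.LAB_PARAM_DEFAULTS),
        ]
    }
    build(job: bootstrap_job, parameters: job_parameters)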
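A sketch of how the heat job might create the stack with
python-openstackclient; it assumes shared.run_cmd executes its
script through a shell step (as in the diff below), so credentials
bound by withCredentials are visible to the CLI, and 'underlay.hot'
is a hypothetical template name:

    // Illustrative only: bind the Jenkins credentials referenced by
    // OS_CREDENTIALS and create the Heat stack for ENV_NAME.
    // OS_USERNAME/OS_PASSWORD are exposed as shell environment
    // variables, which the openstack CLI reads directly.
    withCredentials([usernamePassword(credentialsId: env.OS_CREDENTIALS,
                                      usernameVariable: 'OS_USERNAME',
                                      passwordVariable: 'OS_PASSWORD')]) {
        shared.run_cmd("""\
        export OS_AUTH_URL=${OS_AUTH_URL}
        export OS_PROJECT_NAME=${OS_PROJECT_NAME}
        export OS_USER_DOMAIN_NAME=${OS_USER_DOMAIN_NAME}
        openstack stack create --template underlay.hot --environment ${LAB_PARAM_DEFAULTS} --wait ${ENV_NAME}
        """)
    }
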
Related-task: #PROD-27687
Change-Id: I5b3a2fa3aac0bf3d592efa3617e25b8a965f377f
diff --git a/jobs/pipelines/swarm-bootstrap-salt-cluster-devops.groovy b/jobs/pipelines/swarm-bootstrap-salt-cluster-devops.groovy
index efeabba..392be7c 100644
--- a/jobs/pipelines/swarm-bootstrap-salt-cluster-devops.groovy
+++ b/jobs/pipelines/swarm-bootstrap-salt-cluster-devops.groovy
@@ -44,6 +44,12 @@
error "'PARENT_WORKSPACE' contains path to non-existing directory ${PARENT_WORKSPACE} on the node '${PARENT_NODE_NAME}'."
}
dir("${PARENT_WORKSPACE}") {
+ if (env.TCP_QA_REFS) {
+ stage("Update working dir to patch ${TCP_QA_REFS}") {
+ shared.update_working_dir()
+ }
+ }
+
stage("Cleanup: erase ${ENV_NAME} and remove config drive") {
println "Remove environment ${ENV_NAME}"
shared.run_cmd("""\
@@ -55,12 +61,6 @@
""")
}
- if (env.TCP_QA_REFS) {
- stage("Update working dir to patch ${TCP_QA_REFS}") {
- shared.update_working_dir()
- }
- }
-
stage("Create an environment ${ENV_NAME} in disabled state") {
// deploy_hardware.xml
shared.run_cmd("""\
@@ -75,18 +75,36 @@
}
stage("Generate the model") {
- shared.generate_cookied_model()
+ def IPV4_NET_ADMIN=shared.run_cmd_stdout("dos.py net-list ${ENV_NAME} | grep admin-pool01").trim().split().last()
+ def IPV4_NET_CONTROL=shared.run_cmd_stdout("dos.py net-list ${ENV_NAME} | grep private-pool01").trim().split().last()
+ def IPV4_NET_TENANT=shared.run_cmd_stdout("dos.py net-list ${ENV_NAME} | grep tenant-pool01").trim().split().last()
+ def IPV4_NET_EXTERNAL=shared.run_cmd_stdout("dos.py net-list ${ENV_NAME} | grep external-pool01").trim().split().last()
+ shared.generate_cookied_model(IPV4_NET_ADMIN, IPV4_NET_CONTROL, IPV4_NET_TENANT, IPV4_NET_EXTERNAL)
}
stage("Generate config drive ISO") {
- shared.generate_configdrive_iso()
+ def SALT_MASTER_IP=shared.run_cmd_stdout("""\
+ SALT_MASTER_INFO=\$(for node in \$(dos.py slave-ip-list --address-pool-name admin-pool01 ${ENV_NAME}); do echo \$node; done|grep cfg01)
+ echo \$SALT_MASTER_INFO|cut -d',' -f2
+ """).trim().split("\n").last()
+ def dhcp_ranges_json=shared.run_cmd_stdout("""\
+ fgrep dhcp_ranges ${ENV_NAME}_hardware.ini |
+ fgrep "admin-pool01"|
+ cut -d"=" -f2
+ """).trim().split("\n").last()
+ def dhcp_ranges = new groovy.json.JsonSlurperClassic().parseText(dhcp_ranges_json)
+ def ADMIN_NETWORK_GW = dhcp_ranges['admin-pool01']['gateway']
+ shared.generate_configdrive_iso(SALT_MASTER_IP, ADMIN_NETWORK_GW)
}
stage("Upload generated config drive ISO into volume on cfg01 node") {
+ def SALT_MASTER_HOSTNAME=shared.run_cmd_stdout("""\
+ SALT_MASTER_INFO=\$(for node in \$(dos.py slave-ip-list --address-pool-name admin-pool01 ${ENV_NAME}); do echo \$node; done|grep cfg01)
+ echo \$SALT_MASTER_INFO|cut -d',' -f1
+ """).trim().split("\n").last()
shared.run_cmd("""\
# Get SALT_MASTER_HOSTNAME to determine the volume name
- . ./tcp_tests/utils/env_salt
- virsh vol-upload ${ENV_NAME}_\${SALT_MASTER_HOSTNAME}_config /home/jenkins/images/${CFG01_CONFIG_IMAGE_NAME} --pool default
+ virsh vol-upload ${ENV_NAME}_${SALT_MASTER_HOSTNAME}_config /home/jenkins/images/${CFG01_CONFIG_IMAGE_NAME} --pool default
virsh pool-refresh --pool default
""")
}