Add timestamps to job output
PROD-35295
Change-Id: Ib308e07a7720154b157b70ea4b028628605df7aa
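
Wrap the body of each scripted pipeline in the 'timestamps {}' step so that
every console log line is prefixed with its wall-clock time. A minimal
sketch of the wrapping pattern applied in the diffs below (assumes the
Jenkins Timestamper plugin, which provides the 'timestamps' step, is
installed; variable names follow the pipelines in this change):

    timeout(time: 2, unit: 'HOURS') {
        timestamps {                 // prefix each console line with a timestamp
            node ("${PARENT_NODE_NAME}") {
                dir("${PARENT_WORKSPACE}") {
                    // ... existing stages run unchanged here ...
                }
            } // node
        } // timestamps
    } // timeout

The step only decorates the log output; stage behaviour and the surrounding
'timeout' budgets are unchanged.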
diff --git a/jobs/pipelines/swarm-bootstrap-salt-cluster-heat.groovy b/jobs/pipelines/swarm-bootstrap-salt-cluster-heat.groovy
index 2062c70..10ea257 100644
--- a/jobs/pipelines/swarm-bootstrap-salt-cluster-heat.groovy
+++ b/jobs/pipelines/swarm-bootstrap-salt-cluster-heat.groovy
@@ -51,276 +51,278 @@
def ubuntu_foundation_image_name = "ubuntu-16.04-foundation-2019.2.0"
timeout(time: 2, unit: 'HOURS') {
-node ("${PARENT_NODE_NAME}") {
- if (! fileExists("${PARENT_WORKSPACE}")) {
- error "'PARENT_WORKSPACE' contains path to non-existing directory ${PARENT_WORKSPACE} on the node '${PARENT_NODE_NAME}'."
- }
- dir("${PARENT_WORKSPACE}") {
-
- if (env.TCP_QA_REFS) {
- stage("Update working dir to patch ${TCP_QA_REFS}") {
- shared.update_working_dir()
+ timestamps {
+ node ("${PARENT_NODE_NAME}") {
+ if (! fileExists("${PARENT_WORKSPACE}")) {
+                error "'PARENT_WORKSPACE' contains a path to a non-existent directory ${PARENT_WORKSPACE} on the node '${PARENT_NODE_NAME}'."
}
- }
+ dir("${PARENT_WORKSPACE}") {
- withCredentials([
- [$class : 'UsernamePasswordMultiBinding',
- credentialsId : env.OS_CREDENTIALS,
- passwordVariable: 'OS_PASSWORD',
- usernameVariable: 'OS_USERNAME']
- ]) {
- env.OS_IDENTITY_API_VERSION = 3
-
- stage("Cleanup: erase ${ENV_NAME} and remove config drive") {
-
- // delete heat stack
- println "Remove heat stack '${ENV_NAME}'"
- shared.run_cmd("""\
- # export OS_IDENTITY_API_VERSION=3
- # export OS_AUTH_URL=${OS_AUTH_URL}
- # export OS_USERNAME=${OS_USERNAME}
- # export OS_PASSWORD=${OS_PASSWORD}
- # export OS_PROJECT_NAME=${OS_PROJECT_NAME}
- openstack --insecure stack delete -y ${ENV_NAME} || true
- timeout 20m /bin/bash -c "while openstack --insecure stack show ${ENV_NAME} -f value -c stack_status; do sleep 10; done"
- """)
-
- println "Remove config drive ISO"
- shared.run_cmd("""\
- rm /home/jenkins/images/${CFG01_CONFIG_IMAGE_NAME} || true
- """)
- }
-
- stage("Generate the model") {
- def IPV4_NET_ADMIN=shared.run_cmd_stdout("./tcp_tests/utils/get_param_heat_template.py management_subnet_cidr").trim().split().last()
- def IPV4_NET_CONTROL=shared.run_cmd_stdout("./tcp_tests/utils/get_param_heat_template.py control_subnet_cidr").trim().split().last()
- def IPV4_NET_TENANT=shared.run_cmd_stdout("./tcp_tests/utils/get_param_heat_template.py tenant_subnet_cidr").trim().split().last()
- def IPV4_NET_EXTERNAL=shared.run_cmd_stdout("./tcp_tests/utils/get_param_heat_template.py external_subnet_cidr").trim().split().last()
- shared.generate_cookied_model(IPV4_NET_ADMIN, IPV4_NET_CONTROL, IPV4_NET_TENANT, IPV4_NET_EXTERNAL)
- }
-
- stage("Generate config drive ISO") {
- SALT_MASTER_IP=shared.run_cmd_stdout("./tcp_tests/utils/get_param_heat_template.py management_subnet_cfg01_ip").trim().split().last()
- def ADMIN_NETWORK_GW=shared.run_cmd_stdout("./tcp_tests/utils/get_param_heat_template.py management_subnet_gateway_ip").trim().split().last()
- shared.generate_configdrive_iso(SALT_MASTER_IP, ADMIN_NETWORK_GW)
- }
-
- stage("Upload Ubuntu image for foundation node") {
- shared.run_cmd("""\
- if ! openstack --insecure image show ${ubuntu_foundation_image_name} -f value -c name; then
- wget -O ./${ubuntu_foundation_image_name} https://cloud-images.ubuntu.com/releases/16.04/release/ubuntu-16.04-server-cloudimg-amd64-disk1.img
- openstack --insecure image create ${ubuntu_foundation_image_name} --file ./${ubuntu_foundation_image_name} --disk-format qcow2 --container-format bare
- rm ./${ubuntu_foundation_image_name}
- else
- echo Image ${ubuntu_foundation_image_name} already exists
- fi
- """)
- }
-
- stage("Upload cfg01-day01 and VCP images") {
- shared.run_cmd("""\
- # export OS_IDENTITY_API_VERSION=3
- # export OS_AUTH_URL=${OS_AUTH_URL}
- # export OS_USERNAME=${OS_USERNAME}
- # export OS_PASSWORD=${OS_PASSWORD}
- # export OS_PROJECT_NAME=${OS_PROJECT_NAME}
-
- openstack --insecure image show ${cfg01_day01_image_name} -f value -c name || openstack --insecure image create ${cfg01_day01_image_name} --file ${IMAGE_PATH_CFG01_DAY01} --disk-format qcow2 --container-format bare
- openstack --insecure image show ${ubuntu_vcp_image_name} -f value -c name || openstack --insecure image create ${ubuntu_vcp_image_name} --file ${MCP_IMAGE_PATH1604} --disk-format qcow2 --container-format bare
- """)
- }
-
- stage("Upload generated config drive ISO into volume on cfg01 node") {
- shared.run_cmd("""\
- # export OS_IDENTITY_API_VERSION=3
- # export OS_AUTH_URL=${OS_AUTH_URL}
- # export OS_USERNAME=${OS_USERNAME}
- # export OS_PASSWORD=${OS_PASSWORD}
- # export OS_PROJECT_NAME=${OS_PROJECT_NAME}
-
- openstack --insecure image delete cfg01.${ENV_NAME}-config-drive.iso || true
- sleep 3
- openstack --insecure image create cfg01.${ENV_NAME}-config-drive.iso --file /home/jenkins/images/${CFG01_CONFIG_IMAGE_NAME} --disk-format iso --container-format bare
- """)
- }
-
- stage("Create Heat stack '${ENV_NAME}'") {
- // Create stack and wait for CREATE_COMPLETED status, manual analog:
- // openstack --insecure stack create ${ENV_NAME} \
- // --template ./tcp_tests/templates/${LAB_CONFIG_NAME}/underlay.hot \
- // --environment ./tcp_tests/templates/_heat_environments/${LAB_PARAM_DEFAULTS} \
- // --parameter env_name=${ENV_NAME} --parameter mcp_version=${MCP_VERSION}
- shared.run_cmd("""\
- export BOOTSTRAP_TIMEOUT=3600
- export ENV_MANAGER=heat
- export TEST_GROUP=test_create_environment
- export SHUTDOWN_ENV_ON_TEARDOWN=false
- export PYTHONIOENCODING=UTF-8
- export REPOSITORY_SUITE=${MCP_VERSION}
- export UPDATE_VERSION=${UPDATE_VERSION}
- export ENV_NAME=${ENV_NAME}
- export LAB_CONFIG_NAME=${LAB_CONFIG_NAME}
- export LAB_PARAM_DEFAULTS=${LAB_PARAM_DEFAULTS}
- export LOG_NAME=swarm_test_create_environment.log
- py.test --cache-clear -vvv -s -p no:django -p no:ipdb --junit-xml=deploy_hardware.xml -k \${TEST_GROUP}
- """)
- }
-
- stage("Add the Jenkins slave node") {
- def jenkins_slave_ip_value_name = "foundation_public_ip"
- def jenkins_slave_ip = shared.run_cmd_stdout("openstack --insecure stack output show ${ENV_NAME} ${jenkins_slave_ip_value_name} -f value -c output_value").trim().split().last()
- def jenkins_slave_executors = 2
- common.printMsg("JENKINS_SLAVE_NODE_NAME=${JENKINS_SLAVE_NODE_NAME}", "green")
- common.printMsg("JENKINS_SLAVE_IP=${jenkins_slave_ip}", "green")
-
- withCredentials([
- [$class : 'UsernamePasswordMultiBinding',
- credentialsId : "${CREATE_JENKINS_NODE_CREDENTIALS}",
- passwordVariable: 'JENKINS_PASS',
- usernameVariable: 'JENKINS_USER']
- ]) {
-
- script_delete_agent = ("""\
- CRUMB=\$(curl --fail -0 -u \"\${JENKINS_USER}:\${JENKINS_PASS}\" \${JENKINS_URL}\'/crumbIssuer/api/xml?xpath=concat(//crumbRequestField,":",//crumb)\' 2>/dev/null)
- curl -w '%{http_code}' -o /dev/null \
- -u \"\${JENKINS_USER}:\${JENKINS_PASS}\" \
- -H \"Content-Type:application/x-www-form-urlencoded\" \
- -H \"\$CRUMB\" \
- \"\${JENKINS_URL}/computer/\${JENKINS_SLAVE_NODE_NAME}/doDelete\" \
- --request \'POST\' --data \'\'
- sleep 10
- """)
-
- script_create_agent = ("""\
- CRUMB=\$(curl --fail -0 -u \"\${JENKINS_USER}:\${JENKINS_PASS}\" \${JENKINS_URL}\'/crumbIssuer/api/xml?xpath=concat(//crumbRequestField,":",//crumb)\' 2>/dev/null)
-
- curl -L -sS -w '%{http_code}' -o /dev/null \
- -u \"\${JENKINS_USER}:\${JENKINS_PASS}\" \
- -H \"Content-Type:application/x-www-form-urlencoded\" \
- -H \"\$CRUMB\" \
- -X POST -d 'json={\
- \"name\": \"'\"\$JENKINS_SLAVE_NODE_NAME\"'\", \
- \"nodeDescription\": \"'\"\$ENV_NAME\"'\", \
- \"numExecutors\": \"'\"${jenkins_slave_executors}\"'\", \
- \"remoteFS\": \"'\"/home/jenkins/workspace\"'\", \
- \"labelString\": \"'\"\$ENV_NAME\"'\", \
- \"mode\": \"EXCLUSIVE\", \
- \"\": [\"hudson.plugins.sshslaves.SSHLauncher\", \"hudson.slaves.RetentionStrategy\$Always\"], \
- \"launcher\": {\
- \"stapler-class\": \"hudson.plugins.sshslaves.SSHLauncher\", \
- \"\$class\": \"hudson.plugins.sshslaves.SSHLauncher\", \
- \"host\": \"'\"${jenkins_slave_ip}\"'\", \
- \"credentialsId\": \"'\"\$ACCESS_JENKINS_NODE_CREDENTIALS\"'\", \
- \"port\": \"'\"22\"'\", \
- \"javaPath\": \"\", \
- \"jvmOptions\": \"\", \
- \"prefixStartSlaveCmd\": \"\", \
- \"suffixStartSlaveCmd\": \"\", \
- \"launchTimeoutSeconds\": \"\", \
- \"maxNumRetries\": \"\", \
- \"retryWaitTime\": \"\", \
- \"sshHostKeyVerificationStrategy\": {\
- \"\$class\": \"hudson.plugins.sshslaves.verifiers.NonVerifyingKeyVerificationStrategy\" \
- }, \
- \"tcpNoDelay\": \"true\"\
- }, \
- \"retentionStrategy\": {\
- \"stapler-class\": \"hudson.slaves.RetentionStrategy\$Always\", \
- \"\$class\": \"hudson.slaves.RetentionStrategy\$Always\"\
- }, \
- \"nodeProperties\": {\
- \"stapler-class-bag\": \"true\"\
- }, \
- \"type\": \"hudson.slaves.DumbSlave\", \
- \"crumb\": \"'\"\$CRUMB\"'\"}' \
- \"\${JENKINS_URL}/computer/doCreateItem?name=\${JENKINS_SLAVE_NODE_NAME}&type=hudson.slaves.DumbSlave\"
- """)
- shared.verbose_sh(script_delete_agent, true, false, true)
- shared.verbose_sh(script_create_agent, true, false, true)
- timeout(time: 30, unit: 'MINUTES') {
- node("${JENKINS_SLAVE_NODE_NAME}") {
- sh "echo 'ok'"
- println "Jenkins agent is available now and can executes commands"
+ if (env.TCP_QA_REFS) {
+ stage("Update working dir to patch ${TCP_QA_REFS}") {
+ shared.update_working_dir()
}
}
- // Store jenkins agent IP address
- jenkins_agent_description = "ssh jenkins@${jenkins_slave_ip} # foundation node with Jenkins agent <a href=${JENKINS_URL}/computer/${JENKINS_SLAVE_NODE_NAME}>${JENKINS_SLAVE_NODE_NAME}</a><br>ssh root@${SALT_MASTER_IP} # cfg01 node<br>"
- writeFile(file: "jenkins_agent_description.txt", text: jenkins_agent_description, encoding: "UTF-8")
- } // withCredentials
+ withCredentials([
+ [$class : 'UsernamePasswordMultiBinding',
+ credentialsId : env.OS_CREDENTIALS,
+ passwordVariable: 'OS_PASSWORD',
+ usernameVariable: 'OS_USERNAME']
+ ]) {
+ env.OS_IDENTITY_API_VERSION = 3
- }// stage
+ stage("Cleanup: erase ${ENV_NAME} and remove config drive") {
- } // withCredentials
+ // delete heat stack
+ println "Remove heat stack '${ENV_NAME}'"
+ shared.run_cmd("""\
+ # export OS_IDENTITY_API_VERSION=3
+ # export OS_AUTH_URL=${OS_AUTH_URL}
+ # export OS_USERNAME=${OS_USERNAME}
+ # export OS_PASSWORD=${OS_PASSWORD}
+ # export OS_PROJECT_NAME=${OS_PROJECT_NAME}
+ openstack --insecure stack delete -y ${ENV_NAME} || true
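+                        # Poll until the deleted stack is gone; give up after 20 minutes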
+ timeout 20m /bin/bash -c "while openstack --insecure stack show ${ENV_NAME} -f value -c stack_status; do sleep 10; done"
+ """)
- } // dir
-} // node
+ println "Remove config drive ISO"
+ shared.run_cmd("""\
+ rm /home/jenkins/images/${CFG01_CONFIG_IMAGE_NAME} || true
+ """)
+ }
+
+ stage("Generate the model") {
+ def IPV4_NET_ADMIN=shared.run_cmd_stdout("./tcp_tests/utils/get_param_heat_template.py management_subnet_cidr").trim().split().last()
+ def IPV4_NET_CONTROL=shared.run_cmd_stdout("./tcp_tests/utils/get_param_heat_template.py control_subnet_cidr").trim().split().last()
+ def IPV4_NET_TENANT=shared.run_cmd_stdout("./tcp_tests/utils/get_param_heat_template.py tenant_subnet_cidr").trim().split().last()
+ def IPV4_NET_EXTERNAL=shared.run_cmd_stdout("./tcp_tests/utils/get_param_heat_template.py external_subnet_cidr").trim().split().last()
+ shared.generate_cookied_model(IPV4_NET_ADMIN, IPV4_NET_CONTROL, IPV4_NET_TENANT, IPV4_NET_EXTERNAL)
+ }
+
+ stage("Generate config drive ISO") {
+ SALT_MASTER_IP=shared.run_cmd_stdout("./tcp_tests/utils/get_param_heat_template.py management_subnet_cfg01_ip").trim().split().last()
+ def ADMIN_NETWORK_GW=shared.run_cmd_stdout("./tcp_tests/utils/get_param_heat_template.py management_subnet_gateway_ip").trim().split().last()
+ shared.generate_configdrive_iso(SALT_MASTER_IP, ADMIN_NETWORK_GW)
+ }
+
+ stage("Upload Ubuntu image for foundation node") {
+ shared.run_cmd("""\
+ if ! openstack --insecure image show ${ubuntu_foundation_image_name} -f value -c name; then
+ wget -O ./${ubuntu_foundation_image_name} https://cloud-images.ubuntu.com/releases/16.04/release/ubuntu-16.04-server-cloudimg-amd64-disk1.img
+ openstack --insecure image create ${ubuntu_foundation_image_name} --file ./${ubuntu_foundation_image_name} --disk-format qcow2 --container-format bare
+ rm ./${ubuntu_foundation_image_name}
+ else
+ echo Image ${ubuntu_foundation_image_name} already exists
+ fi
+ """)
+ }
+
+ stage("Upload cfg01-day01 and VCP images") {
+ shared.run_cmd("""\
+ # export OS_IDENTITY_API_VERSION=3
+ # export OS_AUTH_URL=${OS_AUTH_URL}
+ # export OS_USERNAME=${OS_USERNAME}
+ # export OS_PASSWORD=${OS_PASSWORD}
+ # export OS_PROJECT_NAME=${OS_PROJECT_NAME}
+
+ openstack --insecure image show ${cfg01_day01_image_name} -f value -c name || openstack --insecure image create ${cfg01_day01_image_name} --file ${IMAGE_PATH_CFG01_DAY01} --disk-format qcow2 --container-format bare
+ openstack --insecure image show ${ubuntu_vcp_image_name} -f value -c name || openstack --insecure image create ${ubuntu_vcp_image_name} --file ${MCP_IMAGE_PATH1604} --disk-format qcow2 --container-format bare
+ """)
+ }
+
+ stage("Upload generated config drive ISO into volume on cfg01 node") {
+ shared.run_cmd("""\
+ # export OS_IDENTITY_API_VERSION=3
+ # export OS_AUTH_URL=${OS_AUTH_URL}
+ # export OS_USERNAME=${OS_USERNAME}
+ # export OS_PASSWORD=${OS_PASSWORD}
+ # export OS_PROJECT_NAME=${OS_PROJECT_NAME}
+
+ openstack --insecure image delete cfg01.${ENV_NAME}-config-drive.iso || true
+ sleep 3
+ openstack --insecure image create cfg01.${ENV_NAME}-config-drive.iso --file /home/jenkins/images/${CFG01_CONFIG_IMAGE_NAME} --disk-format iso --container-format bare
+ """)
+ }
+
+ stage("Create Heat stack '${ENV_NAME}'") {
+ // Create stack and wait for CREATE_COMPLETED status, manual analog:
+ // openstack --insecure stack create ${ENV_NAME} \
+ // --template ./tcp_tests/templates/${LAB_CONFIG_NAME}/underlay.hot \
+ // --environment ./tcp_tests/templates/_heat_environments/${LAB_PARAM_DEFAULTS} \
+ // --parameter env_name=${ENV_NAME} --parameter mcp_version=${MCP_VERSION}
+ shared.run_cmd("""\
+ export BOOTSTRAP_TIMEOUT=3600
+ export ENV_MANAGER=heat
+ export TEST_GROUP=test_create_environment
+ export SHUTDOWN_ENV_ON_TEARDOWN=false
+ export PYTHONIOENCODING=UTF-8
+ export REPOSITORY_SUITE=${MCP_VERSION}
+ export UPDATE_VERSION=${UPDATE_VERSION}
+ export ENV_NAME=${ENV_NAME}
+ export LAB_CONFIG_NAME=${LAB_CONFIG_NAME}
+ export LAB_PARAM_DEFAULTS=${LAB_PARAM_DEFAULTS}
+ export LOG_NAME=swarm_test_create_environment.log
+ py.test --cache-clear -vvv -s -p no:django -p no:ipdb --junit-xml=deploy_hardware.xml -k \${TEST_GROUP}
+ """)
+ }
+
+ stage("Add the Jenkins slave node") {
+ def jenkins_slave_ip_value_name = "foundation_public_ip"
+ def jenkins_slave_ip = shared.run_cmd_stdout("openstack --insecure stack output show ${ENV_NAME} ${jenkins_slave_ip_value_name} -f value -c output_value").trim().split().last()
+ def jenkins_slave_executors = 2
+ common.printMsg("JENKINS_SLAVE_NODE_NAME=${JENKINS_SLAVE_NODE_NAME}", "green")
+ common.printMsg("JENKINS_SLAVE_IP=${jenkins_slave_ip}", "green")
+
+ withCredentials([
+ [$class : 'UsernamePasswordMultiBinding',
+ credentialsId : "${CREATE_JENKINS_NODE_CREDENTIALS}",
+ passwordVariable: 'JENKINS_PASS',
+ usernameVariable: 'JENKINS_USER']
+ ]) {
+
+ script_delete_agent = ("""\
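+                          # Jenkins requires a CSRF crumb header on POST requests to its REST API, so fetch one first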
+ CRUMB=\$(curl --fail -0 -u \"\${JENKINS_USER}:\${JENKINS_PASS}\" \${JENKINS_URL}\'/crumbIssuer/api/xml?xpath=concat(//crumbRequestField,":",//crumb)\' 2>/dev/null)
+ curl -w '%{http_code}' -o /dev/null \
+ -u \"\${JENKINS_USER}:\${JENKINS_PASS}\" \
+ -H \"Content-Type:application/x-www-form-urlencoded\" \
+ -H \"\$CRUMB\" \
+ \"\${JENKINS_URL}/computer/\${JENKINS_SLAVE_NODE_NAME}/doDelete\" \
+ --request \'POST\' --data \'\'
+ sleep 10
+ """)
+
+ script_create_agent = ("""\
+ CRUMB=\$(curl --fail -0 -u \"\${JENKINS_USER}:\${JENKINS_PASS}\" \${JENKINS_URL}\'/crumbIssuer/api/xml?xpath=concat(//crumbRequestField,":",//crumb)\' 2>/dev/null)
+
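+                          # POST a DumbSlave node definition (launched over SSH) to Jenkins' doCreateItem API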
+ curl -L -sS -w '%{http_code}' -o /dev/null \
+ -u \"\${JENKINS_USER}:\${JENKINS_PASS}\" \
+ -H \"Content-Type:application/x-www-form-urlencoded\" \
+ -H \"\$CRUMB\" \
+ -X POST -d 'json={\
+ \"name\": \"'\"\$JENKINS_SLAVE_NODE_NAME\"'\", \
+ \"nodeDescription\": \"'\"\$ENV_NAME\"'\", \
+ \"numExecutors\": \"'\"${jenkins_slave_executors}\"'\", \
+ \"remoteFS\": \"'\"/home/jenkins/workspace\"'\", \
+ \"labelString\": \"'\"\$ENV_NAME\"'\", \
+ \"mode\": \"EXCLUSIVE\", \
+ \"\": [\"hudson.plugins.sshslaves.SSHLauncher\", \"hudson.slaves.RetentionStrategy\$Always\"], \
+ \"launcher\": {\
+ \"stapler-class\": \"hudson.plugins.sshslaves.SSHLauncher\", \
+ \"\$class\": \"hudson.plugins.sshslaves.SSHLauncher\", \
+ \"host\": \"'\"${jenkins_slave_ip}\"'\", \
+ \"credentialsId\": \"'\"\$ACCESS_JENKINS_NODE_CREDENTIALS\"'\", \
+ \"port\": \"'\"22\"'\", \
+ \"javaPath\": \"\", \
+ \"jvmOptions\": \"\", \
+ \"prefixStartSlaveCmd\": \"\", \
+ \"suffixStartSlaveCmd\": \"\", \
+ \"launchTimeoutSeconds\": \"\", \
+ \"maxNumRetries\": \"\", \
+ \"retryWaitTime\": \"\", \
+ \"sshHostKeyVerificationStrategy\": {\
+ \"\$class\": \"hudson.plugins.sshslaves.verifiers.NonVerifyingKeyVerificationStrategy\" \
+ }, \
+ \"tcpNoDelay\": \"true\"\
+ }, \
+ \"retentionStrategy\": {\
+ \"stapler-class\": \"hudson.slaves.RetentionStrategy\$Always\", \
+ \"\$class\": \"hudson.slaves.RetentionStrategy\$Always\"\
+ }, \
+ \"nodeProperties\": {\
+ \"stapler-class-bag\": \"true\"\
+ }, \
+ \"type\": \"hudson.slaves.DumbSlave\", \
+ \"crumb\": \"'\"\$CRUMB\"'\"}' \
+ \"\${JENKINS_URL}/computer/doCreateItem?name=\${JENKINS_SLAVE_NODE_NAME}&type=hudson.slaves.DumbSlave\"
+ """)
+ shared.verbose_sh(script_delete_agent, true, false, true)
+ shared.verbose_sh(script_create_agent, true, false, true)
+ timeout(time: 30, unit: 'MINUTES') {
+ node("${JENKINS_SLAVE_NODE_NAME}") {
+ sh "echo 'ok'"
+                                println "Jenkins agent is available now and can execute commands"
+ }
+ }
+                        // Store the Jenkins agent IP address
+ jenkins_agent_description = "ssh jenkins@${jenkins_slave_ip} # foundation node with Jenkins agent <a href=${JENKINS_URL}/computer/${JENKINS_SLAVE_NODE_NAME}>${JENKINS_SLAVE_NODE_NAME}</a><br>ssh root@${SALT_MASTER_IP} # cfg01 node<br>"
+ writeFile(file: "jenkins_agent_description.txt", text: jenkins_agent_description, encoding: "UTF-8")
+
+ } // withCredentials
+
+                    } // stage
+
+ } // withCredentials
+
+ } // dir
+ } // node
-node ("${JENKINS_SLAVE_NODE_NAME}") {
- dir("${PARENT_WORKSPACE}") {
+ node ("${JENKINS_SLAVE_NODE_NAME}") {
+ dir("${PARENT_WORKSPACE}") {
- stage("Clean the environment and clone tcp-qa") {
- deleteDir()
- shared.verbose_sh("""\
- [ -d /home/jenkins/venv_testrail_reporter ] || virtualenv /home/jenkins/venv_testrail_reporter
- """, true, false, true)
- shared.run_cmd("""\
- . /home/jenkins/venv_testrail_reporter/bin/activate; pip install git+https://github.com/dis-xcom/testrail_reporter -U
- """)
- shared.verbose_sh("""\
- [ -d /home/jenkins/fuel-devops30 ] || virtualenv /home/jenkins/fuel-devops30
- """, true, false, true)
- shared.run_cmd("""\
- git clone https://gerrit.mcp.mirantis.com/mcp/tcp-qa ${PARENT_WORKSPACE}
- """)
- shared.update_working_dir()
- }
-
- withCredentials([
- [$class : 'UsernamePasswordMultiBinding',
- credentialsId : env.OS_CREDENTIALS,
- passwordVariable: 'OS_PASSWORD',
- usernameVariable: 'OS_USERNAME']
- ]) {
-
-
- stage("Run the 'underlay' and 'salt-deployed' fixtures to bootstrap salt cluster") {
- def xml_report_name = "deploy_salt.xml"
- try {
- // deploy_salt.xml
- shared.run_sh("""\
- export ENV_NAME=${ENV_NAME}
- export LAB_CONFIG_NAME=${LAB_CONFIG_NAME}
- export LAB_PARAM_DEFAULTS=${LAB_PARAM_DEFAULTS}
- export ENV_MANAGER=heat
- export SHUTDOWN_ENV_ON_TEARDOWN=false
- export BOOTSTRAP_TIMEOUT=3600
- export PYTHONIOENCODING=UTF-8
- export REPOSITORY_SUITE=${MCP_VERSION}
- export UPDATE_VERSION=${UPDATE_VERSION}
- export TEST_GROUP=test_bootstrap_salt
- export LOG_NAME=swarm_test_bootstrap_salt.log
- py.test -vvv -s -p no:django -p no:ipdb --junit-xml=${xml_report_name} -k \${TEST_GROUP}
+ stage("Clean the environment and clone tcp-qa") {
+ deleteDir()
+ shared.verbose_sh("""\
+ [ -d /home/jenkins/venv_testrail_reporter ] || virtualenv /home/jenkins/venv_testrail_reporter
+ """, true, false, true)
+ shared.run_cmd("""\
+ . /home/jenkins/venv_testrail_reporter/bin/activate; pip install git+https://github.com/dis-xcom/testrail_reporter -U
""")
- // Wait for jenkins to start and IO calm down
- sleep(60)
-
- } catch (e) {
- common.printMsg("Saltstack cluster deploy is failed", "purple")
- if (fileExists(xml_report_name)) {
- shared.download_logs("deploy_salt_${ENV_NAME}")
- def String junit_report_xml = readFile(xml_report_name)
- def String junit_report_xml_pretty = new XmlUtil().serialize(junit_report_xml)
- throw new Exception(junit_report_xml_pretty)
- } else {
- throw e
- }
- } finally {
- // TODO(ddmitriev): add checks for salt cluster
+ shared.verbose_sh("""\
+ [ -d /home/jenkins/fuel-devops30 ] || virtualenv /home/jenkins/fuel-devops30
+ """, true, false, true)
+ shared.run_cmd("""\
+ git clone https://gerrit.mcp.mirantis.com/mcp/tcp-qa ${PARENT_WORKSPACE}
+ """)
+ shared.update_working_dir()
}
- } // stage
- } // withCredentials
- } // dir
-} // node
+
+ withCredentials([
+ [$class : 'UsernamePasswordMultiBinding',
+ credentialsId : env.OS_CREDENTIALS,
+ passwordVariable: 'OS_PASSWORD',
+ usernameVariable: 'OS_USERNAME']
+ ]) {
+
+
+ stage("Run the 'underlay' and 'salt-deployed' fixtures to bootstrap salt cluster") {
+ def xml_report_name = "deploy_salt.xml"
+ try {
+ // deploy_salt.xml
+ shared.run_sh("""\
+ export ENV_NAME=${ENV_NAME}
+ export LAB_CONFIG_NAME=${LAB_CONFIG_NAME}
+ export LAB_PARAM_DEFAULTS=${LAB_PARAM_DEFAULTS}
+ export ENV_MANAGER=heat
+ export SHUTDOWN_ENV_ON_TEARDOWN=false
+ export BOOTSTRAP_TIMEOUT=3600
+ export PYTHONIOENCODING=UTF-8
+ export REPOSITORY_SUITE=${MCP_VERSION}
+ export UPDATE_VERSION=${UPDATE_VERSION}
+ export TEST_GROUP=test_bootstrap_salt
+ export LOG_NAME=swarm_test_bootstrap_salt.log
+ py.test -vvv -s -p no:django -p no:ipdb --junit-xml=${xml_report_name} -k \${TEST_GROUP}
+ """)
+                        // Wait for Jenkins to start and I/O to calm down
+ sleep(60)
+
+ } catch (e) {
+                    common.printMsg("SaltStack cluster deploy failed", "purple")
+ if (fileExists(xml_report_name)) {
+ shared.download_logs("deploy_salt_${ENV_NAME}")
+ def String junit_report_xml = readFile(xml_report_name)
+ def String junit_report_xml_pretty = new XmlUtil().serialize(junit_report_xml)
+ throw new Exception(junit_report_xml_pretty)
+ } else {
+ throw e
+ }
+ } finally {
+ // TODO(ddmitriev): add checks for salt cluster
+ }
+ } // stage
+ } // withCredentials
+ } // dir
+ } // node
+ } // timestamps
} // timeout
diff --git a/jobs/pipelines/swarm-deploy-cicd.groovy b/jobs/pipelines/swarm-deploy-cicd.groovy
index 6014b9e..0cdc33f 100644
--- a/jobs/pipelines/swarm-deploy-cicd.groovy
+++ b/jobs/pipelines/swarm-deploy-cicd.groovy
@@ -30,73 +30,74 @@
def install_timeout = env.STACK_INSTALL_TIMEOUT.toInteger()
timeout(time: install_timeout + 600, unit: 'SECONDS') {
-
- node ("${PARENT_NODE_NAME}") {
- if (! fileExists("${PARENT_WORKSPACE}")) {
- error "'PARENT_WORKSPACE' contains path to non-existing directory ${PARENT_WORKSPACE} on the node '${PARENT_NODE_NAME}'."
- }
- dir("${PARENT_WORKSPACE}") {
-
- if (! env.STACK_INSTALL) {
- error "'STACK_INSTALL' must contain one or more comma separated stack names for [deploy_openstack] pipeline"
+ timestamps {
+ node ("${PARENT_NODE_NAME}") {
+ if (! fileExists("${PARENT_WORKSPACE}")) {
+                error "'PARENT_WORKSPACE' contains a path to a non-existent directory ${PARENT_WORKSPACE} on the node '${PARENT_NODE_NAME}'."
}
+ dir("${PARENT_WORKSPACE}") {
- if (env.TCP_QA_REFS) {
- stage("Update working dir to patch ${TCP_QA_REFS}") {
- shared.update_working_dir()
- }
- }
-
- try {
- // Install core and cicd
- stage("Run Jenkins job on salt-master [deploy_openstack:${env.STACK_INSTALL}]") {
- shared.run_job_on_day01_node(env.STACK_INSTALL, install_timeout)
- }
- stage("Create env_jenkins_cicd and env_k8s files") {
- shared.run_cmd("""\
- export TESTS_CONFIGS=\$(pwd)/${ENV_NAME}_salt_deployed.ini
- python ./tcp_tests/utils/create_env_jenkins_cicd.py
- """)
+ if (! env.STACK_INSTALL) {
+                    error "'STACK_INSTALL' must contain one or more comma-separated stack names for [deploy_openstack] pipeline"
}
- for (stack in "${env.STACK_INSTALL}".split(",")) {
- stage("Sanity check the deployed component [${stack}]") {
- shared.sanity_check_component(stack)
+ if (env.TCP_QA_REFS) {
+ stage("Update working dir to patch ${TCP_QA_REFS}") {
+ shared.update_working_dir()
+ }
+ }
- // If oslo_config INI file ${ENV_NAME}_salt_deployed.ini exists,
- // then make a copy for the created snapshot to allow the system
- // tests to revert this snapshot along with the metadata from the INI file.
+ try {
+ // Install core and cicd
+ stage("Run Jenkins job on salt-master [deploy_openstack:${env.STACK_INSTALL}]") {
+ shared.run_job_on_day01_node(env.STACK_INSTALL, install_timeout)
+ }
+ stage("Create env_jenkins_cicd and env_k8s files") {
shared.run_cmd("""\
- if [ -f \$(pwd)/${ENV_NAME}_salt_deployed.ini ]; then
- cp \$(pwd)/${ENV_NAME}_salt_deployed.ini \$(pwd)/${ENV_NAME}_${stack}_deployed.ini
- fi
+ export TESTS_CONFIGS=\$(pwd)/${ENV_NAME}_salt_deployed.ini
+ python ./tcp_tests/utils/create_env_jenkins_cicd.py
""")
}
- } // for
- if (make_snapshot_stages) {
- stage("Make environment snapshots for [${env.STACK_INSTALL}]") {
- shared.devops_snapshot(env.STACK_INSTALL)
+ for (stack in "${env.STACK_INSTALL}".split(",")) {
+ stage("Sanity check the deployed component [${stack}]") {
+ shared.sanity_check_component(stack)
+
+ // If oslo_config INI file ${ENV_NAME}_salt_deployed.ini exists,
+ // then make a copy for the created snapshot to allow the system
+ // tests to revert this snapshot along with the metadata from the INI file.
+ shared.run_cmd("""\
+ if [ -f \$(pwd)/${ENV_NAME}_salt_deployed.ini ]; then
+ cp \$(pwd)/${ENV_NAME}_salt_deployed.ini \$(pwd)/${ENV_NAME}_${stack}_deployed.ini
+ fi
+ """)
+ }
+ } // for
+
+ if (make_snapshot_stages) {
+ stage("Make environment snapshots for [${env.STACK_INSTALL}]") {
+ shared.devops_snapshot(env.STACK_INSTALL)
+ }
+ }
+
+ } catch (e) {
+                    common.printMsg("Job failed", "purple")
+ shared.download_logs("deploy_drivetrain_${ENV_NAME}")
+ throw e
+ } finally {
+ // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
+ // and report appropriate data to TestRail
+ // TODO(ddmitriev): add checks for cicd cluster
+ if (make_snapshot_stages) {
+ if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "true") {
+ shared.run_cmd("""\
+ dos.py destroy ${ENV_NAME}
+ """)
+ }
}
}
- } catch (e) {
- common.printMsg("Job is failed", "purple")
- shared.download_logs("deploy_drivetrain_${ENV_NAME}")
- throw e
- } finally {
- // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
- // and report appropriate data to TestRail
- // TODO(ddmitriev): add checks for cicd cluster
- if (make_snapshot_stages) {
- if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "true") {
- shared.run_cmd("""\
- dos.py destroy ${ENV_NAME}
- """)
- }
- }
- }
-
- } // dir
- } // node
+ } // dir
+ } // node
+ } // timestamps
} // timeout
diff --git a/jobs/pipelines/swarm-deploy-platform.groovy b/jobs/pipelines/swarm-deploy-platform.groovy
index b8afc35..7377958 100644
--- a/jobs/pipelines/swarm-deploy-platform.groovy
+++ b/jobs/pipelines/swarm-deploy-platform.groovy
@@ -30,67 +30,68 @@
def install_timeout = env.STACK_INSTALL_TIMEOUT.toInteger()
timeout(time: install_timeout + 600, unit: 'SECONDS') {
-
- node ("${PARENT_NODE_NAME}") {
- if (! fileExists("${PARENT_WORKSPACE}")) {
- error "'PARENT_WORKSPACE' contains path to non-existing directory ${PARENT_WORKSPACE} on the node '${PARENT_NODE_NAME}'."
- }
- dir("${PARENT_WORKSPACE}") {
-
- if (! env.STACK_INSTALL) {
- error "'STACK_INSTALL' must contain one or more comma separated stack names for [deploy_openstack] pipeline"
+ timestamps {
+ node ("${PARENT_NODE_NAME}") {
+ if (! fileExists("${PARENT_WORKSPACE}")) {
+                error "'PARENT_WORKSPACE' contains a path to a non-existent directory ${PARENT_WORKSPACE} on the node '${PARENT_NODE_NAME}'."
}
+ dir("${PARENT_WORKSPACE}") {
- if (env.TCP_QA_REFS) {
- stage("Update working dir to patch ${TCP_QA_REFS}") {
- shared.update_working_dir()
- }
- }
-
- try {
- // Install the cluster
- stage("Run Jenkins job on CICD [deploy_openstack:${env.STACK_INSTALL}]") {
- shared.run_job_on_cicd_nodes(env.STACK_INSTALL, install_timeout)
+ if (! env.STACK_INSTALL) {
+                    error "'STACK_INSTALL' must contain one or more comma-separated stack names for [deploy_openstack] pipeline"
}
- for (stack in "${env.STACK_INSTALL}".split(",")) {
- stage("Sanity check the deployed component [${stack}]") {
- shared.sanity_check_component(stack)
-
- // If oslo_config INI file ${ENV_NAME}_salt_deployed.ini exists,
- // then make a copy for the created snapshot to allow the system
- // tests to revert this snapshot along with the metadata from the INI file.
- shared.run_cmd("""\
- if [ -f \$(pwd)/${ENV_NAME}_salt_deployed.ini ]; then
- cp \$(pwd)/${ENV_NAME}_salt_deployed.ini \$(pwd)/${ENV_NAME}_${stack}_deployed.ini
- fi
- """)
- }
- } // for
-
- if (make_snapshot_stages) {
- stage("Make environment snapshots for [${env.STACK_INSTALL}]") {
- shared.devops_snapshot(env.STACK_INSTALL)
+ if (env.TCP_QA_REFS) {
+ stage("Update working dir to patch ${TCP_QA_REFS}") {
+ shared.update_working_dir()
}
}
- } catch (e) {
- common.printMsg("Job is failed", "purple")
- shared.download_logs("deploy_platform_${ENV_NAME}")
- throw e
- } finally {
- // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
- // and report appropriate data to TestRail
- // TODO(ddmitriev): add checks for the installed stacks
- if (make_snapshot_stages) {
- if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "true") {
- shared.run_cmd("""\
- dos.py destroy ${ENV_NAME}
- """)
+ try {
+ // Install the cluster
+ stage("Run Jenkins job on CICD [deploy_openstack:${env.STACK_INSTALL}]") {
+ shared.run_job_on_cicd_nodes(env.STACK_INSTALL, install_timeout)
+ }
+
+ for (stack in "${env.STACK_INSTALL}".split(",")) {
+ stage("Sanity check the deployed component [${stack}]") {
+ shared.sanity_check_component(stack)
+
+ // If oslo_config INI file ${ENV_NAME}_salt_deployed.ini exists,
+ // then make a copy for the created snapshot to allow the system
+ // tests to revert this snapshot along with the metadata from the INI file.
+ shared.run_cmd("""\
+ if [ -f \$(pwd)/${ENV_NAME}_salt_deployed.ini ]; then
+ cp \$(pwd)/${ENV_NAME}_salt_deployed.ini \$(pwd)/${ENV_NAME}_${stack}_deployed.ini
+ fi
+ """)
+ }
+ } // for
+
+ if (make_snapshot_stages) {
+ stage("Make environment snapshots for [${env.STACK_INSTALL}]") {
+ shared.devops_snapshot(env.STACK_INSTALL)
+ }
+ }
+
+ } catch (e) {
+                    common.printMsg("Job failed", "purple")
+ shared.download_logs("deploy_platform_${ENV_NAME}")
+ throw e
+ } finally {
+ // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
+ // and report appropriate data to TestRail
+ // TODO(ddmitriev): add checks for the installed stacks
+ if (make_snapshot_stages) {
+ if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "true") {
+ shared.run_cmd("""\
+ dos.py destroy ${ENV_NAME}
+ """)
+ }
}
}
- }
- } // dir
- } // node
+ } // dir
+ } // node
+    } // timestamps
} // timeout
\ No newline at end of file
diff --git a/jobs/pipelines/swarm-run-pytest.groovy b/jobs/pipelines/swarm-run-pytest.groovy
index f7ad7c3..8ef7122 100644
--- a/jobs/pipelines/swarm-run-pytest.groovy
+++ b/jobs/pipelines/swarm-run-pytest.groovy
@@ -34,85 +34,87 @@
currentBuild.description = "${PARENT_NODE_NAME}:${ENV_NAME}"
timeout(time: 10, unit: 'HOURS') {
-node ("${PARENT_NODE_NAME}") {
- if (! fileExists("${PARENT_WORKSPACE}")) {
- error "'PARENT_WORKSPACE' contains path to non-existing directory ${PARENT_WORKSPACE} on the node '${PARENT_NODE_NAME}'."
- }
- dir("${PARENT_WORKSPACE}") {
- try {
-
- if (env.TCP_QA_REFS) {
- stage("Update working dir to patch ${TCP_QA_REFS}") {
- shared.update_working_dir()
- }
+ timestamps {
+ node ("${PARENT_NODE_NAME}") {
+ if (! fileExists("${PARENT_WORKSPACE}")) {
+                error "'PARENT_WORKSPACE' contains a path to a non-existent directory ${PARENT_WORKSPACE} on the node '${PARENT_NODE_NAME}'."
}
+ dir("${PARENT_WORKSPACE}") {
+ try {
- stage("Run tests") {
- def steps = shared.get_steps_list(PASSED_STEPS)
- def sources = """\
- cd ${PARENT_WORKSPACE}
- export ENV_NAME=${ENV_NAME}
- . ./tcp_tests/utils/env_salt"""
- if (steps.contains('k8s')) {
- sources += """
- . ./tcp_tests/utils/env_k8s\n"""
- }
- if (steps.contains('openstack')) {
- sources += """
- export TEMPEST_IMAGE_VERSION=${TEMPEST_IMAGE_VERSION}
- export TEMPEST_TARGET=${TEMPEST_TARGET}
- # TODO: . ./tcp_tests/utils/env_keystonercv3\n"""
- }
- def installed = steps.collect {"""\
- export ${it}_installed=true"""}.join("\n")
-
- shared.run_sh(sources + installed + """
- export TESTS_CONFIGS=${ENV_NAME}_salt_deployed.ini
- export ENV_MANAGER=$ENV_MANAGER # use 'hardware' fixture to manage fuel-devops environment
- export salt_master_host=\$SALT_MASTER_IP # skip salt_deployed fixture
- export salt_master_port=6969
- export SALT_USER=\$SALTAPI_USER
- export SALT_PASSWORD=\$SALTAPI_PASS
-
- export LOG_NAME=swarm_run_pytest.log
- py.test --junit-xml=nosetests.xml ${RUN_TEST_OPTS}
-
- """)
-
- def snapshot_name = "test_completed"
- shared.download_logs("test_completed_${ENV_NAME}")
-
- if (make_snapshot_stages) {
- shared.run_cmd("""\
- dos.py suspend ${ENV_NAME}
- dos.py snapshot ${ENV_NAME} ${snapshot_name}
- """)
- if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "false") {
- shared.run_cmd("""\
- dos.py resume ${ENV_NAME}
- """)
+ if (env.TCP_QA_REFS) {
+ stage("Update working dir to patch ${TCP_QA_REFS}") {
+ shared.update_working_dir()
+ }
}
- shared.devops_snapshot_info(snapshot_name)
- }
- }
- } catch (e) {
- common.printMsg("Job is failed", "purple")
- // Downloading logs usually not needed here
- // because tests should use the decorator @pytest.mark.grab_versions
- // shared.download_logs("test_failed_${ENV_NAME}")
- throw e
- } finally {
- // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
- // and report appropriate data to TestRail
- if (make_snapshot_stages) {
- if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "true") {
- shared.run_cmd("""\
- dos.py destroy ${ENV_NAME}
- """)
+ stage("Run tests") {
+ def steps = shared.get_steps_list(PASSED_STEPS)
+ def sources = """\
+ cd ${PARENT_WORKSPACE}
+ export ENV_NAME=${ENV_NAME}
+ . ./tcp_tests/utils/env_salt"""
+ if (steps.contains('k8s')) {
+ sources += """
+ . ./tcp_tests/utils/env_k8s\n"""
+ }
+ if (steps.contains('openstack')) {
+ sources += """
+ export TEMPEST_IMAGE_VERSION=${TEMPEST_IMAGE_VERSION}
+ export TEMPEST_TARGET=${TEMPEST_TARGET}
+ # TODO: . ./tcp_tests/utils/env_keystonercv3\n"""
+ }
+ def installed = steps.collect {"""\
+ export ${it}_installed=true"""}.join("\n")
+
+ shared.run_sh(sources + installed + """
+ export TESTS_CONFIGS=${ENV_NAME}_salt_deployed.ini
+ export ENV_MANAGER=$ENV_MANAGER # use 'hardware' fixture to manage fuel-devops environment
+ export salt_master_host=\$SALT_MASTER_IP # skip salt_deployed fixture
+ export salt_master_port=6969
+ export SALT_USER=\$SALTAPI_USER
+ export SALT_PASSWORD=\$SALTAPI_PASS
+
+ export LOG_NAME=swarm_run_pytest.log
+ py.test --junit-xml=nosetests.xml ${RUN_TEST_OPTS}
+
+ """)
+
+ def snapshot_name = "test_completed"
+ shared.download_logs("test_completed_${ENV_NAME}")
+
+ if (make_snapshot_stages) {
+ shared.run_cmd("""\
+ dos.py suspend ${ENV_NAME}
+ dos.py snapshot ${ENV_NAME} ${snapshot_name}
+ """)
+ if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "false") {
+ shared.run_cmd("""\
+ dos.py resume ${ENV_NAME}
+ """)
+ }
+ shared.devops_snapshot_info(snapshot_name)
+ }
+ }
+
+ } catch (e) {
+                    common.printMsg("Job failed", "purple")
+                    // Downloading logs is usually not needed here
+ // because tests should use the decorator @pytest.mark.grab_versions
+ // shared.download_logs("test_failed_${ENV_NAME}")
+ throw e
+ } finally {
+ // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
+ // and report appropriate data to TestRail
+ if (make_snapshot_stages) {
+ if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "true") {
+ shared.run_cmd("""\
+ dos.py destroy ${ENV_NAME}
+ """)
+ }
+ }
}
}
- }
- }
-}
+        } // node
+ } // timestamps
} // timeout