/**
*
* Build mirror/aptly/apt01 image pipeline
*
* PACKER_URL = 'https://releases.hashicorp.com/packer/1.2.4/packer_1.2.4_linux_amd64.zip'
* PACKER_ZIP_MD5 = '997274e80ae41102eecf9df2e5b67860'
* PACKER_ARGS = '-debug'
* BUILD_ONLY = 'openstack|qemu'
* CLEANUP_AFTER = True|false. Wipe the workspace AFTER the build.
* IMAGE_NAME = Name of the result image.
* TIMESTAMP_INAME = True|false. If true, the image will be uploaded to the backend as IMAGE_NAME + timestamp.
*
* OPENSTACK_OPENRC_YAML: YAML of key:value variables required for the openstack venv
* example:
* ---
* OS_TENANT_ID: 'xxxx'
* OS_TENANT_NAME: "mcp-oscore-ci"
*
* EXTRA_VARIABLES_YAML - YAML of key:value variables required for template.json
* example:
* ---
* IMAGE_NAME: 'ubuntu-16.04-proposed'
*
* CREDENTIALS_ID = Global Jenkins credentials used to clone DEFAULT_GIT_URL
* DEFAULT_GIT_URL = Git URL of the repository with the packer templates
* REFSPEC = Git refspec to check out (defaults to HEAD)
*
* BUILD_OS = Name of the directory that contains template.json for the target OS
* SKIP_UPLOAD = True|false
*
* OS_VERSION = OpenStack version
* OS_CREDENTIALS_ID = ID of credentials for OpenStack API stored in Jenkins.
* OS_URL = Keystone auth endpoint of the OpenStack.
* OS_PROJECT = Name of the OpenStack project (tenant).
*
* PUBLISH_BACKEND = Comma-separated list of publish targets: local,glance,http
* UPLOAD_URL = URL of a WebDAV server used to upload the image after it is created (only used when PUBLISH_BACKEND includes 'http')
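*
* Optional EXTRA_VARIABLES_YAML keys consumed by the config-drive step below:
* CLUSTER_MODEL / CLUSTER_MODEL_REF - cluster model repo URL and refspec
* CLUSTER_NAME, MCP_VERSION - used to patch the mcp-offline model
* RECLASS_SYSTEM_URL / RECLASS_SYSTEM_REF - override of classes/system inside the model
* GIT_SALT_FORMULAS_SCRIPTS / SCRIPTS_REF - salt scripts repo URL and refspec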
*/
// Load shared libs
def common = new com.mirantis.mk.Common()
def openstack = new com.mirantis.mk.Openstack()
def gerrit = new com.mirantis.mk.Gerrit()
def date = new Date()
def dateTime = date.format("ddMMyyyy-HHmmss")
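// e.g. 24052019-153045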
//
def job_env = env.getEnvironment().findAll { k, v -> v }
gerritCredentials = env.CREDENTIALS_ID ?: 'gerrit'
/////
extra_vars = readYaml text: job_env.get('EXTRA_VARIABLES_YAML','').trim()
// FIXME: os_openrc should be refactored.
os_openrc = readYaml text: job_env.get('OPENSTACK_OPENRC_YAML','').trim()
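// keys read from OPENSTACK_OPENRC_YAML later on: OS_AUTH_URL, OS_CREDENTIALS_ID,
// OS_PROJECT_NAME and, optionally, CLOUD_REGION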
if (job_env.get('TIMESTAMP_INAME', false).toBoolean()) {
imageName = job_env.IMAGE_NAME + "-" + dateTime
} else {
imageName = job_env.IMAGE_NAME
}
// Overwrite IMAGE_NAME in template.json with expected
extra_vars['IMAGE_NAME'] = imageName
// Normalize string parameters coming in from Jenkins
job_env['CLEANUP_AFTER'] = job_env.CLEANUP_AFTER.toBoolean()
job_env['SKIP_UPLOAD'] = job_env.SKIP_UPLOAD.toBoolean()
job_env['BUILD_ONLY'] = job_env.BUILD_ONLY.toLowerCase()
job_env['PUBLISH_BACKEND'] = job_env.PUBLISH_BACKEND.toLowerCase()
publishBackends = job_env['PUBLISH_BACKEND'].split(',')
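// e.g. PUBLISH_BACKEND='local,glance' => ['local', 'glance']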
//
defaultGitRef = job_env.get('REFSPEC', 'HEAD')
defaultGitUrl = job_env.get('DEFAULT_GIT_URL', null)
slaveNode = (env.SLAVE_NODE ?: 'virtual')
// Self-check
for (String req_v : ['BUILD_OS', 'BUILD_ONLY', 'IMAGE_NAME']) {
if (!job_env.get(req_v, false)) {
throw new Exception("${req_v} not set!")
}
}
def MapToList(input_map) {
/**
* Convert a map into a bash-like list of KEY=value strings
*/
def data_list = []
for (item in input_map) {
data_list.add("${item.key}=${item.value}")
}
return data_list
}
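// e.g. MapToList(['IMAGE_NAME': 'foo']) => ['IMAGE_NAME=foo']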
// Building the offline image needs ~100G of free volume space on the node.
// The code below picks the first available slave with enough free space.
def getHugeSlave() {
def slaves = nodesByLabel("virtual")
def targetSlave = ''
def outputMsg = ''
for (slave in slaves) {
node(slave) {
// free space (in GB) on the root filesystem, numeric part only
outputMsg = sh(returnStdout: true, script: '''#!/bin/bash
df --block-size=G / | awk '{print $4}' | sed -n 2p | sed 's/.$//'
''')
}
if (outputMsg.trim().toInteger() >= 120) {
targetSlave = slave
break
}
}
return targetSlave
}
nodeForBuild = getHugeSlave()
timeout(time: 6, unit: 'HOURS') {
node(nodeForBuild) {
def workspace = common.getWorkspace()
def openstackEnv = "${workspace}/venv"
def rcFile = ''
creds = common.getPasswordCredentials(job_env.CREDENTIALS_ID)
stage("checkout") {
if (defaultGitRef && defaultGitUrl) {
checkouted = gerrit.gerritPatchsetCheckout(defaultGitUrl, defaultGitRef, "HEAD", job_env.CREDENTIALS_ID)
} else {
throw new Exception("Cannot checkout gerrit patchset: DEFAULT_GIT_URL is null")
}
}
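// an openrc file is needed both for the openstack builder and for publishing to glance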
if (job_env.BUILD_ONLY == 'openstack' || 'glance' in publishBackends) {
rcFile = openstack.createOpenstackEnv(workspace, os_openrc['OS_AUTH_URL'], os_openrc['OS_CREDENTIALS_ID'], os_openrc['OS_PROJECT_NAME'], "default", "", "default", "3", "")
}
try {
def _artifact_dir = "${workspace}/artifacts"
def _artifact_list = []
def ImagesCacheFolder = "${workspace}/../${env.JOB_NAME}_cache/"
stage("Prepare env") {
if (!fileExists("${workspace}/tmp")) {
sh "mkdir -p ${workspace}/tmp"
}
if (!fileExists(ImagesCacheFolder)) {
sh "mkdir -p ${ImagesCacheFolder}"
}
if (!fileExists(_artifact_dir)) {
sh "mkdir -p ${_artifact_dir}"
}
if (!fileExists("bin")) {
common.infoMsg("Downloading packer")
sh "mkdir -p bin"
dir("bin") {
def zipname = sh(script: "basename ${job_env.PACKER_URL}", returnStdout: true).trim()
sh(script: "wget --quiet ${job_env.PACKER_URL}", returnStdout: true)
sh "echo \"${job_env.PACKER_ZIP_MD5} ${zipname}\" >> md5sum"
sh(script: "md5sum -c --status md5sum", returnStdout: true)
sh "unzip ${zipname}"
}
}
if (fileExists("${job_env.BUILD_OS}/images")) {
// clean images dir before building
sh(script: "rm -rf ${job_env.BUILD_OS}/images/*", returnStatus: true)
}
}
stage("Build Instance") {
def _packer_args = job_env.get('PACKER_ARGS', '')
def _packer_log = "${workspace}/packer.log"
// remove the old log so the status grep below only sees this run
if (fileExists(_packer_log)) {
sh "rm -v ${_packer_log}"
}
dir("${workspace}/${job_env.BUILD_OS}/") {
if (fileExists("config-drive")) {
def model = extra_vars.get('CLUSTER_MODEL', '')
if (model != "") {
checkout([
$class : 'GitSCM',
branches : [[name: 'FETCH_HEAD']],
extensions : [
[$class: 'RelativeTargetDirectory', relativeTargetDir: 'config-drive/model'],
[$class: 'SubmoduleOption', disableSubmodules: false, parentCredentials: true, recursiveSubmodules: true, trackingSubmodules: false],
],
userRemoteConfigs : [[url: model, refspec: extra_vars.get('CLUSTER_MODEL_REF', 'master'), credentialsId: gerritCredentials]]
])
def clusterName = extra_vars.get('CLUSTER_NAME', '')
def mcpVersion = extra_vars.get('MCP_VERSION', '')
if (clusterName == 'mcp-offline' && mcpVersion != '') {
def filePath = 'classes/cluster/mcp-offline/infra/apt01_dummy.yml'
sh "sed -i 's/mcp_version: .*/mcp_version: ${mcpVersion}/' config-drive/model/${filePath}"
// speed up build by using internal mirrors in the same region
if (os_openrc.get('CLOUD_REGION', 'US').matches("[Ee][Uu]")) {
sh "sed -i 's/debmirror_mirrors_host: .*/debmirror_mirrors_host: mirror-eu.mcp.mirantis.net/' config-drive/model/${filePath}"
}
}
def reclass_url = extra_vars.get('RECLASS_SYSTEM_URL', '')
def reclass_ref = extra_vars.get('RECLASS_SYSTEM_REF', '')
if (reclass_url && reclass_ref) {
if (fileExists('config-drive/model/classes/system')) {
dir('config-drive/model/classes/system') {
checkout([
$class : 'GitSCM',
branches : [[name: 'FETCH_HEAD']],
userRemoteConfigs : [[url: reclass_url, refspec: reclass_ref, credentialsId: gerritCredentials]]
])
}
}
}
}
def scripts = extra_vars.get('GIT_SALT_FORMULAS_SCRIPTS', '')
if (scripts != "") {
checkout([
$class : 'GitSCM',
branches : [[name: 'FETCH_HEAD']],
extensions : [[$class: 'RelativeTargetDirectory', relativeTargetDir: 'config-drive/salt_scripts']],
userRemoteConfigs: [[url: scripts, refspec: extra_vars.get('SCRIPTS_REF', 'master'), credentialsId: gerritCredentials]]
])
}
common.infoMsg("Creating cloud-config drive")
def isoFile = "config-drive/cloudata.iso"
if (fileExists(isoFile)) {
sh "rm -v ${isoFile}"
}
// This is left for backward-compatibility
if (fileExists("config-drive/user-data.yaml")) {
sh "mv config-drive/user-data.yaml config-drive/user-data"
if (!fileExists("config-drive/meta-data")) {
sh "echo 'hostname: ubuntu' > config-drive/meta-data"
}
}
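// '-V cidata' sets the volume label cloud-init's NoCloud datasource looks for on a config drive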
sh "mkisofs -o ${isoFile} -V cidata -r -J --quiet config-drive"
archiveArtifacts artifacts: "${isoFile}"
}
}
if (job_env.BUILD_ONLY == "openstack") {
dir("${workspace}/${job_env.BUILD_OS}/") {
extra_vars_list = MapToList(extra_vars)
withEnv(["PATH=${env.PATH}:${workspace}/bin",
"PACKER_LOG_PATH=${_packer_log}",
"PACKER_LOG=1",
"TMPDIR=${workspace}/tmp",
"OS_USERNAME=${creds.username.toString()}",
"OS_PASSWORD=${creds.password.toString()}"] + extra_vars_list) {
common.infoMsg("Run build with:")
sh(script: 'printenv|sort')
common.infoMsg("Free disk space by 'df -h' command:")
sh(script: 'df -h')
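// '-only' restricts the run to the openstack builder, '-parallel=false' forces sequential builds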
sh(script: "set -xe; packer build -only='openstack' ${_packer_args} -parallel=false template.json")
_os_private = "${workspace}/${job_env.BUILD_OS}/os_${job_env.BUILD_OS}.pem"
if (fileExists(_os_private)) {
common.infoMsg("Packer private key:")
sh "cat ${_os_private}"
}
def packerStatus = sh(script: "grep \"Some builds didn't complete successfully and had errors\" ${_packer_log}", returnStatus: true)
// grep exits 0 when it finds the error message, i.e. when the build failed
if (packerStatus != 0) {
common.infoMsg("Openstack instance build complete")
} else {
throw new Exception("Openstack Packer build failed")
}
common.retry(3, 5) {
common.infoMsg("Attempt download openstack image..")
openstack.runOpenstackCommand("openstack image save --file ${_artifact_dir}/${imageName}.qcow2 ${imageName}", rcFile, openstackEnv)
}
}
}
} else if (job_env.BUILD_ONLY == 'qemu') {
dir("${workspace}/${job_env.BUILD_OS}/") {
extra_vars_list = MapToList(extra_vars)
withEnv(["PATH=${env.PATH}:${workspace}/bin",
"PACKER_LOG_PATH=${_packer_log}",
"PACKER_LOG=1",
"TMPDIR=${workspace}/tmp",
"PACKER_IMAGES_CACHE=${ImagesCacheFolder}"] + extra_vars_list) {
common.infoMsg("Run build with:")
sh(script: 'printenv|sort')
common.infoMsg("Free disk space by 'df -h' command:")
sh(script: 'df -h')
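// '-on-error=ask' makes packer pause and ask what to do when a build step fails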
sh(script: "set -xe ; packer build -on-error=ask -only='qemu' ${_packer_args} -parallel=false template.json".toString())
def packerStatus = sh(script: "grep \"Some builds didn't complete successfully and had errors\" ${_packer_log}", returnStatus: true)
// grep exits 0 when it finds the error message, i.e. when the build failed
if (packerStatus != 0) {
common.infoMsg("qemu instance build completed successfully")
} else {
throw new Exception("qemu instance build failed")
}
// collect artifacts
// TODO: make it possible to process multiple artifacts in one run.
dir('images/') {
sh(script: 'find .')
def _files = findFiles(glob: "*qemu*/*${imageName}*.qcow2")
if (_files.size() > 1) {
common.warningMsg("Multiply artifacts detected!Only first one will be processed!")
} else if (_files.size() == 0) {
throw new Exception("No artifacts detected!BUILD_ONLY=${env.BUILD_ONLY} failed!")
}
for (x in _files) {
// log the resolved artifact path for debugging
sh(script: "set -x ; readlink -f ${x}")
sh(script: "mv -v ${x} ${_artifact_dir}/${imageName}.qcow2")
// Save filename to list
_artifact_list.add("${imageName}.qcow2")
}
}
}
}
} else {
throw new Exception("Unexpected BUILD_ONLY=${env.BUILD_ONLY} target!")
}
}
stage("Publish artifacts") {
dir(_artifact_dir) {
def published = false
common.infoMsg("Processing md5 for artifacts")
for (String x : _artifact_list) {
_md5 = sh(script: "md5sum ${x} > ${x}.md5; cat ${x}.md5", returnStdout: true).trim()
_size = sh(script: "ls -alh ${x}", returnStdout: true).trim()
common.infoMsg("Artifact file: ${_size}\n${_md5}")
}
if ('local' in publishBackends) {
common.infoMsg("Uploading to: local")
common.infoMsg("For local publish target - nothing to do, all files in: ${_artifact_dir}")
if (job_env.get('CLEANUP_AFTER', false)) {
common.warningMsg("You are trying to use 'local' publish method, along with enabled CLEANUP_AFTER! ")
common.warningMsg("Disabling CLEANUP_AFTER option, to save you'r data ;) ")
job_env.CLEANUP_AFTER = false
}
published = true
}
if ('glance' in publishBackends) {
common.infoMsg("Uploading to: glance-openstack")
if (fileExists("${workspace}/venv")) {
common.infoMsg("cleaning virtualenv at:${workspace}/venv")
sh(script: "rm -rf ${workspace}/venv", returnStatus: true)
}
openstack.setupOpenstackVirtualenv(openstackEnv)
for (x in findFiles(glob: "*.*")) {
if (x.getName().endsWith('.qcow2')) {
_md5sum = sh(script: "cat ${x}.md5", returnStdout: true).trim().split()[0]
_property = "--property data=${dateTime} --property md5sum=${_md5sum} --property build='${env.BUILD_URL}' --container-format bare --disk-format qcow2"
_cmdline = String.format("glance image-create %s --name '%s' --file %s", _property, imageName, x)
openstack.runOpenstackCommand(_cmdline, rcFile, openstackEnv)
}
}
currentBuild.description = "${imageName}.qcow2 uploaded tenant: ${os_openrc['OS_PROJECT_NAME']}"
published = true
}
if ('http' in publishBackends) {
for (u_file in findFiles(glob: '*.*')) {
common.infoMsg("Uploading image ${u_file}")
def uploadImageStatus = ''
common.retry(3, 5) {
uploadImageStatus = sh(script: "curl -f -T ${u_file} ${job_env.UPLOAD_URL}", returnStatus: true)
if (uploadImageStatus != 0) {
throw new Exception("Uploading file: ${u_file} failed!")
}
}
// Fixme for correct path ?
currentBuild.description = "<a href='http://images.mcp.mirantis.net/${imageName}.qcow2'>${imageName}.qcow2</a>"
}
published = true
}
if (!published) {
throw new Exception("Unsupported publish backend: ${publishBackends}")
}
}
}
} catch (Throwable e) {
// If there was an error or exception thrown, the build failed
currentBuild.result = "FAILURE"
throw e
} finally {
if (job_env.get('CLEANUP_AFTER', false)) {
dir(workspace) {
sh "find . -mindepth 1 -delete || true"
}
if (job_env.BUILD_ONLY == 'openstack') {
common.warningMsg("openstack Env cleanup not implemented yet!")
}
} else {
common.warningMsg("Env has not been cleaned!Please cleanup it manualy!")
}
}
}
}