Merge "Add cvp-func job code"
diff --git a/build-mirror-image.groovy b/build-mirror-image.groovy
index 822d398..0aff603 100644
--- a/build-mirror-image.groovy
+++ b/build-mirror-image.groovy
@@ -44,28 +44,12 @@
def uploadImageStatus = ""
def uploadMd5Status = ""
-def retry(int times = 5, int delay = 0, Closure body) {
- int retries = 0
- def exceptions = []
- while(retries++ < times) {
- try {
- return body.call()
- } catch(e) {
- sleep(delay)
- }
- }
- currentBuild.result = "FAILURE"
- throw new Exception("Failed after $times retries")
-}
-
timeout(time: 12, unit: 'HOURS') {
node("python&&disk-xl") {
try {
def workspace = common.getWorkspace()
openstackEnv = String.format("%s/venv", workspace)
venvPepper = String.format("%s/venvPepper", workspace)
- rcFile = openstack.createOpenstackEnv(openstackEnv, OS_URL, OS_CREDENTIALS_ID, OS_PROJECT, "default", "", "default", "2", "")
- def openstackVersion = OS_VERSION
VM_IP_DELAY = VM_IP_DELAY as Integer
VM_IP_RETRIES = VM_IP_RETRIES as Integer
@@ -79,10 +63,11 @@
}
sh "wget https://raw.githubusercontent.com/Mirantis/mcp-common-scripts/${SCRIPTS_REF}/mirror-image/salt-bootstrap.sh"
- openstack.setupOpenstackVirtualenv(openstackEnv, openstackVersion)
+ openstack.setupOpenstackVirtualenv(openstackEnv, OS_VERSION)
}
stage("Spawn Instance"){
+ rcFile = openstack.createOpenstackEnv(OS_URL, OS_CREDENTIALS_ID, OS_PROJECT, "default", "", "default", "2", "")
privateKey = openstack.runOpenstackCommand("openstack keypair create mcp-offline-keypair-${dateTime}", rcFile, openstackEnv)
common.infoMsg(privateKey)
@@ -94,21 +79,26 @@
sh "envsubst < salt-bootstrap.sh > salt-bootstrap.sh.temp;mv salt-bootstrap.sh.temp salt-bootstrap.sh; cat salt-bootstrap.sh"
}
- openstackServer = openstack.runOpenstackCommand("openstack server create --key-name mcp-offline-keypair-${dateTime} --availability-zone ${VM_AVAILABILITY_ZONE} --image ${VM_IMAGE} --flavor ${VM_FLAVOR} --nic net-id=${VM_NETWORK_ID},v4-fixed-ip=${VM_IP} --user-data salt-bootstrap.sh mcp-offline-mirror-${dateTime}", rcFile, openstackEnv)
+ if(VM_IP != ""){
+ openstackServer = openstack.runOpenstackCommand("openstack server create --key-name mcp-offline-keypair-${dateTime} --availability-zone ${VM_AVAILABILITY_ZONE} --image ${VM_IMAGE} --flavor ${VM_FLAVOR} --nic net-id=${VM_NETWORK_ID},v4-fixed-ip=${VM_IP} --user-data salt-bootstrap.sh mcp-offline-mirror-${dateTime}", rcFile, openstackEnv)
+ }else{
+ openstackServer = openstack.runOpenstackCommand("openstack server create --key-name mcp-offline-keypair-${dateTime} --availability-zone ${VM_AVAILABILITY_ZONE} --image ${VM_IMAGE} --flavor ${VM_FLAVOR} --nic net-id=${VM_NETWORK_ID} --user-data salt-bootstrap.sh mcp-offline-mirror-${dateTime}", rcFile, openstackEnv)
+ }
sleep(60)
- retry(VM_IP_RETRIES, VM_IP_DELAY){
+ common.retry(VM_IP_RETRIES, VM_IP_DELAY){
openstack.runOpenstackCommand("openstack ip floating add ${floatingIP} mcp-offline-mirror-${dateTime}", rcFile, openstackEnv)
}
sleep(500)
- retry(VM_CONNECT_RETRIES, VM_CONNECT_DELAY){
+ common.retry(VM_CONNECT_RETRIES, VM_CONNECT_DELAY){
sh "scp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -i id_rsa root@${floatingIP}:/srv/initComplete ./"
}
python.setupPepperVirtualenv(venvPepper, "http://${floatingIP}:6969", SALT_MASTER_CREDENTIALS)
}
+
stage("Prepare instance"){
salt.runSaltProcessStep(venvPepper, '*apt*', 'saltutil.refresh_pillar', [], null, true)
salt.runSaltProcessStep(venvPepper, '*apt*', 'saltutil.sync_all', [], null, true)
@@ -130,14 +120,8 @@
stage("Create Aptly"){
common.infoMsg("Creating Aptly")
salt.enforceState(venvPepper, '*apt*', ['aptly'], true, false, null, false, -1, 2)
- //TODO: Do it new way
- salt.cmdRun(venvPepper, '*apt*', "aptly_mirror_update.sh -s -v", true, null, true, ["runas=aptly"])
- salt.cmdRun(venvPepper, '*apt*', "nohup aptly api serve --no-lock > /dev/null 2>&1 </dev/null &", true, null, true, ["runas=aptly"])
- salt.cmdRun(venvPepper, '*apt*', "aptly-publisher --timeout=1200 publish -v -c /etc/aptly-publisher.yaml --architectures amd64 --url http://127.0.0.1:8080 --recreate --force-overwrite", true, null, true, ["runas=aptly"])
- salt.cmdRun(venvPepper, '*apt*', "aptly db cleanup", true, null, true, ["runas=aptly"])
- //NEW way
- //salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.script', ['salt://aptly/files/aptly_mirror_update.sh', "args=-sv", "runas=aptly"], null, true)
- //salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.script', ['salt://aptly/files/aptly_publish_update.sh', "args=-acrfv", "runas=aptly"], null, true)
+ salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.script', ['salt://aptly/files/aptly_mirror_update.sh', "args=-sv", "runas=aptly"], null, true)
+ salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.script', ['salt://aptly/files/aptly_publish_update.sh', "args=-acrfv", "runas=aptly"], null, true)
salt.cmdRun(venvPepper, '*apt*', "wget https://raw.githubusercontent.com/Mirantis/mcp-common-scripts/${SCRIPTS_REF}/mirror-image/aptly/aptly-update.sh -O /srv/scripts/aptly-update.sh")
salt.cmdRun(venvPepper, '*apt*', "chmod +x /srv/scripts/aptly-update.sh")
}
@@ -173,36 +157,37 @@
salt.cmdRun(venvPepper, '*apt*', "rm -rf /var/lib/cloud/sem/* /var/lib/cloud/instance /var/lib/cloud/instances/*")
salt.cmdRun(venvPepper, '*apt*', "cloud-init init")
- retry(3, 5){
+ common.retry(3, 5){
openstack.runOpenstackCommand("openstack server stop mcp-offline-mirror-${dateTime}", rcFile, openstackEnv)
}
- retry(6, 30){
+ common.retry(6, 30){
serverStatus = openstack.runOpenstackCommand("openstack server show --format value -c status mcp-offline-mirror-${dateTime}", rcFile, openstackEnv)
if(serverStatus != "SHUTOFF"){
throw new ResourceException("Instance is not ready for image create.")
}
}
- retry(3, 5){
+ common.retry(3, 5){
openstack.runOpenstackCommand("openstack server image create --name ${IMAGE_NAME}-${dateTime} --wait mcp-offline-mirror-${dateTime}", rcFile, openstackEnv)
}
}
stage("Publish image"){
common.infoMsg("Saving image ${IMAGE_NAME}-${dateTime}")
- retry(3, 5){
+ common.retry(3, 5){
openstack.runOpenstackCommand("openstack image save --file ${IMAGE_NAME}-${dateTime}.qcow2 ${IMAGE_NAME}-${dateTime}", rcFile, openstackEnv)
}
sh "md5sum ${IMAGE_NAME}-${dateTime}.qcow2 > ${IMAGE_NAME}-${dateTime}.qcow2.md5"
common.infoMsg("Uploading image ${IMAGE_NAME}-${dateTime}")
- retry(3, 5){
+ common.retry(3, 5){
uploadImageStatus = sh(script: "curl -f -T ${IMAGE_NAME}-${dateTime}.qcow2 ${UPLOAD_URL}", returnStatus: true)
if(uploadImageStatus!=0){
throw new Exception("Image upload failed")
}
}
- retry(3, 5){
+
+ common.retry(3, 5){
uploadMd5Status = sh(script: "curl -f -T ${IMAGE_NAME}-${dateTime}.qcow2.md5 ${UPLOAD_URL}", returnStatus: true)
if(uploadMd5Status != 0){
throw new Exception("MD5 sum upload failed")
diff --git a/ceph-remove-osd.groovy b/ceph-remove-osd.groovy
index 71946b7..169bbd0 100644
--- a/ceph-remove-osd.groovy
+++ b/ceph-remove-osd.groovy
@@ -22,21 +22,45 @@
def flags = CLUSTER_FLAGS.tokenize(',')
def osds = OSD.tokenize(',')
-def removePartition(master, target, partition_uuid) {
- def partition = ""
- try {
- // partition = /dev/sdi2
- partition = runCephCommand(master, target, "blkid | grep ${partition_uuid} ")['return'][0].values()[0].split("(?<=[0-9])")[0]
- } catch (Exception e) {
- common.warningMsg(e)
- }
+def removePartition(master, target, partition_uuid, type='', id=-1) {
+ def partition = ""
+ if (type == 'lockbox') {
+ try {
+ // umount - partition = /dev/sdi2
+ partition = runCephCommand(master, target, "lsblk -rp | grep -v mapper | grep ${partition_uuid} ")['return'][0].values()[0].split()[0]
+ runCephCommand(master, target, "umount ${partition}")
+ } catch (Exception e) {
+ common.warningMsg(e)
+ }
+ } else if (type == 'data') {
+ try {
+ // umount - partition = /dev/sdi2
+ partition = runCephCommand(master, target, "df | grep /var/lib/ceph/osd/ceph-${id}")['return'][0].values()[0].split()[0]
+ runCephCommand(master, target, "umount ${partition}")
+ } catch (Exception e) {
+ common.warningMsg(e)
+ }
+ try {
+ // partition = /dev/sdi2
+ partition = runCephCommand(master, target, "blkid | grep ${partition_uuid} ")['return'][0].values()[0].split(":")[0]
+ } catch (Exception e) {
+ common.warningMsg(e)
+ }
+ } else {
+ try {
+ // partition = /dev/sdi2
+ partition = runCephCommand(master, target, "blkid | grep ${partition_uuid} ")['return'][0].values()[0].split(":")[0]
+ } catch (Exception e) {
+ common.warningMsg(e)
+ }
+ }
if (partition?.trim()) {
// dev = /dev/sdi
def dev = partition.replaceAll('\\d+$', "")
// part_id = 2
- def part_id = partition.substring(partition.lastIndexOf("/")+1).replaceAll("[^0-9]", "")
- runCephCommand(master, target, "parted ${dev} rm ${part_id}")
+ def part_id = partition.substring(partition.lastIndexOf("/")+1).replaceAll("[^0-9]+", "")
+ runCephCommand(master, target, "Ignore | parted ${dev} rm ${part_id}")
}
return
}
@@ -75,10 +99,12 @@
// get list of osd disks of the host
salt.runSaltProcessStep(pepperEnv, HOST, 'saltutil.sync_grains', [], null, true, 5)
- def cephGrain = salt.getGrain(pepperEnv, HOST, 'ceph')['return']
+ def cephGrain = salt.getGrain(pepperEnv, HOST, 'ceph')
+
if(cephGrain['return'].isEmpty()){
throw new Exception("Ceph salt grain cannot be found!")
}
+ common.print(cephGrain)
def ceph_disks = cephGrain['return'][0].values()[0].values()[0]['ceph_disk']
common.prettyPrint(ceph_disks)
@@ -137,8 +163,9 @@
}
for (osd_id in osd_ids) {
-
id = osd_id.replaceAll('osd.', '')
+ /*
+
def dmcrypt = ""
try {
dmcrypt = runCephCommand(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep dmcrypt")['return'][0].values()[0]
@@ -156,6 +183,8 @@
}
}
+ */
+
// remove journal, block_db, block_wal partition `parted /dev/sdj rm 3`
stage('Remove journal / block_db / block_wal partition') {
def partition_uuid = ""
@@ -163,39 +192,73 @@
def block_db_partition_uuid = ""
def block_wal_partition_uuid = ""
try {
- journal_partition_uuid = runCephCommand(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep journal | grep partuuid")
- journal_partition_uuid = journal_partition_uuid.toString().trim().split("\n")[0].substring(journal_partition_uuid.toString().trim().lastIndexOf("/")+1)
+ journal_partition_uuid = runCephCommand(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/journal_uuid")['return'][0].values()[0].split("\n")[0]
} catch (Exception e) {
common.infoMsg(e)
}
try {
- block_db_partition_uuid = runCephCommand(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep 'block.db' | grep partuuid")
- block_db_partition_uuid = block_db_partition_uuid.toString().trim().split("\n")[0].substring(block_db_partition_uuid.toString().trim().lastIndexOf("/")+1)
+ block_db_partition_uuid = runCephCommand(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/block.db_uuid")['return'][0].values()[0].split("\n")[0]
} catch (Exception e) {
common.infoMsg(e)
}
try {
- block_wal_partition_uuid = runCephCommand(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep 'block.wal' | grep partuuid")
- block_wal_partition_uuid = block_wal_partition_uuid.toString().trim().split("\n")[0].substring(block_wal_partition_uuid.toString().trim().lastIndexOf("/")+1)
+ block_wal_partition_uuid = runCephCommand(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/block.wal_uuid")['return'][0].values()[0].split("\n")[0]
} catch (Exception e) {
common.infoMsg(e)
}
- // set partition_uuid = 2c76f144-f412-481e-b150-4046212ca932
+ // remove partition_uuid = 2c76f144-f412-481e-b150-4046212ca932
if (journal_partition_uuid?.trim()) {
- partition_uuid = journal_partition_uuid
- } else if (block_db_partition_uuid?.trim()) {
- partition_uuid = block_db_partition_uuid
+ removePartition(pepperEnv, HOST, journal_partition_uuid)
}
-
- // if disk has journal, block_db or block_wal on different disk, then remove the partition
- if (partition_uuid?.trim()) {
- removePartition(pepperEnv, HOST, partition_uuid)
+ if (block_db_partition_uuid?.trim()) {
+ removePartition(pepperEnv, HOST, block_db_partition_uuid)
}
if (block_wal_partition_uuid?.trim()) {
removePartition(pepperEnv, HOST, block_wal_partition_uuid)
}
+
+ try {
+ runCephCommand(pepperEnv, HOST, "partprobe")
+ } catch (Exception e) {
+ common.warningMsg(e)
+ }
+ }
+
+ // remove data / block / lockbox partition `parted /dev/sdj rm 3`
+ stage('Remove data / block / lockbox partition') {
+ def data_partition_uuid = ""
+ def block_partition_uuid = ""
+ def lockbox_partition_uuid = ""
+ try {
+ data_partition_uuid = runCephCommand(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/fsid")['return'][0].values()[0].split("\n")[0]
+ common.print(data_partition_uuid)
+ } catch (Exception e) {
+ common.infoMsg(e)
+ }
+ try {
+ block_partition_uuid = runCephCommand(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/block_uuid")['return'][0].values()[0].split("\n")[0]
+ } catch (Exception e) {
+ common.infoMsg(e)
+ }
+
+ try {
+ lockbox_partition_uuid = data_partition_uuid
+ } catch (Exception e) {
+ common.infoMsg(e)
+ }
+
+ // remove partition_uuid = 2c76f144-f412-481e-b150-4046212ca932
+ if (block_partition_uuid?.trim()) {
+ removePartition(pepperEnv, HOST, block_partition_uuid)
+ }
+ if (data_partition_uuid?.trim()) {
+ removePartition(pepperEnv, HOST, data_partition_uuid, 'data', id)
+ }
+ if (lockbox_partition_uuid?.trim()) {
+ removePartition(pepperEnv, HOST, lockbox_partition_uuid, 'lockbox')
+ }
}
}
// remove cluster flags
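
For reference, the device and partition-number parsing used by removePartition() above, reduced to plain Groovy. This is an illustrative sketch; the sample path is hypothetical and /dev/sdXN-style names are assumed.

// Sketch only: how removePartition() derives the parted arguments from a partition path.
String partition = '/dev/sdi2'                 // hypothetical sample value

// dev = /dev/sdi  (strip the trailing digits)
String dev = partition.replaceAll('\\d+$', '')

// part_id = 2  (keep only the digits of the basename)
String partId = partition.substring(partition.lastIndexOf('/') + 1).replaceAll('[^0-9]+', '')

assert dev == '/dev/sdi'
assert partId == '2'
// The job then runs the removal exactly as shown above: Ignore | parted /dev/sdi rm 2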
diff --git a/ceph-replace-failed-osd.groovy b/ceph-replace-failed-osd.groovy
index 93b6573..2361098 100644
--- a/ceph-replace-failed-osd.groovy
+++ b/ceph-replace-failed-osd.groovy
@@ -122,9 +122,14 @@
if (DMCRYPT.toBoolean() == true) {
// remove partition tables
- stage('dd part tables') {
+ stage('dd / zap device') {
for (dev in devices) {
- runCephCommand(pepperEnv, HOST, "dd if=/dev/zero of=${dev} bs=512 count=1 conv=notrunc")
+ runCephCommand(pepperEnv, HOST, "dd if=/dev/zero of=${dev} bs=4096k count=1 conv=notrunc")
+ try {
+ runCephCommand(pepperEnv, HOST, "sgdisk --zap-all --clear --mbrtogpt -g -- ${dev}")
+ } catch (Exception e) {
+ common.warningMsg(e)
+ }
}
}
@@ -135,7 +140,7 @@
// dev = /dev/sdi
def dev = partition.replaceAll("[0-9]", "")
// part_id = 2
- def part_id = partition.substring(partition.lastIndexOf("/")+1).replaceAll("[^0-9]", "")
+ def part_id = partition.substring(partition.lastIndexOf("/")+1).replaceAll("[^0-9]+", "")
try {
runCephCommand(pepperEnv, HOST, "Ignore | parted ${dev} rm ${part_id}")
} catch (Exception e) {
diff --git a/cloud-deploy-pipeline.groovy b/cloud-deploy-pipeline.groovy
index fa30579..41c08ab 100644
--- a/cloud-deploy-pipeline.groovy
+++ b/cloud-deploy-pipeline.groovy
@@ -477,18 +477,23 @@
stage('Run k8s conformance e2e tests') {
def image = TEST_K8S_CONFORMANCE_IMAGE
def output_file = image.replaceAll('/', '-') + '.output'
+ def target = 'ctl01*'
+ def conformance_output_file = 'conformance_test.tar'
// run image
- test.runConformanceTests(venvPepper, 'ctl01*', TEST_K8S_API_SERVER, image)
+ test.runConformanceTests(venvPepper, target, TEST_K8S_API_SERVER, image)
// collect output
sh "mkdir -p ${artifacts_dir}"
- file_content = salt.getFileContent(venvPepper, 'ctl01*', '/tmp/' + output_file)
+ file_content = salt.getFileContent(venvPepper, target, '/tmp/' + output_file)
writeFile file: "${artifacts_dir}${output_file}", text: file_content
sh "cat ${artifacts_dir}${output_file}"
// collect artifacts
archiveArtifacts artifacts: "${artifacts_dir}${output_file}"
+
+ // Copy test results
+ test.CopyConformanceResults(venvPepper, target, artifacts_dir, conformance_output_file)
}
}
diff --git a/create-debmirror-package.groovy b/create-debmirror-package.groovy
new file mode 100644
index 0000000..7911d37
--- /dev/null
+++ b/create-debmirror-package.groovy
@@ -0,0 +1,52 @@
+/**
+ *
+ * Create debmirror package pipeline
+ *
+ * Expected parameters:
+ * MIRROR_NAME - Name of the mirror
+ * MIRROR_URL - URL of mirror
+ * ROOT - Root directory of the upstream location
+ * METHOD - rsync or http
+ *  DEBMIRROR_ARGS - args for the debmirror command
+ * UPLOAD_URL - URL to upload TAR to
+ */
+
+// Load shared libs
+def common = new com.mirantis.mk.Common()
+
+timeout(time: 12, unit: 'HOURS') {
+ node("python&&disk-xl") {
+ try {
+ def workspace = common.getWorkspace()
+ if(METHOD == "rsync"){
+ ROOT = ":mirror/${ROOT}"
+ }
+ stage("Create mirror"){
+ def mirrordir="${workspace}/mirror"
+ def debmlog="${workspace}/mirror_${MIRROR_NAME}_log"
+
+ sh "debmirror --verbose --method=${METHOD} --progress --host=${MIRROR_URL} --root=${ROOT} ${DEBMIRROR_ARGS} ${mirrordir}/${MIRROR_NAME} 2>&1 | tee -a ${debmlog}"
+
+ sh "tar -czvf ${workspace}/${MIRROR_NAME}.tar.gz -C ${mirrordir}/${MIRROR_NAME} ."
+ }
+
+ stage("Upload mirror"){
+ common.retry(3, 5, {
+ uploadImageStatus = sh(script: "curl -f -T ${workspace}/${MIRROR_NAME}.tar.gz ${UPLOAD_URL}", returnStatus: true)
+ if(uploadImageStatus!=0){
+ throw new Exception("Image upload failed")
+ }
+ })
+ }
+
+ } catch (Throwable e) {
+ // If there was an error or exception thrown, the build failed
+ currentBuild.result = "FAILURE"
+ throw e
+ }finally {
+ stage("Cleanup"){
+ sh "rm -rf ${workspace}/*"
+ }
+ }
+ }
+}
\ No newline at end of file
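
For reference, a plain-Groovy sketch of how the debmirror invocation in the "Create mirror" stage is assembled from the job parameters, including the ':mirror/' prefix the pipeline prepends to ROOT when METHOD is rsync. All values below are illustrative placeholders, not job defaults.

// Sketch only: command assembly as done in the stage above; values are examples.
String MIRROR_NAME = 'ubuntu'
String MIRROR_URL = 'archive.ubuntu.com'
String METHOD = 'rsync'                        // 'rsync' or 'http'
String ROOT = 'ubuntu'
String DEBMIRROR_ARGS = '--dist=xenial --arch=amd64 --nosource'
String workspace = '/var/lib/jenkins/workspace/create-debmirror-package'

if (METHOD == 'rsync') {
    ROOT = ":mirror/${ROOT}"                   // rsync-specific root prefix, as in the pipeline above
}

String mirrordir = "${workspace}/mirror"
String cmd = "debmirror --verbose --method=${METHOD} --progress " +
             "--host=${MIRROR_URL} --root=${ROOT} ${DEBMIRROR_ARGS} " +
             "${mirrordir}/${MIRROR_NAME}"
println cmd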
diff --git a/deploy-heat-k8s-kqueen-pipeline.groovy b/deploy-heat-k8s-kqueen-pipeline.groovy
new file mode 100644
index 0000000..7071b96
--- /dev/null
+++ b/deploy-heat-k8s-kqueen-pipeline.groovy
@@ -0,0 +1,179 @@
+/**
+ * Helper pipeline for Heat stack deployments from kqueen
+ *
+ * Expected parameters:
+ * STACK_NAME Infrastructure stack name
+ * STACK_TEMPLATE File with stack template
+ *
+ * STACK_TEMPLATE_URL URL to git repo with stack templates
+ * STACK_TEMPLATE_CREDENTIALS Credentials to the templates repo
+ * STACK_TEMPLATE_BRANCH Stack templates repo branch
+ * STACK_COMPUTE_COUNT Number of compute nodes to launch
+ *
+ * HEAT_STACK_ENVIRONMENT Heat stack environmental parameters
+ * HEAT_STACK_ZONE Heat stack availability zone
+ * HEAT_STACK_PUBLIC_NET Heat stack floating IP pool
+ * OPENSTACK_API_URL OpenStack API address
+ * OPENSTACK_API_CREDENTIALS Credentials to the OpenStack API
+ * OPENSTACK_API_PROJECT OpenStack project to connect to
+ * OPENSTACK_API_VERSION Version of the OpenStack API (2/3)
+ *
+ * SALT_MASTER_CREDENTIALS Credentials to the Salt API
+ * SALT_MASTER_URL URL of Salt master
+ */
+
+common = new com.mirantis.mk.Common()
+git = new com.mirantis.mk.Git()
+openstack = new com.mirantis.mk.Openstack()
+orchestrate = new com.mirantis.mk.Orchestrate()
+python = new com.mirantis.mk.Python()
+salt = new com.mirantis.mk.Salt()
+
+// Define global variables
+def venv
+def venvPepper
+def outputs = [:]
+
+def ipRegex = "\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}"
+def envParams
+timeout(time: 12, unit: 'HOURS') {
+ node("python") {
+ try {
+ // Set build-specific variables
+ venv = "${env.WORKSPACE}/venv"
+ venvPepper = "${env.WORKSPACE}/venvPepper"
+
+ //
+ // Prepare machines
+ //
+ stage ('Create infrastructure') {
+ // value defaults
+ envParams = [
+ 'cluster_zone': HEAT_STACK_ZONE,
+ 'cluster_public_net': HEAT_STACK_PUBLIC_NET
+ ]
+
+ // no underscore in STACK_NAME
+ STACK_NAME = STACK_NAME.replaceAll('_', '-')
+ outputs.put('stack_name', STACK_NAME)
+
+ // set description
+ currentBuild.description = STACK_NAME
+
+ // get templates
+ git.checkoutGitRepository('template', STACK_TEMPLATE_URL, STACK_TEMPLATE_BRANCH, STACK_TEMPLATE_CREDENTIALS)
+
+ // create openstack env
+ openstack.setupOpenstackVirtualenv(venv)
+ openstackCloud = openstack.createOpenstackEnv(venv,
+ OPENSTACK_API_URL, OPENSTACK_API_CREDENTIALS,
+ OPENSTACK_API_PROJECT, "default", "", "default", "3")
+ openstack.getKeystoneToken(openstackCloud, venv)
+
+ // set reclass repo in heat env
+ try {
+ envParams.put('cfg_reclass_branch', STACK_RECLASS_BRANCH)
+ envParams.put('cfg_reclass_address', STACK_RECLASS_ADDRESS)
+ } catch (MissingPropertyException e) {
+ common.infoMsg("Property STACK_RECLASS_BRANCH or STACK_RECLASS_ADDRESS not found! Using default values from template.")
+ }
+
+ // launch stack
+ openstack.createHeatStack(openstackCloud, STACK_NAME, STACK_TEMPLATE, envParams, HEAT_STACK_ENVIRONMENT, venv)
+
+ // get SALT_MASTER_URL
+ saltMasterHost = openstack.getHeatStackOutputParam(openstackCloud, STACK_NAME, 'salt_master_ip', venv)
+ // check that saltMasterHost is valid
+ if (!saltMasterHost || !saltMasterHost.matches(ipRegex)) {
+ common.errorMsg("saltMasterHost is not a valid ip, value is: ${saltMasterHost}")
+ throw new Exception("saltMasterHost is not a valid ip")
+ }
+
+ currentBuild.description = "${STACK_NAME} ${saltMasterHost}"
+
+ SALT_MASTER_URL = "http://${saltMasterHost}:6969"
+
+ // Setup virtualenv for pepper
+ python.setupPepperVirtualenv(venvPepper, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+ }
+
+ stage('Install core infrastructure') {
+ def staticMgmtNetwork = false
+ if (common.validInputParam('STATIC_MGMT_NETWORK')) {
+ staticMgmtNetwork = STATIC_MGMT_NETWORK.toBoolean()
+ }
+ orchestrate.installFoundationInfra(venvPepper, staticMgmtNetwork)
+
+ if (common.checkContains('STACK_INSTALL', 'kvm')) {
+ orchestrate.installInfraKvm(venvPepper)
+ orchestrate.installFoundationInfra(venvPepper, staticMgmtNetwork)
+ }
+
+ orchestrate.validateFoundationInfra(venvPepper)
+ }
+
+ stage('Install Kubernetes infra') {
+ // configure kubernetes_control_address - save loadbalancer
+ def awsOutputs = aws.getOutputs(venv, aws_env_vars, STACK_NAME)
+ common.prettyPrint(awsOutputs)
+ if (awsOutputs.containsKey('ControlLoadBalancer')) {
+ salt.runSaltProcessStep(venvPepper, 'I@salt:master', 'reclass.cluster_meta_set', ['kubernetes_control_address', awsOutputs['ControlLoadBalancer']], null, true)
+ outputs.put('kubernetes_apiserver', 'https://' + awsOutputs['ControlLoadBalancer'])
+ }
+
+ // ensure certificates are generated properly
+ salt.runSaltProcessStep(venvPepper, '*', 'saltutil.refresh_pillar', [], null, true)
+ salt.enforceState(venvPepper, '*', ['salt.minion.cert'], true)
+
+ orchestrate.installKubernetesInfra(venvPepper)
+ }
+
+ stage('Install Kubernetes control') {
+ orchestrate.installKubernetesControl(venvPepper)
+
+ // collect artifacts (kubeconfig)
+ writeFile(file: 'kubeconfig', text: salt.getFileContent(venvPepper, 'I@kubernetes:master and *01*', '/etc/kubernetes/admin-kube-config'))
+ archiveArtifacts(artifacts: 'kubeconfig')
+ }
+
+ stage('Install Kubernetes computes') {
+ if (common.validInputParam('STACK_COMPUTE_COUNT')) {
+ if (STACK_COMPUTE_COUNT > 0) {
+ // get stack info
+ def scaling_group = aws.getOutputs(venv, aws_env_vars, STACK_NAME, 'ComputesScalingGroup')
+
+ //update autoscaling group
+ aws.updateAutoscalingGroup(venv, aws_env_vars, scaling_group, ["--desired-capacity " + STACK_COMPUTE_COUNT])
+
+ // wait for computes to boot up
+ aws.waitForAutoscalingInstances(venv, aws_env_vars, scaling_group)
+ sleep(60)
+ }
+ }
+
+ orchestrate.installKubernetesCompute(venvPepper)
+ }
+
+ stage('Finalize') {
+ outputsPretty = common.prettify(outputs)
+ print(outputsPretty)
+ writeFile(file: 'outputs.json', text: outputsPretty)
+ archiveArtifacts(artifacts: 'outputs.json')
+ }
+
+ } catch (Throwable e) {
+ currentBuild.result = 'FAILURE'
+ throw e
+ } finally {
+ if (currentBuild.result == 'FAILURE') {
+ common.errorMsg("Deploy job FAILED and was not deleted. Please fix the problem and delete stack on you own.")
+
+ if (common.validInputParam('SALT_MASTER_URL')) {
+ common.errorMsg("Salt master URL: ${SALT_MASTER_URL}")
+ }
+ }
+ }
+ }
+}
+
+
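
For reference, the saltMasterHost sanity check performed in the 'Create infrastructure' stage above, reduced to plain Groovy. The Heat output value used here is an illustrative example.

// Sketch only: IP validation before building SALT_MASTER_URL, as in the stage above.
String ipRegex = "\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}"
String saltMasterHost = '10.20.30.40'          // e.g. the 'salt_master_ip' stack output

if (!saltMasterHost || !saltMasterHost.matches(ipRegex)) {
    throw new Exception("saltMasterHost is not a valid ip, value is: ${saltMasterHost}")
}
String SALT_MASTER_URL = "http://${saltMasterHost}:6969"
assert SALT_MASTER_URL == 'http://10.20.30.40:6969'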
diff --git a/mirror-snapshot-pipeline.groovy b/mirror-snapshot-pipeline.groovy
new file mode 100644
index 0000000..abcf1a8
--- /dev/null
+++ b/mirror-snapshot-pipeline.groovy
@@ -0,0 +1,58 @@
+#!groovy
+
+// Collect parameters
+String mirror_name = env.MIRROR_NAME
+String mirror_target = env.MIRROR_TARGET ?: env.MIRROR_NAME
+
+String snapshot_name = env.SNAPSHOT_NAME as String
+String snapshot_id = env.SNAPSHOT_ID as String
+String snapshot_dir = env.SNAPSHOT_DIR
+String snapshot_rel_dir = env.SNAPSHOT_REL_DIR
+
+String root_dir = env.ROOT_DIR
+
+String slave_label = env.SLAVE_LABEL
+
+// Snapshot name can be hierarchical, i.e. can have subdirectories, so let's flatten it
+String normalized_snapshot_name = snapshot_name.replaceAll('/', '-')
+
+String _snapshot = ''
+
+node(slave_label) {
+ try {
+ dir(snapshot_dir) {
+ // Guess link target
+ if (snapshot_id ==~ /^\d{4}-\d{2}-\d{2}-\d{6}$/) {
+ // Exact snapshot ID
+ _snapshot = "${mirror_target}-${snapshot_id}"
+ } else if (snapshot_id == 'latest') {
+ // Latest available snapshot
+ _snapshot = sh (script: "sed '1p;d' '${mirror_target}-${snapshot_id}.target.txt'", returnStdout: true).trim()
+ } else {
+ // Some named snapshot
+ _snapshot = sh (script: "readlink '${mirror_target}-${snapshot_id}'", returnStdout: true).trim()
+ }
+
+ // Set a name for the snapshot to protect it from time-based cleanup
+ sh "ln -sfn '${_snapshot}' '${mirror_target}-${normalized_snapshot_name}'"
+ }
+
+ // Set top-level name
+ dir("${root_dir}/${snapshot_name}") {
+ sh "ln -sfn '${snapshot_rel_dir}/${_snapshot}' '${mirror_name}'"
+ sh "echo '${snapshot_rel_dir}/${_snapshot}' > '${mirror_name}'.target.txt"
+ }
+ } finally {
+ // Cleanup
+ dir("${snapshot_dir}@tmp") {
+ deleteDir()
+ }
+ dir("${root_dir}/${snapshot_name}@tmp") {
+ deleteDir()
+ }
+ }
+}
+
+// Set build description
+currentBuild.description = "<p><b>${_snapshot}</b> (from ${snapshot_id})</p>"
+
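
For reference, a plain-Groovy sketch of how the pipeline above interprets SNAPSHOT_ID: an exact timestamped ID is used verbatim, 'latest' is resolved from the *-latest.target.txt pointer file, and anything else is treated as an existing named snapshot resolved via readlink. The sample IDs are illustrative.

// Sketch only: SNAPSHOT_ID classification mirroring the dir(snapshot_dir) block above.
String classify(String snapshotId) {
    if (snapshotId ==~ /^\d{4}-\d{2}-\d{2}-\d{6}$/) {
        return 'exact snapshot ID'
    } else if (snapshotId == 'latest') {
        return 'latest snapshot, read from the .target.txt pointer'
    } else {
        return 'named snapshot, resolved via readlink'
    }
}

assert classify('2018-04-19-104500') == 'exact snapshot ID'
assert classify('latest').startsWith('latest')
assert classify('nightly').startsWith('named')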
diff --git a/test-salt-formula-docs-pipeline.groovy b/test-salt-formula-docs-pipeline.groovy
new file mode 100644
index 0000000..e026257
--- /dev/null
+++ b/test-salt-formula-docs-pipeline.groovy
@@ -0,0 +1,107 @@
+/**
+ * Pipeline for generating and testing sphinx generated documentation
+ * MODEL_GIT_URL
+ * MODEL_GIT_REF
+ * CLUSTER_NAME
+ *
+ */
+
+def gerritRef
+try {
+ gerritRef = GERRIT_REFSPEC
+} catch (MissingPropertyException e) {
+ gerritRef = null
+}
+
+common = new com.mirantis.mk.Common()
+ssh = new com.mirantis.mk.Ssh()
+gerrit = new com.mirantis.mk.Gerrit()
+git = new com.mirantis.mk.Git()
+python = new com.mirantis.mk.Python()
+salt = new com.mirantis.mk.Salt()
+
+timeout(time: 12, unit: 'HOURS') {
+ node("python") {
+ try {
+ def workspace = common.getWorkspace()
+ def masterName = "cfg01." + CLUSTER_NAME.replace("-","_") + ".lab"
+ //def jenkinsUserIds = common.getJenkinsUserIds()
+ def img = docker.image("tcpcloud/salt-models-testing:nightly")
+ img.pull()
+ img.inside("-u root:root --hostname ${masterName} --ulimit nofile=4096:8192 --cpus=2") {
+ stage("Prepare salt env") {
+ if(MODEL_GIT_REF != "" && MODEL_GIT_URL != "") {
+ checkouted = gerrit.gerritPatchsetCheckout(MODEL_GIT_URL, MODEL_GIT_REF, "HEAD", CREDENTIALS_ID)
+ } else {
+ throw new Exception("Cannot checkout gerrit patchset, MODEL_GIT_URL or MODEL_GIT_REF is null")
+ }
+ if(checkouted) {
+ if (fileExists('classes/system')) {
+ ssh.prepareSshAgentKey(CREDENTIALS_ID)
+ dir('classes/system') {
+ // XXX: JENKINS-33510 dir step does not work properly inside containers
+ //remoteUrl = git.getGitRemote()
+ ssh.ensureKnownHosts("https://github.com/Mirantis/reclass-system-salt-model")
+ }
+ ssh.agentSh("git submodule init; git submodule sync; git submodule update --recursive")
+ }
+ }
+ // install all formulas
+ sh("apt-get update && apt-get install -y salt-formula-*")
+ withEnv(["MASTER_HOSTNAME=${masterName}", "CLUSTER_NAME=${CLUSTER_NAME}", "MINION_ID=${masterName}"]){
+ sh("cp -r ${workspace}/* /srv/salt/reclass && echo '127.0.1.2 salt' >> /etc/hosts")
+ sh("""bash -c 'source /srv/salt/scripts/bootstrap.sh; cd /srv/salt/scripts \
+ && source_local_envs \
+ && configure_salt_master \
+ && configure_salt_minion \
+ && install_salt_formula_pkg; \
+ saltservice_restart; \
+ saltmaster_init'""")
+ }
+ }
+ stage("Checkout formula review"){
+ if(gerritRef){
+ //TODO: checkout gerrit review and replace formula content in directory
+ // gerrit.gerritPatchsetCheckout([credentialsId: CREDENTIALS_ID])
+ }else{
+ common.successMsg("Test triggered manually, so skipping checkout formula review stage")
+ }
+ }
+ stage("Generate documentation"){
+ def saltResult = sh(script:"salt-call state.sls salt.minion,sphinx.server,nginx", returnStatus:true)
+ if(saltResult > 0){
+ common.warningMsg("Salt call salt.minion,sphinx.server,nginx failed but continuing")
+ }
+ }
+ stage("Publish outputs"){
+ try{
+ sh("mkdir ${workspace}/output")
+ //TODO: verify existence of created output files
+ // /srv/static/sites/reclass_doc will be used for publishHTML step
+ sh("tar -zcf ${workspace}/output/docs-html.tar.gz /srv/static/sites/reclass_doc")
+ sh("cp -R /srv/static/sites/reclass_doc ${workspace}")
+ publishHTML (target: [
+ reportDir: 'reclass_doc',
+ reportFiles: 'index.html',
+ reportName: "Reclass-documentation"
+ ])
+ // /srv/static/extern will be used as tar artifact
+ sh("tar -zcf ${workspace}/output/docs-src.tar.gz /srv/static/extern")
+ archiveArtifacts artifacts: "output/*"
+ }catch(Exception e){
+ common.errorMsg("Documentation publish stage failed!")
+ }finally{
+ sh("rm -r ./output")
+ }
+ }
+ }
+ } catch (Throwable e) {
+ // If there was an error or exception thrown, the build failed
+ currentBuild.result = "FAILURE"
+ currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+ throw e
+ } finally {
+ common.sendNotification(currentBuild.result, "", ["slack"])
+ }
+ }
+}
diff --git a/test-salt-formulas-env.groovy b/test-salt-formulas-env.groovy
index 8f60727..be9c894 100644
--- a/test-salt-formulas-env.groovy
+++ b/test-salt-formulas-env.groovy
@@ -20,8 +20,8 @@
def checkouted = false
throttle(['test-formula']) {
- timeout(time: 12, unit: 'HOURS') {
- node("python") {
+ timeout(time: 1, unit: 'HOURS') {
+ node("python&&docker") {
try {
stage("checkout") {
if (defaultGitRef && defaultGitUrl) {
diff --git a/test-salt-formulas-pipeline.groovy b/test-salt-formulas-pipeline.groovy
index af22dc8..56e9bb9 100644
--- a/test-salt-formulas-pipeline.groovy
+++ b/test-salt-formulas-pipeline.groovy
@@ -43,8 +43,13 @@
try {
triggerTestFormulaJob(currentFormula, defaultGitRef, defaultGitUrl)
} catch (Exception e) {
- failedFormulas << currentFormula
- common.warningMsg("Test of ${currentFormula} failed : ${e}")
+ if (e.getMessage().contains("completed with status ABORTED")) {
+ common.warningMsg("Test of ${currentFormula} was aborted and will be retriggered")
+ futureFormulas << currentFormula
+ } else {
+ failedFormulas << currentFormula
+ common.warningMsg("Test of ${currentFormula} failed : ${e}")
+ }
}
}
}
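
For reference, the abort-versus-failure handling added above, reduced to plain Groovy: a downstream run that ends with "completed with status ABORTED" is queued again via futureFormulas, while any other exception marks the formula as failed. Formula names and messages below are illustrative.

// Sketch only: classification of downstream test results as done above.
List failedFormulas = []
List futureFormulas = []

['salt-formula-nginx': 'completed with status ABORTED',
 'salt-formula-mysql': 'assertion failed'].each { formula, message ->
    if (message.contains('completed with status ABORTED')) {
        futureFormulas << formula      // aborted runs are retriggered later
    } else {
        failedFormulas << formula      // anything else counts as a real failure
    }
}

assert futureFormulas == ['salt-formula-nginx']
assert failedFormulas == ['salt-formula-mysql']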
diff --git a/test-salt-model-node.groovy b/test-salt-model-node.groovy
index e96bc98..3893066 100644
--- a/test-salt-model-node.groovy
+++ b/test-salt-model-node.groovy
@@ -29,8 +29,8 @@
def checkouted = false
throttle(['test-model']) {
- timeout(time: 12, unit: 'HOURS') {
- node("python") {
+ timeout(time: 1, unit: 'HOURS') {
+ node("python&&docker") {
try{
stage("checkout") {
if(defaultGitRef != "" && defaultGitUrl != "") {