Merge "Made report dir as option"
diff --git a/build-mirror-image.groovy b/build-mirror-image.groovy
index 822d398..0aff603 100644
--- a/build-mirror-image.groovy
+++ b/build-mirror-image.groovy
@@ -44,28 +44,12 @@
 def uploadImageStatus = ""
 def uploadMd5Status = ""
 
-def retry(int times = 5, int delay = 0, Closure body) {
-    int retries = 0
-    def exceptions = []
-    while(retries++ < times) {
-        try {
-            return body.call()
-        } catch(e) {
-            sleep(delay)
-        }
-    }
-    currentBuild.result = "FAILURE"
-    throw new Exception("Failed after $times retries")
-}
-
 timeout(time: 12, unit: 'HOURS') {
     node("python&&disk-xl") {
         try {
             def workspace = common.getWorkspace()
             openstackEnv = String.format("%s/venv", workspace)
             venvPepper = String.format("%s/venvPepper", workspace)
-            rcFile = openstack.createOpenstackEnv(openstackEnv, OS_URL, OS_CREDENTIALS_ID, OS_PROJECT, "default", "", "default", "2", "")
-            def openstackVersion = OS_VERSION
 
             VM_IP_DELAY = VM_IP_DELAY as Integer
             VM_IP_RETRIES = VM_IP_RETRIES as Integer
@@ -79,10 +63,11 @@
                 }
 
                 sh "wget https://raw.githubusercontent.com/Mirantis/mcp-common-scripts/${SCRIPTS_REF}/mirror-image/salt-bootstrap.sh"
-                openstack.setupOpenstackVirtualenv(openstackEnv, openstackVersion)
+                openstack.setupOpenstackVirtualenv(openstackEnv, OS_VERSION)
             }
 
             stage("Spawn Instance"){
+                rcFile = openstack.createOpenstackEnv(OS_URL, OS_CREDENTIALS_ID, OS_PROJECT, "default", "", "default", "2", "")
                 privateKey = openstack.runOpenstackCommand("openstack keypair create mcp-offline-keypair-${dateTime}", rcFile, openstackEnv)
 
                 common.infoMsg(privateKey)
@@ -94,21 +79,26 @@
                     sh "envsubst < salt-bootstrap.sh > salt-bootstrap.sh.temp;mv salt-bootstrap.sh.temp salt-bootstrap.sh; cat salt-bootstrap.sh"
                 }
 
-                openstackServer = openstack.runOpenstackCommand("openstack server create --key-name mcp-offline-keypair-${dateTime} --availability-zone ${VM_AVAILABILITY_ZONE} --image ${VM_IMAGE} --flavor ${VM_FLAVOR} --nic net-id=${VM_NETWORK_ID},v4-fixed-ip=${VM_IP} --user-data salt-bootstrap.sh mcp-offline-mirror-${dateTime}", rcFile, openstackEnv)
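+                // pass a fixed IPv4 address only when VM_IP is set; otherwise let the network allocate one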
+                if(VM_IP != ""){
+                    openstackServer = openstack.runOpenstackCommand("openstack server create --key-name mcp-offline-keypair-${dateTime} --availability-zone ${VM_AVAILABILITY_ZONE} --image ${VM_IMAGE} --flavor ${VM_FLAVOR} --nic net-id=${VM_NETWORK_ID},v4-fixed-ip=${VM_IP} --user-data salt-bootstrap.sh mcp-offline-mirror-${dateTime}", rcFile, openstackEnv)
+                }else{
+                    openstackServer = openstack.runOpenstackCommand("openstack server create --key-name mcp-offline-keypair-${dateTime} --availability-zone ${VM_AVAILABILITY_ZONE} --image ${VM_IMAGE} --flavor ${VM_FLAVOR} --nic net-id=${VM_NETWORK_ID} --user-data salt-bootstrap.sh mcp-offline-mirror-${dateTime}", rcFile, openstackEnv)                    
+                }
                 sleep(60)
 
-                retry(VM_IP_RETRIES, VM_IP_DELAY){
+                common.retry(VM_IP_RETRIES, VM_IP_DELAY){
                     openstack.runOpenstackCommand("openstack ip floating add ${floatingIP} mcp-offline-mirror-${dateTime}", rcFile, openstackEnv)
                 }
 
                 sleep(500)
 
-                retry(VM_CONNECT_RETRIES, VM_CONNECT_DELAY){
+                common.retry(VM_CONNECT_RETRIES, VM_CONNECT_DELAY){
                     sh "scp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -i id_rsa root@${floatingIP}:/srv/initComplete ./"
                 }
 
                 python.setupPepperVirtualenv(venvPepper, "http://${floatingIP}:6969", SALT_MASTER_CREDENTIALS)
             }
+
             stage("Prepare instance"){
                 salt.runSaltProcessStep(venvPepper, '*apt*', 'saltutil.refresh_pillar', [], null, true)
                 salt.runSaltProcessStep(venvPepper, '*apt*', 'saltutil.sync_all', [], null, true)
@@ -130,14 +120,8 @@
             stage("Create Aptly"){
                 common.infoMsg("Creating Aptly")
                 salt.enforceState(venvPepper, '*apt*', ['aptly'], true, false, null, false, -1, 2)
-                //TODO: Do it new way
-                salt.cmdRun(venvPepper, '*apt*', "aptly_mirror_update.sh -s -v", true, null, true, ["runas=aptly"])
-                salt.cmdRun(venvPepper, '*apt*', "nohup aptly api serve --no-lock > /dev/null 2>&1 </dev/null &", true, null, true, ["runas=aptly"])
-                salt.cmdRun(venvPepper, '*apt*', "aptly-publisher --timeout=1200 publish -v -c /etc/aptly-publisher.yaml --architectures amd64 --url http://127.0.0.1:8080 --recreate --force-overwrite", true, null, true, ["runas=aptly"])
-                salt.cmdRun(venvPepper, '*apt*', "aptly db cleanup", true, null, true, ["runas=aptly"])
-                //NEW way
-                //salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.script', ['salt://aptly/files/aptly_mirror_update.sh', "args=-sv", "runas=aptly"], null, true)
-                //salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.script', ['salt://aptly/files/aptly_publish_update.sh', "args=-acrfv", "runas=aptly"], null, true)
+                salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.script', ['salt://aptly/files/aptly_mirror_update.sh', "args=-sv", "runas=aptly"], null, true)
+                salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.script', ['salt://aptly/files/aptly_publish_update.sh', "args=-acrfv", "runas=aptly"], null, true)
                 salt.cmdRun(venvPepper, '*apt*', "wget https://raw.githubusercontent.com/Mirantis/mcp-common-scripts/${SCRIPTS_REF}/mirror-image/aptly/aptly-update.sh -O /srv/scripts/aptly-update.sh")
                 salt.cmdRun(venvPepper, '*apt*', "chmod +x /srv/scripts/aptly-update.sh")
             }
@@ -173,36 +157,37 @@
                 salt.cmdRun(venvPepper, '*apt*', "rm -rf /var/lib/cloud/sem/* /var/lib/cloud/instance /var/lib/cloud/instances/*")
                 salt.cmdRun(venvPepper, '*apt*', "cloud-init init")
 
-                retry(3, 5){
+                common.retry(3, 5){
                     openstack.runOpenstackCommand("openstack server stop mcp-offline-mirror-${dateTime}", rcFile, openstackEnv)
                 }
 
-                retry(6, 30){
+                common.retry(6, 30){
                     serverStatus = openstack.runOpenstackCommand("openstack server show --format value -c status mcp-offline-mirror-${dateTime}", rcFile, openstackEnv)
                     if(serverStatus != "SHUTOFF"){
                         throw new ResourceException("Instance is not ready for image create.")
                     }
                 }
-                retry(3, 5){
+                common.retry(3, 5){
                     openstack.runOpenstackCommand("openstack server image create --name ${IMAGE_NAME}-${dateTime} --wait mcp-offline-mirror-${dateTime}", rcFile, openstackEnv)
                 }
             }
 
             stage("Publish image"){
                 common.infoMsg("Saving image ${IMAGE_NAME}-${dateTime}")
-                retry(3, 5){
+                common.retry(3, 5){
                     openstack.runOpenstackCommand("openstack image save --file ${IMAGE_NAME}-${dateTime}.qcow2 ${IMAGE_NAME}-${dateTime}", rcFile, openstackEnv)
                 }
                 sh "md5sum ${IMAGE_NAME}-${dateTime}.qcow2 > ${IMAGE_NAME}-${dateTime}.qcow2.md5"
 
                 common.infoMsg("Uploading image ${IMAGE_NAME}-${dateTime}")
-                retry(3, 5){
+                common.retry(3, 5){
                     uploadImageStatus = sh(script: "curl -f -T ${IMAGE_NAME}-${dateTime}.qcow2 ${UPLOAD_URL}", returnStatus: true)
                     if(uploadImageStatus!=0){
                         throw new Exception("Image upload failed")
                     }
                 }
-                retry(3, 5){
+
+                common.retry(3, 5){
                     uploadMd5Status = sh(script: "curl -f -T ${IMAGE_NAME}-${dateTime}.qcow2.md5 ${UPLOAD_URL}", returnStatus: true)
                     if(uploadMd5Status != 0){
                         throw new Exception("MD5 sum upload failed")
diff --git a/ceph-remove-osd.groovy b/ceph-remove-osd.groovy
index 71946b7..169bbd0 100644
--- a/ceph-remove-osd.groovy
+++ b/ceph-remove-osd.groovy
@@ -22,21 +22,45 @@
 def flags = CLUSTER_FLAGS.tokenize(',')
 def osds = OSD.tokenize(',')
 
-def removePartition(master, target, partition_uuid) {
-    def partition = ""
-    try {
-        // partition = /dev/sdi2
-        partition = runCephCommand(master, target, "blkid | grep ${partition_uuid} ")['return'][0].values()[0].split("(?<=[0-9])")[0]
-    } catch (Exception e) {
-        common.warningMsg(e)
-    }
 
+def removePartition(master, target, partition_uuid, type='', id=-1) {
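+    // type: 'lockbox' -> umount the partition found by uuid; 'data' -> umount the OSD data dir first, then look it up by uuid; default -> plain blkid lookup (id is the OSD number, used for 'data')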
+    def partition = ""
+    if (type == 'lockbox') {
+        try {
+            // umount - partition = /dev/sdi2
+            partition = runCephCommand(master, target, "lsblk -rp | grep -v mapper | grep ${partition_uuid} ")['return'][0].values()[0].split()[0]
+            runCephCommand(master, target, "umount ${partition}")
+        } catch (Exception e) {
+            common.warningMsg(e)
+        }
+    } else if (type == 'data') {
+        try {
+            // umount - partition = /dev/sdi2
+            partition = runCephCommand(master, target, "df | grep /var/lib/ceph/osd/ceph-${id}")['return'][0].values()[0].split()[0]
+            runCephCommand(master, target, "umount ${partition}")
+        } catch (Exception e) {
+            common.warningMsg(e)
+        }
+        try {
+            // partition = /dev/sdi2
+            partition = runCephCommand(master, target, "blkid | grep ${partition_uuid} ")['return'][0].values()[0].split(":")[0]
+        } catch (Exception e) {
+            common.warningMsg(e)
+        }
+    } else {
+        try {
+            // partition = /dev/sdi2
+            partition = runCephCommand(master, target, "blkid | grep ${partition_uuid} ")['return'][0].values()[0].split(":")[0]
+        } catch (Exception e) {
+            common.warningMsg(e)
+        }
+    }
     if (partition?.trim()) {
         // dev = /dev/sdi
         def dev = partition.replaceAll('\\d+$', "")
         // part_id = 2
-        def part_id = partition.substring(partition.lastIndexOf("/")+1).replaceAll("[^0-9]", "")
-        runCephCommand(master, target, "parted ${dev} rm ${part_id}")
+        def part_id = partition.substring(partition.lastIndexOf("/")+1).replaceAll("[^0-9]+", "")
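+        // 'Ignore' is piped into parted, presumably to auto-answer its interactive prompt (same pattern as in ceph-replace-failed-osd.groovy)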
+        runCephCommand(master, target, "Ignore | parted ${dev} rm ${part_id}")
     }
     return
 }
@@ -75,10 +99,12 @@
 
         // get list of osd disks of the host
         salt.runSaltProcessStep(pepperEnv, HOST, 'saltutil.sync_grains', [], null, true, 5)
-        def cephGrain = salt.getGrain(pepperEnv, HOST, 'ceph')['return']
+        def cephGrain = salt.getGrain(pepperEnv, HOST, 'ceph')
+
         if(cephGrain['return'].isEmpty()){
             throw new Exception("Ceph salt grain cannot be found!")
         }
+        common.print(cephGrain)
         def ceph_disks = cephGrain['return'][0].values()[0].values()[0]['ceph_disk']
         common.prettyPrint(ceph_disks)
 
@@ -137,8 +163,9 @@
         }
 
         for (osd_id in osd_ids) {
-
             id = osd_id.replaceAll('osd.', '')
+            /*
+
             def dmcrypt = ""
             try {
                 dmcrypt = runCephCommand(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep dmcrypt")['return'][0].values()[0]
@@ -156,6 +183,8 @@
                 }
 
             }
+            */
+
             // remove journal, block_db, block_wal partition `parted /dev/sdj rm 3`
             stage('Remove journal / block_db / block_wal partition') {
                 def partition_uuid = ""
@@ -163,39 +192,73 @@
                 def block_db_partition_uuid = ""
                 def block_wal_partition_uuid = ""
                 try {
-                    journal_partition_uuid = runCephCommand(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep journal | grep partuuid")
-                    journal_partition_uuid = journal_partition_uuid.toString().trim().split("\n")[0].substring(journal_partition_uuid.toString().trim().lastIndexOf("/")+1)
+                    journal_partition_uuid = runCephCommand(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/journal_uuid")['return'][0].values()[0].split("\n")[0]
                 } catch (Exception e) {
                     common.infoMsg(e)
                 }
                 try {
-                    block_db_partition_uuid = runCephCommand(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep 'block.db' | grep partuuid")
-                    block_db_partition_uuid = block_db_partition_uuid.toString().trim().split("\n")[0].substring(block_db_partition_uuid.toString().trim().lastIndexOf("/")+1)
+                    block_db_partition_uuid = runCephCommand(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/block.db_uuid")['return'][0].values()[0].split("\n")[0]
                 } catch (Exception e) {
                     common.infoMsg(e)
                 }
 
                 try {
-                    block_wal_partition_uuid = runCephCommand(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep 'block.wal' | grep partuuid")
-                    block_wal_partition_uuid = block_wal_partition_uuid.toString().trim().split("\n")[0].substring(block_wal_partition_uuid.toString().trim().lastIndexOf("/")+1)
+                    block_wal_partition_uuid = runCephCommand(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/block.wal_uuid")['return'][0].values()[0].split("\n")[0]
                 } catch (Exception e) {
                     common.infoMsg(e)
                 }
 
-                // set partition_uuid = 2c76f144-f412-481e-b150-4046212ca932
+                // remove partition_uuid = 2c76f144-f412-481e-b150-4046212ca932
                 if (journal_partition_uuid?.trim()) {
-                    partition_uuid = journal_partition_uuid
-                } else if (block_db_partition_uuid?.trim()) {
-                    partition_uuid = block_db_partition_uuid
+                    removePartition(pepperEnv, HOST, journal_partition_uuid)
                 }
-
-                // if disk has journal, block_db or block_wal on different disk, then remove the partition
-                if (partition_uuid?.trim()) {
-                    removePartition(pepperEnv, HOST, partition_uuid)
+                if (block_db_partition_uuid?.trim()) {
+                    removePartition(pepperEnv, HOST, block_db_partition_uuid)
                 }
                 if (block_wal_partition_uuid?.trim()) {
                     removePartition(pepperEnv, HOST, block_wal_partition_uuid)
                 }
+
+                try {
+                    runCephCommand(pepperEnv, HOST, "partprobe")
+                } catch (Exception e) {
+                    common.warningMsg(e)
+                }
+            }
+
+            // remove data / block / lockbox partition `parted /dev/sdj rm 3`
+            stage('Remove data / block / lockbox partition') {
+                def data_partition_uuid = ""
+                def block_partition_uuid = ""
+                def lockbox_partition_uuid = ""
+                try {
+                    data_partition_uuid = runCephCommand(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/fsid")['return'][0].values()[0].split("\n")[0]
+                    common.print(data_partition_uuid)
+                } catch (Exception e) {
+                    common.infoMsg(e)
+                }
+                try {
+                    block_partition_uuid = runCephCommand(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/block_uuid")['return'][0].values()[0].split("\n")[0]
+                } catch (Exception e) {
+                    common.infoMsg(e)
+                }
+
+                try {
+                    lockbox_partition_uuid = data_partition_uuid
+                } catch (Exception e) {
+                    common.infoMsg(e)
+                }
+
+                // remove partition_uuid = 2c76f144-f412-481e-b150-4046212ca932
+                if (block_partition_uuid?.trim()) {
+                    removePartition(pepperEnv, HOST, block_partition_uuid)
+                }
+                if (data_partition_uuid?.trim()) {
+                    removePartition(pepperEnv, HOST, data_partition_uuid, 'data', id)
+                }
+                if (lockbox_partition_uuid?.trim()) {
+                    removePartition(pepperEnv, HOST, lockbox_partition_uuid, 'lockbox')
+                }
             }
         }
         // remove cluster flags
diff --git a/ceph-replace-failed-osd.groovy b/ceph-replace-failed-osd.groovy
index 93b6573..2361098 100644
--- a/ceph-replace-failed-osd.groovy
+++ b/ceph-replace-failed-osd.groovy
@@ -122,9 +122,14 @@
         if (DMCRYPT.toBoolean() == true) {
 
             // remove partition tables
-            stage('dd part tables') {
+            stage('dd / zap device') {
                 for (dev in devices) {
-                    runCephCommand(pepperEnv, HOST, "dd if=/dev/zero of=${dev} bs=512 count=1 conv=notrunc")
+                    runCephCommand(pepperEnv, HOST, "dd if=/dev/zero of=${dev} bs=4096k count=1 conv=notrunc")
+                    try {
+                        runCephCommand(pepperEnv, HOST, "sgdisk --zap-all --clear --mbrtogpt -g -- ${dev}")
+                    } catch (Exception e) {
+                        common.warningMsg(e)
+                    }
                 }
             }
 
@@ -135,7 +140,7 @@
                         // dev = /dev/sdi
                         def dev = partition.replaceAll("[0-9]", "")
                         // part_id = 2
-                        def part_id = partition.substring(partition.lastIndexOf("/")+1).replaceAll("[^0-9]", "")
+                        def part_id = partition.substring(partition.lastIndexOf("/")+1).replaceAll("[^0-9]+", "")
                         try {
                             runCephCommand(pepperEnv, HOST, "Ignore | parted ${dev} rm ${part_id}")
                         } catch (Exception e) {
diff --git a/cicd-lab-pipeline.groovy b/cicd-lab-pipeline.groovy
index 6236f2a..da3e177 100644
--- a/cicd-lab-pipeline.groovy
+++ b/cicd-lab-pipeline.groovy
@@ -181,7 +181,7 @@
                 }
                 // XXX: Workaround to have `/var/lib/jenkins` on all
                 // nodes where are jenkins_slave services are created.
-                salt.runSaltProcessStep(pepperEnv, 'I@docker:swarm', 'cmd.run', ['mkdir -p /var/lib/jenkins'])
+                salt.cmdRun(pepperEnv, 'I@docker:swarm', "mkdir -p /var/lib/jenkins")
             }
 
             stage("Configure CI/CD services") {
diff --git a/cloud-deploy-pipeline.groovy b/cloud-deploy-pipeline.groovy
index 989d130..b4c6d11 100644
--- a/cloud-deploy-pipeline.groovy
+++ b/cloud-deploy-pipeline.groovy
@@ -50,6 +50,13 @@
  *   TEST_TEMPEST_IMAGE           Tempest image link
  *   TEST_TEMPEST_PATTERN         If not false, run tests matched to pattern only
  *   TEST_TEMPEST_TARGET          Salt target for tempest node
+ *   TESTRAIL_REPORT              Whether to upload results to TestRail or not
+ *   TESTRAIL_REPORTER_IMAGE      Docker image for the TestRail reporter
+ *   TESTRAIL_QA_CREDENTIALS      Credentials for uploading to TestRail
+ *   TESTRAIL_MILESTONE           Product version for tests
+ *   TESTRAIL_PLAN                TestRail test plan
+ *   TESTRAIL_GROUP               TestRail test group
+ *   TESTRAIL_SUITE               TestRail test suite
  *
  * optional parameters for overwriting soft params
  *   SALT_OVERRIDES              YAML with overrides for Salt deployment
@@ -381,8 +388,7 @@
                         orchestrate.installOpenstackNetwork(venvPepper)
                     }
 
-                    salt.runSaltProcessStep(venvPepper, 'I@keystone:server', 'cmd.run', ['. /root/keystonerc; neutron net-list'])
-                    salt.runSaltProcessStep(venvPepper, 'I@keystone:server', 'cmd.run', ['. /root/keystonerc; nova net-list'])
+                    salt.cmdRun(venvPepper, 'I@keystone:server', '. /root/keystonercv3; openstack network list')
                 }
 
                 if (salt.testTarget(venvPepper, 'I@ironic:conductor')){
@@ -470,18 +476,23 @@
                 stage('Run k8s conformance e2e tests') {
                     def image = TEST_K8S_CONFORMANCE_IMAGE
                     def output_file = image.replaceAll('/', '-') + '.output'
+                    def target = 'ctl01*'
+                    def conformance_output_file = 'conformance_test.tar'
 
                     // run image
-                    test.runConformanceTests(venvPepper, 'ctl01*', TEST_K8S_API_SERVER, image)
+                    test.runConformanceTests(venvPepper, target, TEST_K8S_API_SERVER, image)
 
                     // collect output
                     sh "mkdir -p ${artifacts_dir}"
-                    file_content = salt.getFileContent(venvPepper, 'ctl01*', '/tmp/' + output_file)
+                    file_content = salt.getFileContent(venvPepper, target, '/tmp/' + output_file)
                     writeFile file: "${artifacts_dir}${output_file}", text: file_content
                     sh "cat ${artifacts_dir}${output_file}"
 
                     // collect artifacts
                     archiveArtifacts artifacts: "${artifacts_dir}${output_file}"
+
+                    // Copy test results
+                    test.CopyConformanceResults(venvPepper, target, artifacts_dir, conformance_output_file)
                 }
             }
 
@@ -500,6 +511,19 @@
                 stage('Archive rally artifacts') {
                     test.archiveRallyArtifacts(venvPepper, TEST_TEMPEST_TARGET)
                 }
+
+                if (common.validInputParam('TESTRAIL_REPORT') && TESTRAIL_REPORT.toBoolean()) {
+                    stage('Upload test results to TestRail') {
+                        def date = sh(script: 'date +%Y-%m-%d', returnStdout: true).trim()
+                        def plan = TESTRAIL_PLAN ?: "[${TESTRAIL_MILESTONE}]System-Devcloud-${date}"
+                        def group = TESTRAIL_GROUP ?: STACK_TEMPLATE
+
+                        salt.cmdRun(venvPepper, TEST_TEMPEST_TARGET, "cd /root/rally_reports && cp \$(ls -t *xml | head -n1) report.xml")
+                        test.uploadResultsTestrail("/root/rally_reports/report.xml",
+                                TESTRAIL_REPORTER_IMAGE, group, TESTRAIL_QA_CREDENTIALS,
+                                plan, TESTRAIL_MILESTONE, TESTRAIL_SUITE)
+                    }
+                }
             }
 
 
diff --git a/create-debmirror-package.groovy b/create-debmirror-package.groovy
new file mode 100644
index 0000000..7911d37
--- /dev/null
+++ b/create-debmirror-package.groovy
@@ -0,0 +1,52 @@
+/**
+ *
+ * Create debmirror package pipeline
+ *
+ * Expected parameters:
+ * MIRROR_NAME - Name of the mirror
+ * MIRROR_URL - URL of the mirror host
+ * ROOT - Root directory of the upstream location
+ * METHOD - rsync or http
+ * DEBMIRROR_ARGS - args for the debmirror command
+ * UPLOAD_URL - URL to upload the TAR to
+ */
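+// Illustrative parameter values (assumption, not taken from this repo):
+//   METHOD=rsync MIRROR_URL=mirror.mirantis.com ROOT=ubuntu MIRROR_NAME=ubuntu
+//   DEBMIRROR_ARGS="--arch=amd64 --dist=xenial --section=main --nosource"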
+
+// Load shared libs
+def common = new com.mirantis.mk.Common()
+
+timeout(time: 12, unit: 'HOURS') {
+    node("python&&disk-xl") {
+        try {
+            def workspace = common.getWorkspace()
+            if(METHOD == "rsync"){
+                ROOT = ":mirror/${ROOT}"
+            }
+            stage("Create mirror"){
+                def mirrordir="${workspace}/mirror"
+                def debmlog="${workspace}/mirror_${MIRROR_NAME}_log"
+
+                sh "debmirror --verbose --method=${METHOD} --progress --host=${MIRROR_URL} --root=${ROOT} ${DEBMIRROR_ARGS} ${mirrordir}/${MIRROR_NAME} 2>&1 | tee -a ${debmlog}"
+
+                sh "tar -czvf ${workspace}/${MIRROR_NAME}.tar.gz -C ${mirrordir}/${MIRROR_NAME} ."
+            }
+
+            stage("Upload mirror"){
+                common.retry(3, 5, {
+                    uploadImageStatus = sh(script: "curl -f -T ${workspace}/${MIRROR_NAME}.tar.gz ${UPLOAD_URL}", returnStatus: true)
+                    if(uploadImageStatus!=0){
+                        throw new Exception("Image upload failed")
+                    }
+                })
+            }
+
+        } catch (Throwable e) {
+            // If there was an error or exception thrown, the build failed
+            currentBuild.result = "FAILURE"
+            throw e
+        }finally {
+            stage("Cleanup"){
+                sh "rm -rf ${workspace}/*"
+            }
+        }
+    }
+}
\ No newline at end of file
diff --git a/cvp-func.groovy b/cvp-func.groovy
new file mode 100644
index 0000000..ef50945
--- /dev/null
+++ b/cvp-func.groovy
@@ -0,0 +1,63 @@
+/**
+ *
+ * Launch validation of the cloud
+ *
+ * Expected parameters:
+
+ *   SALT_MASTER_URL             URL of Salt master
+ *   SALT_MASTER_CREDENTIALS     Credentials that are used in this Jenkins for accessing Salt master (usually "salt")
+ *   PROXY                       Proxy address (if any) for accessing the Internet. It will be used for cloning repos and installing pip dependencies
+ *   TEST_IMAGE                  Docker image link to use for running container with testing tools.
+ *   TOOLS_REPO                  URL of repo where testing tools, scenarios, configs are located
+ *
+ *   DEBUG_MODE                  If you need to debug (keep the container after the test run), enable this
+ *   SKIP_LIST_PATH              Path to tempest skip list file in TOOLS_REPO
+ *   TARGET_NODE                 Node to run container with Tempest/Rally
+ *   TEMPEST_REPO                Tempest repo to clone and use
+ *   TEMPEST_TEST_PATTERN        Tests to run during HA scenarios
+ *   TEMPEST_ENDPOINT_TYPE       Type of OS endpoint to use during test run
+ *
+ */
+
+common = new com.mirantis.mk.Common()
+salt = new com.mirantis.mk.Salt()
+validate = new com.mirantis.mcp.Validate()
+
+def saltMaster
+def artifacts_dir = 'validation_artifacts/'
+def remote_artifacts_dir = '/root/qa_results/'
+
+node() {
+    try{
+        stage('Initialization') {
+            saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+            validate.runBasicContainer(saltMaster, TARGET_NODE, TEST_IMAGE)
+            sh "rm -rf ${artifacts_dir}"
+            salt.cmdRun(saltMaster, TARGET_NODE, "mkdir -p ${remote_artifacts_dir}")
+            validate.configureContainer(saltMaster, TARGET_NODE, PROXY, TOOLS_REPO, TEMPEST_REPO, TEMPEST_ENDPOINT_TYPE)
+        }
+
+        stage('Run Tempest tests') {
+            sh "mkdir -p ${artifacts_dir}"
+            validate.runCVPtempest(saltMaster, TARGET_NODE, TEMPEST_TEST_PATTERN, SKIP_LIST_PATH, remote_artifacts_dir)
+        }
+
+        stage('Collect results') {
+            validate.addFiles(saltMaster, TARGET_NODE, remote_artifacts_dir, artifacts_dir)
+            archiveArtifacts artifacts: "${artifacts_dir}/*"
+            junit "${artifacts_dir}/*.xml"
+        }
+    } catch (Throwable e) {
+        // If there was an error or exception thrown, the build failed
+        currentBuild.result = "FAILURE"
+        throw e
+    } finally {
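+        // cleanup only runs when DEBUG_MODE is disabled, so the test container can be kept for inspection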
+        if (DEBUG_MODE == 'false') {
+            if (TOOLS_REPO != "") {
+                validate.openstack_cleanup(saltMaster, TARGET_NODE)
+            }
+            validate.runCleanup(saltMaster, TARGET_NODE)
+            salt.cmdRun(saltMaster, TARGET_NODE, "rm -rf ${remote_artifacts_dir}")
+        }
+    }
+}
diff --git a/cvp-perf.groovy b/cvp-perf.groovy
new file mode 100644
index 0000000..0fb9085
--- /dev/null
+++ b/cvp-perf.groovy
@@ -0,0 +1,57 @@
+/**
+ *
+ * Launch validation of the cloud
+ *
+ * Expected parameters:
+ *   SALT_MASTER_URL             URL of Salt master
+ *   SALT_MASTER_CREDENTIALS     Credentials that are used in this Jenkins for accessing Salt master (usually "salt")
+ *   PROXY                       Proxy address (if any) for accessing the Internet. It will be used for cloning repos and installing pip dependencies
+ *   TEST_IMAGE                  Docker image link to use for running container with testing tools.
+ *   TOOLS_REPO                  URL of repo where testing tools, scenarios, configs are located
+ *
+ *   TARGET_NODE                 Node to run container with Rally
+ *   DEBUG_MODE                  If you need to debug (keep the container after the test run), enable this
+ *   RALLY_SCENARIO_FILE         Path to Rally scenario file in container
+ *
+ */
+
+common = new com.mirantis.mk.Common()
+salt = new com.mirantis.mk.Salt()
+validate = new com.mirantis.mcp.Validate()
+
+def artifacts_dir = 'validation_artifacts/'
+def remote_artifacts_dir = '/root/qa_results/'
+def saltMaster
+
+node() {
+    try{
+        stage('Initialization') {
+            saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+            sh "rm -rf ${artifacts_dir}"
+            salt.cmdRun(saltMaster, TARGET_NODE, "mkdir -p ${remote_artifacts_dir}")
+            validate.runBasicContainer(saltMaster, TARGET_NODE, TEST_IMAGE)
+            validate.configureContainer(saltMaster, TARGET_NODE, PROXY, TOOLS_REPO, "")
+        }
+
+        stage('Run Rally tests') {
+            sh "mkdir -p ${artifacts_dir}"
+            validate.runCVPrally(saltMaster, TARGET_NODE, RALLY_SCENARIO_FILE, remote_artifacts_dir)
+        }
+
+        stage('Collect results') {
+            validate.addFiles(saltMaster, TARGET_NODE, remote_artifacts_dir, artifacts_dir)
+            archiveArtifacts artifacts: "${artifacts_dir}/*"
+            junit "${artifacts_dir}/*.xml"
+        }
+    } catch (Throwable e) {
+        // If there was an error or exception thrown, the build failed
+        currentBuild.result = "FAILURE"
+        throw e
+    } finally {
+        if (DEBUG_MODE == 'false') {
+            validate.runCleanup(saltMaster, TARGET_NODE)
+            salt.cmdRun(saltMaster, TARGET_NODE, "rm -rf ${remote_artifacts_dir}")
+        }
+    }
+}
+
diff --git a/deploy-heat-k8s-kqueen-pipeline.groovy b/deploy-heat-k8s-kqueen-pipeline.groovy
new file mode 100644
index 0000000..7071b96
--- /dev/null
+++ b/deploy-heat-k8s-kqueen-pipeline.groovy
@@ -0,0 +1,179 @@
+/**
+ * Helper pipeline for Heat-based Kubernetes deployments from kqueen
+ *
+ * Expected parameters:
+ *   STACK_NAME                 Infrastructure stack name
+ *   STACK_TEMPLATE             File with stack template
+ *
+ *   STACK_TEMPLATE_URL         URL to git repo with stack templates
+ *   STACK_TEMPLATE_CREDENTIALS Credentials to the templates repo
+ *   STACK_TEMPLATE_BRANCH      Stack templates repo branch
+ *   STACK_COMPUTE_COUNT        Number of compute nodes to launch
+ *
+ *   HEAT_STACK_ENVIRONMENT     Heat stack environmental parameters
+ *   HEAT_STACK_ZONE            Heat stack availability zone
+ *   HEAT_STACK_PUBLIC_NET      Heat stack floating IP pool
+ *   OPENSTACK_API_URL          OpenStack API address
+ *   OPENSTACK_API_CREDENTIALS  Credentials to the OpenStack API
+ *   OPENSTACK_API_PROJECT      OpenStack project to connect to
+ *   OPENSTACK_API_VERSION      Version of the OpenStack API (2/3)
+ *
+ *   SALT_MASTER_CREDENTIALS    Credentials to the Salt API
+ *   SALT_MASTER_URL            URL of Salt master
+ */
+
+common = new com.mirantis.mk.Common()
+git = new com.mirantis.mk.Git()
+openstack = new com.mirantis.mk.Openstack()
+orchestrate = new com.mirantis.mk.Orchestrate()
+python = new com.mirantis.mk.Python()
+salt = new com.mirantis.mk.Salt()
+
+// Define global variables
+def venv
+def venvPepper
+def outputs = [:]
+
+def ipRegex = "\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}"
+def envParams
+timeout(time: 12, unit: 'HOURS') {
+    node("python") {
+         try {
+            // Set build-specific variables
+            venv = "${env.WORKSPACE}/venv"
+            venvPepper = "${env.WORKSPACE}/venvPepper"
+
+            //
+            // Prepare machines
+            //
+            stage ('Create infrastructure') {
+                // value defaults
+                envParams = [
+                    'cluster_zone': HEAT_STACK_ZONE,
+                    'cluster_public_net': HEAT_STACK_PUBLIC_NET
+                ]
+
+                // no underscore in STACK_NAME
+                STACK_NAME = STACK_NAME.replaceAll('_', '-')
+                outputs.put('stack_name', STACK_NAME)
+
+                // set description
+                currentBuild.description = STACK_NAME
+
+                // get templates
+                git.checkoutGitRepository('template', STACK_TEMPLATE_URL, STACK_TEMPLATE_BRANCH, STACK_TEMPLATE_CREDENTIALS)
+
+                // create openstack env
+                openstack.setupOpenstackVirtualenv(venv)
+                openstackCloud = openstack.createOpenstackEnv(venv,
+                    OPENSTACK_API_URL, OPENSTACK_API_CREDENTIALS,
+                    OPENSTACK_API_PROJECT, "default", "", "default", "3")
+                openstack.getKeystoneToken(openstackCloud, venv)
+
+                // set reclass repo in heat env
+                try {
+                    envParams.put('cfg_reclass_branch', STACK_RECLASS_BRANCH)
+                    envParams.put('cfg_reclass_address', STACK_RECLASS_ADDRESS)
+                } catch (MissingPropertyException e) {
+                    common.infoMsg("Property STACK_RECLASS_BRANCH or STACK_RECLASS_ADDRESS not found! Using default values from template.")
+                }
+
+                // launch stack
+                openstack.createHeatStack(openstackCloud, STACK_NAME, STACK_TEMPLATE, envParams, HEAT_STACK_ENVIRONMENT, venv)
+
+                // get SALT_MASTER_URL
+                saltMasterHost = openstack.getHeatStackOutputParam(openstackCloud, STACK_NAME, 'salt_master_ip', venv)
+                // check that saltMasterHost is valid
+                if (!saltMasterHost || !saltMasterHost.matches(ipRegex)) {
+                    common.errorMsg("saltMasterHost is not a valid ip, value is: ${saltMasterHost}")
+                    throw new Exception("saltMasterHost is not a valid ip")
+                }
+
+                currentBuild.description = "${STACK_NAME} ${saltMasterHost}"
+
+                SALT_MASTER_URL = "http://${saltMasterHost}:6969"
+
+                // Setup virtualenv for pepper
+                python.setupPepperVirtualenv(venvPepper, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+            }
+
+            stage('Install core infrastructure') {
+                def staticMgmtNetwork = false
+                if (common.validInputParam('STATIC_MGMT_NETWORK')) {
+                    staticMgmtNetwork = STATIC_MGMT_NETWORK.toBoolean()
+                }
+                orchestrate.installFoundationInfra(venvPepper, staticMgmtNetwork)
+
+                if (common.checkContains('STACK_INSTALL', 'kvm')) {
+                    orchestrate.installInfraKvm(venvPepper)
+                    orchestrate.installFoundationInfra(venvPepper, staticMgmtNetwork)
+                }
+
+                orchestrate.validateFoundationInfra(venvPepper)
+            }
+
+            stage('Install Kubernetes infra') {
+                // configure kubernetes_control_address - save loadbalancer
+                def awsOutputs = aws.getOutputs(venv, aws_env_vars, STACK_NAME)
+                common.prettyPrint(awsOutputs)
+                if (awsOutputs.containsKey('ControlLoadBalancer')) {
+                    salt.runSaltProcessStep(venvPepper, 'I@salt:master', 'reclass.cluster_meta_set', ['kubernetes_control_address', awsOutputs['ControlLoadBalancer']], null, true)
+                    outputs.put('kubernetes_apiserver', 'https://' + awsOutputs['ControlLoadBalancer'])
+                }
+
+                // ensure certificates are generated properly
+                salt.runSaltProcessStep(venvPepper, '*', 'saltutil.refresh_pillar', [], null, true)
+                salt.enforceState(venvPepper, '*', ['salt.minion.cert'], true)
+
+                orchestrate.installKubernetesInfra(venvPepper)
+            }
+
+            stage('Install Kubernetes control') {
+                orchestrate.installKubernetesControl(venvPepper)
+
+                // collect artifacts (kubeconfig)
+                writeFile(file: 'kubeconfig', text: salt.getFileContent(venvPepper, 'I@kubernetes:master and *01*', '/etc/kubernetes/admin-kube-config'))
+                archiveArtifacts(artifacts: 'kubeconfig')
+            }
+
+            stage('Install Kubernetes computes') {
+                if (common.validInputParam('STACK_COMPUTE_COUNT')) {
+                    if (STACK_COMPUTE_COUNT > 0) {
+                        // get stack info
+                        def scaling_group = aws.getOutputs(venv, aws_env_vars, STACK_NAME, 'ComputesScalingGroup')
+
+                        //update autoscaling group
+                        aws.updateAutoscalingGroup(venv, aws_env_vars, scaling_group, ["--desired-capacity " + STACK_COMPUTE_COUNT])
+
+                        // wait for computes to boot up
+                        aws.waitForAutoscalingInstances(venv, aws_env_vars, scaling_group)
+                        sleep(60)
+                    }
+                }
+
+                orchestrate.installKubernetesCompute(venvPepper)
+            }
+
+            stage('Finalize') {
+                outputsPretty = common.prettify(outputs)
+                print(outputsPretty)
+                writeFile(file: 'outputs.json', text: outputsPretty)
+                archiveArtifacts(artifacts: 'outputs.json')
+            }
+
+        } catch (Throwable e) {
+            currentBuild.result = 'FAILURE'
+            throw e
+        } finally {
+            if (currentBuild.result == 'FAILURE') {
+                common.errorMsg("Deploy job FAILED and was not deleted. Please fix the problem and delete stack on you own.")
+
+                if (common.validInputParam('SALT_MASTER_URL')) {
+                    common.errorMsg("Salt master URL: ${SALT_MASTER_URL}")
+                }
+            }
+        }
+    }
+}
+
+
diff --git a/generate-salt-model-docs-pipeline.groovy b/generate-salt-model-docs-pipeline.groovy
new file mode 100644
index 0000000..4a36f0e
--- /dev/null
+++ b/generate-salt-model-docs-pipeline.groovy
@@ -0,0 +1,96 @@
+/**
+ * Pipeline for generating Sphinx documentation from a reclass model
+ *
+ * Expected parameters:
+ *   MODEL_GIT_URL    URL of the reclass model repository
+ *   MODEL_GIT_REF    Git ref (Gerrit patchset) to check out
+ *   CLUSTER_NAME     Name of the cluster (used to build the cfg01 master hostname)
+ *   CREDENTIALS_ID   Credentials used for checkout and the SSH agent
+ *
+ */
+
+common = new com.mirantis.mk.Common()
+ssh = new com.mirantis.mk.Ssh()
+gerrit = new com.mirantis.mk.Gerrit()
+git = new com.mirantis.mk.Git()
+python = new com.mirantis.mk.Python()
+salt = new com.mirantis.mk.Salt()
+
+timeout(time: 12, unit: 'HOURS') {
+  node("python") {
+    try {
+       def workspace = common.getWorkspace()
+       def masterName = "cfg01." + CLUSTER_NAME.replace("-","_") + ".lab"
+       def jenkinsUserIds = common.getJenkinsUserIds()
+       def img = docker.image("tcpcloud/salt-models-testing:nightly")
+       img.pull()
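+       // the whole generation runs inside the salt-models-testing container, with hostname and MINION_ID set to the cluster's cfg01 name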
+       img.inside("-u root:root --hostname ${masterName} --ulimit nofile=4096:8192 --cpus=2") {
+           stage("Prepare salt env") {
+              if(MODEL_GIT_REF != "" && MODEL_GIT_URL != "") {
+                  checkouted = gerrit.gerritPatchsetCheckout(MODEL_GIT_URL, MODEL_GIT_REF, "HEAD", CREDENTIALS_ID)
+              } else {
+                throw new Exception("Cannot checkout gerrit patchset, MODEL_GIT_URL or MODEL_GIT_REF is null")
+              }
+              if(checkouted) {
+                if (fileExists('classes/system')) {
+                    ssh.prepareSshAgentKey(CREDENTIALS_ID)
+                    dir('classes/system') {
+                      // XXX: JENKINS-33510 dir step does not work properly inside containers, so let's take the reclass system model directly
+                      //remoteUrl = git.getGitRemote()
+                      ssh.ensureKnownHosts("https://github.com/Mirantis/reclass-system-salt-model")
+                    }
+                    ssh.agentSh("git submodule init; git submodule sync; git submodule update --recursive")
+                }
+              }
+              withEnv(["MASTER_HOSTNAME=${masterName}", "CLUSTER_NAME=${CLUSTER_NAME}", "MINION_ID=${masterName}"]){
+                    sh("cp -r ${workspace}/* /srv/salt/reclass && echo '127.0.1.2  salt' >> /etc/hosts")
+                    sh("""bash -c 'source /srv/salt/scripts/bootstrap.sh; cd /srv/salt/scripts \
+                          && source_local_envs \
+                          && configure_salt_master \
+                          && configure_salt_minion \
+                          && install_salt_formula_pkg; \
+                          saltservice_restart; \
+                          saltmaster_init'""")
+              }
+           }
+           stage("Generate documentation"){
+                def saltResult = sh(script:"salt-call state.sls salt.minion,sphinx.server,nginx", returnStatus:true)
+                if(saltResult > 0){
+                    common.warnMsg("Salt call salt.minion,sphinx.server,nginx failed but continuing")
+                }
+           }
+           stage("Publish outputs"){
+                try {
+                    // /srv/static/sites/reclass_doc will be used for publishHTML step
+                    // /srv/static/extern will be used as tar artifact
+                    def outputPresent = sh(script:"ls /srv/static/sites/reclass_doc > /dev/null 2>&1 && ls /srv/static/extern  > /dev/null 2>&1", returnStatus: true) == 0
+                    if(outputPresent){
+                      sh("""mkdir ${workspace}/output && \
+                            tar -zcf ${workspace}/output/docs-html.tar.gz /srv/static/sites/reclass_doc && \
+                            tar -zcf ${workspace}/output/docs-src.tar.gz /srv/static/extern && \
+                            cp -R /srv/static/sites/reclass_doc ${workspace}/output && \
+                            chown -R ${jenkinsUserIds[0]}:${jenkinsUserIds[1]} ${workspace}/output""")
+
+                      publishHTML (target: [
+                          alwaysLinkToLastBuild: true,
+                          keepAll: true,
+                          reportDir: 'output/reclass_doc',
+                          reportFiles: 'index.html',
+                          reportName: "Reclass-documentation"
+                      ])
+                      archiveArtifacts artifacts: "output/*"
+                  } else {
+                    common.errorMsg("Documentation publish failed, one of output directories /srv/static/sites/reclass_doc or /srv/static/extern not exists!")
+                  }
+                } catch(Exception e) {
+                    common.errorMsg("Documentation publish stage failed!")
+                }
+           }
+       }
+    } catch (Throwable e) {
+      // If there was an error or exception thrown, the build failed
+      currentBuild.result = "FAILURE"
+      currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+      throw e
+    } finally {
+      common.sendNotification(currentBuild.result, "", ["slack"])
+    }
+  }
+}
diff --git a/git-mirror-2way-pipeline.groovy b/git-mirror-2way-pipeline.groovy
index 5dfb4d1..21d72bf 100644
--- a/git-mirror-2way-pipeline.groovy
+++ b/git-mirror-2way-pipeline.groovy
@@ -42,7 +42,11 @@
          currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
          throw e
       } finally {
-         common.sendNotification(currentBuild.result,"",["slack"])
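+         // send e-mail notifications in addition to Slack only when NOTIFICATION_RECIPIENTS is defined for the job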
+         if(env.getEnvironment().containsKey("NOTIFICATION_RECIPIENTS")){
+           common.sendNotification(currentBuild.result,"",["slack", "email"], ["failed"], env["JOB_NAME"], env["BUILD_NUMBER"], env["BUILD_URL"], "MCP jenkins", env["NOTIFICATION_RECIPIENTS"])
+         }else{
+           common.sendNotification(currentBuild.result, "", ["slack"])
+         }
       }
     }
   }
diff --git a/kafka-demo.groovy b/kafka-demo.groovy
index d884713..87f20ed 100644
--- a/kafka-demo.groovy
+++ b/kafka-demo.groovy
@@ -24,23 +24,20 @@
         stage("Enforce kubernetes.control") {
             common.infoMsg('Enforcing kubernetes.control on I@kubernetes:master')
 
-            salt.runSaltProcessStep(
+            salt.enforceState(
                 master,
                 'I@kubernetes:master',
-                'state.sls',
-                ['kubernetes.control'],
+                'kubernetes.control'
             )
         }
 
         stage("setup-components") {
             common.infoMsg('Setting up components')
 
-            salt.runSaltProcessStep(
+            salt.cmdRun(
                 master,
                 'I@kubernetes:master',
-                'cmd.run',
-                ['/bin/bash -c \'find /srv/kubernetes/ -type d | grep -v jobs | while read i; do ls $i/*.yml &>/dev/null && (set -x; hyperkube kubectl apply -f $i || echo Command failed; set +x); done;\'']
-            )
+                '/bin/bash -c \'find /srv/kubernetes/ -type d | grep -v jobs | while read i; do ls $i/*.yml &>/dev/null && (set -x; hyperkube kubectl apply -f $i || echo Command failed; set +x); done;\'')
 
         }
 
diff --git a/lab-pipeline.groovy b/lab-pipeline.groovy
index 06dc48e..61015f5 100644
--- a/lab-pipeline.groovy
+++ b/lab-pipeline.groovy
@@ -248,8 +248,8 @@
                     orchestrate.installOpenstackNetwork(saltMaster)
                 }
 
-                salt.runSaltProcessStep(saltMaster, 'I@keystone:server', 'cmd.run', ['. /root/keystonerc; neutron net-list'])
-                salt.runSaltProcessStep(saltMaster, 'I@keystone:server', 'cmd.run', ['. /root/keystonerc; nova net-list'])
+                salt.cmdRun(saltMaster, 'I@keystone:server', '. /root/keystonerc; neutron net-list')
+                salt.cmdRun(saltMaster, 'I@keystone:server', '. /root/keystonerc; nova net-list')
             }
 
             if (salt.testTarget(saltMaster, 'I@ironic:conductor')){
diff --git a/mirror-snapshot-pipeline.groovy b/mirror-snapshot-pipeline.groovy
new file mode 100644
index 0000000..abcf1a8
--- /dev/null
+++ b/mirror-snapshot-pipeline.groovy
@@ -0,0 +1,58 @@
+#!groovy
+
+// Collect parameters
+String mirror_name   = env.MIRROR_NAME
+String mirror_target = env.MIRROR_TARGET ?: env.MIRROR_NAME
+
+String snapshot_name = env.SNAPSHOT_NAME as String
+String snapshot_id   = env.SNAPSHOT_ID   as String
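+// SNAPSHOT_ID may be an exact snapshot timestamp (YYYY-MM-DD-HHMMSS), the keyword 'latest', or the name of an existing snapshot link (see resolution below)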
+String snapshot_dir  = env.SNAPSHOT_DIR
+String snapshot_rel_dir  = env.SNAPSHOT_REL_DIR
+
+String root_dir      = env.ROOT_DIR
+
+String slave_label   = env.SLAVE_LABEL
+
+// Snapshot name can be hierarchical, i.e. can have subdirectories, so let's flatten it
+String normalized_snapshot_name = snapshot_name.replaceAll('/', '-')
+
+String _snapshot = ''
+
+node(slave_label) {
+    try {
+        dir(snapshot_dir) {
+            // Guess link target
+            if (snapshot_id ==~ /^\d{4}-\d{2}-\d{2}-\d{6}$/) {
+                // Exact snapshot ID
+                _snapshot = "${mirror_target}-${snapshot_id}"
+            } else if (snapshot_id == 'latest') {
+                // Latest available snapshot
+                _snapshot = sh (script: "sed '1p;d' '${mirror_target}-${snapshot_id}.target.txt'", returnStdout: true).trim()
+            } else {
+                // Some named snapshot
+                _snapshot = sh (script: "readlink '${mirror_target}-${snapshot_id}'", returnStdout: true).trim()
+            }
+
+            // Set name for the snapshot to prevent it from time-based cleanup
+            sh "ln -sfn '${_snapshot}' '${mirror_target}-${normalized_snapshot_name}'"
+        }
+
+        // Set top-level name
+        dir("${root_dir}/${snapshot_name}") {
+            sh "ln -sfn '${snapshot_rel_dir}/${_snapshot}' '${mirror_name}'"
+            sh "echo '${snapshot_rel_dir}/${_snapshot}' > '${mirror_name}'.target.txt"
+        }
+    } finally {
+        // Cleanup
+        dir("${snapshot_dir}@tmp") {
+            deleteDir()
+        }
+        dir("${root_dir}/${snapshot_name}@tmp") {
+            deleteDir()
+        }
+    }
+}
+
+// Set build description
+currentBuild.description = "<p><b>${_snapshot}</b> (from ${snapshot_id})</p>"
+
diff --git a/openstack-compute-install.groovy b/openstack-compute-install.groovy
index 3c28552..7d6cf6d 100644
--- a/openstack-compute-install.groovy
+++ b/openstack-compute-install.groovy
@@ -91,8 +91,11 @@
                 // Restart supervisor-vrouter.
                 salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'service.restart', ['supervisor-vrouter'], null, true, 300)
 
-                // Apply salt,collectd to update information about current network interfaces.
-                salt.enforceState(pepperEnv, targetLiveAll, 'salt,collectd', true)
+                // Apply salt and collectd if is present to update information about current network interfaces.
+                salt.enforceState(pepperEnv, targetLiveAll, 'salt', true)
+                if(!salt.getPillar(pepperEnv, minions[0], "collectd")['return'][0].values()[0].isEmpty()) {
+                    salt.enforceState(pepperEnv, targetLiveAll, 'collectd', true)
+                }
             }
 
         } catch (Throwable e) {
diff --git a/test-salt-formulas-env.groovy b/test-salt-formulas-env.groovy
index 8f60727..be9c894 100644
--- a/test-salt-formulas-env.groovy
+++ b/test-salt-formulas-env.groovy
@@ -20,8 +20,8 @@
 def checkouted = false
 
 throttle(['test-formula']) {
-  timeout(time: 12, unit: 'HOURS') {
-    node("python") {
+  timeout(time: 1, unit: 'HOURS') {
+    node("python&&docker") {
       try {
         stage("checkout") {
           if (defaultGitRef && defaultGitUrl) {
diff --git a/test-salt-formulas-pipeline.groovy b/test-salt-formulas-pipeline.groovy
index af22dc8..3c75950 100644
--- a/test-salt-formulas-pipeline.groovy
+++ b/test-salt-formulas-pipeline.groovy
@@ -43,8 +43,13 @@
         try {
           triggerTestFormulaJob(currentFormula, defaultGitRef, defaultGitUrl)
         } catch (Exception e) {
-          failedFormulas << currentFormula
-          common.warningMsg("Test of ${currentFormula} failed :  ${e}")
+          if (e.getMessage().contains("completed with status ABORTED")) {
+            common.warningMsg("Test of ${currentFormula} was aborted and will be retriggered")
+            futureFormulas << currentFormula
+          } else {
+            failedFormulas << currentFormula
+            common.warningMsg("Test of ${currentFormula} failed :  ${e}")
+          }
         }
       }
     }
@@ -100,7 +105,7 @@
         try {
           saltVersion = SALT_VERSION
             } catch (MissingPropertyException e) {
-          saltVersion = "latest"
+          saltVersion = "" // default value is empty string, means latest
         }
         withEnv(["SALT_VERSION=${saltVersion}"]) {
           sh("make clean && make test")
diff --git a/test-salt-model-node.groovy b/test-salt-model-node.groovy
index e96bc98..3893066 100644
--- a/test-salt-model-node.groovy
+++ b/test-salt-model-node.groovy
@@ -29,8 +29,8 @@
 def checkouted = false
 
 throttle(['test-model']) {
-  timeout(time: 12, unit: 'HOURS') {
-    node("python") {
+  timeout(time: 1, unit: 'HOURS') {
+    node("python&&docker") {
       try{
         stage("checkout") {
           if(defaultGitRef != "" && defaultGitUrl != "") {
diff --git a/update-mirror-image.groovy b/update-mirror-image.groovy
index 8bde843..e37d537 100644
--- a/update-mirror-image.groovy
+++ b/update-mirror-image.groovy
@@ -70,16 +70,16 @@
             if(UPDATE_DOCKER_REGISTRY.toBoolean()){
                 stage('Update Docker images'){
                     common.infoMsg("Updating Docker images.")
-                    salt.enforceState(venvPepper, '*apt*', ['docker.client.registry'], true)
+                    salt.enforceState(venvPepper, '*apt*', 'docker.client.registry')
                     if(CLEANUP_DOCKER_CACHE.toBoolean()){
-                        salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.run', ['docker system prune --all --force'], null, true)
+                        salt.cmdRun(venvPepper, '*apt*', 'docker system prune --all --force')
                     }
                 }
             }
             if(UPDATE_PYPI.toBoolean()){
                 stage('Update PyPi packages'){
                     common.infoMsg("Updating PyPi packages.")
-                    salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.run', ['pip2pi /srv/pypi_mirror/packages/ -r /srv/pypi_mirror/requirements.txt'], null, true)
+                    salt.cmdRun(venvPepper, '*apt*', 'pip2pi /srv/pypi_mirror/packages/ -r /srv/pypi_mirror/requirements.txt')
                 }
             }
             if(UPDATE_GIT.toBoolean()){
@@ -91,7 +91,7 @@
             if(UPDATE_IMAGES.toBoolean()){
                 stage('Update VM images'){
                     common.infoMsg("Updating VM images.")
-                    salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.run', ['/srv/scripts/update-images.sh'], null, true)
+                    salt.cmdRun(venvPepper, '*apt*', '/srv/scripts/update-images.sh')
                 }
             }
         } catch (Throwable e) {
@@ -101,4 +101,4 @@
             throw e
         }
     }
-}
\ No newline at end of file
+}
diff --git a/upgrade-mcp-release.groovy b/upgrade-mcp-release.groovy
index 0efc83d..d423347 100644
--- a/upgrade-mcp-release.groovy
+++ b/upgrade-mcp-release.groovy
@@ -22,7 +22,7 @@
             stage("Update Reclass"){
                 common.infoMsg("Updating reclass model")
                 salt.cmdRun(venvPepper, "I@salt:master", 'cd /srv/salt/reclass && git pull -r && git submodule update', false)
-                salt.runSaltProcessStep(venvPepper, 'I@salt:master', 'cmd.run', ['reclass-salt --top'], null, true)
+                salt.cmdRun(venvPepper, 'I@salt:master', 'reclass-salt --top')
                 salt.enforceState(venvPepper, "I@salt:master", 'reclass', true)
             }
 
@@ -65,4 +65,4 @@
             throw e
         }
     }
-}
\ No newline at end of file
+}