Merge "Removed useless params from deploy-heat-k8s-kqueen"
diff --git a/ceph-remove-osd.groovy b/ceph-remove-osd.groovy
index 71946b7..169bbd0 100644
--- a/ceph-remove-osd.groovy
+++ b/ceph-remove-osd.groovy
@@ -22,21 +22,45 @@
 def flags = CLUSTER_FLAGS.tokenize(',')
 def osds = OSD.tokenize(',')
 
-def removePartition(master, target, partition_uuid) {
-    def partition = ""
-    try {
-        // partition = /dev/sdi2
-        partition = runCephCommand(master, target, "blkid | grep ${partition_uuid} ")['return'][0].values()[0].split("(?<=[0-9])")[0]
-    } catch (Exception e) {
-        common.warningMsg(e)
-    }
 
+def removePartition(master, target, partition_uuid, type='', id=-1) {
+    def partition = ""
+    if (type == 'lockbox') {
+        try {
+            // umount - partition = /dev/sdi2
+            partition = runCephCommand(master, target, "lsblk -rp | grep -v mapper | grep ${partition_uuid} ")['return'][0].values()[0].split()[0]
+            runCephCommand(master, target, "umount ${partition}")
+        } catch (Exception e) {
+            common.warningMsg(e)
+        }
+    } else if (type == 'data') {
+        try {
+            // umount - partition = /dev/sdi2
+            // anchor the grep so that e.g. ceph-1 does not also match ceph-10
+            partition = runCephCommand(master, target, "df | grep /var/lib/ceph/osd/ceph-${id}\$")['return'][0].values()[0].split()[0]
+            runCephCommand(master, target, "umount ${partition}")
+        } catch (Exception e) {
+            common.warningMsg(e)
+        }
+        try {
+            // partition = /dev/sdi2
+            partition = runCephCommand(master, target, "blkid | grep ${partition_uuid} ")['return'][0].values()[0].split(":")[0]
+        } catch (Exception e) {
+            common.warningMsg(e)
+        }
+    } else {
+        try {
+            // partition = /dev/sdi2
+            partition = runCephCommand(master, target, "blkid | grep ${partition_uuid} ")['return'][0].values()[0].split(":")[0]
+        } catch (Exception e) {
+            common.warningMsg(e)
+        }
+    }
     if (partition?.trim()) {
         // dev = /dev/sdi
         def dev = partition.replaceAll('\\d+$', "")
         // part_id = 2
-        def part_id = partition.substring(partition.lastIndexOf("/")+1).replaceAll("[^0-9]", "")
-        runCephCommand(master, target, "parted ${dev} rm ${part_id}")
+        def part_id = partition.substring(partition.lastIndexOf("/")+1).replaceAll("[^0-9]+", "")
+        runCephCommand(master, target, "Ignore | parted ${dev} rm ${part_id}")
     }
     return
 }
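
A note on the blkid parsing change above: the old `split("(?<=[0-9])")` cut the string after its first digit, which truncates device paths that themselves contain digits, while `split(":")` keeps the whole partition path from blkid's `device: KEY="value"` output. For a plain `/dev/sdi2` line both forms agree, which is presumably why the old one worked until such devices appeared. A self-contained Groovy sketch with a hypothetical blkid line:

```groovy
// Hypothetical blkid output line for an NVMe partition:
def line = '/dev/nvme0n1p2: UUID="2c76f144-f412-481e-b150-4046212ca932" TYPE="xfs"'

// Old split: cuts after the first digit, losing most of the device path.
assert line.split("(?<=[0-9])")[0] == '/dev/nvme0'

// New split: everything before the first ':' is the full partition path.
assert line.split(":")[0] == '/dev/nvme0n1p2'
```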
@@ -75,10 +99,12 @@
 
         // get list of osd disks of the host
         salt.runSaltProcessStep(pepperEnv, HOST, 'saltutil.sync_grains', [], null, true, 5)
-        def cephGrain = salt.getGrain(pepperEnv, HOST, 'ceph')['return']
+        def cephGrain = salt.getGrain(pepperEnv, HOST, 'ceph')
+
         if(cephGrain['return'].isEmpty()){
             throw new Exception("Ceph salt grain cannot be found!")
         }
+        common.print(cephGrain)
         def ceph_disks = cephGrain['return'][0].values()[0].values()[0]['ceph_disk']
         common.prettyPrint(ceph_disks)
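
The chained `values()` lookups above unwrap Salt's nested return structure. A minimal sketch with an illustrative return value (host and disk names are hypothetical):

```groovy
// Shape returned by salt.getGrain(pepperEnv, HOST, 'ceph'), illustratively:
def cephGrain = ['return': [['osd001.local': ['ceph': ['ceph_disk': ['/dev/sdb': 'osd.12']]]]]]

// ['return'][0] -> per-minion map; .values()[0] -> the 'ceph' grain map;
// .values()[0] again -> the grain value; ['ceph_disk'] -> the disk map.
def ceph_disks = cephGrain['return'][0].values()[0].values()[0]['ceph_disk']
assert ceph_disks == ['/dev/sdb': 'osd.12']
```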
 
@@ -137,8 +163,9 @@
         }
 
         for (osd_id in osd_ids) {
-
             id = osd_id.replaceAll('osd.', '')
+            /*
+
             def dmcrypt = ""
             try {
                 dmcrypt = runCephCommand(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep dmcrypt")['return'][0].values()[0]
@@ -156,6 +183,8 @@
                 }
 
             }
+            */
+
             // remove journal, block_db, block_wal partition `parted /dev/sdj rm 3`
             stage('Remove journal / block_db / block_wal partition') {
                 def partition_uuid = ""
@@ -163,39 +192,73 @@
                 def block_db_partition_uuid = ""
                 def block_wal_partition_uuid = ""
                 try {
-                    journal_partition_uuid = runCephCommand(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep journal | grep partuuid")
-                    journal_partition_uuid = journal_partition_uuid.toString().trim().split("\n")[0].substring(journal_partition_uuid.toString().trim().lastIndexOf("/")+1)
+                    journal_partition_uuid = runCephCommand(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/journal_uuid")['return'][0].values()[0].split("\n")[0]
                 } catch (Exception e) {
                     common.infoMsg(e)
                 }
                 try {
-                    block_db_partition_uuid = runCephCommand(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep 'block.db' | grep partuuid")
-                    block_db_partition_uuid = block_db_partition_uuid.toString().trim().split("\n")[0].substring(block_db_partition_uuid.toString().trim().lastIndexOf("/")+1)
+                    block_db_partition_uuid = runCephCommand(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/block.db_uuid")['return'][0].values()[0].split("\n")[0]
                 } catch (Exception e) {
                     common.infoMsg(e)
                 }
 
                 try {
-                    block_wal_partition_uuid = runCephCommand(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep 'block.wal' | grep partuuid")
-                    block_wal_partition_uuid = block_wal_partition_uuid.toString().trim().split("\n")[0].substring(block_wal_partition_uuid.toString().trim().lastIndexOf("/")+1)
+                    block_wal_partition_uuid = runCephCommand(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/block.wal_uuid")['return'][0].values()[0].split("\n")[0]
                 } catch (Exception e) {
                     common.infoMsg(e)
                 }
 
-                // set partition_uuid = 2c76f144-f412-481e-b150-4046212ca932
+                // remove partition_uuid = 2c76f144-f412-481e-b150-4046212ca932
                 if (journal_partition_uuid?.trim()) {
-                    partition_uuid = journal_partition_uuid
-                } else if (block_db_partition_uuid?.trim()) {
-                    partition_uuid = block_db_partition_uuid
+                    removePartition(pepperEnv, HOST, journal_partition_uuid)
                 }
-
-                // if disk has journal, block_db or block_wal on different disk, then remove the partition
-                if (partition_uuid?.trim()) {
-                    removePartition(pepperEnv, HOST, partition_uuid)
+                if (block_db_partition_uuid?.trim()) {
+                    removePartition(pepperEnv, HOST, block_db_partition_uuid)
                 }
                 if (block_wal_partition_uuid?.trim()) {
                     removePartition(pepperEnv, HOST, block_wal_partition_uuid)
                 }
+
+                try {
+                    runCephCommand(pepperEnv, HOST, "partprobe")
+                } catch (Exception e) {
+                    common.warningMsg(e)
+                }
+            }
+
+            // remove data / block / lockbox partition `parted /dev/sdj rm 3`
+            stage('Remove data / block / lockbox partition') {
+                def data_partition_uuid = ""
+                def block_partition_uuid = ""
+                def lockbox_partition_uuid = ""
+                try {
+                    data_partition_uuid = runCephCommand(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/fsid")['return'][0].values()[0].split("\n")[0]
+                    common.print(data_partition_uuid)
+                } catch (Exception e) {
+                    common.infoMsg(e)
+                }
+                try {
+                    block_partition_uuid = runCephCommand(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/block_uuid")['return'][0].values()[0].split("\n")[0]
+                } catch (Exception e) {
+                    common.infoMsg(e)
+                }
+
+                // the lockbox partition is looked up via the data partition uuid
+                lockbox_partition_uuid = data_partition_uuid
+
+                // remove partition_uuid = 2c76f144-f412-481e-b150-4046212ca932
+                if (block_partition_uuid?.trim()) {
+                    removePartition(pepperEnv, HOST, block_partition_uuid)
+                }
+                if (data_partition_uuid?.trim()) {
+                    removePartition(pepperEnv, HOST, data_partition_uuid, 'data', id)
+                }
+                if (lockbox_partition_uuid?.trim()) {
+                    removePartition(pepperEnv, HOST, lockbox_partition_uuid, 'lockbox')
+                }
             }
         }
         // remove cluster flags
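
The journal/block.db/block.wal lookups now read the `*_uuid` files that ceph-disk leaves in the OSD directory instead of scraping `ls -la` output, and the new data/block/lockbox stage follows the same pattern with `fsid` and `block_uuid`. A sketch of the shared shape, reusing this pipeline's `runCephCommand` helper and Salt return layout (the wrapper itself is illustrative, not part of the change):

```groovy
// Illustrative helper: read one uuid file for an OSD, returning an empty
// string when the file does not exist (the try/catch mirrors the stages
// above, which expect runCephCommand to throw on a non-zero exit).
def readOsdUuid(master, target, id, name) {
    try {
        // e.g. cat /var/lib/ceph/osd/ceph-12/journal_uuid
        return runCephCommand(master, target, "cat /var/lib/ceph/osd/ceph-${id}/${name}")['return'][0].values()[0].split("\n")[0]
    } catch (Exception e) {
        common.infoMsg(e)
        return ''
    }
}
```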
diff --git a/ceph-replace-failed-osd.groovy b/ceph-replace-failed-osd.groovy
index 93b6573..2361098 100644
--- a/ceph-replace-failed-osd.groovy
+++ b/ceph-replace-failed-osd.groovy
@@ -122,9 +122,14 @@
         if (DMCRYPT.toBoolean() == true) {
 
             // remove partition tables
-            stage('dd part tables') {
+            stage('dd / zap device') {
                 for (dev in devices) {
-                    runCephCommand(pepperEnv, HOST, "dd if=/dev/zero of=${dev} bs=512 count=1 conv=notrunc")
+                    runCephCommand(pepperEnv, HOST, "dd if=/dev/zero of=${dev} bs=4096k count=1 conv=notrunc")
+                    try {
+                        runCephCommand(pepperEnv, HOST, "sgdisk --zap-all --clear --mbrtogpt -g -- ${dev}")
+                    } catch (Exception e) {
+                        common.warningMsg(e)
+                    }
                 }
             }
 
@@ -135,7 +140,7 @@
                         // dev = /dev/sdi
                         def dev = partition.replaceAll("[0-9]", "")
                         // part_id = 2
-                        def part_id = partition.substring(partition.lastIndexOf("/")+1).replaceAll("[^0-9]", "")
+                        def part_id = partition.substring(partition.lastIndexOf("/")+1).replaceAll("[^0-9]+", "")
                         try {
                             runCephCommand(pepperEnv, HOST, "Ignore | parted ${dev} rm ${part_id}")
                         } catch (Exception e) {
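
Two notes on this file's changes: the 4 MiB `dd` wipes the MBR, primary GPT and any leftover bluestore label, while `sgdisk --zap-all` also clears the backup GPT at the end of the device, which the old single-sector `dd` never touched. The `"Ignore | parted ..."` prefix (also used in ceph-remove-osd.groovy above) appears intended to answer parted's Ignore/Cancel prompt; a conventional equivalent, shown here only as a sketch against the same `runCephCommand` helper, is parted's script mode:

```groovy
// Sketch only: -s suppresses parted's interactive prompts, which would make
// the "Ignore |" pipe prefix unnecessary.
runCephCommand(pepperEnv, HOST, "parted -s ${dev} rm ${part_id}")
```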
diff --git a/cloud-deploy-pipeline.groovy b/cloud-deploy-pipeline.groovy
index 989d130..41c08ab 100644
--- a/cloud-deploy-pipeline.groovy
+++ b/cloud-deploy-pipeline.groovy
@@ -50,6 +50,13 @@
  *   TEST_TEMPEST_IMAGE           Tempest image link
  *   TEST_TEMPEST_PATTERN         If not false, run tests matched to pattern only
  *   TEST_TEMPEST_TARGET          Salt target for tempest node
+ *   TESTRAIL_REPORT              Whether to upload test results to TestRail
+ *   TESTRAIL_REPORTER_IMAGE      Docker image for the TestRail reporter
+ *   TESTRAIL_QA_CREDENTIALS      Credentials for uploading to TestRail
+ *   TESTRAIL_MILESTONE           Product version for tests
+ *   TESTRAIL_PLAN                TestRail test plan
+ *   TESTRAIL_GROUP               TestRail test group
+ *   TESTRAIL_SUITE               TestRail test suite
  *
  * optional parameters for overwriting soft params
  *   SALT_OVERRIDES              YAML with overrides for Salt deployment
@@ -470,18 +477,23 @@
                 stage('Run k8s conformance e2e tests') {
                     def image = TEST_K8S_CONFORMANCE_IMAGE
                     def output_file = image.replaceAll('/', '-') + '.output'
+                    def target = 'ctl01*'
+                    def conformance_output_file = 'conformance_test.tar'
 
                     // run image
-                    test.runConformanceTests(venvPepper, 'ctl01*', TEST_K8S_API_SERVER, image)
+                    test.runConformanceTests(venvPepper, target, TEST_K8S_API_SERVER, image)
 
                     // collect output
                     sh "mkdir -p ${artifacts_dir}"
-                    file_content = salt.getFileContent(venvPepper, 'ctl01*', '/tmp/' + output_file)
+                    file_content = salt.getFileContent(venvPepper, target, '/tmp/' + output_file)
                     writeFile file: "${artifacts_dir}${output_file}", text: file_content
                     sh "cat ${artifacts_dir}${output_file}"
 
                     // collect artifacts
                     archiveArtifacts artifacts: "${artifacts_dir}${output_file}"
+
+                    // Copy test results
+                    test.CopyConformanceResults(venvPepper, target, artifacts_dir, conformance_output_file)
                 }
             }
 
@@ -500,6 +512,19 @@
                 stage('Archive rally artifacts') {
                     test.archiveRallyArtifacts(venvPepper, TEST_TEMPEST_TARGET)
                 }
+
+                if (common.validInputParam('TESTRAIL_REPORT') && TESTRAIL_REPORT.toBoolean()) {
+                    stage('Upload test results to TestRail') {
+                        def date = sh(script: 'date +%Y-%m-%d', returnStdout: true).trim()
+                        def plan = TESTRAIL_PLAN ?: "[${TESTRAIL_MILESTONE}]System-Devcloud-${date}"
+                        def group = TESTRAIL_GROUP ?: STACK_TEMPLATE
+
+                        salt.cmdRun(venvPepper, TEST_TEMPEST_TARGET, "cd /root/rally_reports && cp \$(ls -t *xml | head -n1) report.xml")
+                        test.uploadResultsTestrail("/root/rally_reports/report.xml",
+                                TESTRAIL_REPORTER_IMAGE, group, TESTRAIL_QA_CREDENTIALS,
+                                plan, TESTRAIL_MILESTONE, TESTRAIL_SUITE)
+                    }
+                }
             }
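
The TestRail stage relies on Groovy truthiness for its defaults: an unset Jenkins string parameter arrives as an empty string, which is falsy, so the Elvis operator falls through to the generated plan name. A self-contained sketch with hypothetical values:

```groovy
// Hypothetical values; in the pipeline these come from job parameters.
def TESTRAIL_PLAN = ''                  // parameter left empty in Jenkins
def TESTRAIL_MILESTONE = '2019.2.0'
def date = '2018-01-15'
def plan = TESTRAIL_PLAN ?: "[${TESTRAIL_MILESTONE}]System-Devcloud-${date}"
assert plan == '[2019.2.0]System-Devcloud-2018-01-15'
```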
 
 
diff --git a/create-debmirror-package.groovy b/create-debmirror-package.groovy
new file mode 100644
index 0000000..7911d37
--- /dev/null
+++ b/create-debmirror-package.groovy
@@ -0,0 +1,52 @@
+/**
+ *
+ * Create debmirror package pipeline
+ *
+ * Expected parameters:
+ * MIRROR_NAME - Name of the mirror
+ * MIRROR_URL - Hostname of the upstream mirror (passed to debmirror --host)
+ * ROOT - Root directory of the upstream location
+ * METHOD - Transfer method, rsync or http
+ * DEBMIRROR_ARGS - Arguments for the debmirror command
+ * UPLOAD_URL - URL to upload the mirror tarball to
+ */
+
+// Load shared libs
+def common = new com.mirantis.mk.Common()
+
+timeout(time: 12, unit: 'HOURS') {
+    node("python&&disk-xl") {
+        try {
+            def workspace = common.getWorkspace()
+            if (METHOD == "rsync") {
+                ROOT = ":mirror/${ROOT}"
+            }
+            stage("Create mirror") {
+                def mirrordir = "${workspace}/mirror"
+                def debmlog = "${workspace}/mirror_${MIRROR_NAME}_log"
+
+                sh "debmirror --verbose --method=${METHOD} --progress --host=${MIRROR_URL} --root=${ROOT} ${DEBMIRROR_ARGS} ${mirrordir}/${MIRROR_NAME} 2>&1 | tee -a ${debmlog}"
+
+                sh "tar -czvf ${workspace}/${MIRROR_NAME}.tar.gz -C ${mirrordir}/${MIRROR_NAME} ."
+            }
+
+            stage("Upload mirror") {
+                common.retry(3, 5, {
+                    def uploadStatus = sh(script: "curl -f -T ${workspace}/${MIRROR_NAME}.tar.gz ${UPLOAD_URL}", returnStatus: true)
+                    if (uploadStatus != 0) {
+                        throw new Exception("Mirror tarball upload failed")
+                    }
+                })
+            }
+
+        } catch (Throwable e) {
+            // If there was an error or exception thrown, the build failed
+            currentBuild.result = "FAILURE"
+            throw e
+        } finally {
+            stage("Cleanup"){
+                sh "rm -rf ${workspace}/*"
+            }
+        }
+    }
+}
\ No newline at end of file
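
For reference, a sketch of the debmirror invocation the new pipeline composes, with hypothetical parameter values (the real ones come from the Jenkins job parameters):

```groovy
// Hypothetical job parameters:
def METHOD = 'rsync'
def MIRROR_URL = 'mirror.mirantis.com'
def MIRROR_NAME = 'ubuntu'
def ROOT = ':mirror/ubuntu'          // rsync roots get the ':mirror/' prefix above
def DEBMIRROR_ARGS = '--dist=xenial --arch=amd64 --nosource'

// Prints the command the pipeline would run before tarring the result:
println "debmirror --verbose --method=${METHOD} --progress --host=${MIRROR_URL} --root=${ROOT} ${DEBMIRROR_ARGS} mirror/${MIRROR_NAME}"
```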
diff --git a/test-salt-formulas-pipeline.groovy b/test-salt-formulas-pipeline.groovy
index af22dc8..56e9bb9 100644
--- a/test-salt-formulas-pipeline.groovy
+++ b/test-salt-formulas-pipeline.groovy
@@ -43,8 +43,13 @@
         try {
           triggerTestFormulaJob(currentFormula, defaultGitRef, defaultGitUrl)
         } catch (Exception e) {
-          failedFormulas << currentFormula
-          common.warningMsg("Test of ${currentFormula} failed :  ${e}")
+          if (e.getMessage().contains("completed with status ABORTED")) {
+            common.warningMsg("Test of ${currentFormula} was aborted and will be retriggered")
+            futureFormulas << currentFormula
+          } else {
+            failedFormulas << currentFormula
+            common.warningMsg("Test of ${currentFormula} failed: ${e}")
+          }
         }
       }
     }
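
`futureFormulas` collects aborted runs so they can be retriggered; the loop that drains it is outside this hunk. A minimal sketch of what such a pass can look like, assuming the `triggerTestFormulaJob` helper and variables from this file:

```groovy
// Illustrative retry pass (assumption: the real retrigger logic lives
// elsewhere in the file): aborted formulas get one more attempt, and
// anything that fails again is recorded in failedFormulas.
for (formula in futureFormulas) {
    try {
        triggerTestFormulaJob(formula, defaultGitRef, defaultGitUrl)
    } catch (Exception e) {
        failedFormulas << formula
        common.warningMsg("Retried test of ${formula} failed: ${e}")
    }
}
```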