Add a 12h timeout to all pipelines
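
Each pipeline body, including the node allocation, is wrapped in a
single 12-hour timeout so that a stuck run is aborted by Jenkins
instead of hanging indefinitely. The wrapper has the same shape
everywhere (a sketch; the inner body stands in for each pipeline's
existing stages, which are only re-indented):

    timeout(time: 12, unit: 'HOURS') {
        node("python") {
            // existing pipeline body
        }
    }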

Change-Id: I085fcbda322d0877d5ffebd002fc109577788c29
diff --git a/ceph-backend-migration.groovy b/ceph-backend-migration.groovy
index f5c99a4..7a5821d 100644
--- a/ceph-backend-migration.groovy
+++ b/ceph-backend-migration.groovy
@@ -109,171 +109,172 @@
         sleep(10)
     }
 }
+timeout(time: 12, unit: 'HOURS') {
+    node("python") {
 
-node("python") {
+        // create connection to salt master
+        python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
 
-    // create connection to salt master
-    python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+        if (MIGRATION_METHOD == 'per-osd') {
 
-    if (MIGRATION_METHOD == 'per-osd') {
-
-        if (flags.size() > 0) {
-            stage('Set cluster flags') {
-                for (flag in flags) {
-                    runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd set ' + flag)
-                }
-            }
-        }
-
-        def target_hosts = salt.getMinions(pepperEnv, TARGET)
-
-        for (tgt in target_hosts) {
-            def osd_ids = []
-
-            // get list of osd disks of the tgt
-            salt.runSaltProcessStep(pepperEnv, tgt, 'saltutil.sync_grains', [], null, true, 5)
-            def ceph_disks = salt.getGrain(pepperEnv, tgt, 'ceph')['return'][0].values()[0].values()[0]['ceph_disk']
-
-            for (i in ceph_disks) {
-                def osd_id = i.getKey().toString()
-                if (osd_id in osds || OSD == '*') {
-                    osd_ids.add('osd.' + osd_id)
-                    print("Will migrate " + osd_id)
-                } else {
-                    print("Skipping " + osd_id)
+            if (flags.size() > 0) {
+                stage('Set cluster flags') {
+                    for (flag in flags) {
+                        runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd set ' + flag)
+                    }
                 }
             }
 
-            for (osd_id in osd_ids) {
+            def target_hosts = salt.getMinions(pepperEnv, TARGET)
 
-                def id = osd_id.replaceAll('osd.', '')
-                def backend = runCephCommand(pepperEnv, ADMIN_HOST, "ceph osd metadata ${id} | grep osd_objectstore")['return'][0].values()[0]
+            for (tgt in target_hosts) {
+                def osd_ids = []
 
-                if (backend.contains(ORIGIN_BACKEND.toLowerCase())) {
+                // get the list of osd disks on the target
+                salt.runSaltProcessStep(pepperEnv, tgt, 'saltutil.sync_grains', [], null, true, 5)
+                def ceph_disks = salt.getGrain(pepperEnv, tgt, 'ceph')['return'][0].values()[0].values()[0]['ceph_disk']
 
-                    // wait for healthy cluster before manipulating with osds
-                    if (WAIT_FOR_HEALTHY.toBoolean() == true) {
-                        waitForHealthy(pepperEnv)
-                    }
-
-                    // `ceph osd out <id> <id>`
-                    stage('Set OSDs out') {
-                            runCephCommand(pepperEnv, ADMIN_HOST, "ceph osd out ${osd_id}")
-                    }
-
-                    if (WAIT_FOR_HEALTHY.toBoolean() == true) {
-                        sleep(5)
-                        waitForHealthy(pepperEnv)
-                    }
-
-                    // stop osd daemons
-                    stage('Stop OSD daemons') {
-                        salt.runSaltProcessStep(pepperEnv, tgt, 'service.stop', ['ceph-osd@' + osd_id.replaceAll('osd.', '')],  null, true)
-                    }
-
-                    // remove keyring `ceph auth del osd.3`
-                    stage('Remove OSD keyrings from auth') {
-                        runCephCommand(pepperEnv, ADMIN_HOST, 'ceph auth del ' + osd_id)
-                    }
-
-                    // remove osd `ceph osd rm osd.3`
-                    stage('Remove OSDs') {
-                        runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd rm ' + osd_id)
-                    }
-
-                    def dmcrypt = ""
-                    try {
-                        dmcrypt = runCephCommand(pepperEnv, tgt, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep dmcrypt")['return'][0].values()[0]
-                    } catch (Exception e) {
-                        common.warningMsg(e)
-                    }
-
-                    if (dmcrypt?.trim()) {
-                        def mount = runCephCommand(pepperEnv, tgt, "lsblk -rp | grep /var/lib/ceph/osd/ceph-${id} -B1")['return'][0].values()[0]
-                        dev = mount.split()[0].replaceAll("[0-9]","")
-
-                        // remove partition tables
-                        stage('dd part tables') {
-                            runCephCommand(pepperEnv, tgt, "dd if=/dev/zero of=${dev} bs=512 count=1 conv=notrunc")
-                        }
-
-                        // remove journal, block_db, block_wal partition `parted /dev/sdj rm 3`
-                        removeJournalOrBlockPartitions(pepperEnv, tgt, id)
-
-                        // reboot
-                        stage('reboot and wait') {
-                            salt.runSaltProcessStep(pepperEnv, tgt, 'system.reboot', null, null, true, 5)
-                            salt.minionsReachable(pepperEnv, 'I@salt:master', tgt)
-                            sleep(10)
-                        }
-
-                        // zap disks `ceph-disk zap /dev/sdi`
-                        stage('Zap devices') {
-                            try {
-                                runCephCommand(pepperEnv, tgt, 'ceph-disk zap ' + dev)
-                            } catch (Exception e) {
-                                common.warningMsg(e)
-                            }
-                            runCephCommand(pepperEnv, tgt, 'ceph-disk zap ' + dev)
-                        }
-
+                for (i in ceph_disks) {
+                    def osd_id = i.getKey().toString()
+                    if (osd_id in osds || OSD == '*') {
+                        osd_ids.add('osd.' + osd_id)
+                        print("Will migrate " + osd_id)
                     } else {
+                        print("Skipping " + osd_id)
+                    }
+                }
 
-                        def mount = runCephCommand(pepperEnv, tgt, "mount | grep /var/lib/ceph/osd/ceph-${id}")['return'][0].values()[0]
-                        dev = mount.split()[0].replaceAll("[0-9]","")
+                for (osd_id in osd_ids) {
 
-                        // remove journal, block_db, block_wal partition `parted /dev/sdj rm 3`
-                        removeJournalOrBlockPartitions(pepperEnv, tgt, id)
+                    def id = osd_id.replaceAll('osd.', '')
+                    def backend = runCephCommand(pepperEnv, ADMIN_HOST, "ceph osd metadata ${id} | grep osd_objectstore")['return'][0].values()[0]
 
-                        // umount `umount /dev/sdi1`
-                        stage('Umount devices') {
-                            runCephCommand(pepperEnv, tgt, "umount /var/lib/ceph/osd/ceph-${id}")
+                    if (backend.contains(ORIGIN_BACKEND.toLowerCase())) {
+
+                        // wait for a healthy cluster before manipulating OSDs
+                        if (WAIT_FOR_HEALTHY.toBoolean() == true) {
+                            waitForHealthy(pepperEnv)
                         }
 
-                        // zap disks `ceph-disk zap /dev/sdi`
-                        stage('Zap device') {
-                            runCephCommand(pepperEnv, tgt, 'ceph-disk zap ' + dev)
+                        // `ceph osd out <id> <id>`
+                        stage('Set OSDs out') {
+                            runCephCommand(pepperEnv, ADMIN_HOST, "ceph osd out ${osd_id}")
                         }
-                    }
 
-                    // Deploy Ceph OSD
-                    stage('Deploy Ceph OSD') {
-                        salt.runSaltProcessStep(pepperEnv, tgt, 'saltutil.refresh_pillar', [], null, true, 5)
-                        salt.enforceState(pepperEnv, tgt, 'ceph.osd', true)
-                    }
-
-                    if (PER_OSD_CONTROL.toBoolean() == true) {
-                        stage("Verify backend version for osd.${id}") {
+                        if (WAIT_FOR_HEALTHY.toBoolean() == true) {
                             sleep(5)
-                            runCephCommand(pepperEnv, tgt, "ceph osd metadata ${id} | grep osd_objectstore")
-                            runCephCommand(pepperEnv, tgt, "ceph -s")
+                            waitForHealthy(pepperEnv)
                         }
 
-                        stage('Ask for manual confirmation') {
-                            input message: "From the verification commands above, please check the backend version of osd.${id} and ceph status. If it is correct, Do you want to continue to migrate next osd?"
+                        // stop osd daemons
+                        stage('Stop OSD daemons') {
+                            salt.runSaltProcessStep(pepperEnv, tgt, 'service.stop', ['ceph-osd@' + osd_id.replaceAll('osd.', '')], null, true)
+                        }
+
+                        // remove keyring `ceph auth del osd.3`
+                        stage('Remove OSD keyrings from auth') {
+                            runCephCommand(pepperEnv, ADMIN_HOST, 'ceph auth del ' + osd_id)
+                        }
+
+                        // remove osd `ceph osd rm osd.3`
+                        stage('Remove OSDs') {
+                            runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd rm ' + osd_id)
+                        }
+
+                        def dmcrypt = ""
+                        try {
+                            dmcrypt = runCephCommand(pepperEnv, tgt, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep dmcrypt")['return'][0].values()[0]
+                        } catch (Exception e) {
+                            common.warningMsg(e)
+                        }
+
+                        if (dmcrypt?.trim()) {
+                            def mount = runCephCommand(pepperEnv, tgt, "lsblk -rp | grep /var/lib/ceph/osd/ceph-${id} -B1")['return'][0].values()[0]
+                            dev = mount.split()[0].replaceAll("[0-9]","")
+
+                            // remove partition tables
+                            stage('dd part tables') {
+                                runCephCommand(pepperEnv, tgt, "dd if=/dev/zero of=${dev} bs=512 count=1 conv=notrunc")
+                            }
+
+                            // remove journal, block_db, block_wal partition `parted /dev/sdj rm 3`
+                            removeJournalOrBlockPartitions(pepperEnv, tgt, id)
+
+                            // reboot
+                            stage('reboot and wait') {
+                                salt.runSaltProcessStep(pepperEnv, tgt, 'system.reboot', null, null, true, 5)
+                                salt.minionsReachable(pepperEnv, 'I@salt:master', tgt)
+                                sleep(10)
+                            }
+
+                            // zap disks `ceph-disk zap /dev/sdi`
+                            stage('Zap devices') {
+                                try {
+                                    runCephCommand(pepperEnv, tgt, 'ceph-disk zap ' + dev)
+                                } catch (Exception e) {
+                                    common.warningMsg(e)
+                                }
+                                runCephCommand(pepperEnv, tgt, 'ceph-disk zap ' + dev) // zap again: the first attempt above is allowed to fail and only logs a warning
+                            }
+
+                        } else {
+
+                            def mount = runCephCommand(pepperEnv, tgt, "mount | grep /var/lib/ceph/osd/ceph-${id}")['return'][0].values()[0]
+                            dev = mount.split()[0].replaceAll("[0-9]","")
+
+                            // remove journal, block_db, block_wal partition `parted /dev/sdj rm 3`
+                            removeJournalOrBlockPartitions(pepperEnv, tgt, id)
+
+                            // umount `umount /dev/sdi1`
+                            stage('Umount devices') {
+                                runCephCommand(pepperEnv, tgt, "umount /var/lib/ceph/osd/ceph-${id}")
+                            }
+
+                            // zap disks `ceph-disk zap /dev/sdi`
+                            stage('Zap device') {
+                                runCephCommand(pepperEnv, tgt, 'ceph-disk zap ' + dev)
+                            }
+                        }
+
+                        // Deploy Ceph OSD
+                        stage('Deploy Ceph OSD') {
+                            salt.runSaltProcessStep(pepperEnv, tgt, 'saltutil.refresh_pillar', [], null, true, 5)
+                            salt.enforceState(pepperEnv, tgt, 'ceph.osd', true)
+                        }
+
+                        if (PER_OSD_CONTROL.toBoolean() == true) {
+                            stage("Verify backend version for osd.${id}") {
+                                sleep(5)
+                                runCephCommand(pepperEnv, tgt, "ceph osd metadata ${id} | grep osd_objectstore")
+                                runCephCommand(pepperEnv, tgt, "ceph -s")
+                            }
+
+                            stage('Ask for manual confirmation') {
+                                input message: "From the verification commands above, please check the backend version of osd.${id} and the ceph status. If they are correct, do you want to continue migrating the next OSD?"
+                            }
                         }
                     }
                 }
-            }
-            if (PER_OSD_HOST_CONTROL.toBoolean() == true) {
-                stage("Verify backend versions") {
-                    sleep(5)
-                    runCephCommand(pepperEnv, tgt, "ceph osd metadata | grep osd_objectstore -B2")
-                    runCephCommand(pepperEnv, tgt, "ceph -s")
+                if (PER_OSD_HOST_CONTROL.toBoolean() == true) {
+                    stage("Verify backend versions") {
+                        sleep(5)
+                        runCephCommand(pepperEnv, tgt, "ceph osd metadata | grep osd_objectstore -B2")
+                        runCephCommand(pepperEnv, tgt, "ceph -s")
+                    }
+
+                    stage('Ask for manual confirmation') {
+                        input message: "From the verification commands above, please check the ceph status and the backend version of the OSDs on this host. If they are correct, do you want to continue migrating the next OSD host?"
+                    }
                 }
 
-                stage('Ask for manual confirmation') {
-                    input message: "From the verification command above, please check the ceph status and backend version of osds on this host. If it is correct, Do you want to continue to migrate next OSD host?"
-                }
             }
-
-        }
-        // remove cluster flags
-        if (flags.size() > 0) {
-            stage('Unset cluster flags') {
-                for (flag in flags) {
-                    common.infoMsg('Removing flag ' + flag)
-                    runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd unset ' + flag)
+            // remove cluster flags
+            if (flags.size() > 0) {
+                stage('Unset cluster flags') {
+                    for (flag in flags) {
+                        common.infoMsg('Removing flag ' + flag)
+                        runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd unset ' + flag)
+                    }
                 }
             }
         }