Merge "update mirror - change target for mirror VM"
diff --git a/ceph-add-node.groovy b/ceph-add-node.groovy
new file mode 100644
index 0000000..8757ca1
--- /dev/null
+++ b/ceph-add-node.groovy
@@ -0,0 +1,76 @@
+/**
+ *
+ * Add Ceph node to existing cluster
+ *
+ * Required parameters:
+ *  SALT_MASTER_URL             URL of Salt master
+ *  SALT_MASTER_CREDENTIALS     Credentials to the Salt API
+ *  HOST                        Host (minion id) to be added
+ *  HOST_TYPE                   Type of Ceph node to be added. Valid values are mon/osd/rgw
+ *
+ */
+
+common = new com.mirantis.mk.Common()
+salt = new com.mirantis.mk.Salt()
+orchestrate = new com.mirantis.mk.Orchestrate()
+def python = new com.mirantis.mk.Python()
+
+def pepperEnv = "pepperEnv"
+
+node("python") {
+
+    // create connection to salt master
+    python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+
+    matches = ["osd", "mon", "rgw"]
+    def found = false
+    for (s in matches) {
+        if (HOST_TYPE.toLowerCase() == s) {
+            found = true
+        }
+    }
+
+    if (!found) {
+        common.errorMsg("Invalid HOST_TYPE. Please use one of the following types: mon/osd/rgw")
+        return
+    }
+
+    if (HOST_TYPE.toLowerCase() != 'osd') {
+
+        // spawn the VM for the new mon/rgw node via the salt.control state on the KVM hosts
+        stage('Launch VMs') {
+            salt.enforceState(pepperEnv, 'I@salt:control', 'salt.control', true)
+
+            // wait till the HOST appears in salt-key on salt-master
+            salt.minionPresent(pepperEnv, 'I@salt:master', HOST)
+        }
+    }
+
+    // run basic states
+    stage('Install infra') {
+        orchestrate.installFoundationInfraOnTarget(pepperEnv, HOST)
+    }
+
+    if (HOST_TYPE.toLowerCase() == 'osd') {
+
+        // Install Ceph osd
+        stage('Install Ceph OSD') {
+            orchestrate.installCephOsd(pepperEnv, HOST)
+        }
+    } else if (HOST_TYPE.toLowerCase() == 'mon') {
+        // Install Ceph mon
+        stage('Install Ceph MON') {
+            salt.enforceState(pepperEnv, 'I@ceph:common', 'ceph.common', true)
+            // install Ceph Mons
+            salt.enforceState(pepperEnv, 'I@ceph:mon', 'ceph.mon', true)
+            if (salt.testTarget(pepperEnv, 'I@ceph:mgr')) {
+                salt.enforceState(pepperEnv, 'I@ceph:mgr', 'ceph.mgr', true)
+            }
+        }
+    } else if (HOST_TYPE.toLowerCase() == 'rgw') {
+        // Install Ceph rgw
+        stage('Install Ceph RGW') {
+            salt.enforceState(pepperEnv, 'I@ceph:radosgw', ['keepalived', 'haproxy', 'ceph.radosgw'], true)
+        }
+    }
+}
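
The `found` loop above is the only input validation in this new pipeline. Below is a minimal, self-contained sketch of the same HOST_TYPE check in plain Groovy, independent of the Salt/Jenkins helpers used in the repo; `validateHostType` is a name introduced only for this illustration.

// Sketch of the HOST_TYPE validation from ceph-add-node.groovy, as plain runnable Groovy.
def validateHostType(String hostType) {
    def allowed = ['mon', 'osd', 'rgw']
    if (!allowed.contains(hostType?.toLowerCase())) {
        throw new IllegalArgumentException(
            "Invalid HOST_TYPE '${hostType}', expected one of: ${allowed.join('/')}")
    }
    return hostType.toLowerCase()
}

assert validateHostType('OSD') == 'osd'    // the pipeline also compares case-insensitively
try {
    validateHostType('mds')                // a type this pipeline does not handle
    assert false
} catch (IllegalArgumentException e) {
    println e.message
}
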
diff --git a/ceph-add-osd-host.groovy b/ceph-add-osd-host.groovy
deleted file mode 100644
index fb0eceb..0000000
--- a/ceph-add-osd-host.groovy
+++ /dev/null
@@ -1,32 +0,0 @@
-/**
- *
- * Add OSD host to existing cluster
- *
- * Requred parameters:
- *  SALT_MASTER_URL             URL of Salt master
- *  SALT_MASTER_CREDENTIALS     Credentials to the Salt API
- *  HOST                        Host (minion id) to be added
- *
- */
-
-common = new com.mirantis.mk.Common()
-salt = new com.mirantis.mk.Salt()
-orchestrate = new com.mirantis.mk.Orchestrate()
-def python = new com.mirantis.mk.Python()
-
-def pepperEnv = "pepperEnv"
-
-node("python") {
-
-    // create connection to salt master
-    python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
-
-    // run basic states
-    stage('Install infra') {
-        orchestrate.installFoundationInfraOnTarget(pepperEnv, HOST)
-    }
-    // Install Ceph
-    stage('Install Ceph') {
-        orchestrate.installCephOsd(pepperEnv, HOST)
-    }
-}
diff --git a/ceph-remove-node.groovy b/ceph-remove-node.groovy
new file mode 100644
index 0000000..21671bf
--- /dev/null
+++ b/ceph-remove-node.groovy
@@ -0,0 +1,182 @@
+/**
+ *
+ * Remove Ceph node from existing cluster
+ *
+ * Required parameters:
+ *  SALT_MASTER_URL             URL of Salt master
+ *  SALT_MASTER_CREDENTIALS     Credentials to the Salt API
+ *  HOST                        Host (minion id) to be removed
+ *  HOST_TYPE                   Type of Ceph node to be removed. Valid values are mon/osd/rgw
+ *  ADMIN_HOST                  Host (minion id) with admin keyring
+ *  WAIT_FOR_HEALTHY            Wait for cluster rebalance before stopping daemons
+ *  GENERATE_CRUSHMAP           Set to true if the crush map should be generated
+ *
+ */
+
+common = new com.mirantis.mk.Common()
+salt = new com.mirantis.mk.Salt()
+orchestrate = new com.mirantis.mk.Orchestrate()
+def python = new com.mirantis.mk.Python()
+
+def pepperEnv = "pepperEnv"
+
+def runCephCommand(master, target, cmd) {
+    return salt.cmdRun(master, target, cmd)
+}
+
+node("python") {
+
+    // create connection to salt master
+    python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+
+    matches = ["osd", "mon", "rgw"]
+    def found = false
+    for (s in matches) {
+        if (HOST_TYPE.toLowerCase() == s) {
+            found = true
+        }
+    }
+
+    if (!found) {
+        common.errorMsg("Invalid HOST_TYPE. Please use one of the following types: mon/osd/rgw")
+        return
+    }
+
+    stage('Refresh_pillar') {
+        salt.runSaltProcessStep(pepperEnv, '*', 'saltutil.refresh_pillar', [], null, true, 5)
+    }
+
+    //  split minion id on '.' and remove '*'
+    def target = HOST.split("\\.")[0].replace("*", "")
+
+    def _pillar = salt.getGrain(pepperEnv, 'I@salt:master', 'domain')
+    domain = _pillar['return'][0].values()[0].values()[0]
+
+    if (HOST_TYPE.toLowerCase() == 'rgw') {
+        // Remove Ceph rgw
+        stage('Remove Ceph RGW') {
+            salt.enforceState(pepperEnv, 'I@ceph:radosgw', ['keepalived', 'haproxy'], true)
+        }
+    }
+
+    if (HOST_TYPE.toLowerCase() != 'osd') {
+
+        // virsh destroy rgw04.deploy-name.local; virsh undefine rgw04.deploy-name.local;
+        stage('Destroy VM') {
+            _pillar = salt.getGrain(pepperEnv, 'I@salt:control', 'id')
+            def kvm01 = _pillar['return'][0].values()[0].values()[0]
+
+            _pillar = salt.getPillar(pepperEnv, "${kvm01}", "salt:control:cluster:internal:node:${target}:provider")
+            def targetProvider = _pillar['return'][0].values()[0]
+
+            salt.cmdRun(pepperEnv, "${targetProvider}", "virsh destroy ${target}.${domain}")
+            salt.cmdRun(pepperEnv, "${targetProvider}", "virsh undefine ${target}.${domain}")
+        }
+    } else if (HOST_TYPE.toLowerCase() == 'osd') {
+        def osd_ids = []
+
+        // get list of osd disks of the host
+        def ceph_disks = salt.getGrain(pepperEnv, HOST, 'ceph')['return'][0].values()[0].values()[0]['ceph_disk']
+
+        for (i in ceph_disks) {
+            def osd_id = i.getKey().toString()
+            osd_ids.add('osd.' + osd_id)
+            print("Will delete " + osd_id)
+        }
+
+        // `ceph osd out <id> <id>`
+        stage('Set OSDs out') {
+            runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd out ' + osd_ids.join(' '))
+        }
+
+        // wait for healthy cluster
+        if (WAIT_FOR_HEALTHY.toBoolean() == true) {
+            stage('Waiting for healthy cluster') {
+                while (true) {
+                    def health = runCephCommand(pepperEnv, ADMIN_HOST, 'ceph health')['return'][0].values()[0]
+                    if (health.contains('HEALTH_OK')) {
+                        common.infoMsg('Cluster is healthy')
+                        break;
+                    }
+                    sleep(10)
+                }
+            }
+        }
+
+        // stop osd daemons
+        stage('Stop OSD daemons') {
+            for (i in osd_ids) {
+                salt.runSaltProcessStep(pepperEnv, HOST, 'service.stop', ['ceph-osd@' + i.replaceAll('osd.', '')],  null, true)
+            }
+        }
+
+        // `ceph osd crush remove osd.2`
+        stage('Remove OSDs from CRUSH') {
+            for (i in osd_ids) {
+                runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd crush remove ' + i)
+            }
+        }
+
+        // remove keyring `ceph auth del osd.3`
+        stage('Remove OSD keyrings from auth') {
+            for (i in osd_ids) {
+                runCephCommand(pepperEnv, ADMIN_HOST, 'ceph auth del ' + i)
+            }
+        }
+
+        // remove osd `ceph osd rm osd.3`
+        stage('Remove OSDs') {
+            for (i in osd_ids) {
+                runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd rm ' + i)
+            }
+        }
+
+        // purge Ceph pkgs
+        stage('Purge Ceph OSD pkgs') {
+            runCephCommand(pepperEnv, HOST, 'apt purge ceph-base ceph-common ceph-fuse ceph-mds ceph-osd libcephfs2 python-cephfs librados2 python-rados -y')
+        }
+
+        // stop salt-minion service and move its configuration
+        stage('Stop salt-minion') {
+            salt.cmdRun(pepperEnv, HOST, "mv /etc/salt/minion.d/minion.conf minion.conf")
+            salt.runSaltProcessStep(pepperEnv, HOST, 'service.stop', ['salt-minion'], [], null, true, 5)
+        }
+    }
+
+    stage('Remove salt-key') {
+        try {
+            salt.cmdRun(pepperEnv, 'I@salt:master', "salt-key -d ${target}.${domain} -y")
+        } catch (Exception e) {
+            common.warningMsg(e)
+        }
+        try {
+            salt.cmdRun(pepperEnv, 'I@salt:master', "rm /srv/salt/reclass/nodes/_generated/${target}.${domain}.yml")
+        } catch (Exception e) {
+            common.warningMsg(e)
+        }
+    }
+
+    if (HOST_TYPE.toLowerCase() == 'mon') {
+        // Update Monmap
+        stage('Update monmap') {
+            runCephCommand(pepperEnv, 'I@ceph:mon', "ceph mon getmap -o monmap.backup")
+            try {
+                runCephCommand(pepperEnv, 'I@ceph:mon', "ceph mon remove ${target}")
+            } catch (Exception e) {
+                common.warningMsg(e)
+            }
+            runCephCommand(pepperEnv, 'I@ceph:mon', "monmaptool monmap.backup --rm ${target}")
+        }
+
+        // Update configs
+        stage('Update Ceph configs') {
+            salt.enforceState(pepperEnv, 'I@ceph:common', 'ceph.common', true)
+        }
+    }
+
+    if (HOST_TYPE.toLowerCase() == 'osd' && GENERATE_CRUSHMAP.toBoolean() == true) {
+        stage('Generate CRUSHMAP') {
+            salt.enforceState(pepperEnv, 'I@ceph:setup:crush', 'ceph.setup.crush', true)
+        }
+    }
+}
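
Both this pipeline and ceph-add-node.groovy poll `ceph health` in an unbounded `while (true)` loop when WAIT_FOR_HEALTHY is set. Below is a minimal sketch of the same polling pattern with a bounded number of attempts, in plain Groovy; `checkHealth` stands in for the `runCephCommand(pepperEnv, ADMIN_HOST, 'ceph health')` call and `waitForHealthOk` is a hypothetical helper, not part of this change.

// Bounded variant of the health-wait loop, shown as plain runnable Groovy.
def waitForHealthOk(Closure<String> checkHealth, int attempts = 30, int delaySec = 10) {
    for (int i = 0; i < attempts; i++) {
        if (checkHealth().contains('HEALTH_OK')) {
            println 'Cluster is healthy'
            return true
        }
        sleep(delaySec * 1000)   // plain Groovy sleep() takes milliseconds, unlike the pipeline step
    }
    return false                 // caller decides whether to abort or continue
}

// Example with a fake health source that recovers on the third poll.
def responses = ['HEALTH_WARN', 'HEALTH_WARN', 'HEALTH_OK'] as LinkedList
assert waitForHealthOk({ responses.poll() ?: 'HEALTH_OK' }, 5, 0)
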
diff --git a/ceph-remove-osd.groovy b/ceph-remove-osd.groovy
index 0471f9f..18c5525 100644
--- a/ceph-remove-osd.groovy
+++ b/ceph-remove-osd.groovy
@@ -7,6 +7,7 @@
  *  SALT_MASTER_CREDENTIALS     Credentials to the Salt API
  *
  *  HOST                        Host (minion id) to be removed
+ *  OSD                         Comma separated list of osd ids to be removed
  *  ADMIN_HOST                  Host (minion id) with admin keyring
  *  CLUSTER_FLAGS               Comma separated list of tags to apply to cluster
  *  WAIT_FOR_HEALTHY            Wait for cluster rebalance before stoping daemons
@@ -63,15 +64,15 @@
     }
 
     // wait for healthy cluster
-    if (common.validInputParam('WAIT_FOR_HEALTHY') && WAIT_FOR_HEALTHY.toBoolean()) {
+    if (WAIT_FOR_HEALTHY.toBoolean() == true) {
         stage('Waiting for healthy cluster') {
             while (true) {
                 def health = runCephCommand(pepperEnv, 'ceph health')['return'][0].values()[0]
-                if (health.contains('HEALTH OK')) {
+                if (health.contains('HEALTH_OK')) {
                     common.infoMsg('Cluster is healthy')
                     break;
                 }
-                sleep(60)
+                sleep(10)
             }
         }
     }
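
This hunk drops the `common.validInputParam('WAIT_FOR_HEALTHY')` guard and reads `WAIT_FOR_HEALTHY.toBoolean()` directly, which assumes the job always defines that parameter; an unset binding variable in a scripted pipeline typically fails with `groovy.lang.MissingPropertyException`. A sketch of a defensive lookup with a default, in plain Groovy; `boolParam` is illustrative only and not part of this change.

// Resolve an optional boolean parameter with a default instead of assuming it is bound.
def boolParam(Map params, String name, boolean defaultValue) {
    def raw = params.get(name)
    return raw == null ? defaultValue : raw.toString().toBoolean()
}

assert boolParam([WAIT_FOR_HEALTHY: 'true'], 'WAIT_FOR_HEALTHY', false)
assert boolParam([:], 'WAIT_FOR_HEALTHY', false) == false
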
diff --git a/ceph-replace-failed-osd.groovy b/ceph-replace-failed-osd.groovy
new file mode 100644
index 0000000..6c6f281
--- /dev/null
+++ b/ceph-replace-failed-osd.groovy
@@ -0,0 +1,184 @@
+/**
+ *
+ * Replace failed disk with a new disk
+ *
+ * Required parameters:
+ *  SALT_MASTER_URL                 URL of Salt master
+ *  SALT_MASTER_CREDENTIALS         Credentials to the Salt API
+ *
+ *  HOST                            Host (minion id) where the failed disk(s) will be replaced
+ *  ADMIN_HOST                      Host (minion id) with admin keyring and /etc/crushmap file present
+ *  OSD                             Failed OSD ids to be replaced (comma-separated list - 1,2,3)
+ *  DEVICE                          Comma separated list of failed devices that will be replaced at HOST (/dev/sdb,/dev/sdc)
+ *  JOURNAL_OR_BLOCKDB_PARTITION    Comma separated list of partitions where journal or block_db for the failed devices on this HOST were stored (/dev/sdh2,/dev/sdh3)
+ *  ENFORCE_CRUSHMAP                Set to true if the prepared crush map should be enforced
+ *  WAIT_FOR_PG_REBALANCE           Wait for PGs to rebalance after osd is removed from crush map
+ *  CLUSTER_FLAGS                   Comma separated list of tags to apply to cluster
+ *  WAIT_FOR_HEALTHY                Wait for cluster rebalance before stopping daemons
+ *
+ */
+
+common = new com.mirantis.mk.Common()
+salt = new com.mirantis.mk.Salt()
+def python = new com.mirantis.mk.Python()
+
+def pepperEnv = "pepperEnv"
+def flags = CLUSTER_FLAGS.tokenize(',')
+def osds = OSD.tokenize(',')
+def devices = DEVICE.tokenize(',')
+def journals_blockdbs = JOURNAL_OR_BLOCKDB_PARTITION.tokenize(',')
+
+
+def runCephCommand(master, target, cmd) {
+    return salt.cmdRun(master, target, cmd)
+}
+
+node("python") {
+
+    // create connection to salt master
+    python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+
+    if (flags.size() > 0) {
+        stage('Set cluster flags') {
+            for (flag in flags) {
+                runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd set ' + flag)
+            }
+        }
+    }
+
+    def osd_ids = []
+
+    print("osds:")
+    print(osds)
+
+    // get list of osd disks of the host
+    def ceph_disks = salt.getGrain(pepperEnv, HOST, 'ceph')['return'][0].values()[0].values()[0]['ceph_disk']
+    common.prettyPrint(ceph_disks)
+
+    for (i in ceph_disks) {
+        def osd_id = i.getKey().toString()
+        if (osd_id in osds || OSD == '*') {
+            osd_ids.add('osd.' + osd_id)
+            print("Will delete " + osd_id)
+        } else {
+            print("Skipping " + osd_id)
+        }
+    }
+
+    // `ceph osd out <id> <id>`
+    stage('Set OSDs out') {
+        runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd out ' + osd_ids.join(' '))
+    }
+
+    // wait for healthy cluster
+    if (WAIT_FOR_HEALTHY.toBoolean() == true) {
+        stage('Waiting for healthy cluster') {
+            while (true) {
+                def health = runCephCommand(pepperEnv, ADMIN_HOST, 'ceph health')['return'][0].values()[0]
+                if (health.contains('HEALTH_OK')) {
+                    common.infoMsg('Cluster is healthy')
+                    break;
+                }
+                sleep(10)
+            }
+        }
+    }
+
+    // stop osd daemons
+    stage('Stop OSD daemons') {
+        for (i in osd_ids) {
+            salt.runSaltProcessStep(pepperEnv, HOST, 'service.stop', ['ceph-osd@' + i.replaceAll('osd.', '')],  null, true)
+        }
+    }
+
+    // `ceph osd crush remove osd.2`
+    stage('Remove OSDs from CRUSH') {
+        for (i in osd_ids) {
+            runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd crush remove ' + i)
+        }
+    }
+
+    // wait for pgs to rebalance
+    if (WAIT_FOR_PG_REBALANCE.toBoolean() == true) {
+        stage('Waiting for pgs to rebalance') {
+            while (true) {
+                def status = runCephCommand(pepperEnv, ADMIN_HOST, 'ceph -s')['return'][0].values()[0]
+                if (!status.contains('degraded')) {
+                    common.infoMsg('PGs rebalanced')
+                    break;
+                }
+                sleep(10)
+            }
+        }
+    }
+
+    // remove keyring `ceph auth del osd.3`
+    stage('Remove OSD keyrings from auth') {
+        for (i in osd_ids) {
+            runCephCommand(pepperEnv, ADMIN_HOST, 'ceph auth del ' + i)
+        }
+    }
+
+    // remove osd `ceph osd rm osd.3`
+    stage('Remove OSDs') {
+        for (i in osd_ids) {
+            runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd rm ' + i)
+        }
+    }
+
+    // remove cluster flags
+    if (flags.size() > 0) {
+        stage('Unset cluster flags') {
+            for (flag in flags) {
+                common.infoMsg('Removing flag ' + flag)
+                runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd unset ' + flag)
+            }
+        }
+    }
+
+    // umount `umount /dev/sdi1`
+    stage('Umount devices') {
+        for (dev in devices) {
+            runCephCommand(pepperEnv, HOST, 'umount ' + dev + '1')
+        }
+    }
+
+    // zap disks `ceph-disk zap /dev/sdi`
+    stage('Zap devices') {
+        for (dev in devices) {
+            runCephCommand(pepperEnv, HOST, 'ceph-disk zap ' + dev)
+        }
+    }
+
+    // remove journal or block_db partition `parted /dev/sdj rm 3`
+    stage('Remove journal / block_db partitions') {
+        for (journal_blockdb in journals_blockdbs) {
+            if (journal_blockdb?.trim()) {
+                // dev = /dev/sdi
+                def dev = journal_blockdb.replaceAll("[0-9]", "")
+                // part_id = 2
+                def part_id = journal_blockdb.substring(journal_blockdb.lastIndexOf("/")+1).replaceAll("[^0-9]", "")
+                runCephCommand(pepperEnv, HOST, "parted ${dev} rm ${part_id}")
+            }
+        }
+    }
+
+    // Deploy failed Ceph OSD
+    stage('Deploy Ceph OSD') {
+        salt.enforceState(pepperEnv, HOST, 'ceph.osd', true)
+    }
+
+
+    if (ENFORCE_CRUSHMAP.toBoolean() == true) {
+
+        // enforce crushmap `crushtool -c /etc/ceph/crushmap -o /etc/ceph/crushmap.compiled; ceph osd setcrushmap -i /etc/ceph/crushmap.compiled`
+        stage('Enforce crushmap') {
+
+            stage('Ask for manual confirmation') {
+                input message: "Are you sure that your ADMIN_HOST has correct /etc/ceph/crushmap file? Click proceed to compile and enforce crushmap."
+            }
+            runCephCommand(pepperEnv, ADMIN_HOST, 'crushtool -c /etc/ceph/crushmap -o /etc/ceph/crushmap.compiled')
+            runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd setcrushmap -i /etc/ceph/crushmap.compiled')
+        }
+    }
+}
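
The 'Remove journal / block_db partitions' stage derives the device and partition number from entries like `/dev/sdh2` by stripping digits. Below is a self-contained sketch of that split, matching the example given in the header (`/dev/sdh2,/dev/sdh3`); it assumes `sdXN`-style names, since digit-stripping would not hold for base device names that themselves contain digits, and `splitPartition` is a name used only for this illustration.

// Sketch of the journal/block_db partition split, as plain runnable Groovy.
def splitPartition(String partition) {
    def dev = partition.replaceAll('[0-9]', '')
    def partId = partition.substring(partition.lastIndexOf('/') + 1).replaceAll('[^0-9]', '')
    return [dev: dev, partId: partId]
}

assert splitPartition('/dev/sdh2') == [dev: '/dev/sdh', partId: '2']
assert splitPartition('/dev/sdj3') == [dev: '/dev/sdj', partId: '3']
// The resulting call then looks like: parted /dev/sdh rm 2
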
diff --git a/docker-build-image-pipeline.groovy b/docker-build-image-pipeline.groovy
index d3b3640..926fd88 100644
--- a/docker-build-image-pipeline.groovy
+++ b/docker-build-image-pipeline.groovy
@@ -37,9 +37,9 @@
           def tag = sh(script: "git describe --tags --abbrev=0", returnStdout: true).trim()
           def revision = sh(script: "git describe --tags --abbrev=4 | grep -oP \"^${tag}-\\K.*\" | awk -F\\- '{print \$1}'", returnStdout: true).trim()
           imageTagsList << tag
-          if (revision != "") {
-            imageTagsList << "${tag}-${revision}"
-          }
+          revision = revision ? revision : "0"
+          imageTagsList << "${tag}-${revision}"
+
           if (!imageTagsList.contains("latest")) {
             imageTagsList << "latest"
           }
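
This change makes a build done exactly on a tag (where `git describe --tags --abbrev=4` prints no commit suffix and `revision` is empty) still produce a `<tag>-0` image tag rather than only `<tag>`. Below is a small sketch of the resulting tag list with the git output replaced by sample strings; `buildImageTags` and the sample values are illustrative, not part of the pipeline.

// Image-tag computation after this change, with `git describe` output replaced by samples.
def buildImageTags(String tag, String revision) {
    def imageTagsList = [tag]
    revision = revision ? revision : "0"       // an empty revision now maps to "0"
    imageTagsList << "${tag}-${revision}".toString()
    if (!imageTagsList.contains("latest")) {
        imageTagsList << "latest"
    }
    return imageTagsList
}

assert buildImageTags('2.1', '14') == ['2.1', '2.1-14', 'latest']   // 14 commits after tag 2.1
assert buildImageTags('2.1', '')   == ['2.1', '2.1-0', 'latest']    // built exactly on tag 2.1
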
diff --git a/test-salt-model-node.groovy b/test-salt-model-node.groovy
index 17b8fff..f556f14 100644
--- a/test-salt-model-node.groovy
+++ b/test-salt-model-node.groovy
@@ -59,7 +59,7 @@
         if (checkouted) {
           def workspace = common.getWorkspace()
           common.infoMsg("Running salt model test for node ${NODE_TARGET} in cluster ${CLUSTER_NAME}")
-          saltModelTesting.setupAndTestNode(NODE_TARGET, CLUSTER_NAME, EXTRA_FORMULAS, workspace, FORMULAS_SOURCE, FORMULAS_REVISION, MAX_CPU_PER_JOB.toInteger(), LEGACY_TEST_MODE, RECLASS_IGNORE_CLASS_NOTFOUND)
+          saltModelTesting.setupAndTestNode(NODE_TARGET, CLUSTER_NAME, EXTRA_FORMULAS, workspace, FORMULAS_SOURCE, FORMULAS_REVISION, MAX_CPU_PER_JOB.toInteger(), RECLASS_IGNORE_CLASS_NOTFOUND, LEGACY_TEST_MODE)
         }
       }
     } catch (Throwable e) {
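
The last hunk only reorders the trailing positional arguments so that RECLASS_IGNORE_CLASS_NOTFOUND is passed before LEGACY_TEST_MODE, matching the helper's expected order; with positional flags, a swap still compiles and runs but silently feeds each value into the wrong parameter. The following is a generic illustration of how named map options avoid that class of mistake; it is not the signature of setupAndTestNode in the mk library, and `runNodeTest` is a hypothetical name.

// Generic sketch: named options keep each flag attached to its name.
def runNodeTest(Map opts) {
    def ignoreClassNotFound = opts.get('reclassIgnoreClassNotFound', false)
    def legacyMode          = opts.get('legacyTestMode', false)
    return [ignoreClassNotFound: ignoreClassNotFound, legacyMode: legacyMode]
}

assert runNodeTest(reclassIgnoreClassNotFound: true, legacyTestMode: false) ==
       [ignoreClassNotFound: true, legacyMode: false]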