Fix missing steps for Ceph Add OSD Node (upmap) pipeline

The 'Install infra' and 'Update/Install monitoring' steps were added.
The pipeline now also verifies that HOST is an OSD node, refreshes host
files across the cluster, and unsets the norebalance flag if any stage
fails.
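
For reference, the upmap mechanism pins every placement group that the
new OSDs caused to be remapped back to the OSDs it currently occupies,
roughly:

    ceph osd pg-upmap-items <pgid> <new-osd> <current-osd>

(ids are illustrative), so nothing moves while norebalance is set; once
the flag is dropped, the upmap balancer removes these pins at a
controlled pace.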

Related-prod: PROD-34896
Change-Id: Icd50f2e1aec88df3f2b537e89553cc3e03eea422
diff --git a/ceph-add-osd-upmap.groovy b/ceph-add-osd-upmap.groovy
index aa7e5e2..3c195ea 100644
--- a/ceph-add-osd-upmap.groovy
+++ b/ceph-add-osd-upmap.groovy
@@ -1,11 +1,11 @@
 /**
  *
- * Add Ceph node to existing cluster using upmap mechanism
+ * Add Ceph OSD node to existing cluster using upmap mechanism
  *
  *  Required parameters:
  *  SALT_MASTER_URL             URL of Salt master
  *  SALT_MASTER_CREDENTIALS     Credentials to the Salt API
- *  HOST                        Host (minion id) to be added
+ *  HOST                        OSD Host (minion id) to be added
  *
  */
 
@@ -39,65 +39,102 @@
 
 timeout(time: 12, unit: 'HOURS') {
     node("python") {
-        // create connection to salt master
-        python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+        try {
+            // create connection to salt master
+            python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
 
-        stage ("verify client versions")
-        {
-          // I@docker:swarm and I@prometheus:server - mon* nodes
-          def nodes = salt.getMinions("pepperEnv", "I@ceph:common and not ( I@docker:swarm and I@prometheus:server )")
-          for ( node in nodes )
-          {
-            def versions = salt.cmdRun("pepperEnv", node, "ceph features --format json", checkResponse=true, batch=null, output=false).values()[0]
-            versions = new groovy.json.JsonSlurperClassic().parseText(versions[0][node])
-            if ( versions['client']['group']['release'] != 'luminous' )
-            {
-              throw new Exception("client installed on " + node + " is not luminous. Update all clients to luminous before using this pipeline")
+            if (!HOST.toLowerCase().contains("osd")) {
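+                // only OSD nodes are supported; rely on the osd* minion naming convention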
+                common.errorMsg("This pipeline can only be used to add new OSD nodes to an existing Ceph cluster.")
+                throw new InterruptedException()
             }
-          }
-        }
 
-        stage("enable luminous compat") {
-            runCephCommand('ceph osd set-require-min-compat-client luminous')['return'][0].values()[0]
-        }
-
-        stage("enable upmap balancer") {
-            runCephCommand('ceph balancer on')['return'][0].values()[0]
-            runCephCommand('ceph balancer mode upmap')['return'][0].values()[0]
-        }
-
-
-        stage("set norebalance") {
-            runCephCommand('ceph osd set norebalance')['return'][0].values()[0]
-        }
-
-        stage('Install Ceph OSD') {
-            orchestrate.installCephOsd(pepperEnv, HOST)
-        }
-
-        def mapping = []
-
-        stage("update mappings") {
-            def pgmap
-            for (int x = 1; x <= 3; x++) {
-                pgmap = getpgmap()
-                if (pgmap == '') {
-                    return 1
-                } else {
-                    pgmap = new groovy.json.JsonSlurperClassic().parseText(pgmap)
-                    generatemapping(pepperEnv, pgmap, mapping)
-                    mapping.each(this.&runCephCommand)
-                    sleep(30)
+            stage ("verify client versions")
+            {
+                // exclude mon* nodes, which are matched by I@docker:swarm and I@prometheus:server
+                def nodes = salt.getMinions(pepperEnv, "I@ceph:common and not ( I@docker:swarm and I@prometheus:server )")
+                for (node in nodes) {
+                    def versions = salt.cmdRun(pepperEnv, node, "ceph features --format json", checkResponse=true, batch=null, output=false).values()[0]
+                    versions = new groovy.json.JsonSlurperClassic().parseText(versions[0][node])
+                    if (versions['client']['group']['release'] != 'luminous') {
+                        throw new Exception("client installed on " + node + " is not luminous. Update all clients to luminous before using this pipeline")
+                    }
                 }
             }
-        }
 
-        stage("unset norebalance") {
+            stage("enable luminous compat") {
+                runCephCommand('ceph osd set-require-min-compat-client luminous')['return'][0].values()[0]
+            }
+
+            stage("enable upmap balancer") {
+                runCephCommand('ceph balancer on')['return'][0].values()[0]
+                runCephCommand('ceph balancer mode upmap')['return'][0].values()[0]
+            }
+
+            stage("set norebalance") {
+                runCephCommand('ceph osd set norebalance')['return'][0].values()[0]
+            }
+
+            stage('Install infra') {
+                orchestrate.installFoundationInfraOnTarget(pepperEnv, HOST)
+            }
+
+            stage('Install Ceph OSD') {
+                orchestrate.installCephOsd(pepperEnv, HOST)
+            }
+
+            stage("Update/Install monitoring") {
+                def prometheusNodes = salt.getMinions(pepperEnv, 'I@prometheus:server')
+                if (!prometheusNodes.isEmpty()) {
+                    // Collect grains
+                    salt.enforceState(pepperEnv, HOST, 'salt.minion.grains')
+                    salt.runSaltProcessStep(pepperEnv, HOST, 'saltutil.refresh_modules')
+                    salt.runSaltProcessStep(pepperEnv, HOST, 'mine.update')
+                    sleep(5)
+                    salt.enforceState(pepperEnv, HOST, ['fluentd', 'telegraf', 'prometheus'])
+                    salt.enforceState(pepperEnv, 'I@prometheus:server', 'prometheus')
+                } else {
+                    common.infoMsg('No Prometheus nodes in cluster. Nothing to do')
+                }
+            }
+
+            stage("Update host files") {
+                salt.enforceState(pepperEnv, '*', 'linux.network.host')
+            }
+
+            def mapping = []
+
+            stage("update mappings") {
+                def pgmap
+                for (int x = 1; x <= 3; x++) {
+                    pgmap = getpgmap()
+                    if (pgmap == '') {
+                        return 1
+                    } else {
+                        pgmap = new groovy.json.JsonSlurperClassic().parseText(pgmap)
+                        generatemapping(pepperEnv, pgmap, mapping)
+                        mapping.each(this.&runCephCommand)
+                        sleep(30)
+                    }
+                }
+            }
+
+            stage("unset norebalance") {
+                runCephCommand('ceph osd unset norebalance')['return'][0].values()[0]
+            }
+
+            stage("wait for healthy cluster") {
+                ceph.waitForHealthy(pepperEnv, "I@ceph:mon and I@ceph:common:keyring:admin", flags)
+            }
+        }
+        catch (Throwable e) {
+            // a stage failed or the build was aborted; make sure norebalance is not left set
             runCephCommand('ceph osd unset norebalance')['return'][0].values()[0]
-        }
-
-        stage("wait for healthy cluster") {
-            ceph.waitForHealthy(pepperEnv, "I@ceph:mon and I@ceph:common:keyring:admin", flags)
+            throw e
         }
     }
 }
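
The hunk calls several helpers defined near the top of the file, outside
the diff context. A minimal sketch of what they plausibly look like --
names are taken from the call sites above, bodies and signatures are
assumptions:

    // binding variables (no 'def'), so they are visible inside the methods below
    common = new com.mirantis.mk.Common()
    salt = new com.mirantis.mk.Salt()
    python = new com.mirantis.mk.Python()
    ceph = new com.mirantis.mk.Ceph()
    orchestrate = new com.mirantis.mk.Orchestrate()
    pepperEnv = "pepperEnv"
    flags = []  // assumed: extra cluster flags for waitForHealthy to tolerate

    // run a ceph CLI command on a mon node that holds the admin keyring
    def runCephCommand(cmd) {
        return salt.cmdRun(pepperEnv, "I@ceph:mon and I@ceph:common:keyring:admin",
                           cmd, checkResponse = true, batch = null, output = false)
    }

    // JSON description of PGs whose up set differs from their acting set
    // (on Luminous, 'ceph pg ls' prints a plain JSON array)
    def getpgmap() {
        return runCephCommand('ceph pg ls remapped --format=json')['return'][0].values()[0]
    }

    // for each remapped PG, emit a pg-upmap-items exception pinning the PG to
    // the OSDs it currently occupies, so unsetting norebalance moves no data;
    // 'master' is unused here and kept only to match the call site
    def generatemapping(master, pgmap, mapping) {
        for (pg in pgmap) {
            def target = pg['up'] - pg['acting']   // OSDs the PG is headed to
            def source = pg['acting'] - pg['up']   // OSDs it currently lives on
            for (int i = 0; i < target.size(); i++) {
                mapping.add("ceph osd pg-upmap-items ${pg['pgid']} ${target[i]} ${source[i]}")
            }
        }
    }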