Merge "Add missed fetch for submodules for upgrade-mcp-release" into release/2019.2.0
diff --git a/ceph-add-osd-upmap.groovy b/ceph-add-osd-upmap.groovy
new file mode 100644
index 0000000..96ca29d
--- /dev/null
+++ b/ceph-add-osd-upmap.groovy
@@ -0,0 +1,133 @@
+/**
+ *
+ * Add a Ceph node to an existing cluster using the upmap mechanism
+ *
+ * Required parameters:
+ *  SALT_MASTER_URL             URL of Salt master
+ *  SALT_MASTER_CREDENTIALS     Credentials to the Salt API
+ *  HOST                        Host (minion id) to be added
+ *
+ */
+
+common = new com.mirantis.mk.Common()
+salt = new com.mirantis.mk.Salt()
+def python = new com.mirantis.mk.Python()
+orchestrate = new com.mirantis.mk.Orchestrate()
+
+def waitForHealthy(master, count=0, attempts=100) {
+    // poll "ceph health" every 10 seconds until the cluster reports HEALTH_OK
+    while (count < attempts) {
+        def health = runCephCommand('ceph health')['return'][0].values()[0]
+        if (health.contains('HEALTH_OK')) {
+            common.infoMsg('Cluster is healthy')
+            return
+        }
+        count++
+        sleep(10)
+    }
+    throw new Exception("Cluster is still unhealthy after ${attempts} attempts")
+}
+
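+// run a Ceph CLI command on a mon node that also holds the admin keyring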
+def runCephCommand(cmd) {
+  return salt.cmdRun("pepperEnv", "I@ceph:mon and I@ceph:common:keyring:admin", cmd, checkResponse=true, batch=null, output=false)
+}
+
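+// fetch the currently remapped placement groups as a JSON string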
+def getpgmap(master) {
+  return runCephCommand('ceph pg ls remapped --format=json')['return'][0].values()[0]
+}
+
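+// for every remapped PG, emit "ceph osd pg-upmap-items" commands that map the
+// newly assigned OSDs (up minus acting) back to the OSDs that still hold the
+// data (acting minus up), keeping the data in place for now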
+def generatemapping(master,pgmap,map) {
+  def pg_new
+  def pg_old
+
+  for ( pg in pgmap )
+  {
+    pg_new = pg["up"].minus(pg["acting"])
+    pg_old = pg["acting"].minus(pg["up"])
+
+    // pair each newly mapped OSD with the OSD it displaced
+    for ( def i = 0; i < pg_new.size() && i < pg_old.size(); i++ )
+    {
+      def string = "ceph osd pg-upmap-items " + pg["pgid"].toString() + " " + pg_new[i] + " " + pg_old[i] + ";"
+      map.add(string)
+    }
+  }
+}
+
+def pepperEnv = "pepperEnv"
+
+timeout(time: 12, unit: 'HOURS') {
+    node("python") {
+
+        // create connection to salt master
+        python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+
+        stage ("verify client versions")
+        {
+          def nodes = salt.getMinions("pepperEnv", "I@ceph:common and not E@mon*")
+          for ( node in nodes )
+          {
+            def versions = salt.cmdRun("pepperEnv", node, "ceph features --format json", checkResponse=true, batch=null, output=false).values()[0]
+            versions = new groovy.json.JsonSlurperClassic().parseText(versions[0][node])
+            if ( versions['client']['group']['release'] != 'luminous' )
+            {
+              throw new Exception("client installed on " + node + " is not luminous. Update all clients to luminous before using this pipeline")
+            }
+          }
+        }
+
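+        // the upmap balancer requires every client to speak at least the luminous protocol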
+        stage ("enable luminous compat")
+        {
+          runCephCommand('ceph osd set-require-min-compat-client luminous')['return'][0].values()[0]
+        }
+
+        stage ("enable upmap balancer")
+        {
+          runCephCommand('ceph balancer on')['return'][0].values()[0]
+          runCephCommand('ceph balancer mode upmap')['return'][0].values()[0]
+        }
+
+
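+        // block automatic rebalancing while the new OSD joins the cluster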
+        stage ("set norebalance")
+        {
+          runCephCommand('ceph osd set norebalance')['return'][0].values()[0]
+        }
+
+        stage('Install Ceph OSD') {
+            orchestrate.installCephOsd(pepperEnv, HOST)
+        }
+
+        def mapping = []
+
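+        // pin each remapped PG to the OSDs currently holding its data, so that
+        // unsetting norebalance does not trigger an immediate mass migration -
+        // the balancer then moves the data gradually via upmap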
+        stage ("update mappings")
+        {
+          def pgmap1 = getpgmap(pepperEnv)
+          if ( pgmap1 == '' )
+          {
+            return 1
+          }
+          else
+          {
+            def pgmap = new groovy.json.JsonSlurperClassic().parseText(pgmap1)
+            for(int x=1; x<=3; x++){
+              pgmap1 = getpgmap(pepperEnv)
+              generatemapping(pepperEnv,pgmap,mapping)
+              mapping.each(this.&runCephCommand)
+              sleep(30)
+            }
+          }
+
+        }
+
+        stage ("unset norebalance")
+        {
+          runCephCommand('ceph osd unset norebalance')['return'][0].values()[0]
+        }
+
+        stage ("wait for healthy cluster")
+        {
+          waitForHealthy(pepperEnv)
+        }
+
+    }
+}
diff --git a/ceph-upgrade.groovy b/ceph-upgrade.groovy
index ec232b8..86a1f0f 100644
--- a/ceph-upgrade.groovy
+++ b/ceph-upgrade.groovy
@@ -147,6 +147,31 @@
         // create connection to salt master
         python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
 
+        stage ('Check user choices') {
+            if (STAGE_UPGRADE_RGW.toBoolean() == true) {
+                // RGW must be upgraded last: for every upgrade stage the user skipped,
+                // verify that component already runs the target release
+                def mon_ok = true
+                if (STAGE_UPGRADE_MON.toBoolean() == false) {
+                    def mon_v = runCephCommand(pepperEnv, ADMIN_HOST, "ceph mon versions")['return'][0].values()[0]
+                    mon_ok = mon_v.contains("${TARGET_RELEASE}") && !mon_v.contains("${ORIGIN_RELEASE}")
+                }
+                def mgr_ok = true
+                if (STAGE_UPGRADE_MGR.toBoolean() == false) {
+                    def mgr_v = runCephCommand(pepperEnv, ADMIN_HOST, "ceph mgr versions")['return'][0].values()[0]
+                    mgr_ok = mgr_v.contains("${TARGET_RELEASE}") && !mgr_v.contains("${ORIGIN_RELEASE}")
+                }
+                def osd_ok = true
+                if (STAGE_UPGRADE_OSD.toBoolean() == false) {
+                    def osd_v = runCephCommand(pepperEnv, ADMIN_HOST, "ceph osd versions")['return'][0].values()[0]
+                    osd_ok = osd_v.contains("${TARGET_RELEASE}") && !osd_v.contains("${ORIGIN_RELEASE}")
+                }
+                if (!mon_ok || !osd_ok || !mgr_ok) {
+                    common.errorMsg('You may choose stages in any order, but RGW should be upgraded last')
+                    throw new InterruptedException()
+                }
+            }
+        }
+
         if (BACKUP_ENABLED.toBoolean() == true) {
             if (STAGE_UPGRADE_MON.toBoolean() == true) {
                 backup(pepperEnv, 'mon')
diff --git a/cloud-deploy-pipeline.groovy b/cloud-deploy-pipeline.groovy
index 3084f4b..6b6ec4e 100644
--- a/cloud-deploy-pipeline.groovy
+++ b/cloud-deploy-pipeline.groovy
@@ -433,6 +433,7 @@
                     // Setup kubernetes addons for opencontrail. More info in the definition of the func.
                     orchestrate.setupKubeAddonForContrail(venvPepper, extra_tgt)
                 }
+                orchestrate.installKubernetesClient(venvPepper, extra_tgt)
             }
 
             // install ceph
diff --git a/cloud-update.groovy b/cloud-update.groovy
index 3fd7723..99c661c 100644
--- a/cloud-update.groovy
+++ b/cloud-update.groovy
@@ -60,6 +60,17 @@
     wait = "${MINIONS_TEST_TIMEOUT}".toInteger()
 }
 
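+// Install packages via the Salt master with --async so the call survives the
+// salt-minion (and possibly salt-master) service restart, then wait for the
+// affected minions to reconnect.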
+def updateSaltPackage(pepperEnv, target, pkgs, masterUpdate = false) {
+    def salt = new com.mirantis.mk.Salt()
+    salt.cmdRun(pepperEnv, "I@salt:master", "salt -C '${target}' --async pkg.install force_yes=True pkgs='$pkgs'")
+    def minions_reachable = target
+    if (masterUpdate) {
+        // when updating Salt master packages, verify that all minions come back, not just the target
+        minions_reachable = '*'
+    }
+    salt.checkTargetMinionsReady(['saltId': pepperEnv, 'target': target, 'target_reachable': minions_reachable])
+}
+
 def updatePkgs(pepperEnv, target, targetType="", targetPackages="") {
     def salt = new com.mirantis.mk.Salt()
     def common = new com.mirantis.mk.Common()
@@ -155,12 +166,10 @@
         // salt master pkg
         if (targetType == 'cfg') {
             common.warningMsg('salt-master pkg upgrade, rerun the pipeline if disconnected')
-            salt.runSaltProcessStep(pepperEnv, target, 'pkg.install', ['salt-master'], null, true, 5)
-            salt.minionsReachable(pepperEnv, 'I@salt:master', '*', null, wait)
+            updateSaltPackage(pepperEnv, target, '["salt-master"]', true)
         }
         // salt minion pkg
-        salt.runSaltProcessStep(pepperEnv, target, 'pkg.install', ['salt-minion'], null, true, 5)
-        salt.minionsReachable(pepperEnv, 'I@salt:master', target, null, wait)
+        updateSaltPackage(pepperEnv, target, '["salt-minion"]')
         common.infoMsg('Performing pkg upgrades ... ')
         common.retry(3){
             out = salt.runSaltCommand(pepperEnv, 'local', ['expression': target, 'type': 'compound'], command, true, packages, commandKwargs)
diff --git a/k8s-upgrade-pipeline.groovy b/k8s-upgrade-pipeline.groovy
index bd963eb..fe7d189 100644
--- a/k8s-upgrade-pipeline.groovy
+++ b/k8s-upgrade-pipeline.groovy
@@ -28,6 +28,8 @@
 *   KUBERNETES_CALICO_CNI_IPAM_SOURCE_HASH    Calico/ipam binary hash. Should be null if update rolling via reclass-system level
  *   KUBERNETES_CALICO_KUBE_CONTROLLERS_IMAGE  Target calico/kube-controllers image. May be null in case of reclass-system rollout.
  *   CALICO_UPGRADE_VERSION                    Version of "calico-upgrade" utility to be used ("v1.0.5" for Calico v3.1.3 target).
+ *   KUBERNETES_ETCD_SOURCE                    Target etcd binary. May be null in case of reclass-system rollout.
+ *   KUBERNETES_ETCD_SOURCE_HASH               Target etcd binary checksum. May be null in case of reclass-system rollout.
  *
 **/
 import groovy.json.JsonSlurper
@@ -87,6 +89,27 @@
     }
 }
 
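+// Inject the etcd binary source and checksum as salt pillar overrides so the
+// etcd state installs the target version.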
+def overrideEtcdSource(pepperEnv) {
+    def salt = new com.mirantis.mk.Salt()
+
+    def k8sSaltOverrides = """
+        kubernetes_etcd_source: ${KUBERNETES_ETCD_SOURCE}
+        kubernetes_etcd_source_hash: ${KUBERNETES_ETCD_SOURCE_HASH}
+    """
+    stage("Override etcd binaries to target version") {
+        salt.setSaltOverrides(pepperEnv, k8sSaltOverrides)
+    }
+}
+
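+// Re-apply the etcd server state (which pulls in the overridden binary) and
+// then verify that the member reports a healthy cluster.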
+def performEtcdUpdateAndServicesRestart(pepperEnv, target) {
+    def salt = new com.mirantis.mk.Salt()
+
+    stage("Performing etcd update and services restart on ${target}") {
+        salt.enforceState(pepperEnv, target, "etcd.server.service")
+        salt.cmdRun(pepperEnv, target, ". /var/lib/etcd/configenv && etcdctl cluster-health")
+    }
+}
+
 def performKubernetesComputeUpdate(pepperEnv, target) {
     def salt = new com.mirantis.mk.Salt()
 
@@ -711,6 +734,17 @@
             }
 
             /*
+                * Execute etcd update
+            */
+            if ((common.validInputParam('KUBERNETES_ETCD_SOURCE')) && (common.validInputParam('KUBERNETES_ETCD_SOURCE_HASH'))) {
+                overrideEtcdSource(pepperEnv)
+            }
+            def targetHostsEtcd = salt.getMinionsSorted(pepperEnv, "I@etcd:server")
+            for (t in targetHostsEtcd) {
+                performEtcdUpdateAndServicesRestart(pepperEnv, t)
+            }
+
+            /*
                 * Execute k8s update
             */
             if (updates.contains("ctl")) {
diff --git a/opencontrail40-upgrade.groovy b/opencontrail40-upgrade.groovy
index 180ed85..2f89659 100644
--- a/opencontrail40-upgrade.groovy
+++ b/opencontrail40-upgrade.groovy
@@ -26,9 +26,9 @@
 def probe = 1
 def command = 'cmd.shell'
 
-def controlPkgs = 'contrail-config,contrail-config-openstack,contrail-control,contrail-dns,contrail-lib,contrail-nodemgr,contrail-utils,contrail-web-controller,contrail-web-core,neutron-plugin-contrail,python-contrail,contrail-database'
+def controlPkgs = 'contrail-config,contrail-config-openstack,contrail-control,contrail-dns,contrail-lib,contrail-nodemgr,contrail-utils,contrail-web-controller,contrail-web-core,neutron-plugin-contrail,contrail-database'
 def thirdPartyControlPkgsToRemove = 'zookeeper,libzookeeper-java,kafka,cassandra,redis-server,ifmap-server,supervisor'
-def analyticsPkgs = 'contrail-analytics,contrail-lib,contrail-nodemgr,contrail-utils,python-contrail,contrail-database'
+def analyticsPkgs = 'contrail-analytics,contrail-lib,contrail-nodemgr,contrail-utils,contrail-database'
 def thirdPartyAnalyticsPkgsToRemove = 'zookeeper,libzookeeper-java,kafka,cassandra,python-cassandra,cassandra-cpp-driver,redis-server,supervisor'
 def cmpPkgs = 'contrail-lib contrail-nodemgr contrail-utils contrail-vrouter-agent contrail-vrouter-utils python-contrail python-contrail-vrouter-api python-opencontrail-vrouter-netns contrail-vrouter-dkms'
 def neutronServerPkgs = 'neutron-plugin-contrail,contrail-heat,python-contrail'
diff --git a/update-package.groovy b/update-package.groovy
index 10f3a85..9d36f38 100644
--- a/update-package.groovy
+++ b/update-package.groovy
@@ -23,8 +23,15 @@
 def packages
 def command
 def commandKwargs
-def installSaltStack(target, pkgs){
-    salt.runSaltProcessStep(pepperEnv, target, 'pkg.install', ["force_yes=True", "pkgs='$pkgs'"], null, true, 30)
+
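+// Install salt packages via the master with --async so the call survives the
+// salt-* service restarts, then wait for the target minions to reconnect.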
+def installSaltStack(target, pkgs, masterUpdate = false){
+    salt.cmdRun(pepperEnv, "I@salt:master", "salt -C '${target}' --async pkg.install force_yes=True pkgs='$pkgs'")
+    def minions_reachable = target
+    if (masterUpdate) {
+        // when updating Salt master packages, verify that all minions come back, not just the target
+        minions_reachable = '*'
+    }
+    salt.checkTargetMinionsReady(['saltId': pepperEnv, 'target': target, 'target_reachable': minions_reachable])
 }
 
 timeout(time: 12, unit: 'HOURS') {
@@ -97,7 +104,7 @@
                         common.infoMsg("During salt-minion upgrade on cfg node, pipeline lose connectivy to salt-master for 2 min. If pipeline ended with error rerun pipeline again.")
                         common.retry(10, 5) {
                             if(salt.minionsReachable(pepperEnv, 'I@salt:master', "I@salt:master and ${saltTargets[i]}")){
-                                installSaltStack("I@salt:master and ${saltTargets[i]}", '["salt-master", "salt-common", "salt-api", "salt-minion"]')
+                                installSaltStack("I@salt:master and ${saltTargets[i]}", '["salt-master", "salt-common", "salt-api", "salt-minion"]', true)
                             }
                             if(salt.minionsReachable(pepperEnv, 'I@salt:master', "I@salt:minion and not I@salt:master and ${saltTargets[i]}")){
                                 installSaltStack("I@salt:minion and not I@salt:master and ${saltTargets[i]}", '["salt-minion"]')
@@ -128,7 +135,7 @@
                         common.infoMsg("During salt-minion upgrade on cfg node, pipeline lose connectivy to salt-master for 2 min. If pipeline ended with error rerun pipeline again.")
                         common.retry(10, 5) {
                             if(salt.minionsReachable(pepperEnv, 'I@salt:master', "I@salt:master and ${saltTargets[i]}")){
-                                installSaltStack("I@salt:master and ${saltTargets[i]}", '["salt-master", "salt-common", "salt-api", "salt-minion"]')
+                                installSaltStack("I@salt:master and ${saltTargets[i]}", '["salt-master", "salt-common", "salt-api", "salt-minion"]', true)
                             }
                             if(salt.minionsReachable(pepperEnv, 'I@salt:master', "I@salt:minion and not I@salt:master and ${saltTargets[i]}")){
                                 installSaltStack("I@salt:minion and not I@salt:master and ${saltTargets[i]}", '["salt-minion"]')
diff --git a/upgrade-mcp-release.groovy b/upgrade-mcp-release.groovy
index 14ab3a0..de24a41 100644
--- a/upgrade-mcp-release.groovy
+++ b/upgrade-mcp-release.groovy
@@ -33,11 +33,9 @@
 }
 
 def updateSaltStack(target, pkgs) {
-    // wait 2 mins when salt-* packages are updated which leads to salt-* services restart
-    common.retry(2, 120) {
-        salt.runSaltProcessStep(venvPepper, target, 'pkg.install', ["force_yes=True", "pkgs='$pkgs'"], null, true, 5)
-    }
-
+    salt.cmdRun(venvPepper, "I@salt:master", "salt -C '${target}' --async pkg.install force_yes=True pkgs='$pkgs'")
+    // can't use the same helper from the pipeline lib here: while the upgrade pipeline
+    // is running, Jenkins still loads the pipeline lib from the current (old) mcp-version
     common.retry(20, 60) {
         salt.minionsReachable(venvPepper, 'I@salt:master', '*')
         def running = salt.runSaltProcessStep(venvPepper, target, 'saltutil.running', [], null, true, 5)
diff --git a/xtrabackup-restore-mysql-db.groovy b/xtrabackup-restore-mysql-db.groovy
deleted file mode 100644
index b1d4a4e..0000000
--- a/xtrabackup-restore-mysql-db.groovy
+++ /dev/null
@@ -1,92 +0,0 @@
-/**
- * Update packages on given nodes
- *
- * Expected parameters:
- *   SALT_MASTER_CREDENTIALS    Credentials to the Salt API.
- *   SALT_MASTER_URL            Full Salt API address [http://10.10.10.1:8000].
- *
-**/
-
-def common = new com.mirantis.mk.Common()
-def salt = new com.mirantis.mk.Salt()
-def python = new com.mirantis.mk.Python()
-
-def pepperEnv = "pepperEnv"
-timeout(time: 12, unit: 'HOURS') {
-    node() {
-
-        stage('Setup virtualenv for Pepper') {
-            python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
-        }
-
-        stage('Start restore') {
-            // # actual upgrade
-
-            stage('Ask for manual confirmation') {
-                input message: "Are you sure you have the correct backups ready? Do you really want to continue to restore mysql db?"
-            }
-            // database restore section
-            try {
-                salt.runSaltProcessStep(pepperEnv, 'I@galera:slave', 'service.stop', ['mysql'], null, true)
-            } catch (Exception er) {
-                common.warningMsg('Mysql service already stopped')
-            }
-            try {
-                salt.runSaltProcessStep(pepperEnv, 'I@galera:master', 'service.stop', ['mysql'], null, true)
-            } catch (Exception er) {
-                common.warningMsg('Mysql service already stopped')
-            }
-            try {
-                salt.cmdRun(pepperEnv, 'I@galera:slave', "rm /var/lib/mysql/ib_logfile*")
-            } catch (Exception er) {
-                common.warningMsg('Files are not present')
-            }
-            try {
-                salt.cmdRun(pepperEnv, 'I@galera:master', "mkdir -p /root/mysql/mysql.bak")
-            } catch (Exception er) {
-                common.warningMsg('Directory already exists')
-            }
-            try {
-                salt.cmdRun(pepperEnv, 'I@galera:master', "mv /var/lib/mysql/* /root/mysql/mysql.bak")
-            } catch (Exception er) {
-                common.warningMsg('Files were already moved')
-            }
-            try {
-                salt.cmdRun(pepperEnv, 'I@galera:master', "rm -rf /var/lib/mysql/*")
-            } catch (Exception er) {
-                common.warningMsg('Directory already empty')
-            }
-            try {
-                salt.runSaltProcessStep(pepperEnv, 'I@galera:master', 'file.remove', ["/var/lib/mysql/.galera_bootstrap"], null, true)
-            } catch (Exception er) {
-                common.warningMsg('File is not present')
-            }
-            salt.cmdRun(pepperEnv, 'I@galera:master', "sed -i '/gcomm/c\\wsrep_cluster_address=\"gcomm://\"' /etc/mysql/my.cnf")
-            _pillar = salt.getPillar(pepperEnv, "I@galera:master", 'xtrabackup:client:backup_dir')
-            backup_dir = _pillar['return'][0].values()[0]
-            if(backup_dir == null || backup_dir.isEmpty()) { backup_dir='/var/backups/mysql/xtrabackup' }
-            print(backup_dir)
-            salt.runSaltProcessStep(pepperEnv, 'I@galera:master', 'file.remove', ["${backup_dir}/dbrestored"], null, true)
-            salt.runSaltProcessStep(pepperEnv, 'I@galera:master', 'state.apply', ["xtrabackup.client.restore"], null, true)
-            salt.runSaltProcessStep(pepperEnv, 'I@galera:master', 'service.start', ['mysql'], null, true)
-
-            // wait until mysql service on galera master is up
-            salt.commandStatus(pepperEnv, 'I@galera:master', 'service mysql status', 'running')
-
-            salt.runSaltProcessStep(pepperEnv, 'I@galera:slave', 'service.start', ['mysql'], null, true)
-            try {
-                salt.commandStatus(pepperEnv, 'I@galera:slave', 'service mysql status', 'running')
-            } catch (Exception er) {
-                common.warningMsg('Either there are no galera slaves or something failed when starting mysql on galera slaves')
-            }
-            sleep(5)
-            salt.cmdRun(pepperEnv, 'I@galera:master', "su root -c 'salt-call mysql.status | grep -A1 wsrep_cluster_size'")
-
-            try {
-                salt.runSaltProcessStep(pepperEnv, 'I@galera:master or I@galera:slave', 'file.touch', ["/var/lib/mysql/.galera_bootstrap"], null, true)
-            } catch (Exception er) {
-                common.warningMsg('File is already present')
-            }
-        }
-    }
-}