Merge "Backporting CVP Shaker to 2019.2.0" into release/2019.2.0
diff --git a/ceph-add-osd-upmap.groovy b/ceph-add-osd-upmap.groovy
new file mode 100644
index 0000000..26ed68e
--- /dev/null
+++ b/ceph-add-osd-upmap.groovy
@@ -0,0 +1,137 @@
+/**
+ *
+ * Add a Ceph OSD node to an existing cluster using the upmap mechanism
+ *
+ * Required parameters:
+ *  SALT_MASTER_URL             URL of Salt master
+ *  SALT_MASTER_CREDENTIALS     Credentials to the Salt API
+ *  HOST                        Host (minion id) to be added
+ *
+ */
+
+common = new com.mirantis.mk.Common()
+salt = new com.mirantis.mk.Salt()
+def python = new com.mirantis.mk.Python()
+orchestrate = new com.mirantis.mk.Orchestrate()
+
+def waitForHealthy(master, count=0, attempts=100) {
+    // poll 'ceph health' every 10 seconds, up to 'attempts' times; returns quietly even if HEALTH_OK is never reached
+    while (count<attempts) {
+        def health = runCephCommand('ceph health')['return'][0].values()[0]
+        if (health.contains('HEALTH_OK')) {
+            common.infoMsg('Cluster is healthy')
+            break;
+        }
+        count++
+        sleep(10)
+    }
+}
+
+def runCephCommand(cmd) {
+  return salt.cmdRun("pepperEnv", "I@ceph:mon and I@ceph:common:keyring:admin", cmd, checkResponse=true, batch=null, output=false)
+}
+
+def getpgmap(master) {
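+  // 'ceph pg ls remapped --format=json' prints a JSON list of PG records;
+  // the fields used below look like {"pgid": "1.2f", "up": [3, 7], "acting": [3, 5]}
+  // (illustrative values)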
+  return runCephCommand('ceph pg ls remapped --format=json')['return'][0].values()[0]
+}
+
+def generatemapping(master,pgmap,map) {
+  def pg_new
+  def pg_old
+
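+  // the upmap trick: for every remapped PG, map each newly assigned OSD
+  // (up minus acting) back onto the OSD that still holds the data (acting
+  // minus up). E.g. up=[3, 7] and acting=[3, 5] yields
+  // "ceph osd pg-upmap-items <pgid> 7 5;", pinning the PG in place so the
+  // balancer can later move it in small increments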
+  for ( pg in pgmap )
+  {
+
+    pg_new = pg["up"].minus(pg["acting"])
+    pg_old = pg["acting"].minus(pg["up"])
+
+    for ( int i = 0; i < pg_new.size(); i++ )
+    {
+      def string = "ceph osd pg-upmap-items " + pg["pgid"].toString() + " " + pg_new[i] + " " + pg_old[i] + ";"
+      map.add(string)
+    }
+
+  }
+}
+
+def pepperEnv = "pepperEnv"
+
+timeout(time: 12, unit: 'HOURS') {
+    node("python") {
+
+        // create connection to salt master
+        python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+
+        stage ("verify client versions")
+        {
+          def admin  = salt.getMinions("pepperEnv", "I@ceph:mon and I@ceph:common:keyring:admin")[0]
+          def versions = salt.cmdRun("pepperEnv", admin, "ceph features", checkResponse=true, batch=null, output=false).values()[0]
+
+          if ( versions[0][admin].contains('jewel') )
+          {
+            throw new Exception("Update all clients to luminous before using this pipeline")
+          }
+        }
+
+        stage ("enable luminous compat")
+        {
+          runCephCommand('ceph osd set-require-min-compat-client luminous')
+        }
+
+        stage ("enable upmap balancer")
+        {
+          runCephCommand('ceph balancer on')
+          runCephCommand('ceph balancer mode upmap')
+        }
+
+
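+        // norebalance keeps data migration from starting the moment the new
+        // OSDs come up; PGs are first pinned to their current location via
+        // upmap and then handed over to the balancer gradually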
+        stage ("set norebalance")
+        {
+          runCephCommand('ceph osd set norebalance')
+        }
+
+        stage('Install Ceph OSD') {
+            orchestrate.installCephOsd(pepperEnv, HOST)
+        }
+
+        def mapping = []
+
+        stage ("update mappings")
+        {
+          // run three passes: each pass pins the PGs that are currently
+          // remapped, then re-checks, since new remappings can appear while
+          // the commands are being applied
+          for (int x = 1; x <= 3; x++) {
+            def pgmap = getpgmap(pepperEnv)
+            if (pgmap == '') {
+              return 1
+            }
+            pgmap = new groovy.json.JsonSlurperClassic().parseText(pgmap)
+            generatemapping(pepperEnv, pgmap, mapping)
+            // re-applying an already-set pg-upmap-items entry is harmless, so
+            // the whole accumulated list can simply be replayed each pass
+            mapping.each(this.&runCephCommand)
+          }
+        }
+
+        stage ("unset norebalance")
+        {
+          runCephCommand('ceph osd unset norebalance')
+        }
+
+        stage ("wait for healthy cluster")
+        {
+          waitForHealthy(pepperEnv)
+        }
+
+    }
+}
diff --git a/ceph-remove-osd.groovy b/ceph-remove-osd.groovy
index 169bbd0..b89f3f2 100644
--- a/ceph-remove-osd.groovy
+++ b/ceph-remove-osd.groovy
@@ -119,8 +119,14 @@
         }
 
         // wait for healthy cluster
-        if (WAIT_FOR_HEALTHY.toBoolean()) {
-            waitForHealthy(pepperEnv)
+        // if (WAIT_FOR_HEALTHY.toBoolean()) {
+        //     waitForHealthy(pepperEnv)
+        // }
+
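+        // nothing matched the given OSD ids; treat the run as a no-op success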
+        if ( osd_ids == [] )
+        {
+          currentBuild.result = 'SUCCESS'
+          return
         }
 
         // `ceph osd out <id> <id>`
diff --git a/ceph-replace-failed-osd.groovy b/ceph-replace-failed-osd.groovy
index 601d74d..b78b85a 100644
--- a/ceph-replace-failed-osd.groovy
+++ b/ceph-replace-failed-osd.groovy
@@ -9,245 +9,22 @@
  *  HOST                                Host (minion id) to be removed
  *  ADMIN_HOST                          Host (minion id) with admin keyring and /etc/crushmap file present
  *  OSD                                 Failed OSD ids to be replaced (comma-separated list - 1,2,3)
- *  DEVICE                              Comma separated list of failed devices that will be replaced at HOST (/dev/sdb,/dev/sdc)
- *  JOURNAL_BLOCKDB_BLOCKWAL_PARTITION  Comma separated list of partitions where journal or block_db or block_wal for the failed devices on this HOST were stored (/dev/sdh2,/dev/sdh3)
- *  DATA_PARTITION                      Comma separated list of mounted partitions of failed device. These partitions will be unmounted. For ex. /dev/sdb1,/dev/sdb3
  *  CLUSTER_FLAGS                       Comma separated list of tags to apply to cluster
- *  WAIT_FOR_HEALTHY                    Wait for cluster rebalance before stoping daemons
- *  DMCRYPT                             Set to True if replacing osds are/were encrypted
  *
  */
 
-common = new com.mirantis.mk.Common()
-salt = new com.mirantis.mk.Salt()
-def python = new com.mirantis.mk.Python()
-
-def pepperEnv = "pepperEnv"
-def flags = CLUSTER_FLAGS.tokenize(',')
-def osds = OSD.tokenize(',')
-def devices = DEVICE.tokenize(',')
-def journals_blockdbs_blockwals = JOURNAL_BLOCKDB_BLOCKWAL_PARTITION.tokenize(',')
-def mounted_partitions = DATA_PARTITION.tokenize(',')
-
-
-def runCephCommand(master, target, cmd) {
-    return salt.cmdRun(master, target, cmd)
-}
-
-def waitForHealthy(master, count=0, attempts=300) {
-    // wait for healthy cluster
-    while (count<attempts) {
-        def health = runCephCommand(master, ADMIN_HOST, 'ceph health')['return'][0].values()[0]
-        if (health.contains('HEALTH_OK')) {
-            common.infoMsg('Cluster is healthy')
-            break;
-        }
-        count++
-        sleep(10)
-    }
-}
 timeout(time: 12, unit: 'HOURS') {
     node("python") {
+      stage ('remove OSD') {
+        build job: 'ceph-remove-osd', parameters: [
+          [$class: 'StringParameterValue', name: 'OSD', value: OSD],
+          [$class: 'StringParameterValue', name: 'HOST', value: HOST],
+          [$class: 'StringParameterValue', name: 'SALT_MASTER_CREDENTIALS', value: SALT_MASTER_CREDENTIALS],
+          [$class: 'StringParameterValue', name: 'SALT_MASTER_URL', value: SALT_MASTER_URL],
+          [$class: 'StringParameterValue', name: 'CLUSTER_FLAGS', value: CLUSTER_FLAGS],
+          [$class: 'StringParameterValue', name: 'ADMIN_HOST', value: ADMIN_HOST]
+        ]
+      }
 
-        // create connection to salt master
-        python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+      stage ('replace failed disk') {
+        input("Replace failed disk and click proceed")
+      }
 
-        def osd_ids = []
-
-        for (osd_id in osds) {
-            osd_ids.add('osd.' + osd_id)
-            print("Will delete " + osd_id)
-        }
-
-        // `ceph osd out <id> <id>`
-        stage('Set OSDs out') {
-            runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd out ' + osd_ids.join(' '))
-        }
-
-        // wait for healthy cluster
-        if (WAIT_FOR_HEALTHY.toBoolean() == true) {
-            sleep(5)
-            waitForHealthy(pepperEnv)
-        }
-
-
-        if (flags.size() > 0) {
-            stage('Set cluster flags') {
-                for (flag in flags) {
-                    runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd set ' + flag)
-                }
-            }
-        }
-
-        // stop osd daemons
-        stage('Stop OSD daemons') {
-            for (i in osd_ids) {
-                salt.runSaltProcessStep(pepperEnv, HOST, 'service.stop', ['ceph-osd@' + i.replaceAll('osd.', '')],  null, true)
-            }
-        }
-        /*
-        // `ceph osd crush remove osd.2`
-        stage('Remove OSDs from CRUSH') {
-            for (i in osd_ids) {
-                runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd crush remove ' + i)
-            }
-        }
-
-        // wait for pgs to rebalance
-        if (WAIT_FOR_PG_REBALANCE.toBoolean() == true) {
-            stage('Waiting for pgs to rebalance') {
-                while (true) {
-                    def status = runCephCommand(pepperEnv, ADMIN_HOST, 'ceph -s')['return'][0].values()[0]
-                    if (!status.contains('degraded')) {
-                        common.infoMsg('PGs rebalanced')
-                        break;
-                    }
-                    sleep(10)
-                }
-            }
-        }
-        */
-        // remove keyring `ceph auth del osd.3`
-        stage('Remove OSD keyrings from auth') {
-            for (i in osd_ids) {
-                runCephCommand(pepperEnv, ADMIN_HOST, 'ceph auth del ' + i)
-            }
-        }
-
-        // remove osd `ceph osd rm osd.3`
-        stage('Remove OSDs') {
-            for (i in osd_ids) {
-                runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd rm ' + i)
-            }
-        }
-
-        if (DMCRYPT.toBoolean() == true) {
-
-            // remove partition tables
-            stage('dd / zap device') {
-                for (dev in devices) {
-                    runCephCommand(pepperEnv, HOST, "dd if=/dev/zero of=${dev} bs=4096k count=1 conv=notrunc")
-                    try {
-                        runCephCommand(pepperEnv, HOST, "sgdisk --zap-all --clear --mbrtogpt -g -- ${dev}")
-                    } catch (Exception e) {
-                        common.warningMsg(e)
-                    }
-                }
-            }
-
-            // remove journal, block_db or block_wal partition `parted /dev/sdj rm 3`
-            stage('Remove journal / block_db / block_wal partitions') {
-                for (partition in journals_blockdbs_blockwals) {
-                    if (partition?.trim()) {
-                        // dev = /dev/sdi
-                        def dev = partition.replaceAll("[0-9]", "")
-                        // part_id = 2
-                        def part_id = partition.substring(partition.lastIndexOf("/")+1).replaceAll("[^0-9]+", "")
-                        try {
-                            runCephCommand(pepperEnv, HOST, "Ignore | parted ${dev} rm ${part_id}")
-                        } catch (Exception e) {
-                            common.warningMsg(e)
-                        }
-                    }
-                }
-            }
-
-            // reboot
-            stage('reboot and wait') {
-                salt.runSaltProcessStep(pepperEnv, HOST, 'system.reboot', null, null, true, 5)
-                salt.minionsReachable(pepperEnv, 'I@salt:master', HOST)
-                sleep(10)
-            }
-
-
-            // zap disks `ceph-disk zap /dev/sdi`
-            stage('Zap devices') {
-                for (dev in devices) {
-                    try {
-                        runCephCommand(pepperEnv, HOST, 'ceph-disk zap ' + dev)
-                    } catch (Exception e) {
-                        common.warningMsg(e)
-                    }
-                    runCephCommand(pepperEnv, HOST, 'ceph-disk zap ' + dev)
-                }
-            }
-
-        } else {
-
-            // umount `umount /dev/sdi1`
-            stage('Umount partitions') {
-                if (mounted_partitions == null || mounted_partitions.empty) {
-                    for (dev in devices) {
-                        try {
-                            runCephCommand(pepperEnv, HOST, 'umount ' + dev + '1')
-                        } catch (Exception e) {
-                            common.warningMsg(e)
-                        }
-                    }
-                } else {
-                    for (part in mounted_partitions) {
-                        try {
-                            runCephCommand(pepperEnv, HOST, 'umount ' + part)
-                        } catch (Exception e) {
-                            common.warningMsg(e)
-                        }
-                    }
-                }
-            }
-
-            // zap disks `ceph-disk zap /dev/sdi`
-            stage('Zap devices') {
-                for (dev in devices) {
-                    runCephCommand(pepperEnv, HOST, 'ceph-disk zap ' + dev)
-                }
-            }
-
-            // remove journal, block_db or block_wal partition `parted /dev/sdj rm 3`
-            stage('Remove journal / block_db / block_wal partitions') {
-                for (partition in journals_blockdbs_blockwals) {
-                    if (partition?.trim()) {
-                        // dev = /dev/sdi
-                        def dev = partition.replaceAll("[0-9]", "")
-                        // part_id = 2
-                        def part_id = partition.substring(partition.lastIndexOf("/")+1).replaceAll("[^0-9]", "")
-                        try {
-                            runCephCommand(pepperEnv, HOST, "parted ${dev} rm ${part_id}")
-                        } catch (Exception e) {
-                            common.warningMsg(e)
-                        }
-                    }
-                }
-            }
-        }
-
-        // Deploy failed Ceph OSD
-        stage('Deploy Ceph OSD') {
-            salt.enforceState(pepperEnv, HOST, 'ceph.osd', true)
-        }
-
-        // remove cluster flags
-        if (flags.size() > 0) {
-            stage('Unset cluster flags') {
-                for (flag in flags) {
-                    common.infoMsg('Removing flag ' + flag)
-                    runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd unset ' + flag)
-                }
-            }
-        }
-
-        /*
-        if (ENFORCE_CRUSHMAP.toBoolean() == true) {
-
-            // enforce crushmap `crushtool -c /etc/ceph/crushmap -o /etc/ceph/crushmap.compiled; ceph osd setcrushmap -i /etc/ceph/crushmap.compiled`
-            stage('Enforce crushmap') {
-
-                stage('Ask for manual confirmation') {
-                    input message: "Are you sure that your ADMIN_HOST has correct /etc/ceph/crushmap file? Click proceed to compile and enforce crushmap."
-                }
-                runCephCommand(pepperEnv, ADMIN_HOST, 'crushtool -c /etc/ceph/crushmap -o /etc/ceph/crushmap.compiled')
-                runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd setcrushmap -i /etc/ceph/crushmap.compiled')
-            }
-        }
-        */
+      stage ('add new osd') {
+        build job: 'ceph-add-osd-upmap', parameters: [
+          [$class: 'StringParameterValue', name: 'HOST', value: HOST],
+          [$class: 'StringParameterValue', name: 'SALT_MASTER_CREDENTIALS', value: SALT_MASTER_CREDENTIALS],
+          [$class: 'StringParameterValue', name: 'SALT_MASTER_URL', value: SALT_MASTER_URL]
+        ]
+      }
     }
-}
\ No newline at end of file
+}
diff --git a/ceph-upgrade.groovy b/ceph-upgrade.groovy
index ec232b8..86a1f0f 100644
--- a/ceph-upgrade.groovy
+++ b/ceph-upgrade.groovy
@@ -147,6 +147,31 @@
         // create connection to salt master
         python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
 
+        stage ('Check user choices') {
+            if (STAGE_UPGRADE_RGW.toBoolean() == true) {
+                // RGW must be upgraded last: verify the other components already run the required release
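+                // 'ceph mon/mgr/osd versions' returns a map keyed by the full version
+                // string, e.g. {"ceph version 12.2.11 (...) luminous (stable)": 3},
+                // so a release-codename substring check is sufficient here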
+                def mon_ok = true
+                if (STAGE_UPGRADE_MON.toBoolean() == false) {
+                    def mon_v = runCephCommand(pepperEnv, ADMIN_HOST, "ceph mon versions")['return'][0].values()[0]
+                    mon_ok = mon_v.contains("${TARGET_RELEASE}") && !mon_v.contains("${ORIGIN_RELEASE}")
+                }
+                def mgr_ok = true
+                if (STAGE_UPGRADE_MGR.toBoolean() == false) {
+                    def mgr_v = runCephCommand(pepperEnv, ADMIN_HOST, "ceph mgr versions")['return'][0].values()[0]
+                    mgr_ok = mgr_v.contains("${TARGET_RELEASE}") && !mgr_v.contains("${ORIGIN_RELEASE}")
+                }
+                def osd_ok = true
+                if (STAGE_UPGRADE_OSD.toBoolean() == false) {
+                    def osd_v = runCephCommand(pepperEnv, ADMIN_HOST, "ceph osd versions")['return'][0].values()[0]
+                    osd_ok = osd_v.contains("${TARGET_RELEASE}") && !osd_v.contains("${ORIGIN_RELEASE}")
+                }
+                if (!mon_ok || !osd_ok || !mgr_ok) {
+                    common.errorMsg('You may choose stages in any order, but RGW should be upgraded last')
+                    throw new InterruptedException()
+                }
+            }
+        }
+
         if (BACKUP_ENABLED.toBoolean() == true) {
             if (STAGE_UPGRADE_MON.toBoolean() == true) {
                 backup(pepperEnv, 'mon')
diff --git a/cloud-deploy-pipeline.groovy b/cloud-deploy-pipeline.groovy
index 3084f4b..6b6ec4e 100644
--- a/cloud-deploy-pipeline.groovy
+++ b/cloud-deploy-pipeline.groovy
@@ -433,6 +433,7 @@
                     // Setup kubernetes addons for opencontrail. More info in the definition of the func.
                     orchestrate.setupKubeAddonForContrail(venvPepper, extra_tgt)
                 }
+                orchestrate.installKubernetesClient(venvPepper, extra_tgt)
             }
 
             // install ceph
diff --git a/cloud-update.groovy b/cloud-update.groovy
index 3fd7723..99c661c 100644
--- a/cloud-update.groovy
+++ b/cloud-update.groovy
@@ -60,6 +60,17 @@
     wait = "${MINIONS_TEST_TIMEOUT}".toInteger()
 }
 
+def updateSaltPackage(pepperEnv, target, pkgs, masterUpdate = false) {
+    def salt = new com.mirantis.mk.Salt()
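+    // run pkg.install asynchronously via the master: a synchronous call would be
+    // lost when the salt-minion (or salt-master) service restarts during its own upgrade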
+    salt.cmdRun(pepperEnv, "I@salt:master", "salt -C '${target}' --async pkg.install force_yes=True pkgs='$pkgs'")
+    def minions_reachable = target
+    if (masterUpdate) {
+        // in case of update Salt Master packages - check all minions are good
+        minions_reachable = '*'
+    }
+    salt.checkTargetMinionsReady(['saltId': pepperEnv, 'target': target, 'target_reachable': minions_reachable])
+}
+
 def updatePkgs(pepperEnv, target, targetType="", targetPackages="") {
     def salt = new com.mirantis.mk.Salt()
     def common = new com.mirantis.mk.Common()
@@ -155,12 +166,10 @@
         // salt master pkg
         if (targetType == 'cfg') {
             common.warningMsg('salt-master pkg upgrade, rerun the pipeline if disconnected')
-            salt.runSaltProcessStep(pepperEnv, target, 'pkg.install', ['salt-master'], null, true, 5)
-            salt.minionsReachable(pepperEnv, 'I@salt:master', '*', null, wait)
+            updateSaltPackage(pepperEnv, target, '["salt-master"]', true)
         }
         // salt minion pkg
-        salt.runSaltProcessStep(pepperEnv, target, 'pkg.install', ['salt-minion'], null, true, 5)
-        salt.minionsReachable(pepperEnv, 'I@salt:master', target, null, wait)
+        updateSaltPackage(pepperEnv, target, '["salt-minion"]')
         common.infoMsg('Performing pkg upgrades ... ')
         common.retry(3){
             out = salt.runSaltCommand(pepperEnv, 'local', ['expression': target, 'type': 'compound'], command, true, packages, commandKwargs)
diff --git a/cvp-func.groovy b/cvp-func.groovy
index 0c657a5..4a231dc 100644
--- a/cvp-func.groovy
+++ b/cvp-func.groovy
@@ -31,6 +31,10 @@
     try{
         stage('Initialization') {
             sh "rm -rf ${artifacts_dir}"
+            if (!TARGET_NODE) {
+              // fall back to the Gerrit client node (this pillar target resolves to cid01)
+              TARGET_NODE = "I@gerrit:client"
+            }
             saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
             salt.cmdRun(saltMaster, TARGET_NODE, "rm -rf ${remote_artifacts_dir}")
             salt.cmdRun(saltMaster, TARGET_NODE, "mkdir -p ${remote_artifacts_dir}")
diff --git a/cvp-ha.groovy b/cvp-ha.groovy
index b33cda6..e96a34c 100644
--- a/cvp-ha.groovy
+++ b/cvp-ha.groovy
@@ -37,6 +37,10 @@
         try {
             stage('Initialization') {
                 sh "rm -rf ${artifacts_dir}"
+                if (!TEMPEST_TARGET_NODE) {
+                  // fall back to the Gerrit client node (this pillar target resolves to cid01)
+                  TEMPEST_TARGET_NODE = "I@gerrit:client"
+                }
                 saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
                 salt.cmdRun(saltMaster, TEMPEST_TARGET_NODE, "rm -rf ${remote_artifacts_dir}")
                 salt.cmdRun(saltMaster, TEMPEST_TARGET_NODE, "mkdir -p ${remote_artifacts_dir}")
diff --git a/cvp-perf.groovy b/cvp-perf.groovy
index 74c9a63..62f5226 100644
--- a/cvp-perf.groovy
+++ b/cvp-perf.groovy
@@ -27,6 +27,10 @@
     try{
         stage('Initialization') {
             sh "rm -rf ${artifacts_dir}"
+            if (!TARGET_NODE) {
+              // fall back to the Gerrit client node (this pillar target resolves to cid01)
+              TARGET_NODE = "I@gerrit:client"
+            }
             saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
             salt.cmdRun(saltMaster, TARGET_NODE, "rm -rf ${remote_artifacts_dir}")
             salt.cmdRun(saltMaster, TARGET_NODE, "mkdir -p ${remote_artifacts_dir}")
diff --git a/galera-cluster-verify-restore.groovy b/galera-cluster-verify-restore.groovy
index 2a575cc..4c1d63a 100644
--- a/galera-cluster-verify-restore.groovy
+++ b/galera-cluster-verify-restore.groovy
@@ -4,6 +4,8 @@
  * Expected parameters:
  *   SALT_MASTER_CREDENTIALS    Credentials to the Salt API.
  *   SALT_MASTER_URL            Full Salt API address [http://10.10.10.1:8000].
+ *   ASK_CONFIRMATION           Ask for confirmation before running the restore.
+ *   VERIFICATION_RETRIES       Number of retries to verify the restoration.
  *
 **/
 
@@ -11,10 +13,17 @@
 def salt = new com.mirantis.mk.Salt()
 def openstack = new com.mirantis.mk.Openstack()
 def python = new com.mirantis.mk.Python()
-
 def pepperEnv = "pepperEnv"
 def resultCode = 99
 
+askConfirmation = (env.getProperty('ASK_CONFIRMATION') ?: true).toBoolean()
+
+if (common.validInputParam('VERIFICATION_RETRIES') && VERIFICATION_RETRIES.isInteger()) {
+    verificationRetries = VERIFICATION_RETRIES.toInteger()
+} else {
+    verificationRetries = 5
+}
+
 timeout(time: 12, unit: 'HOURS') {
     node() {
         stage('Setup virtualenv for Pepper') {
@@ -29,33 +38,52 @@
                 if (resultCode == 129) {
                     common.errorMsg("Unable to obtain Galera slave minions list". "Without fixing this issue, pipeline cannot continue in verification and restoration.")
                     currentBuild.result = "FAILURE"
+                    return
                 } else if (resultCode == 130) {
                     common.errorMsg("Neither master or slaves are reachable. Without fixing this issue, pipeline cannot continue in verification and restoration.")
                     currentBuild.result = "FAILURE"
+                    return
                 }
             }
             if (resultCode == 1) {
-                common.warningMsg("There was a problem with parsing the status output or with determining it. Do you want to run a restore?")
+                if(askConfirmation){
+                    common.warningMsg("There was a problem with parsing the status output or with determining it. Do you want to run a restore?")
+                } else {
+                    common.warningMsg("There was a problem with parsing the status output or with determining it. Try to restore.")
+                }
             } else if (resultCode > 1) {
-                common.warningMsg("There's something wrong with the cluster, do you want to run a restore?")
+                if(askConfirmation){
+                    common.warningMsg("There's something wrong with the cluster, do you want to run a restore?")
+                } else {
+                    common.warningMsg("There's something wrong with the cluster, try to restore.")
+                }
             } else {
-                common.warningMsg("There seems to be everything alright with the cluster, do you still want to run a restore?")
+                if(askConfirmation){
+                  common.warningMsg("There seems to be everything alright with the cluster, do you still want to run a restore?")
+                } else {
+                  common.warningMsg("There seems to be everything alright with the cluster, do nothing")
+                }
             }
-            input message: "Are you sure you want to run a restore? Click to confirm"
+            if(askConfirmation){
+              input message: "Are you sure you want to run a restore? Click to confirm"
+            }
             try {
-                openstack.restoreGaleraDb(pepperEnv)
+                if((!askConfirmation && resultCode > 0) || askConfirmation){
+                  openstack.restoreGaleraDb(pepperEnv)
+                }
             } catch (Exception e) {
                 common.errorMsg("Restoration process has failed.")
             }
         }
         stage('Verify restoration result') {
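+            // the cluster may need time to come up and sync after the restore,
+            // so verification is retried rather than failed on the first attempt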
-            exitCode = openstack.verifyGaleraStatus(pepperEnv, false)
-            if (exitCode >= 1) {
-                common.errorMsg("Restoration procedure was probably not successful. See verification report for more information.")
-                currentBuild.result = "FAILURE"
-            } else {
-                common.infoMsg("Restoration procedure seems to be successful. See verification report to be sure.")
-                currentBuild.result = "SUCCESS"
+            common.retry(verificationRetries, 15) {
+                exitCode = openstack.verifyGaleraStatus(pepperEnv, false)
+                if (exitCode >= 1) {
+                    error("Verification attempt finished with an error. This may be caused by cluster not having enough time to come up or to sync. Next verification attempt in 5 seconds.")
+                } else {
+                    common.infoMsg("Restoration procedure seems to be successful. See verification report to be sure.")
+                    currentBuild.result = "SUCCESS"
+                }
             }
         }
     }
diff --git a/git-mirror-pipeline.groovy b/git-mirror-pipeline.groovy
index 8bfe467..8766678 100644
--- a/git-mirror-pipeline.groovy
+++ b/git-mirror-pipeline.groovy
@@ -4,7 +4,11 @@
   timeout(time: 12, unit: 'HOURS') {
     node() {
       try{
-        def branches = BRANCHES.tokenize(',')
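+        // a BRANCHES value containing '*' is treated as a wildcard pattern and
+        // expanded to the list of matching branches of SOURCE_URL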
+        if (BRANCHES.contains('*')) {
+          branches = git.getBranchesForGitRepo(SOURCE_URL, BRANCHES)
+        } else {
+          branches = BRANCHES.tokenize(',')
+        }
         def pollBranches = []
         for (i=0; i < branches.size(); i++) {
             pollBranches.add([name:branches[i]])
diff --git a/k8s-upgrade-pipeline.groovy b/k8s-upgrade-pipeline.groovy
index bd963eb..224040f 100644
--- a/k8s-upgrade-pipeline.groovy
+++ b/k8s-upgrade-pipeline.groovy
@@ -28,6 +28,8 @@
 *   KUBERNETES_CALICO_CNI_IPAM_SOURCE_HASH    Calico/ipam binary hash. Should be null if update rolling via reclass-system level
  *   KUBERNETES_CALICO_KUBE_CONTROLLERS_IMAGE  Target calico/kube-controllers image. May be null in case of reclass-system rollout.
  *   CALICO_UPGRADE_VERSION                    Version of "calico-upgrade" utility to be used ("v1.0.5" for Calico v3.1.3 target).
+ *   KUBERNETES_ETCD_SOURCE                    Target etcd binary. May be null in case of reclass-system rollout.
+ *   KUBERNETES_ETCD_SOURCE_HASH               Target etcd binary checksum. May be null in case of reclass-system rollout.
  *
 **/
 import groovy.json.JsonSlurper
@@ -87,6 +89,27 @@
     }
 }
 
+def overrideEtcdSource(pepperEnv) {
+    def salt = new com.mirantis.mk.Salt()
+
+    def k8sSaltOverrides = """
+        kubernetes_etcd_source: ${KUBERNETES_ETCD_SOURCE}
+        kubernetes_etcd_source_hash: ${KUBERNETES_ETCD_SOURCE_HASH}
+    """
+    stage("Override etcd binaries to target version") {
+        salt.setSaltOverrides(pepperEnv,  k8sSaltOverrides)
+    }
+}
+
+def performEtcdUpdateAndServicesRestart(pepperEnv, target) {
+    def salt = new com.mirantis.mk.Salt()
+
+    stage("Performing etcd update and services restart on ${target}") {
+        salt.enforceState(pepperEnv, target, "etcd.server.service")
+        salt.cmdRun(pepperEnv, target, ". /var/lib/etcd/configenv && etcdctl cluster-health")
+    }
+}
+
 def performKubernetesComputeUpdate(pepperEnv, target) {
     def salt = new com.mirantis.mk.Salt()
 
@@ -245,7 +268,7 @@
    def nodeShortName = target.tokenize(".")[0]
    firstTarget = salt.getFirstMinion(pepperEnv, originalTarget)
 
-   status = salt.cmdRun(pepperEnv, firstTarget, "kubectl get no | grep ${nodeShortName} | awk '{print \$2}'"
+   status = salt.cmdRun(pepperEnv, firstTarget, "kubectl get no ${nodeShortName} | tail -n+2 | awk '{print \$2}'"
    )['return'][0].values()[0].replaceAll('Salt command execution success',''
    ).replaceAll(',SchedulingDisabled','').trim()
 
@@ -262,6 +285,13 @@
 
     stage("Rebooting ${target}") {
         debian.osReboot(pepperEnv, target)
+        /*
+            * The Kubernetes controller manager marks a node as NotReady
+            * only after about 40 seconds of downtime.
+            * Wait 60 seconds to be sure the node has settled into its
+            * actual status before polling for readiness.
+        */
+        sleep(60)
         common.retry(times, delay) {
             if(!isNodeReady(pepperEnv, target)) {
                 error("Node still not in Ready state...")
@@ -322,11 +352,9 @@
 }
 
 def executeConformance(pepperEnv, target, k8s_api, mcp_repo) {
-    stage("Running conformance tests") {
-        def image = buildImageURL(pepperEnv, target, mcp_repo)
-        print("Using image: " + image)
-        runConformance(pepperEnv, target, k8s_api, image)
-    }
+    def image = buildImageURL(pepperEnv, target, mcp_repo)
+    print("Using image: " + image)
+    runConformance(pepperEnv, target, k8s_api, image)
 }
 
 def containerDinstalled(pepperEnv, target) {
@@ -646,21 +674,23 @@
             def daemonsetMap = buildDaemonsetMap(pepperEnv, ctl_node)
 
             if (CONFORMANCE_RUN_BEFORE.toBoolean()) {
-                def target = CTL_TARGET
-                def mcp_repo = ARTIFACTORY_URL
-                def k8s_api = TEST_K8S_API_SERVER
-                firstTarget = salt.getFirstMinion(pepperEnv, target)
-                def containerd_enabled = containerDenabled(pepperEnv, firstTarget)
-                def containerd_installed = containerDinstalled(pepperEnv, firstTarget)
-                def conformance_pod_ready = conformancePodDefExists(pepperEnv, firstTarget)
-                if (containerd_enabled && containerd_installed && conformance_pod_ready) {
-                    def config = ['master': pepperEnv,
-                                  'target': firstTarget,
-                                  'junitResults': false,
-                                  'autodetect': true]
-                    test.executeConformance(config)
-                } else {
-                    executeConformance(pepperEnv, firstTarget, k8s_api, mcp_repo)
+                stage("Perform conformance run before upgrade") {
+                    def target = CTL_TARGET
+                    def mcp_repo = ARTIFACTORY_URL
+                    def k8s_api = TEST_K8S_API_SERVER
+                    firstTarget = salt.getFirstMinion(pepperEnv, target)
+                    def containerd_enabled = containerDenabled(pepperEnv, firstTarget)
+                    def containerd_installed = containerDinstalled(pepperEnv, firstTarget)
+                    def conformance_pod_ready = conformancePodDefExists(pepperEnv, firstTarget)
+                    if (containerd_enabled && containerd_installed && conformance_pod_ready) {
+                        def config = ['master': pepperEnv,
+                                      'target': firstTarget,
+                                      'junitResults': false,
+                                      'autodetect': true]
+                        test.executeConformance(config)
+                    } else {
+                        executeConformance(pepperEnv, firstTarget, k8s_api, mcp_repo)
+                    }
                 }
             }
 
@@ -711,6 +741,17 @@
             }
 
             /*
+                * Execute etcd update
+            */
+            if ((common.validInputParam('KUBERNETES_ETCD_SOURCE')) && (common.validInputParam('KUBERNETES_ETCD_SOURCE_HASH'))) {
+                overrideEtcdSource(pepperEnv)
+            }
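+            // etcd members are updated one at a time so the cluster keeps quorum throughout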
+            def targetHostsEtcd = salt.getMinionsSorted(pepperEnv, "I@etcd:server")
+            for (t in targetHostsEtcd) {
+                performEtcdUpdateAndServicesRestart(pepperEnv, t)
+            }
+
+            /*
                 * Execute k8s update
             */
             if (updates.contains("ctl")) {
@@ -778,21 +819,23 @@
             printVersionInfo(pepperEnv, ctl_node)
 
             if (CONFORMANCE_RUN_AFTER.toBoolean()) {
-                def target = CTL_TARGET
-                def mcp_repo = ARTIFACTORY_URL
-                def k8s_api = TEST_K8S_API_SERVER
-                firstTarget = salt.getFirstMinion(pepperEnv, target)
-                def containerd_enabled = containerDenabled(pepperEnv, firstTarget)
-                def containerd_installed = containerDinstalled(pepperEnv, firstTarget)
-                def conformance_pod_ready = conformancePodDefExists(pepperEnv, firstTarget)
-                if (containerd_enabled && containerd_installed && conformance_pod_ready) {
-                    def config = ['master': pepperEnv,
-                                  'target': firstTarget,
-                                  'junitResults': false,
-                                  'autodetect': true]
-                    test.executeConformance(config)
-                } else {
-                    executeConformance(pepperEnv, firstTarget, k8s_api, mcp_repo)
+                stage("Perform conformance run after upgrade") {
+                    def target = CTL_TARGET
+                    def mcp_repo = ARTIFACTORY_URL
+                    def k8s_api = TEST_K8S_API_SERVER
+                    firstTarget = salt.getFirstMinion(pepperEnv, target)
+                    def containerd_enabled = containerDenabled(pepperEnv, firstTarget)
+                    def containerd_installed = containerDinstalled(pepperEnv, firstTarget)
+                    def conformance_pod_ready = conformancePodDefExists(pepperEnv, firstTarget)
+                    if (containerd_enabled && containerd_installed && conformance_pod_ready) {
+                        def config = ['master': pepperEnv,
+                                      'target': firstTarget,
+                                      'junitResults': false,
+                                      'autodetect': true]
+                        test.executeConformance(config)
+                    } else {
+                        executeConformance(pepperEnv, firstTarget, k8s_api, mcp_repo)
+                    }
                 }
             }
         } catch (Throwable e) {
diff --git a/opencontrail40-upgrade.groovy b/opencontrail40-upgrade.groovy
index 180ed85..2f89659 100644
--- a/opencontrail40-upgrade.groovy
+++ b/opencontrail40-upgrade.groovy
@@ -26,9 +26,9 @@
 def probe = 1
 def command = 'cmd.shell'
 
-def controlPkgs = 'contrail-config,contrail-config-openstack,contrail-control,contrail-dns,contrail-lib,contrail-nodemgr,contrail-utils,contrail-web-controller,contrail-web-core,neutron-plugin-contrail,python-contrail,contrail-database'
+def controlPkgs = 'contrail-config,contrail-config-openstack,contrail-control,contrail-dns,contrail-lib,contrail-nodemgr,contrail-utils,contrail-web-controller,contrail-web-core,neutron-plugin-contrail,contrail-database'
 def thirdPartyControlPkgsToRemove = 'zookeeper,libzookeeper-java,kafka,cassandra,redis-server,ifmap-server,supervisor'
-def analyticsPkgs = 'contrail-analytics,contrail-lib,contrail-nodemgr,contrail-utils,python-contrail,contrail-database'
+def analyticsPkgs = 'contrail-analytics,contrail-lib,contrail-nodemgr,contrail-utils,contrail-database'
 def thirdPartyAnalyticsPkgsToRemove = 'zookeeper,libzookeeper-java,kafka,cassandra,python-cassandra,cassandra-cpp-driver,redis-server,supervisor'
 def cmpPkgs = 'contrail-lib contrail-nodemgr contrail-utils contrail-vrouter-agent contrail-vrouter-utils python-contrail python-contrail-vrouter-api python-opencontrail-vrouter-netns contrail-vrouter-dkms'
 def neutronServerPkgs = 'neutron-plugin-contrail,contrail-heat,python-contrail'
diff --git a/restore-zookeeper.groovy b/restore-zookeeper.groovy
index 185f097..8afc5a5 100644
--- a/restore-zookeeper.groovy
+++ b/restore-zookeeper.groovy
@@ -12,6 +12,14 @@
 def python = new com.mirantis.mk.Python()
 
 def pepperEnv = "pepperEnv"
+
+def oc3SupervisorServices = ["supervisor-config", "supervisor-control"]
+def oc4ConfigServices = ["contrail-api", "contrail-schema", "contrail-svc-monitor", "contrail-device-manager", "contrail-config-nodemgr"]
+def oc4ControlServices = ["contrail-control", "contrail-named", "contrail-dns", "contrail-control-nodemgr"]
+def zkService = "zookeeper"
+def contrailStatusCheckCmd = "contrail-status | grep -v == | grep -v \'disabled on boot\' | grep -v nodemgr | grep -v active | grep -v backup"
+def zkDbPath = "/var/lib/zookeeper/version-2"
+
 timeout(time: 12, unit: 'HOURS') {
     node() {
 
@@ -20,65 +28,86 @@
         }
 
         stage('Restore') {
-            try {
-                salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.stop', ['supervisor-config'], null, true)
-            } catch (Exception er) {
-                common.warningMsg('Supervisor-config service already stopped')
+
+            def ocVersionPillarKey = salt.getReturnValues(salt.getPillar(pepperEnv, "I@opencontrail:control:role:primary", "_param:opencontrail_version"))
+
+            if (ocVersionPillarKey == '') {
+                throw new Exception("Cannot get value for _param:opencontrail_version key on I@opencontrail:control:role:primary target")
             }
-            try {
-                salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.stop', ['supervisor-control'], null, true)
-            } catch (Exception er) {
-                common.warningMsg('Supervisor-control service already stopped')
+
+            def ocVersion = ocVersionPillarKey.toString()
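+            // plain string comparison is sufficient for the OC versioning scheme:
+            // "4.0" and "4.1" compare >= "4.0", while "3.2" compares lower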
+
+            if (ocVersion >= "4.0") {
+
+                contrailStatusCheckCmd = "doctrail controller ${contrailStatusCheckCmd}"
+                zkDbPath = "/var/lib/config_zookeeper_data/version-2"
+
+                for (service in (oc4ConfigServices + oc4ControlServices + [zkService])) {
+                    try {
+                        salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'cmd.run', ["doctrail controller systemctl stop ${service}"])
+                    } catch (Exception er) {
+                        common.warningMsg("${service} cannot be stopped inside controller container")
+                    }
+                }
+                // wait until zookeeper service is down
+                salt.commandStatus(pepperEnv, 'I@opencontrail:control', "doctrail controller service ${zkService} status", 'Active: inactive')
+            } else {
+                for (service in (oc3SupervisorServices + [zkService])) {
+                    try {
+                        salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.stop', ["${service}"])
+                    } catch (Exception er) {
+                        common.warningMsg("${service} service cannot be stopped. It may be already stopped before.")
+                    }
+                }
+                // wait until zookeeper service is down
+                salt.commandStatus(pepperEnv, 'I@opencontrail:control', "service ${zkService} status", "stop")
             }
-            try {
-                salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.stop', ['zookeeper'], null, true)
-            } catch (Exception er) {
-                common.warningMsg('Zookeeper service already stopped')
-            }
-            //sleep(5)
-            // wait until zookeeper service is down
-            salt.commandStatus(pepperEnv, 'I@opencontrail:control', 'service zookeeper status', 'stop')
 
             try {
                 salt.cmdRun(pepperEnv, 'I@opencontrail:control', "mkdir -p /root/zookeeper/zookeeper.bak")
             } catch (Exception er) {
-                common.warningMsg('Directory already exists')
+                common.warningMsg('/root/zookeeper/zookeeper.bak directory already exists')
             }
 
             try {
-                salt.cmdRun(pepperEnv, 'I@opencontrail:control', "mv /var/lib/zookeeper/version-2/* /root/zookeeper/zookeeper.bak")
+                salt.cmdRun(pepperEnv, 'I@opencontrail:control', "mv ${zkDbPath}/* /root/zookeeper/zookeeper.bak")
             } catch (Exception er) {
                 common.warningMsg('Files were already moved')
             }
             try {
-                salt.cmdRun(pepperEnv, 'I@opencontrail:control', "rm -rf /var/lib/zookeeper/version-2/*")
+                salt.cmdRun(pepperEnv, 'I@opencontrail:control', "rm -rf ${zkDbPath}/*")
             } catch (Exception er) {
                 common.warningMsg('Directory already empty')
             }
 
-            _pillar = salt.getPillar(pepperEnv, "I@opencontrail:control", 'zookeeper:backup:backup_dir')
-            backup_dir = _pillar['return'][0].values()[0]
-            if(backup_dir == null || backup_dir.isEmpty()) { backup_dir='/var/backups/zookeeper' }
-            print(backup_dir)
-            salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'file.remove', ["${backup_dir}/dbrestored"], null, true)
+            backupDirPillarKey = salt.getPillar(pepperEnv, "I@opencontrail:control", 'zookeeper:backup:backup_dir')
+            backupDir = backupDirPillarKey['return'][0].values()[0]
+            if (backupDir == null || backupDir.isEmpty()) { backupDir='/var/backups/zookeeper' }
+            print(backupDir)
+            salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'file.remove', ["${backupDir}/dbrestored"])
 
             // performs restore
             salt.enforceState(pepperEnv, 'I@opencontrail:control', "zookeeper.backup")
 
-            salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.start', ['zookeeper'], null, true)
-            salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.start', ['supervisor-config'], null, true)
-            salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.start', ['supervisor-control'], null, true)
+            if (ocVersion >= "4.0") {
+                for (service in ([zkService] + oc4ConfigServices + oc4ControlServices)) {
+                    salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'cmd.run', ["doctrail controller systemctl start ${service}"])
+                }
+            } else {
+                for (service in ([zkService] + oc3SupervisorServices)) {
+                    salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.start', ["${service}"])
+                }
+            }
 
             // wait until contrail-status is up
-            salt.commandStatus(pepperEnv, 'I@opencontrail:control', "contrail-status | grep -v == | grep -v \'disabled on boot\' | grep -v nodemgr | grep -v active | grep -v backup", null, false)
+            salt.commandStatus(pepperEnv, 'I@opencontrail:control', contrailStatusCheckCmd, null, false)
 
-            salt.cmdRun(pepperEnv, 'I@opencontrail:control', "ls /var/lib/zookeeper/version-2")
+            salt.cmdRun(pepperEnv, 'I@opencontrail:control', "ls ${zkDbPath}")
             try {
                 salt.cmdRun(pepperEnv, 'I@opencontrail:control', "echo stat | nc localhost 2181")
             } catch (Exception er) {
                 common.warningMsg('Check which node is zookeeper leader')
             }
-            salt.cmdRun(pepperEnv, 'I@opencontrail:control', "contrail-status")
         }
     }
 }
diff --git a/update-package.groovy b/update-package.groovy
index 10f3a85..9d36f38 100644
--- a/update-package.groovy
+++ b/update-package.groovy
@@ -23,8 +23,15 @@
 def packages
 def command
 def commandKwargs
-def installSaltStack(target, pkgs){
-    salt.runSaltProcessStep(pepperEnv, target, 'pkg.install', ["force_yes=True", "pkgs='$pkgs'"], null, true, 30)
+
+def installSaltStack(target, pkgs, masterUpdate = false){
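+    // async install via the master survives the salt service restarts that the
+    // upgrade itself triggers; minion availability is verified afterwards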
+    salt.cmdRun(pepperEnv, "I@salt:master", "salt -C '${target}' --async pkg.install force_yes=True pkgs='$pkgs'")
+    def minions_reachable = target
+    if (masterUpdate) {
+        // in case of update Salt Master packages - check all minions are good
+        minions_reachable = '*'
+    }
+    salt.checkTargetMinionsReady(['saltId': pepperEnv, 'target': target, 'target_reachable': minions_reachable])
 }
 
 timeout(time: 12, unit: 'HOURS') {
@@ -97,7 +104,7 @@
                         common.infoMsg("During salt-minion upgrade on cfg node, pipeline lose connectivy to salt-master for 2 min. If pipeline ended with error rerun pipeline again.")
                         common.retry(10, 5) {
                             if(salt.minionsReachable(pepperEnv, 'I@salt:master', "I@salt:master and ${saltTargets[i]}")){
-                                installSaltStack("I@salt:master and ${saltTargets[i]}", '["salt-master", "salt-common", "salt-api", "salt-minion"]')
+                                installSaltStack("I@salt:master and ${saltTargets[i]}", '["salt-master", "salt-common", "salt-api", "salt-minion"]', true)
                             }
                             if(salt.minionsReachable(pepperEnv, 'I@salt:master', "I@salt:minion and not I@salt:master and ${saltTargets[i]}")){
                                 installSaltStack("I@salt:minion and not I@salt:master and ${saltTargets[i]}", '["salt-minion"]')
@@ -128,7 +135,7 @@
                         common.infoMsg("During salt-minion upgrade on cfg node, pipeline lose connectivy to salt-master for 2 min. If pipeline ended with error rerun pipeline again.")
                         common.retry(10, 5) {
                             if(salt.minionsReachable(pepperEnv, 'I@salt:master', "I@salt:master and ${saltTargets[i]}")){
-                                installSaltStack("I@salt:master and ${saltTargets[i]}", '["salt-master", "salt-common", "salt-api", "salt-minion"]')
+                                installSaltStack("I@salt:master and ${saltTargets[i]}", '["salt-master", "salt-common", "salt-api", "salt-minion"]', true)
                             }
                             if(salt.minionsReachable(pepperEnv, 'I@salt:master', "I@salt:minion and not I@salt:master and ${saltTargets[i]}")){
                                 installSaltStack("I@salt:minion and not I@salt:master and ${saltTargets[i]}", '["salt-minion"]')
diff --git a/upgrade-mcp-release.groovy b/upgrade-mcp-release.groovy
index b229134..fa6e89e 100644
--- a/upgrade-mcp-release.groovy
+++ b/upgrade-mcp-release.groovy
@@ -33,11 +33,9 @@
 }
 
 def updateSaltStack(target, pkgs) {
-    // wait 2 mins when salt-* packages are updated which leads to salt-* services restart
-    common.retry(2, 120) {
-        salt.runSaltProcessStep(venvPepper, target, 'pkg.install', ["force_yes=True", "pkgs='$pkgs'"], null, true, 5)
-    }
-
+    salt.cmdRun(venvPepper, "I@salt:master", "salt -C '${target}' --async pkg.install force_yes=True pkgs='$pkgs'")
+    // can't use the same function from the pipeline lib: at the moment the upgrade
+    // pipeline runs, Jenkins is still using the pipeline lib from the old mcp-version
     common.retry(20, 60) {
         salt.minionsReachable(venvPepper, 'I@salt:master', '*')
         def running = salt.runSaltProcessStep(venvPepper, target, 'saltutil.running', [], null, true, 5)
@@ -131,6 +129,10 @@
             def updatePipelines = ''
             def updateLocalRepos = ''
             def reclassSystemBranch = ''
+            def reclassSystemBranchDefault = gitTargetMcpVersion
+            if (gitTargetMcpVersion != 'proposed') {
+                reclassSystemBranchDefault = "origin/${gitTargetMcpVersion}"
+            }
             def driteTrainParamsYaml = env.getProperty('DRIVE_TRAIN_PARAMS')
             if (driteTrainParamsYaml) {
                 def driteTrainParams = readYaml text: driteTrainParamsYaml
@@ -140,7 +142,7 @@
                 updateClusterModel = driteTrainParams.get('UPDATE_CLUSTER_MODEL', false).toBoolean()
                 updatePipelines = driteTrainParams.get('UPDATE_PIPELINES', false).toBoolean()
                 updateLocalRepos = driteTrainParams.get('UPDATE_LOCAL_REPOS', false).toBoolean()
-                reclassSystemBranch = driteTrainParams.get('RECLASS_SYSTEM_BRANCH', gitTargetMcpVersion)
+                reclassSystemBranch = driteTrainParams.get('RECLASS_SYSTEM_BRANCH', reclassSystemBranchDefault)
             } else {
                 // backward compatibility for 2018.11.0
                 saltMastURL = env.getProperty('SALT_MASTER_URL')
@@ -149,7 +151,7 @@
                 updateClusterModel = env.getProperty('UPDATE_CLUSTER_MODEL').toBoolean()
                 updatePipelines = env.getProperty('UPDATE_PIPELINES').toBoolean()
                 updateLocalRepos = env.getProperty('UPDATE_LOCAL_REPOS').toBoolean()
-                reclassSystemBranch = gitTargetMcpVersion
+                reclassSystemBranch = reclassSystemBranchDefault
             }
 
             python.setupPepperVirtualenv(venvPepper, saltMastURL, saltMastCreds)
@@ -172,7 +174,7 @@
                 if (updateClusterModel) {
                     common.infoMsg('Perform: UPDATE_CLUSTER_MODEL')
                     def dateTime = common.getDatetime()
-                    salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/ && git submodule update")
+                    salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/ && git submodule foreach git fetch")
                     salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && " +
                         "grep -r --exclude-dir=aptly -l 'mcp_version: .*' * | xargs --no-run-if-empty sed -i 's|mcp_version: .*|mcp_version: \"$targetMcpVersion\"|g'")
                     // Do the same, for deprecated variable-duplicate
@@ -186,9 +188,11 @@
                     salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && " +
                         "grep -r --exclude-dir=aptly -l 'system.linux.system.repo.mcp.salt' * | xargs --no-run-if-empty sed -i 's/system.linux.system.repo.mcp.salt/system.linux.system.repo.mcp.apt_mirantis.salt-formulas/g'")
                     salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && " +
-                        "grep -r --exclude-dir=aptly -l 'system.linux.system.repo.mcp.contrail' * | xargs --no-run-if-empty sed -i 's/system.linux.system.repo.mcp.salt/system.linux.system.repo.mcp.apt_mirantis.contrail/g'")
+                        "grep -r --exclude-dir=aptly -l 'system.linux.system.repo.mcp.contrail' * | xargs --no-run-if-empty sed -i 's/system.linux.system.repo.mcp.contrail/system.linux.system.repo.mcp.apt_mirantis.contrail/g'")
                     salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && " +
-                        "grep -r --exclude-dir=aptly -l 'system.linux.system.repo.mcp.updates' * | xargs --no-run-if-empty sed -i 's/system.linux.system.repo.mcp.salt/system.linux.system.repo.mcp.apt_mirantis.update/g'")
+                        "grep -r --exclude-dir=aptly -l 'system.linux.system.repo.mcp.updates' * | xargs --no-run-if-empty sed -i 's/system.linux.system.repo.mcp.updates/system.linux.system.repo.mcp.apt_mirantis.update/g'")
+                    salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && " +
+                            "grep -r --exclude-dir=aptly -l 'system.linux.system.repo.mcp.extra' * | xargs --no-run-if-empty sed -i 's/system.linux.system.repo.mcp.extra/system.linux.system.repo.mcp.apt_mirantis.extra/g'")
                     salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/system && git checkout ${reclassSystemBranch}")
                     // Add kubernetes-extra repo
                     if (salt.testTarget(venvPepper, "I@kubernetes:master")) {
diff --git a/xtrabackup-restore-mysql-db.groovy b/xtrabackup-restore-mysql-db.groovy
deleted file mode 100644
index b1d4a4e..0000000
--- a/xtrabackup-restore-mysql-db.groovy
+++ /dev/null
@@ -1,92 +0,0 @@
-/**
- * Update packages on given nodes
- *
- * Expected parameters:
- *   SALT_MASTER_CREDENTIALS    Credentials to the Salt API.
- *   SALT_MASTER_URL            Full Salt API address [http://10.10.10.1:8000].
- *
-**/
-
-def common = new com.mirantis.mk.Common()
-def salt = new com.mirantis.mk.Salt()
-def python = new com.mirantis.mk.Python()
-
-def pepperEnv = "pepperEnv"
-timeout(time: 12, unit: 'HOURS') {
-    node() {
-
-        stage('Setup virtualenv for Pepper') {
-            python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
-        }
-
-        stage('Start restore') {
-            // # actual upgrade
-
-            stage('Ask for manual confirmation') {
-                input message: "Are you sure you have the correct backups ready? Do you really want to continue to restore mysql db?"
-            }
-            // database restore section
-            try {
-                salt.runSaltProcessStep(pepperEnv, 'I@galera:slave', 'service.stop', ['mysql'], null, true)
-            } catch (Exception er) {
-                common.warningMsg('Mysql service already stopped')
-            }
-            try {
-                salt.runSaltProcessStep(pepperEnv, 'I@galera:master', 'service.stop', ['mysql'], null, true)
-            } catch (Exception er) {
-                common.warningMsg('Mysql service already stopped')
-            }
-            try {
-                salt.cmdRun(pepperEnv, 'I@galera:slave', "rm /var/lib/mysql/ib_logfile*")
-            } catch (Exception er) {
-                common.warningMsg('Files are not present')
-            }
-            try {
-                salt.cmdRun(pepperEnv, 'I@galera:master', "mkdir -p /root/mysql/mysql.bak")
-            } catch (Exception er) {
-                common.warningMsg('Directory already exists')
-            }
-            try {
-                salt.cmdRun(pepperEnv, 'I@galera:master', "mv /var/lib/mysql/* /root/mysql/mysql.bak")
-            } catch (Exception er) {
-                common.warningMsg('Files were already moved')
-            }
-            try {
-                salt.cmdRun(pepperEnv, 'I@galera:master', "rm -rf /var/lib/mysql/*")
-            } catch (Exception er) {
-                common.warningMsg('Directory already empty')
-            }
-            try {
-                salt.runSaltProcessStep(pepperEnv, 'I@galera:master', 'file.remove', ["/var/lib/mysql/.galera_bootstrap"], null, true)
-            } catch (Exception er) {
-                common.warningMsg('File is not present')
-            }
-            salt.cmdRun(pepperEnv, 'I@galera:master', "sed -i '/gcomm/c\\wsrep_cluster_address=\"gcomm://\"' /etc/mysql/my.cnf")
-            _pillar = salt.getPillar(pepperEnv, "I@galera:master", 'xtrabackup:client:backup_dir')
-            backup_dir = _pillar['return'][0].values()[0]
-            if(backup_dir == null || backup_dir.isEmpty()) { backup_dir='/var/backups/mysql/xtrabackup' }
-            print(backup_dir)
-            salt.runSaltProcessStep(pepperEnv, 'I@galera:master', 'file.remove', ["${backup_dir}/dbrestored"], null, true)
-            salt.runSaltProcessStep(pepperEnv, 'I@galera:master', 'state.apply', ["xtrabackup.client.restore"], null, true)
-            salt.runSaltProcessStep(pepperEnv, 'I@galera:master', 'service.start', ['mysql'], null, true)
-
-            // wait until mysql service on galera master is up
-            salt.commandStatus(pepperEnv, 'I@galera:master', 'service mysql status', 'running')
-
-            salt.runSaltProcessStep(pepperEnv, 'I@galera:slave', 'service.start', ['mysql'], null, true)
-            try {
-                salt.commandStatus(pepperEnv, 'I@galera:slave', 'service mysql status', 'running')
-            } catch (Exception er) {
-                common.warningMsg('Either there are no galera slaves or something failed when starting mysql on galera slaves')
-            }
-            sleep(5)
-            salt.cmdRun(pepperEnv, 'I@galera:master', "su root -c 'salt-call mysql.status | grep -A1 wsrep_cluster_size'")
-
-            try {
-                salt.runSaltProcessStep(pepperEnv, 'I@galera:master or I@galera:slave', 'file.touch', ["/var/lib/mysql/.galera_bootstrap"], null, true)
-            } catch (Exception er) {
-                common.warningMsg('File is already present')
-            }
-        }
-    }
-}