Merge "Add universal code to handle different test frameworks"
diff --git a/aptly-promote-pipeline.groovy b/aptly-promote-pipeline.groovy
index 0f51a90..dfddfcd 100644
--- a/aptly-promote-pipeline.groovy
+++ b/aptly-promote-pipeline.groovy
@@ -26,7 +26,7 @@
 def insufficientPermissions = false
 
 timeout(time: 12, unit: 'HOURS') {
-  node() {
+  node("docker&&hardware") {
     try {
       stage("promote") {
         // promote is restricted to users in aptly-promote-users LDAP group
@@ -36,7 +36,9 @@
               if (storage == "local") {
                 storage = ""
               }
-              aptly.promotePublish(APTLY_URL, SOURCE, TARGET, RECREATE, components, packages, DIFF_ONLY, '-d --timeout 600', DUMP_PUBLISH.toBoolean(), storage)
+              retry(2){
+                aptly.promotePublish(APTLY_URL, SOURCE, TARGET, RECREATE, components, packages, DIFF_ONLY, '-d --timeout 600', DUMP_PUBLISH.toBoolean(), storage)
+              }
             }
           }
         }else{
diff --git a/build-debian-packages-influxdb-relay.groovy b/build-debian-packages-influxdb-relay.groovy
index 3a9bc03..14b1190 100644
--- a/build-debian-packages-influxdb-relay.groovy
+++ b/build-debian-packages-influxdb-relay.groovy
@@ -79,7 +79,9 @@
                         }
                         stage("publish") {
                             aptly.snapshotRepo(APTLY_URL, APTLY_REPO, timestamp)
-                            aptly.publish(APTLY_URL)
+                            retry(2){
+                              aptly.publish(APTLY_URL)
+                            }
                         }
                     }
                 }
diff --git a/build-debian-packages-jmx-exporter.groovy b/build-debian-packages-jmx-exporter.groovy
index d6e7fbd..eaa9f05 100644
--- a/build-debian-packages-jmx-exporter.groovy
+++ b/build-debian-packages-jmx-exporter.groovy
@@ -58,7 +58,9 @@
 
                 stage("publish") {
                     aptly.snapshotRepo(APTLY_URL, APTLY_REPO, timestamp)
-                    aptly.publish(APTLY_URL)
+                    retry(2){
+                      aptly.publish(APTLY_URL)
+                    }
                 }
             }
 
diff --git a/build-debian-packages-libvirt-exporter.groovy b/build-debian-packages-libvirt-exporter.groovy
index 2051504..4c3f732 100644
--- a/build-debian-packages-libvirt-exporter.groovy
+++ b/build-debian-packages-libvirt-exporter.groovy
@@ -60,7 +60,9 @@
                     }
                     stage("publish") {
                         aptly.snapshotRepo(APTLY_URL, APTLY_REPO, timestamp)
-                        aptly.publish(APTLY_URL)
+                        retry(2){
+                          aptly.publish(APTLY_URL)
+                        }
                     }
                 }
             }
diff --git a/build-debian-packages-pipeline.groovy b/build-debian-packages-pipeline.groovy
index 8d46e8e..5d16339 100644
--- a/build-debian-packages-pipeline.groovy
+++ b/build-debian-packages-pipeline.groovy
@@ -135,7 +135,9 @@
 
           stage("publish") {
             aptly.snapshotRepo(APTLY_URL, APTLY_REPO, timestamp)
-            aptly.publish(APTLY_URL)
+            retry(2){
+              aptly.publish(APTLY_URL)
+            }
           }
         }
       }
diff --git a/build-debian-packages-prometheus-relay.groovy b/build-debian-packages-prometheus-relay.groovy
index 45e5b10..f101f57 100644
--- a/build-debian-packages-prometheus-relay.groovy
+++ b/build-debian-packages-prometheus-relay.groovy
@@ -79,7 +79,9 @@
                         }
                         stage("publish") {
                             aptly.snapshotRepo(APTLY_URL, APTLY_REPO, timestamp)
-                            aptly.publish(APTLY_URL)
+                            retry(2){
+                              aptly.publish(APTLY_URL)
+                            }
                         }
 
                         stage("rebuild docker images") {
diff --git a/build-debian-packages-telegraf.groovy b/build-debian-packages-telegraf.groovy
index 16d6259..b48c4fa 100644
--- a/build-debian-packages-telegraf.groovy
+++ b/build-debian-packages-telegraf.groovy
@@ -78,7 +78,9 @@
                         }
                         stage("publish") {
                             aptly.snapshotRepo(APTLY_URL, APTLY_REPO, timestamp)
-                            aptly.publish(APTLY_URL)
+                            retry(2){
+                              aptly.publish(APTLY_URL)
+                            }
                         }
 
                         stage("rebuild docker images") {
diff --git a/build-extra-dpdk-pipeline.groovy b/build-extra-dpdk-pipeline.groovy
index 4d096ae..e9ac981 100644
--- a/build-extra-dpdk-pipeline.groovy
+++ b/build-extra-dpdk-pipeline.groovy
@@ -53,7 +53,9 @@
         }
         stage("publish") {
             aptly.snapshotRepo(APTLY_URL, APTLY_REPO, timestamp)
-            aptly.publish(APTLY_URL)
+            retry(2){
+              aptly.publish(APTLY_URL)
+            }
         }
        }
        } catch (Throwable e) {
diff --git a/cloud-deploy-pipeline.groovy b/cloud-deploy-pipeline.groovy
index a37a655..c2f9351 100644
--- a/cloud-deploy-pipeline.groovy
+++ b/cloud-deploy-pipeline.groovy
@@ -579,7 +579,17 @@
 
             stage('Finalize') {
                 if (common.checkContains('STACK_INSTALL', 'finalize')) {
-                    salt.runSaltProcessStep(venvPepper, '*', 'state.apply', [], null, true)
+                    def gluster_compound = 'I@glusterfs:server'
+                    // Enforce highstate asynchronously only on the nodes which are not glusterfs servers
+                    salt.enforceHighstate(venvPepper, '* and not ' + gluster_compound)
+                    // Iterate over the nonempty set of gluster servers and apply highstates one by one
+                    // TODO: switch to batch once salt 2017.7+ is used
+                    def glusterMinions = salt.getMinionsSorted(venvPepper, gluster_compound)
+                    if (!glusterMinions.isEmpty()) {
+                        for (target in glusterMinions) {
+                            salt.enforceHighstate(venvPepper, target)
+                        }
+                    }
                 }
 
                 outputsPretty = common.prettify(outputs)
diff --git a/cloud-update.groovy b/cloud-update.groovy
new file mode 100644
index 0000000..fd9cbce
--- /dev/null
+++ b/cloud-update.groovy
@@ -0,0 +1,1560 @@
+/**
+ * Update packages on given nodes
+ *
+ * Expected parameters:
+ *   SALT_MASTER_CREDENTIALS    Credentials to the Salt API.
+ *   SALT_MASTER_URL            Full Salt API address [https://10.10.10.1:8000].
+ *   SNAPSHOT_NAME              Snapshot name
+ *   CFG_NODE_PROVIDER          Physical machine name hosting Salt-Master VM (ex. kvm01*)
+ *   INTERACTIVE                Ask interactive questions during pipeline run (bool)
+ *   PER_NODE                   Target nodes will be managed one by one (bool)
+ *   ROLLBACK_BY_REDEPLOY       Omit taking live snapshots. Rollback is planned to be done by redeployment (bool)
+ *   STOP_SERVICES              Stop API services before update (bool)
+ *   TARGET_UPDATES             Comma separated list of nodes to update (Valid values are cfg,ctl,prx,msg,dbs,log,mon,mtr,ntw,nal,gtw-virtual,cmn,rgw,cid,cmp,kvm,osd,gtw-physical)
+ *   TARGET_ROLLBACKS           Comma separated list of nodes to rollback (Valid values are ctl,prx,msg,dbs,log,mon,mtr,ntw,nal,gtw-virtual,cmn,rgw,cmp,kvm,osd,gtw-physical)
+ *   TARGET_SNAPSHOT_MERGES     Comma separated list of nodes to merge live snapshot for (Valid values are cfg,ctl,prx,msg,dbs,log,mon,mtr,ntw,nal,gtw-virtual,cmn,rgw,cid)
+ *   CTL_TARGET                 Salt targeted CTL nodes (ex. ctl*)
+ *   PRX_TARGET                 Salt targeted PRX nodes (ex. prx*)
+ *   MSG_TARGET                 Salt targeted MSG nodes (ex. msg*)
+ *   DBS_TARGET                 Salt targeted DBS nodes (ex. dbs*)
+ *   LOG_TARGET                 Salt targeted LOG nodes (ex. log*)
+ *   MON_TARGET                 Salt targeted MON nodes (ex. mon*)
+ *   MTR_TARGET                 Salt targeted MTR nodes (ex. mtr*)
+ *   NTW_TARGET                 Salt targeted NTW nodes (ex. ntw*)
+ *   NAL_TARGET                 Salt targeted NAL nodes (ex. nal*)
+ *   CMN_TARGET                 Salt targeted CMN nodes (ex. cmn*)
+ *   RGW_TARGET                 Salt targeted RGW nodes (ex. rgw*)
+ *   CID_TARGET                 Salt targeted CID nodes (ex. cid*)
+ *   CMP_TARGET                 Salt targeted physical compute nodes (ex. cmp001*)
+ *   KVM_TARGET                 Salt targeted physical KVM nodes (ex. kvm01*)
+ *   CEPH_OSD_TARGET            Salt targeted physical Ceph OSD nodes (ex. osd001*)
+ *   GTW_TARGET                 Salt targeted physical or virtual GTW nodes (ex. gtw01*)
+ *   REBOOT                     Reboot nodes after update (bool)
+ *   ROLLBACK_PKG_VERSIONS      Space separated list of pkgs=versions to rollback to on physical targeted machines (ex. pkg_name1=pkg_version1 pkg_name2=pkg_version2)
+ *   PURGE_PKGS                 Space separated list of pkgs=versions to be purged on physical targeted machines (ex. pkg_name1=pkg_version1 pkg_name2=pkg_version2)
+ *   REMOVE_PKGS                Space separated list of pkgs=versions to be removed on physical targeted machines (ex. pkg_name1=pkg_version1 pkg_name2=pkg_version2)
+ *   RESTORE_GALERA             Restore Galera DB (bool)
+ *   RESTORE_CONTRAIL_DB        Restore Cassandra and Zookeeper DBs for OpenContrail (bool)
+ *
+**/
+def common = new com.mirantis.mk.Common()
+def salt = new com.mirantis.mk.Salt()
+def python = new com.mirantis.mk.Python()
+def virsh = new com.mirantis.mk.Virsh()
+
+def updates = TARGET_UPDATES.tokenize(",").collect{it -> it.trim()}
+def rollbacks = TARGET_ROLLBACKS.tokenize(",").collect{it -> it.trim()}
+def merges = TARGET_SNAPSHOT_MERGES.tokenize(",").collect{it -> it.trim()}
+
+def pepperEnv = "pepperEnv"
+def minions
+def result
+def packages
+def command
+def commandKwargs
+
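+/**
+ * Upgrade packages on the given Salt target, optionally restricted to a
+ * comma-separated package list. targetType switches special handling
+ * ('contrail', 'cicd', 'I@salt:master'); targetPackages narrows the set.
+ * Illustrative calls (targets are examples, matching the ones used below):
+ *
+ *   updatePkgs(pepperEnv, 'ctl*')              // full upgrade
+ *   updatePkgs(pepperEnv, 'ntw*', 'contrail')  // stop OpenContrail services first
+ */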
+def updatePkgs(pepperEnv, target, targetType="", targetPackages="") {
+    def salt = new com.mirantis.mk.Salt()
+    def common = new com.mirantis.mk.Common()
+    def commandKwargs
+    def distUpgrade
+    def pkgs
+    def out
+
+    salt.enforceState(pepperEnv, target, 'linux.system.repo')
+
+    stage("List package upgrades") {
+        common.infoMsg("Listing all the packages that have a new update available on ${target}")
+        pkgs = salt.getReturnValues(salt.runSaltProcessStep(pepperEnv, target, 'pkg.list_upgrades', [], null, true))
+        if(targetPackages != "" && targetPackages != "*"){
+            common.infoMsg("Note that only ${targetPackages} will be installed from the above list of available updates on ${target}")
+        }
+    }
+
+    if (INTERACTIVE.toBoolean()) {
+        stage("Confirm live package upgrades on ${target}") {
+            if (targetPackages=="") {
+                def userInput = input(
+                 id: 'userInput', message: 'Insert package names for update', parameters: [
+                 [$class: 'TextParameterDefinition', defaultValue: pkgs.keySet().join(",").toString(), description: 'Package names (or *)', name: 'packages']
+                ])
+                if (userInput != "" && userInput != "*") {
+                    targetPackages = userInput
+                }
+            } else {
+                input message: "Approve live package upgrades on ${target} nodes?"
+            }
+        }
+    } else {
+        targetPackages = pkgs.keySet().join(",").toString()
+    }
+
+    if (targetPackages != "") {
+        // list installed versions of pkgs that will be upgraded
+        def installedPkgs = []
+        def newPkgs = []
+        def targetPkgList = targetPackages.tokenize(',')
+        for (pkg in targetPkgList) {
+            def version
+            try {
+                def pkgsDetails = salt.getReturnValues(salt.runSaltProcessStep(pepperEnv, target, 'pkg.info_installed', [pkg], null, true))
+                version = pkgsDetails.get(pkg).get('version')
+            } catch (Exception er) {
+                common.infoMsg("${pkg} not installed yet")
+            }
+            if (version?.trim()) {
+                installedPkgs.add(pkg + '=' + version)
+            } else {
+                newPkgs.add(pkg)
+            }
+        }
+        common.warningMsg("The following packages will be upgraded:")
+        common.warningMsg(installedPkgs.join(" "))
+        common.warningMsg("The following packages will be newly installed:")
+        common.warningMsg(newPkgs.join(" "))
+        // set variables
+        command = "pkg.install"
+        packages = targetPackages
+        commandKwargs = ['only_upgrade': 'true','force_yes': 'true']
+
+    }else {
+        command = "pkg.upgrade"
+        commandKwargs = ['dist_upgrade': 'true']
+        distUpgrade = true
+        packages = null
+    }
+
+    // TODO: add an exception for cfg or cicd targets
+    stage("stop services on ${target}") {
+        if ((STOP_SERVICES.toBoolean()) && (targetType != 'cicd')) {
+            if (targetType == 'contrail') {
+                stopContrailServices(pepperEnv, target)
+            } else {
+                def probe = salt.getFirstMinion(pepperEnv, "${target}")
+                stopServices(pepperEnv, probe, target)
+            }
+        }
+    }
+
+    stage('Apply package upgrades') {
+        // salt master pkg
+        if (targetType == 'I@salt:master') {
+            common.warningMsg('Upgrading the salt-master package; rerun the pipeline if it gets disconnected')
+            salt.runSaltProcessStep(pepperEnv, target, 'pkg.install', ['salt-master'], null, true, 5)
+            salt.minionsReachable(pepperEnv, 'I@salt:master', '*')
+        }
+        // salt minion pkg
+        salt.runSaltProcessStep(pepperEnv, target, 'pkg.install', ['salt-minion'], null, true, 5)
+        salt.minionsReachable(pepperEnv, 'I@salt:master', target)
+        common.infoMsg('Performing pkg upgrades ... ')
+        common.retry(3){
+            out = salt.runSaltCommand(pepperEnv, 'local', ['expression': target, 'type': 'compound'], command, true, packages, commandKwargs)
+            salt.printSaltCommandResult(out)
+        }
+        def osRelease = salt.getGrain(pepperEnv, target, 'lsb_distrib_codename')
+        if (osRelease.toString().toLowerCase().contains('trusty')) {
+            args = 'export DEBIAN_FRONTEND=noninteractive; apt-get -y -q --force-yes -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confold\" '
+        } else {
+            args = 'export DEBIAN_FRONTEND=noninteractive; apt-get -y -q -f --allow-downgrades -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confold\" '
+        }
+        if (out.toString().contains('errors:')) {
+            try {
+                if (packages?.trim()) {
+                    packages = packages.replaceAll(',', ' ')
+                    common.retry(3){
+                        out = salt.runSaltProcessStep(pepperEnv, target, 'cmd.run', [args + ' install ' + packages])
+                    }
+                } else {
+                    if (distUpgrade) {
+                        common.retry(3){
+                            out = salt.runSaltProcessStep(pepperEnv, target, 'cmd.run', [args + ' dist-upgrade'])
+                        }
+                    } else {
+                        common.retry(3){
+                            out = salt.runSaltProcessStep(pepperEnv, target, 'cmd.run', [args + ' upgrade'])
+                        }
+                    }
+                }
+                if (out.toString().contains('E: ')) {
+                    common.errorMsg(out)
+                    if (INTERACTIVE.toBoolean()) {
+                        input message: "Pkgs failed to update on ${target}. Please fix them manually."
+                    } else {
+                        salt.printSaltCommandResult(out)
+                        throw new Exception("Pkgs update failed")
+                    }
+                }
+            } catch (Exception e) {
+                common.errorMsg(out)
+                common.errorMsg(e)
+                if (INTERACTIVE.toBoolean()) {
+                    input message: "Pkgs failed to update on ${target}. Please fix them manually."
+                } else {
+                    throw new Exception("Pkgs update failed")
+                }
+            }
+        }
+    }
+}
+
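+/**
+ * Roll packages back on the given target. When ROLLBACK_PKG_VERSIONS is set
+ * (space-separated pkg=version pairs) those exact versions are installed;
+ * otherwise the package list is chosen interactively or falls through to a
+ * dist-upgrade. PURGE_PKGS and REMOVE_PKGS are processed first.
+ */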
+def rollbackPkgs(pepperEnv, target, targetType = "", targetPackages="") {
+    def salt = new com.mirantis.mk.Salt()
+    def common = new com.mirantis.mk.Common()
+    def probe = salt.getFirstMinion(pepperEnv, "${target}")
+    def distUpgrade
+    def pkgs
+    def out
+
+    salt.enforceState(pepperEnv, target, 'linux.system.repo')
+
+    if (ROLLBACK_PKG_VERSIONS == "") {
+        stage("List package upgrades") {
+            common.infoMsg("Listing all the packages that have a new update available on ${target}")
+            pkgs = salt.getReturnValues(salt.runSaltProcessStep(pepperEnv, target, 'pkg.list_upgrades', [], null, true))
+            if(targetPackages != "" && targetPackages != "*"){
+                common.infoMsg("Note that only ${targetPackages} will be installed from the above list of available updates on ${target}")
+            }
+        }
+
+        if (INTERACTIVE.toBoolean()) {
+            stage("Confirm live package upgrades on ${target}") {
+                if(targetPackages==""){
+                    timeout(time: 2, unit: 'HOURS') {
+                        def userInput = input(
+                         id: 'userInput', message: 'Insert package names for update', parameters: [
+                         [$class: 'TextParameterDefinition', defaultValue: pkgs.keySet().join(",").toString(), description: 'Package names (or *)', name: 'packages']
+                        ])
+                        if(userInput != "" && userInput != "*"){
+                            targetPackages = userInput
+                        }
+                    }
+                }else{
+                    timeout(time: 2, unit: 'HOURS') {
+                       input message: "Approve live package upgrades on ${target} nodes?"
+                    }
+                }
+            }
+        } else {
+            targetPackages = pkgs.keySet().join(",").toString()
+        }
+    } else {
+        targetPackages = ROLLBACK_PKG_VERSIONS
+    }
+
+    if (targetPackages != "") {
+        // set variables
+        packages = targetPackages
+    } else {
+        distUpgrade = true
+        packages = null
+    }
+
+    stage("stop services on ${target}") {
+        try {
+            if (INTERACTIVE.toBoolean()) {
+                input message: "Click PROCEED to interactively stop services on ${target}. Otherwise click ABORT to skip stopping them and continue."
+            }
+        } catch (Exception er) {
+            common.infoMsg('skipping stopping services')
+            return
+        }
+        if (STOP_SERVICES.toBoolean()) {
+            stopServices(pepperEnv, probe, target)
+        }
+    }
+
+    stage('Apply package downgrades') {
+        args = 'export DEBIAN_FRONTEND=noninteractive; apt-get -y -q --allow-downgrades -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confold\" '
+        common.infoMsg('Performing pkgs purge/remove ... ')
+        try {
+            if (PURGE_PKGS != "") {
+                def purgePackages = PURGE_PKGS.replaceAll(',', ' ')
+                common.retry(3){
+                    out = salt.runSaltProcessStep(pepperEnv, target, 'cmd.run', [args + ' purge ' + purgePackages])
+                }
+            }
+            if (REMOVE_PKGS != "") {
+                def removePackages = REMOVE_PKGS.replaceAll(',', ' ')
+                common.retry(3){
+                    out = salt.runSaltProcessStep(pepperEnv, target, 'cmd.run', [args + ' remove ' + removePackages])
+                }
+            }
+            if (out.toString().contains('E: ')) {
+                common.errorMsg(out)
+                if (INTERACTIVE.toBoolean()) {
+                    input message: "Pkgs ${packages} purge failed on ${target}. Please fix it manually."
+                } else {
+                    salt.printSaltCommandResult(out)
+                    throw new Exception("Pkgs ${packages} purge failed")
+                }
+            }
+        } catch (Exception e) {
+            common.errorMsg(out)
+            common.errorMsg(e)
+            if (INTERACTIVE.toBoolean()) {
+                input message: "Pkgs ${packages} purge failed on ${target}. Please fix it manually."
+            } else {
+                throw new Exception("Pkgs ${packages} purge failed")
+            }
+        }
+
+        common.infoMsg('Performing pkg downgrades ... ')
+        try {
+            if (packages?.trim()) {
+                packages = packages.replaceAll(',', ' ')
+                common.retry(3){
+                    out = salt.runSaltProcessStep(pepperEnv, target, 'cmd.run', [args + ' install salt-minion'], null, true, 5)
+                }
+                salt.minionsReachable(pepperEnv, 'I@salt:master', target)
+                common.retry(3){
+                    out = salt.runSaltProcessStep(pepperEnv, target, 'cmd.run', [args + ' install ' + packages])
+                }
+            } else {
+                if (distUpgrade) {
+                    common.retry(3){
+                        out = salt.runSaltProcessStep(pepperEnv, target, 'cmd.run', [args + ' dist-upgrade'])
+                    }
+                } else {
+                    common.retry(3){
+                        out = salt.runSaltProcessStep(pepperEnv, target, 'cmd.run', [args + ' upgrade'])
+                    }
+                }
+            }
+            if (out.toString().contains('E: ')) {
+                common.errorMsg(out)
+                if (INTERACTIVE.toBoolean()) {
+                    input message: "Pkgs rollback failed on ${target}. Please fix it manually."
+                } else {
+                    salt.printSaltCommandResult(out)
+                    throw new Exception("Pkgs rollback failed")
+                }
+            }
+        } catch (Exception e) {
+            common.errorMsg(out)
+            common.errorMsg(e)
+            if (INTERACTIVE.toBoolean()) {
+                input message: "Pkgs rollback failed on ${target}. Please fix it manually."
+            } else {
+                throw new Exception("Pkgs rollback failed")
+            }
+        }
+    }
+}
+
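+/**
+ * Discover which KVM node (I@salt:control) hosts the Salt master VM and cache
+ * it in CFG_NODE_PROVIDER, unless that parameter was set explicitly; each KVM
+ * minion is probed with virt.list_active_vms until one reports master_name.
+ */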
+def getCfgNodeProvider(pepperEnv, master_name) {
+    def salt = new com.mirantis.mk.Salt()
+    def common = new com.mirantis.mk.Common()
+    if (!CFG_NODE_PROVIDER?.trim()) {
+        kvms = salt.getMinions(pepperEnv, 'I@salt:control')
+        for (kvm in kvms) {
+            try {
+                vms = salt.getReturnValues(salt.runSaltProcessStep(pepperEnv, kvm, 'virt.list_active_vms', [], null, true))
+                if (vms.toString().contains(master_name)) {
+                    CFG_NODE_PROVIDER = kvm
+                    //break
+                }
+            } catch (Exception er) {
+                common.infoMsg("${master_name} not present on ${kvm}")
+            }
+        }
+    }
+}
+
+/*
+def rollbackSaltMaster(pepperEnv, target, path='/var/lib/libvirt/images') {
+    def salt = new com.mirantis.mk.Salt()
+    def common = new com.mirantis.mk.Common()
+    try {
+        input message: "PART1 - Are you sure to rollback ${target}? To rollback click on PROCEED. To skip rollback PART1 click on ABORT."
+    } catch (Exception er) {
+        common.infoMsg("skipping rollback of ${target}")
+        return
+    }
+    def domain = salt.getDomainName(pepperEnv)
+    def master = salt.getReturnValues(salt.getPillar(pepperEnv, target, 'linux:network:hostname'))
+    getCfgNodeProvider(pepperEnv, master)
+    try {
+        try {
+            salt.getReturnValues(salt.cmdRun(pepperEnv, CFG_NODE_PROVIDER, "ls -la ${path}/${master}.${domain}.xml"))
+            common.errorMsg('Pipeline is about to disconnect from salt-api. You will have to rerun the pipeline with ROLLBACK_CFG checked and skip PART1 to finish rollback.')
+            salt.cmdRun(pepperEnv, CFG_NODE_PROVIDER, "virsh destroy ${master}.${domain}; virsh define ${path}/${master}.${domain}.xml; virsh start ${master}.${domain} ")
+        } catch (Exception er) {
+            common.errorMsg(er)
+            input message: "Rollback for ${target} failed. Rollback manually."
+        }
+    } catch (Exception er) {
+        common.errorMsg(er)
+        input message: "Rollback for ${target} failed. Rollback manually."
+    }
+}
+
+def finishSaltMasterRollback(pepperEnv, target, path='/var/lib/libvirt/images') {
+    def salt = new com.mirantis.mk.Salt()
+    def common = new com.mirantis.mk.Common()
+    def virsh = new com.mirantis.mk.Virsh()
+    try {
+        input message: "PART2 - Are you sure to finalize ${target} rollback? Click on PROCEED. To skip rollback click on ABORT."
+    } catch (Exception er) {
+        common.infoMsg("skipping finalize rollback of ${target}")
+        return
+    }
+    salt.minionsReachable(pepperEnv, 'I@salt:master', '*')
+    def domain = salt.getDomainName(pepperEnv)
+    def master = salt.getReturnValues(salt.getPillar(pepperEnv, target, 'linux:network:hostname'))
+    getCfgNodeProvider(pepperEnv, master)
+    try {
+        virsh.liveSnapshotAbsent(pepperEnv, CFG_NODE_PROVIDER, master, SNAPSHOT_NAME, path)
+        // purge and setup previous repos
+        salt.enforceState(pepperEnv, target, 'linux.system.repo')
+    } catch (Exception e) {
+        common.errorMsg(e)
+        input message: "Check what failed after ${target} rollback. Do you want to PROCEED?"
+    }
+}*/
+
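+/**
+ * Stop the usual OpenStack control-plane services on the target via the
+ * shared Openstack library. The extra boolean argument is passed only in
+ * INTERACTIVE mode (presumably to confirm each stop with the operator).
+ */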
+def stopServices(pepperEnv, probe, target) {
+    def openstack = new com.mirantis.mk.Openstack()
+    def services = []
+    services.add('keepalived')
+    services.add('nginx')
+    services.add('haproxy')
+    services.add('nova-api')
+    services.add('cinder')
+    services.add('glance')
+    services.add('heat')
+    services.add('neutron')
+    services.add('apache2')
+    services.add('rabbitmq-server')
+    if (INTERACTIVE.toBoolean()) {
+        openstack.stopServices(pepperEnv, probe, target, services, true)
+    } else {
+        openstack.stopServices(pepperEnv, probe, target, services)
+    }
+}
+
+// must be stopped separately due to OC on Trusty
+def stopContrailServices(pepperEnv, target) {
+    def salt = new com.mirantis.mk.Salt()
+    def common = new com.mirantis.mk.Common()
+    def services = []
+    services.add('keepalived')
+    services.add('haproxy')
+    services.add('supervisor-control')
+    services.add('supervisor-config')
+    services.add('supervisor-database')
+    services.add('zookeeper')
+    services.add('ifmap-server')
+    for (s in services) {
+        try {
+            salt.runSaltProcessStep(pepperEnv, target, 'service.stop', [s], null, true)
+        } catch (Exception er) {
+            common.infoMsg(er)
+        }
+    }
+}
+
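+/**
+ * Apply a highstate on the target with up to 3 retries; on failure either
+ * prompt the operator (INTERACTIVE) or abort. With REBOOT set, the nodes are
+ * rebooted afterwards and the pipeline waits for the minions to come back.
+ */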
+def highstate(pepperEnv, target) {
+    def salt = new com.mirantis.mk.Salt()
+    def common = new com.mirantis.mk.Common()
+
+    stage("Apply highstate on ${target} nodes") {
+        try {
+            common.retry(3){
+                salt.enforceHighstate(pepperEnv, target)
+            }
+        } catch (Exception e) {
+            common.errorMsg(e)
+            if (INTERACTIVE.toBoolean()) {
+                input message: "Highstate failed on ${target}. Fix it manually or run rollback on ${target}."
+            } else {
+                throw new Exception("highstate failed")
+            }
+        }
+    }
+    // optionally reboot
+    if (REBOOT.toBoolean()) {
+        stage("Reboot ${target} nodes") {
+            salt.runSaltProcessStep(pepperEnv, target, 'system.reboot', null, null, true, 5)
+            sleep(10)
+            salt.minionsReachable(pepperEnv, 'I@salt:master', target)
+        }
+    }
+}
+
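+/**
+ * Confirm (when INTERACTIVE) and perform a live-snapshot rollback of all
+ * minions matched by tgt; generalTarget is the node-name prefix (e.g. 'ctl')
+ * used to locate the hosting KVM providers.
+ */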
+def rollback(pepperEnv, tgt, generalTarget) {
+    def common = new com.mirantis.mk.Common()
+    try {
+        if (INTERACTIVE.toBoolean()) {
+            input message: "Are you sure to rollback ${generalTarget}? To rollback click on PROCEED. To skip rollback click on ABORT."
+        }
+    } catch (Exception er) {
+        common.infoMsg('skipping rollback')
+        return
+    }
+    try {
+        rollbackLiveSnapshot(pepperEnv, tgt, generalTarget)
+    } catch (Exception err) {
+        common.errorMsg(err)
+        if (INTERACTIVE.toBoolean()) {
+            input message: "Rollback for ${tgt} failed please fix it manually before clicking PROCEED."
+        } else {
+            throw new Exception("Rollback failed for ${tgt}")
+        }
+    }
+}
+
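+/**
+ * Create a live snapshot named SNAPSHOT_NAME for every minion matched by tgt.
+ * NOTE: the node index is read from the 5th character of the short hostname
+ * (e.g. '1' for ctl01), so this assumes the <prefix>0<n> naming convention.
+ */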
+def liveSnapshot(pepperEnv, tgt, generalTarget) {
+    def salt = new com.mirantis.mk.Salt()
+    def common = new com.mirantis.mk.Common()
+    def virsh = new com.mirantis.mk.Virsh()
+    def domain = salt.getDomainName(pepperEnv)
+    def target_hosts = salt.getMinionsSorted(pepperEnv, "${tgt}")
+    common.warningMsg(target_hosts)
+    //def nodeCount = 1
+    for (t in target_hosts) {
+        def target = salt.stripDomainName(t)
+        def nodeCount = target[4]
+        common.warningMsg(nodeCount)
+        def nodeProvider = salt.getNodeProvider(pepperEnv, "${generalTarget}0${nodeCount}")
+        virsh.liveSnapshotPresent(pepperEnv, nodeProvider, target, SNAPSHOT_NAME)
+        //nodeCount++
+    }
+}
+
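+/**
+ * Merge (commit) the live snapshot SNAPSHOT_NAME into the base image for
+ * every minion matched by tgt; the Salt master VM is resolved through
+ * getCfgNodeProvider, other VMs through the <generalTarget>0<n> convention.
+ */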
+def mergeSnapshot(pepperEnv, tgt, generalTarget='') {
+    def salt = new com.mirantis.mk.Salt()
+    def virsh = new com.mirantis.mk.Virsh()
+    def domain = salt.getDomainName(pepperEnv)
+    def target_hosts = salt.getMinionsSorted(pepperEnv, "${tgt}")
+    def nodeCount = 1
+    for (t in target_hosts) {
+        if (tgt == 'I@salt:master') {
+            def master = salt.getReturnValues(salt.getPillar(pepperEnv, t, 'linux:network:hostname'))
+            getCfgNodeProvider(pepperEnv, master)
+            virsh.liveSnapshotMerge(pepperEnv, CFG_NODE_PROVIDER, master, SNAPSHOT_NAME)
+        } else {
+            def target = salt.stripDomainName(t)
+            def nodeProvider = salt.getNodeProvider(pepperEnv, "${generalTarget}0${nodeCount}")
+            virsh.liveSnapshotMerge(pepperEnv, nodeProvider, target, SNAPSHOT_NAME)
+        }
+        nodeCount++
+    }
+    salt.minionsReachable(pepperEnv, 'I@salt:master', tgt)
+}
+
+
+
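+/**
+ * Roll every VM matched by tgt back to SNAPSHOT_NAME: destroy all domains
+ * first, then revert each one, and finally re-apply linux.system.repo to
+ * restore the pre-update package repositories.
+ */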
+def rollbackLiveSnapshot(pepperEnv, tgt, generalTarget) {
+    def salt = new com.mirantis.mk.Salt()
+    def virsh = new com.mirantis.mk.Virsh()
+    def common = new com.mirantis.mk.Common()
+    def domain = salt.getDomainName(pepperEnv)
+    def target_hosts = salt.getMinionsSorted(pepperEnv, "${tgt}")
+    // first destroy all vms
+    def nodeCount = 1
+    for (t in target_hosts) {
+        def target = salt.stripDomainName(t)
+        def nodeProvider = salt.getNodeProvider(pepperEnv, "${generalTarget}0${nodeCount}")
+        salt.runSaltProcessStep(pepperEnv, "${nodeProvider}*", 'virt.destroy', ["${target}.${domain}"], null, true)
+        nodeCount++
+    }
+    nodeCount = 1
+    // rollback vms
+    for (t in target_hosts) {
+        def target = salt.stripDomainName(t)
+        def nodeProvider = salt.getNodeProvider(pepperEnv, "${generalTarget}0${nodeCount}")
+        virsh.liveSnapshotRollback(pepperEnv, nodeProvider, target, SNAPSHOT_NAME)
+        nodeCount++
+    }
+    try {
+        salt.minionsReachable(pepperEnv, 'I@salt:master', tgt)
+        // purge and setup previous repos
+        salt.enforceState(pepperEnv, tgt, 'linux.system.repo')
+    } catch (Exception e) {
+        common.errorMsg(e)
+        if (INTERACTIVE.toBoolean()) {
+            input message: "Salt state linux.system.repo on ${tgt} failed. Do you want to PROCEED?."
+        } else {
+            throw new Exception("Salt state linux.system.repo on ${tgt} failed")
+        }
+    }
+}
+
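+/**
+ * Destroy and undefine every VM matched by tgt and delete its salt-key on the
+ * master; used on the rollback-by-redeploy path where the nodes are expected
+ * to be recreated from scratch afterwards.
+ */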
+def removeNode(pepperEnv, tgt, generalTarget) {
+    def salt = new com.mirantis.mk.Salt()
+    def virsh = new com.mirantis.mk.Virsh()
+    def common = new com.mirantis.mk.Common()
+    def domain = salt.getDomainName(pepperEnv)
+    def target_hosts = salt.getMinionsSorted(pepperEnv, "${tgt}")
+    // first destroy all vms
+    def nodeCount = 1
+    for (t in target_hosts) {
+        def target = salt.stripDomainName(t)
+        def nodeProvider = salt.getNodeProvider(pepperEnv, "${generalTarget}0${nodeCount}")
+        salt.runSaltProcessStep(pepperEnv, "${nodeProvider}*", 'virt.destroy', ["${target}.${domain}"], null, true)
+        salt.runSaltProcessStep(pepperEnv, "${nodeProvider}*", 'virt.undefine', ["${target}.${domain}"], null, true)
+        try {
+            salt.cmdRun(pepperEnv, 'I@salt:master', "salt-key -d ${target}.${domain} -y")
+        } catch (Exception e) {
+            common.warningMsg('The key does not match any accepted, unaccepted or rejected keys. It was probably already removed; continuing.')
+        }
+        nodeCount++
+    }
+}
+
+def saltMasterBackup(pepperEnv) {
+    def salt = new com.mirantis.mk.Salt()
+    salt.enforceState(pepperEnv, 'I@salt:master', 'backupninja')
+    salt.cmdRun(pepperEnv, 'I@salt:master', "su root -c 'backupninja -n --run /etc/backup.d/200.backup.rsync'")
+}
+
+def backupCeph(pepperEnv, tgt) {
+    def salt = new com.mirantis.mk.Salt()
+    salt.enforceState(pepperEnv, 'I@ceph:backup:server', 'ceph.backup')
+    salt.enforceState(pepperEnv, "I@ceph:backup:client and ${tgt}", 'ceph.backup')
+    salt.cmdRun(pepperEnv, "I@ceph:backup:client and ${tgt}", "su root -c '/usr/local/bin/ceph-backup-runner-call.sh -s'")
+}
+
+def backupGalera(pepperEnv) {
+    def salt = new com.mirantis.mk.Salt()
+    salt.enforceState(pepperEnv, 'I@xtrabackup:server', ['linux.system.repo', 'xtrabackup'])
+    salt.enforceState(pepperEnv, 'I@xtrabackup:client', ['linux.system.repo', 'openssh.client'])
+    salt.cmdRun(pepperEnv, 'I@xtrabackup:client', "su root -c 'salt-call state.sls xtrabackup'")
+    salt.cmdRun(pepperEnv, 'I@xtrabackup:client', "su root -c '/usr/local/bin/innobackupex-runner.sh -s -f'")
+}
+
+// re-bootstrap the Galera cluster (reset wsrep_cluster_address to gcomm:// on the master and restart mysql)
+def clusterGalera(pepperEnv) {
+    def salt = new com.mirantis.mk.Salt()
+    def common = new com.mirantis.mk.Common()
+    try {
+        salt.runSaltProcessStep(pepperEnv, 'I@galera:slave', 'service.stop', ['mysql'])
+    } catch (Exception er) {
+        common.warningMsg('Mysql service already stopped')
+    }
+    try {
+        salt.runSaltProcessStep(pepperEnv, 'I@galera:master', 'service.stop', ['mysql'])
+    } catch (Exception er) {
+        common.warningMsg('Mysql service already stopped')
+    }
+    try {
+        salt.cmdRun(pepperEnv, 'I@galera:slave', "rm /var/lib/mysql/ib_logfile*")
+    } catch (Exception er) {
+        common.warningMsg('Files are not present')
+    }
+    salt.cmdRun(pepperEnv, 'I@galera:master', "sed -i '/gcomm/c\\wsrep_cluster_address=\"gcomm://\"' /etc/mysql/my.cnf")
+    salt.runSaltProcessStep(pepperEnv, 'I@galera:master', 'service.start', ['mysql'])
+    // wait until mysql service on galera master is up
+    try {
+        salt.commandStatus(pepperEnv, 'I@galera:master', 'service mysql status', 'running')
+    } catch (Exception er) {
+        if (INTERACTIVE.toBoolean()) {
+            input message: "Database is not running. Please fix it first and only then click on PROCEED."
+        } else {
+            throw new Exception("Database is not running correctly")
+        }
+    }
+    salt.runSaltProcessStep(pepperEnv, 'I@galera:slave', 'service.start', ['mysql'])
+}
+
+def restoreGalera(pepperEnv) {
+    def salt = new com.mirantis.mk.Salt()
+    def common = new com.mirantis.mk.Common()
+    def openstack = new com.mirantis.mk.Openstack()
+    salt.cmdRun(pepperEnv, 'I@xtrabackup:client', "rm -rf /var/lib/mysql/*")
+    openstack.restoreGaleraDb(pepperEnv)
+}
+
+def backupZookeeper(pepperEnv) {
+    def salt = new com.mirantis.mk.Salt()
+    def common = new com.mirantis.mk.Common()
+    salt.enforceState(pepperEnv, 'I@zookeeper:backup:server', 'zookeeper.backup')
+    salt.enforceState(pepperEnv, 'I@zookeeper:backup:client', 'zookeeper.backup')
+    try {
+        salt.cmdRun(pepperEnv, 'I@opencontrail:control', "su root -c '/usr/local/bin/zookeeper-backup-runner.sh -s'")
+    } catch (Exception er) {
+        throw new Exception('Zookeeper failed to backup. Please fix it before continuing.')
+    }
+}
+
+def backupCassandra(pepperEnv) {
+    def salt = new com.mirantis.mk.Salt()
+    def common = new com.mirantis.mk.Common()
+
+    salt.enforceState(pepperEnv, 'I@cassandra:backup:server', 'cassandra.backup')
+    salt.enforceState(pepperEnv, 'I@cassandra:backup:client', 'cassandra.backup')
+    try {
+        salt.cmdRun(pepperEnv, 'I@cassandra:backup:client', "su root -c '/usr/local/bin/cassandra-backup-runner-call.sh -s'")
+    } catch (Exception er) {
+        throw new Exception('Cassandra failed to backup. Please fix it before continuing.')
+    }
+}
+
+def backupContrail(pepperEnv) {
+    backupZookeeper(pepperEnv)
+    backupCassandra(pepperEnv)
+}
+
+// cassandra and zookeeper
+def restoreContrailDb(pepperEnv) {
+    def salt = new com.mirantis.mk.Salt()
+    def common = new com.mirantis.mk.Common()
+    build job: "deploy-zookeeper-restore", parameters: [
+      [$class: 'StringParameterValue', name: 'SALT_MASTER_CREDENTIALS', value: SALT_MASTER_CREDENTIALS],
+      [$class: 'StringParameterValue', name: 'SALT_MASTER_URL', value: SALT_MASTER_URL]
+    ]
+    build job: "deploy-cassandra-db-restore", parameters: [
+      [$class: 'StringParameterValue', name: 'SALT_MASTER_CREDENTIALS', value: SALT_MASTER_CREDENTIALS],
+      [$class: 'StringParameterValue', name: 'SALT_MASTER_URL', value: SALT_MASTER_URL]
+    ]
+}
+
+def verifyAPIs(pepperEnv, target) {
+    def salt = new com.mirantis.mk.Salt()
+    def common = new com.mirantis.mk.Common()
+    def out = salt.cmdRun(pepperEnv, target, '. /root/keystonercv3; openstack service list; openstack image list; openstack flavor list; openstack compute service list; openstack server list; openstack network list; openstack volume list; openstack orchestration service list')
+    if (out.toString().toLowerCase().contains('error')) {
+        common.errorMsg(out)
+        if (INTERACTIVE.toBoolean()) {
+            input message: "APIs are not working as expected. Please fix it manually."
+        } else {
+            throw new Exception("APIs are not working as expected")
+        }
+    }
+}
+
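+/**
+ * Poll 'mysql.status | grep wsrep_cluster_size' on the target every 500 ms,
+ * up to maxRetries. The check is string-based: output missing
+ * 'wsrep_cluster_size' or containing a '0' anywhere counts as not ready, so
+ * e.g. a cluster size of 10 would never pass.
+ */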
+def verifyGalera(pepperEnv, target, count=0, maxRetries=200) {
+    def salt = new com.mirantis.mk.Salt()
+    def common = new com.mirantis.mk.Common()
+    def out
+    while(count < maxRetries) {
+        try {
+            out = salt.getReturnValues(salt.cmdRun(pepperEnv, target, 'salt-call mysql.status | grep -A1 wsrep_cluster_size'))
+        } catch (Exception er) {
+            common.infoMsg(er)
+        }
+        if ((!out.toString().contains('wsrep_cluster_size')) || (out.toString().contains('0'))) {
+            count++
+            if (count == maxRetries) {
+                if (INTERACTIVE.toBoolean()) {
+                    input message: "Galera is not working as expected. Please check it and fix it first before clicking on PROCEED."
+                } else {
+                    common.errorMsg(out)
+                    throw new Exception("Galera is not working as expected")
+                }
+            }
+            sleep(time: 500, unit: 'MILLISECONDS')
+        } else {
+            break
+        }
+    }
+}
+
+def verifyContrail(pepperEnv, target) {
+    def salt = new com.mirantis.mk.Salt()
+    def common = new com.mirantis.mk.Common()
+    salt.commandStatus(pepperEnv, target, "contrail-status | grep -v == | grep -v \'disabled on boot\' | grep -v nodemgr | grep -v active | grep -v backup", null, false)
+}
+
+
+def verifyService(pepperEnv, target, service) {
+    def salt = new com.mirantis.mk.Salt()
+    def common = new com.mirantis.mk.Common()
+    def targetHosts = salt.getMinionsSorted(pepperEnv, target)
+    for (t in targetHosts) {
+        try {
+            salt.commandStatus(pepperEnv, t, "service ${service} status", 'running')
+        } catch (Exception er) {
+            common.errorMsg(er)
+            if (INTERACTIVE.toBoolean()) {
+                input message: "${service} service is not running correctly on ${t}. Please fix it first manually and only then click on PROCEED."
+            } else {
+                throw new Exception("${service} service is not running correctly on ${t}")
+            }
+        }
+    }
+}
+
+def verifyCeph(pepperEnv, target, type) {
+    def salt = new com.mirantis.mk.Salt()
+    def common = new com.mirantis.mk.Common()
+    def targetHosts = salt.getMinionsSorted(pepperEnv, target)
+    for (t in targetHosts) {
+        def hostname = salt.getReturnValues(salt.getPillar(pepperEnv, t, 'linux:network:hostname'))
+        try {
+            salt.commandStatus(pepperEnv, t, "systemctl status ceph-${type}${hostname}", 'running')
+        } catch (Exception er) {
+            common.errorMsg(er)
+            if (INTERACTIVE.toBoolean()) {
+                input message: "Ceph-${type}${hostname} service is not running correctly on ${t}. Please fix it first manually and only then click on PROCEED."
+            } else {
+                throw new Exception("Ceph-${type}${hostname} service is not running correctly on ${t}")
+            }
+        }
+    }
+}
+
+def verifyCephOsds(pepperEnv, target) {
+    def salt = new com.mirantis.mk.Salt()
+    def common = new com.mirantis.mk.Common()
+    def targetHosts = salt.getMinionsSorted(pepperEnv, target)
+    for (t in targetHosts) {
+        def osd_ids = []
+        // get list of osd disks of the host
+        salt.runSaltProcessStep(pepperEnv, t, 'saltutil.sync_grains', [], null, true, 5)
+        def cephGrain = salt.getGrain(pepperEnv, t, 'ceph')
+        if(cephGrain['return'].isEmpty()){
+            throw new Exception("Ceph salt grain cannot be found!")
+        }
+        common.print(cephGrain)
+        def ceph_disks = cephGrain['return'][0].values()[0].values()[0]['ceph_disk']
+        for (i in ceph_disks) {
+            def osd_id = i.getKey().toString()
+            osd_ids.add('osd.' + osd_id)
+            print("Will check osd." + osd_id)
+        }
+        for (i in osd_ids) {
+            try {
+                salt.commandStatus(pepperEnv, t, "ceph osd tree | grep -w ${i}", 'up')
+            } catch (Exception er) {
+                common.errorMsg(er)
+                if (INTERACTIVE.toBoolean()) {
+                    input message: "Ceph ${i} is not running correctly on ${t}. Please fix it first manually and only then click on PROCEED."
+                } else {
+                    throw new Exception("Ceph ${i} is not running correctly on ${t}")
+                }
+            }
+        }
+    }
+}
+
+
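+// Main flow: set up Pepper, then walk the update / rollback / snapshot-merge
+// sections below. Each block is gated on its TARGET_* entry plus a
+// salt.testTarget() check, and virtual nodes are live-snapshotted (or backed
+// up) before any packages are touched.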
+timeout(time: 12, unit: 'HOURS') {
+    node() {
+        try {
+
+            stage('Setup virtualenv for Pepper') {
+                python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+            }
+
+            // TODO: add the possibility to update just specific components like kernel, openstack, contrail, ovs, rabbitmq, galera, etc.
+
+            /*
+             * Update section
+             */
+            if (updates.contains("cfg")) {
+                def target = 'I@salt:master'
+                if (salt.testTarget(pepperEnv, target)) {
+                    def master = salt.getReturnValues(salt.getPillar(pepperEnv, target, 'linux:network:hostname'))
+                    getCfgNodeProvider(pepperEnv, master)
+                    if (!ROLLBACK_BY_REDEPLOY.toBoolean()) {
+                        virsh.liveSnapshotPresent(pepperEnv, CFG_NODE_PROVIDER, master, SNAPSHOT_NAME)
+                    } else {
+                        saltMasterBackup(pepperEnv)
+                    }
+                    if (PER_NODE.toBoolean()) {
+                        def targetHosts = salt.getMinionsSorted(pepperEnv, target)
+                        for (t in targetHosts) {
+                            updatePkgs(pepperEnv, t, target)
+                            highstate(pepperEnv, t)
+                        }
+                    } else {
+                        updatePkgs(pepperEnv, target)
+                        highstate(pepperEnv, target)
+                    }
+                }
+            }
+
+            if (updates.contains("ctl")) {
+                def target = CTL_TARGET
+                if (salt.testTarget(pepperEnv, target)) {
+                    if (!ROLLBACK_BY_REDEPLOY.toBoolean()) {
+                        def generalTarget = 'ctl'
+                        liveSnapshot(pepperEnv, target, generalTarget)
+                    }
+                    if (PER_NODE.toBoolean()) {
+                        def targetHosts = salt.getMinionsSorted(pepperEnv, target)
+                        for (t in targetHosts) {
+                            updatePkgs(pepperEnv, t)
+                            highstate(pepperEnv, t)
+                        }
+                    } else {
+                        updatePkgs(pepperEnv, target)
+                        highstate(pepperEnv, target)
+                    }
+                    verifyAPIs(pepperEnv, target)
+                }
+            }
+
+            if (updates.contains("prx")) {
+                def target = PRX_TARGET
+                if (salt.testTarget(pepperEnv, target)) {
+                    if (!ROLLBACK_BY_REDEPLOY.toBoolean()) {
+                        def generalTarget = 'prx'
+                        liveSnapshot(pepperEnv, target, generalTarget)
+                    }
+                    if (PER_NODE.toBoolean()) {
+                        def targetHosts = salt.getMinionsSorted(pepperEnv, target)
+                        for (t in targetHosts) {
+                            updatePkgs(pepperEnv, t)
+                            highstate(pepperEnv, t)
+                        }
+                    } else {
+                        updatePkgs(pepperEnv, target)
+                        highstate(pepperEnv, target)
+                    }
+                    verifyService(pepperEnv, target, 'nginx')
+                }
+            }
+
+            if (updates.contains("msg")) {
+                def target = MSG_TARGET
+                if (salt.testTarget(pepperEnv, target)) {
+                    if (!ROLLBACK_BY_REDEPLOY.toBoolean()) {
+                        def generalTarget = 'msg'
+                        liveSnapshot(pepperEnv, target, generalTarget)
+                    }
+                    if (PER_NODE.toBoolean()) {
+                        def targetHosts = salt.getMinionsSorted(pepperEnv, target)
+                        for (t in targetHosts) {
+                            updatePkgs(pepperEnv, t)
+                            highstate(pepperEnv, t)
+                        }
+                    } else {
+                        updatePkgs(pepperEnv, target, target)
+                        highstate(pepperEnv, target)
+                    }
+                    verifyService(pepperEnv, target, 'rabbitmq-server')
+                }
+            }
+
+            if (updates.contains("dbs")) {
+                def target = DBS_TARGET
+                if (salt.testTarget(pepperEnv, target)) {
+                    backupGalera(pepperEnv)
+                    if (!ROLLBACK_BY_REDEPLOY.toBoolean()) {
+                        def generalTarget = 'dbs'
+                        liveSnapshot(pepperEnv, target, generalTarget)
+                    }
+                    if (REBOOT.toBoolean() || PER_NODE.toBoolean()) {
+                        def targetHosts = salt.getMinionsSorted(pepperEnv, target)
+                        for (t in targetHosts) {
+                            updatePkgs(pepperEnv, t)
+                            highstate(pepperEnv, t)
+                            verifyGalera(pepperEnv, t)
+                        }
+                    } else {
+                        updatePkgs(pepperEnv, target)
+                        highstate(pepperEnv, target)
+                        verifyGalera(pepperEnv, target)
+                    }
+                }
+            }
+
+            if (updates.contains("ntw")) {
+                def target = NTW_TARGET
+                if (salt.testTarget(pepperEnv, target)) {
+                    backupContrail(pepperEnv)
+                    if (!ROLLBACK_BY_REDEPLOY.toBoolean()) {
+                        def generalTarget = 'ntw'
+                        liveSnapshot(pepperEnv, target, generalTarget)
+                    }
+                    if (PER_NODE.toBoolean()) {
+                        def targetHosts = salt.getMinionsSorted(pepperEnv, target)
+                        for (t in targetHosts) {
+                            updatePkgs(pepperEnv, t, 'contrail')
+                            highstate(pepperEnv, t)
+                            verifyContrail(pepperEnv, t)
+                        }
+                    } else {
+                        updatePkgs(pepperEnv, target, 'contrail')
+                        highstate(pepperEnv, target)
+                        verifyContrail(pepperEnv, target)
+                    }
+                }
+            }
+
+            if (updates.contains("nal")) {
+                def target = NAL_TARGET
+                if (salt.testTarget(pepperEnv, target)) {
+                    if (!ROLLBACK_BY_REDEPLOY.toBoolean()) {
+                        def generalTarget = 'nal'
+                        liveSnapshot(pepperEnv, target, generalTarget)
+                    }
+                    if (PER_NODE.toBoolean()) {
+                        def targetHosts = salt.getMinionsSorted(pepperEnv, target)
+                        for (t in targetHosts) {
+                            updatePkgs(pepperEnv, t, 'contrail')
+                            highstate(pepperEnv, t)
+                        }
+                    } else {
+                        updatePkgs(pepperEnv, target, 'contrail')
+                        highstate(pepperEnv, target)
+                    }
+                    verifyContrail(pepperEnv, target)
+                }
+            }
+
+            if (updates.contains("gtw-virtual")) {
+                def target = GTW_TARGET
+                if (salt.testTarget(pepperEnv, target)) {
+                    if (!ROLLBACK_BY_REDEPLOY.toBoolean()) {
+                        def generalTarget = 'gtw'
+                        liveSnapshot(pepperEnv, target, generalTarget)
+                    }
+                    if (PER_NODE.toBoolean()) {
+                        def targetHosts = salt.getMinionsSorted(pepperEnv, target)
+                        for (t in targetHosts) {
+                            updatePkgs(pepperEnv, t)
+                            highstate(pepperEnv, t)
+                        }
+                    } else {
+                        updatePkgs(pepperEnv, target)
+                        highstate(pepperEnv, target)
+                    }
+                    verifyService(pepperEnv, target, 'neutron-dhcp-agent')
+                }
+            }
+
+            if (updates.contains("cmn")) {
+                def target = CMN_TARGET
+                if (salt.testTarget(pepperEnv, target)) {
+                    if (!ROLLBACK_BY_REDEPLOY.toBoolean()) {
+                        def generalTarget = 'cmn'
+                        liveSnapshot(pepperEnv, target, generalTarget)
+                    } else {
+                        backupCeph(pepperEnv, target)
+                    }
+                    if (PER_NODE.toBoolean()) {
+                        def targetHosts = salt.getMinionsSorted(pepperEnv, target)
+                        for (t in targetHosts) {
+                            updatePkgs(pepperEnv, t)
+                            highstate(pepperEnv, t)
+                        }
+                    } else {
+                        updatePkgs(pepperEnv, target)
+                        highstate(pepperEnv, target)
+                    }
+                    verifyCeph(pepperEnv, target, 'mon@')
+                }
+            }
+
+            if (updates.contains("rgw")) {
+                def target = RGW_TARGET
+                if (salt.testTarget(pepperEnv, target)) {
+                    if (!ROLLBACK_BY_REDEPLOY.toBoolean()) {
+                        def generalTarget = 'rgw'
+                        liveSnapshot(pepperEnv, target, generalTarget)
+                    }
+                    if (PER_NODE.toBoolean()) {
+                        def targetHosts = salt.getMinionsSorted(pepperEnv, target)
+                        for (t in targetHosts) {
+                            updatePkgs(pepperEnv, t)
+                            highstate(pepperEnv, t)
+                        }
+                    } else {
+                        updatePkgs(pepperEnv, target)
+                        highstate(pepperEnv, target)
+                    }
+                    verifyCeph(pepperEnv, target, 'radosgw@rgw.')
+                }
+            }
+
+            if (updates.contains("log")) {
+                def target = LOG_TARGET
+                if (salt.testTarget(pepperEnv, target)) {
+                    if (!ROLLBACK_BY_REDEPLOY.toBoolean()) {
+                        def generalTarget = 'log'
+                        liveSnapshot(pepperEnv, target, generalTarget)
+                    }
+                    if (PER_NODE.toBoolean()) {
+                        def targetHosts = salt.getMinionsSorted(pepperEnv, target)
+                        for (t in targetHosts) {
+                            updatePkgs(pepperEnv, t)
+                            highstate(pepperEnv, t)
+                        }
+                    } else {
+                        updatePkgs(pepperEnv, target)
+                        highstate(pepperEnv, target)
+                    }
+                }
+            }
+
+            if (updates.contains("mon")) {
+                def target = MON_TARGET
+                if (salt.testTarget(pepperEnv, target)) {
+                    if (!ROLLBACK_BY_REDEPLOY.toBoolean()) {
+                        def generalTarget = 'mon'
+                        liveSnapshot(pepperEnv, target, generalTarget)
+                    }
+                    if (PER_NODE.toBoolean()) {
+                        def targetHosts = salt.getMinionsSorted(pepperEnv, target)
+                        for (t in targetHosts) {
+                            updatePkgs(pepperEnv, t)
+                            highstate(pepperEnv, t)
+                        }
+                    } else {
+                        updatePkgs(pepperEnv, target)
+                        highstate(pepperEnv, target)
+                    }
+                }
+            }
+
+            if (updates.contains("mtr")) {
+                def target = MTR_TARGET
+                if (salt.testTarget(pepperEnv, target)) {
+                    if (!ROLLBACK_BY_REDEPLOY.toBoolean()) {
+                        def generalTarget = 'mtr'
+                        liveSnapshot(pepperEnv, target, generalTarget)
+                    }
+                    if (PER_NODE.toBoolean()) {
+                        def targetHosts = salt.getMinionsSorted(pepperEnv, target)
+                        for (t in targetHosts) {
+                            updatePkgs(pepperEnv, t)
+                            highstate(pepperEnv, t)
+                        }
+                    } else {
+                        updatePkgs(pepperEnv, target)
+                        highstate(pepperEnv, target)
+                    }
+                }
+            }
+
+            if (updates.contains("cid")) {
+                def target = CID_TARGET
+                if (salt.testTarget(pepperEnv, target)) {
+                    if (!ROLLBACK_BY_REDEPLOY.toBoolean()) {
+                        def generalTarget = 'cid'
+                        liveSnapshot(pepperEnv, target, generalTarget)
+                    }
+                    updatePkgs(pepperEnv, target, 'cicd')
+                    highstate(pepperEnv, target)
+                    verifyService(pepperEnv, target, 'docker')
+                }
+            }
+
+            //
+            // Physical machine updates (CMP_TARGET, KVM_TARGET, CEPH_OSD_TARGET, GTW_TARGET)
+            //
+            if (updates.contains("cmp")) {
+                def target = CMP_TARGET
+                if (salt.testTarget(pepperEnv, target)) {
+                    if (PER_NODE.toBoolean()) {
+                        def targetHosts = salt.getMinionsSorted(pepperEnv, target)
+                        for (t in targetHosts) {
+                            updatePkgs(pepperEnv, t)
+                            highstate(pepperEnv, t)
+                        }
+                    } else {
+                        updatePkgs(pepperEnv, target)
+                        highstate(pepperEnv, target)
+                    }
+                    verifyService(pepperEnv, target, 'nova-compute')
+                }
+            }
+
+            if (updates.contains("kvm")) {
+                def target = KVM_TARGET
+                if (salt.testTarget(pepperEnv, target)) {
+                    if (PER_NODE.toBoolean()) {
+                        def targetHosts = salt.getMinionsSorted(pepperEnv, target)
+                        for (t in targetHosts) {
+                            updatePkgs(pepperEnv, t)
+                            highstate(pepperEnv, t)
+                        }
+                    } else {
+                        updatePkgs(pepperEnv, target, target)
+                        highstate(pepperEnv, target)
+                    }
+                    verifyService(pepperEnv, target, 'libvirt-bin')
+                }
+            }
+
+            if (updates.contains("osd")) {
+                def target = CEPH_OSD_TARGET
+                if (salt.testTarget(pepperEnv, target)) {
+                    if (PER_NODE.toBoolean()) {
+                        def targetHosts = salt.getMinionsSorted(pepperEnv, target)
+                        for (t in targetHosts) {
+                            updatePkgs(pepperEnv, t)
+                            highstate(pepperEnv, t)
+                        }
+                    } else {
+                        updatePkgs(pepperEnv, target)
+                        highstate(pepperEnv, target)
+                    }
+                    verifyCephOsds(pepperEnv, target)
+                }
+            }
+
+            if (updates.contains("gtw-physical")) {
+                def target = GTW_TARGET
+                if (salt.testTarget(pepperEnv, target)) {
+                    if (PER_NODE.toBoolean()) {
+                        def targetHosts = salt.getMinionsSorted(pepperEnv, target)
+                        for (t in targetHosts) {
+                            updatePkgs(pepperEnv, t)
+                            highstate(pepperEnv, t)
+                        }
+                    } else {
+                        updatePkgs(pepperEnv, target)
+                        highstate(pepperEnv, target)
+                    }
+                    verifyService(pepperEnv, target, 'neutron-dhcp-agent')
+                }
+            }
+
+            /*
+             * Rollback section
+             */
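+            // each rollback block below either reverts the target to the live snapshot
+            // taken during the update, or removes the node for redeployment when
+            // ROLLBACK_BY_REDEPLOY is set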
+          /*  if (rollbacks.contains("cfg")) {
+                if (salt.testTarget(pepperEnv, 'I@salt:master')) {
+                    stage('ROLLBACK_CFG') {
+                        input message: "To roll back the CFG nodes, run the following commands on the kvm nodes hosting them: virsh destroy cfg0X.domain; virsh define /var/lib/libvirt/images/cfg0X.domain.xml; virsh start cfg0X.domain; virsh snapshot-delete cfg0X.domain --metadata ${SNAPSHOT_NAME}; rm /var/lib/libvirt/images/cfg0X.domain.${SNAPSHOT_NAME}.qcow2; rm /var/lib/libvirt/images/cfg0X.domain.xml. Afterwards, restart the 'docker' service on all cicd nodes and run the 'linux.system.repo' Salt state on them. Once these commands have been run, the current pipeline job will be killed."
+                        //rollbackSaltMaster(pepperEnv, 'I@salt:master')
+                        //finishSaltMasterRollback(pepperEnv, 'I@salt:master')
+                    }
+                }
+            } */
+
+            if (rollbacks.contains("ctl")) {
+                def target = CTL_TARGET
+                if (salt.testTarget(pepperEnv, target)) {
+                    if (!ROLLBACK_BY_REDEPLOY.toBoolean()) {
+                        rollback(pepperEnv, target, 'ctl')
+                        verifyAPIs(pepperEnv, target)
+                    } else {
+                        removeNode(pepperEnv, target, 'ctl')
+                    }
+                }
+            }
+
+            if (rollbacks.contains("prx")) {
+                def target = PRX_TARGET
+                if (salt.testTarget(pepperEnv, target)) {
+                    if (!ROLLBACK_BY_REDEPLOY.toBoolean()) {
+                        rollback(pepperEnv, target, 'prx')
+                        verifyService(pepperEnv, target, 'nginx')
+                    } else {
+                        removeNode(pepperEnv, target, 'prx')
+                    }
+                }
+            }
+
+            if (rollbacks.contains("msg")) {
+                def target = MSG_TARGET
+                if (salt.testTarget(pepperEnv, target)) {
+                    if (!ROLLBACK_BY_REDEPLOY.toBoolean()) {
+                        rollback(pepperEnv, target, 'msg')
+                        salt.enforceState(pepperEnv, target, 'rabbitmq')
+                        verifyService(pepperEnv, target, 'rabbitmq-server')
+                    } else {
+                        removeNode(pepperEnv, target, 'msg')
+                    }
+                }
+            }
+
+            if (rollbacks.contains("dbs")) {
+                def target = DBS_TARGET
+                if (salt.testTarget(pepperEnv, target)) {
+                    if (!ROLLBACK_BY_REDEPLOY.toBoolean()) {
+                        rollback(pepperEnv, target, 'dbs')
+                        clusterGalera(pepperEnv)
+                        verifyGalera(pepperEnv, target)
+                    } else {
+                        removeNode(pepperEnv, target, 'dbs')
+                    }
+                }
+            }
+
+            if (rollbacks.contains("ntw")) {
+                def target = NTW_TARGET
+                if (salt.testTarget(pepperEnv, target)) {
+                    if (!ROLLBACK_BY_REDEPLOY.toBoolean()) {
+                        rollback(pepperEnv, target, 'ntw')
+                        verifyContrail(pepperEnv, target)
+                    } else {
+                        removeNode(pepperEnv, target, 'ntw')
+                    }
+                }
+            }
+
+            if (rollbacks.contains("nal")) {
+                def target = NAL_TARGET
+                if (salt.testTarget(pepperEnv, target)) {
+                    if (!ROLLBACK_BY_REDEPLOY.toBoolean()) {
+                        rollback(pepperEnv, target, 'nal')
+                        verifyContrail(pepperEnv, target)
+                    } else {
+                        removeNode(pepperEnv, target, 'nal')
+                    }
+                }
+            }
+
+            if (rollbacks.contains("gtw-virtual")) {
+                def target = GTW_TARGET
+                if (salt.testTarget(pepperEnv, target)) {
+                    if (!ROLLBACK_BY_REDEPLOY.toBoolean()) {
+                        rollback(pepperEnv, target, 'gtw')
+                        verifyService(pepperEnv, target, 'neutron-dhcp-agent')
+                    } else {
+                        removeNode(pepperEnv, target, 'gtw')
+                    }
+                }
+            }
+
+            if (rollbacks.contains("cmn")) {
+                def target = CMN_TARGET
+                if (salt.testTarget(pepperEnv, target)) {
+                    if (!ROLLBACK_BY_REDEPLOY.toBoolean()) {
+                        rollback(pepperEnv, target, 'cmn')
+                        verifyCeph(pepperEnv, target, 'mon@')
+                    } else {
+                        removeNode(pepperEnv, target, 'cmn')
+                    }
+                }
+            }
+
+            if (rollbacks.contains("rgw")) {
+                def target = RGW_TARGET
+                if (salt.testTarget(pepperEnv, target)) {
+                    if (!ROLLBACK_BY_REDEPLOY.toBoolean()) {
+                        rollback(pepperEnv, target, 'rgw')
+                        verifyCeph(pepperEnv, target, 'radosgw@rgw.')
+                    } else {
+                        removeNode(pepperEnv, target, 'rgw')
+                    }
+                }
+            }
+
+            if (rollbacks.contains("log")) {
+                def target = LOG_TARGET
+                if (salt.testTarget(pepperEnv, target)) {
+                    if (!ROLLBACK_BY_REDEPLOY.toBoolean()) {
+                        rollback(pepperEnv, target, 'log')
+                    } else {
+                        removeNode(pepperEnv, target, 'log')
+                    }
+                }
+            }
+
+            if (rollbacks.contains("mon")) {
+                def target = MON_TARGET
+                if (salt.testTarget(pepperEnv, target)) {
+                    if (!ROLLBACK_BY_REDEPLOY.toBoolean()) {
+                        rollback(pepperEnv, target, 'mon')
+                    } else {
+                        removeNode(pepperEnv, target, 'mon')
+                    }
+                }
+            }
+
+            if (rollbacks.contains("mtr")) {
+                def target = MTR_TARGET
+                if (salt.testTarget(pepperEnv, target)) {
+                    if (!ROLLBACK_BY_REDEPLOY.toBoolean()) {
+                        rollback(pepperEnv, target, 'mtr')
+                    } else {
+                        removeNode(pepperEnv, target, 'mtr')
+                    }
+                }
+            }
+            /*
+            if (ROLLBACK_CID.toBoolean()) {
+                def target = 'cid*'
+                if (salt.testTarget(pepperEnv, target)) {
+                    stage('ROLLBACK_CID') {
+                        input message: "To roll back the CICD nodes, run the following commands on the kvm nodes hosting them: virsh destroy cid0X.domain; virsh define /var/lib/libvirt/images/cid0X.domain.xml; virsh start cid0X.domain; virsh snapshot-delete cid0X.domain --metadata ${SNAPSHOT_NAME}; rm /var/lib/libvirt/images/cid0X.domain.${SNAPSHOT_NAME}.qcow2; rm /var/lib/libvirt/images/cid0X.domain.xml. Afterwards, restart the 'docker' service on all cicd nodes and run the 'linux.system.repo' Salt state on them. Once these commands have been run, the current pipeline job will be killed."
+                    }
+                }
+            } */
+
+            //
+            // physical machines rollback CMP_TARGET
+            //
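+            // physical nodes have no live snapshots to revert to, so rollback
+            // downgrades packages and re-applies the highstate instead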
+            if (rollbacks.contains("cmp")) {
+                def target = CMP_TARGET
+                if (salt.testTarget(pepperEnv, target)) {
+                    if (PER_NODE.toBoolean()) {
+                        def targetHosts = salt.getMinionsSorted(pepperEnv, target)
+                        for (t in targetHosts) {
+                            rollbackPkgs(pepperEnv, t)
+                            highstate(pepperEnv, t)
+                        }
+                    } else {
+                        rollbackPkgs(pepperEnv, target, target)
+                        highstate(pepperEnv, target)
+                    }
+                    verifyService(pepperEnv, target, 'nova-compute')
+                }
+            }
+
+            if (rollbacks.contains("kvm")) {
+                def target = KVM_TARGET
+                if (salt.testTarget(pepperEnv, target)) {
+                    if (PER_NODE.toBoolean()) {
+                        def targetHosts = salt.getMinionsSorted(pepperEnv, target)
+                        for (t in targetHosts) {
+                            rollbackPkgs(pepperEnv, t)
+                            highstate(pepperEnv, t)
+                        }
+                    } else {
+                        rollbackPkgs(pepperEnv, target, target)
+                        highstate(pepperEnv, target)
+                    }
+                    verifyService(pepperEnv, target, 'libvirt-bin')
+                }
+            }
+
+            if (rollbacks.contains("osd")) {
+                def target = CEPH_OSD_TARGET
+                if (salt.testTarget(pepperEnv, target)) {
+                    if (PER_NODE.toBoolean()) {
+                        def targetHosts = salt.getMinionsSorted(pepperEnv, target)
+                        for (t in targetHosts) {
+                            rollbackPkgs(pepperEnv, t)
+                            highstate(pepperEnv, t)
+                        }
+                    } else {
+                        rollbackPkgs(pepperEnv, target, target)
+                        highstate(pepperEnv, target)
+                    }
+                    verifyCephOsds(pepperEnv, target)
+                }
+            }
+
+            if (rollbacks.contains("gtw-physical")) {
+                def target = GTW_TARGET
+                if (salt.testTarget(pepperEnv, target)) {
+                    if (PER_NODE.toBoolean()) {
+                        def targetHosts = salt.getMinionsSorted(pepperEnv, target)
+                        for (t in targetHosts) {
+                            rollbackPkgs(pepperEnv, t)
+                            highstate(pepperEnv, t)
+                        }
+                    } else {
+                        rollbackPkgs(pepperEnv, target, target)
+                        highstate(pepperEnv, target)
+                    }
+                    verifyService(pepperEnv, target, 'neutron-dhcp-agent')
+                }
+            }
+
+            /*
+             * Merge snapshots section
+             */
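+            // merging a snapshot keeps the updated state and discards the rollback
+            // point, so the affected services are verified once more afterwards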
+            if (merges.contains("cfg")) {
+                if (salt.testTarget(pepperEnv, 'I@salt:master')) {
+                    mergeSnapshot(pepperEnv, 'I@salt:master')
+                }
+            }
+
+            if (merges.contains("ctl")) {
+                if (salt.testTarget(pepperEnv, CTL_TARGET)) {
+                    mergeSnapshot(pepperEnv, CTL_TARGET, 'ctl')
+                    verifyService(pepperEnv, CTL_TARGET, 'nova-api')
+                }
+            }
+
+            if (merges.contains("prx")) {
+                if (salt.testTarget(pepperEnv, PRX_TARGET)) {
+                    mergeSnapshot(pepperEnv, PRX_TARGET, 'prx')
+                    verifyService(pepperEnv, PRX_TARGET, 'nginx')
+                }
+            }
+
+            if (merges.contains("msg")) {
+                if (salt.testTarget(pepperEnv, MSG_TARGET)) {
+                    mergeSnapshot(pepperEnv, MSG_TARGET, 'msg')
+                    verifyService(pepperEnv, MSG_TARGET, 'rabbitmq-server')
+                }
+            }
+
+            if (merges.contains("dbs")) {
+                if (salt.testTarget(pepperEnv, DBS_TARGET)) {
+                    mergeSnapshot(pepperEnv, DBS_TARGET, 'dbs')
+                    verifyGalera(pepperEnv, DBS_TARGET)
+                    backupGalera(pepperEnv)
+                }
+            }
+
+            if (merges.contains("ntw")) {
+                if (salt.testTarget(pepperEnv, NTW_TARGET)) {
+                    mergeSnapshot(pepperEnv, NTW_TARGET, 'ntw')
+                    verifyContrail(pepperEnv, NTW_TARGET)
+                    backupContrail(pepperEnv)
+                }
+            }
+
+            if (merges.contains("nal")) {
+                if (salt.testTarget(pepperEnv, NAL_TARGET)) {
+                    mergeSnapshot(pepperEnv, NAL_TARGET, 'nal')
+                    verifyContrail(pepperEnv, NAL_TARGET)
+                }
+            }
+
+            if (merges.contains("gtw-virtual")) {
+                if (salt.testTarget(pepperEnv, GTW_TARGET)) {
+                    mergeSnapshot(pepperEnv, GTW_TARGET, 'gtw')
+                    verifyService(pepperEnv, GTW_TARGET, 'neutron-dhcp-agent')
+                }
+            }
+
+            if (merges.contains("cmn")) {
+                if (salt.testTarget(pepperEnv, CMN_TARGET)) {
+                    mergeSnapshot(pepperEnv, CMN_TARGET, 'cmn')
+                    verifyCeph(pepperEnv, CMN_TARGET, 'mon@')
+                    backupCeph(pepperEnv, CMN_TARGET)
+                }
+            }
+
+            if (merges.contains("rgw")) {
+                if (salt.testTarget(pepperEnv, RGW_TARGET)) {
+                    mergeSnapshot(pepperEnv, RGW_TARGET, 'rgw')
+                    verifyCeph(pepperEnv, RGW_TARGET, 'radosgw@rgw.')
+                }
+            }
+
+            if (merges.contains("log")) {
+                if (salt.testTarget(pepperEnv, LOG_TARGET)) {
+                    mergeSnapshot(pepperEnv, LOG_TARGET, 'log')
+                }
+            }
+
+            if (merges.contains("mon")) {
+                if (salt.testTarget(pepperEnv, MON_TARGET)) {
+                    mergeSnapshot(pepperEnv, MON_TARGET, 'mon')
+                }
+            }
+
+            if (merges.contains("mtr")) {
+                if (salt.testTarget(pepperEnv, MTR_TARGET)) {
+                    mergeSnapshot(pepperEnv, MTR_TARGET, 'mtr')
+                }
+            }
+
+            if (merges.contains("cid")) {
+                if (salt.testTarget(pepperEnv, CID_TARGET)) {
+                    mergeSnapshot(pepperEnv, CID_TARGET, 'cid')
+                    verifyService(pepperEnv, CID_TARGET, 'docker')
+                }
+            }
+
+            if (RESTORE_GALERA.toBoolean()) {
+                restoreGalera(pepperEnv)
+                verifyGalera(pepperEnv, DBS_TARGET)
+            }
+
+            if (RESTORE_CONTRAIL_DB.toBoolean()) {
+                restoreContrailDb(pepperEnv)
+                // verification is already present in restore pipelines
+            }
+
+        } catch (Throwable e) {
+            // If there was an error or exception thrown, the build failed
+            currentBuild.result = "FAILURE"
+            currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+            throw e
+        }
+    }
+}
diff --git a/docker-cleanup-pipeline.groovy b/docker-cleanup-pipeline.groovy
index 710b1bb..388793a 100644
--- a/docker-cleanup-pipeline.groovy
+++ b/docker-cleanup-pipeline.groovy
@@ -28,7 +28,7 @@
           """, false)
     }
     stage("Run docker system prune"){
-      salt.cmdRun(pepperEnv, 'I@jenkins:slave', "docker system prune -f")
+      salt.cmdRun(pepperEnv, 'I@jenkins:slave', "docker system prune -f", false) // don't verify the result
     }
   }
 }
diff --git a/generate-cookiecutter-products.groovy b/generate-cookiecutter-products.groovy
index 39590e5..c2d4943 100644
--- a/generate-cookiecutter-products.groovy
+++ b/generate-cookiecutter-products.groovy
@@ -54,6 +54,8 @@
             def clusterDomain = templateContext.default_context.cluster_domain
             def clusterName = templateContext.default_context.cluster_name
             def saltMaster = templateContext.default_context.salt_master_hostname
+            def localRepositories = templateContext.default_context.local_repositories.toBoolean()
+            def offlineDeployment = templateContext.default_context.offline_deployment.toBoolean()
             def cutterEnv = "${env.WORKSPACE}/cutter"
             def jinjaEnv = "${env.WORKSPACE}/jinja"
             def outputDestination = "${modelEnv}/classes/cluster/${clusterName}"
@@ -98,6 +100,7 @@
                     sh "git init"
                     ssh.agentSh("git submodule add ${sharedReclassUrl} 'classes/system'")
                 }
+
                 def sharedReclassBranch = templateContext.default_context.shared_reclass_branch
                 // Use refspec if exists first of all
                 if (sharedReclassBranch.toString().startsWith('refs/')) {
@@ -161,6 +164,16 @@
                 }
             }
 
+            if(localRepositories && !offlineDeployment){
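+                // add the local aptly model as a submodule and, unless mcpVersion is a
+                // moving target (nightly, testing, stable), pin it to the matching tag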
+                def aptlyModelUrl = templateContext.default_context.local_model_url
+                dir(path: modelEnv) {
+                    ssh.agentSh "git submodule add \"${aptlyModelUrl}\" \"classes/cluster/${clusterName}/cicd/aptly\""
+                    if (!(mcpVersion in ["nightly", "testing", "stable"])) {
+                        ssh.agentSh "cd \"classes/cluster/${clusterName}/cicd/aptly\";git fetch --tags;git checkout ${mcpVersion}"
+                    }
+                }
+            }
+
             stage('Generate new SaltMaster node') {
                 def nodeFile = "${modelEnv}/nodes/${saltMaster}.${clusterDomain}.yml"
                 def nodeString = """classes:
diff --git a/openstack-control-upgrade.groovy b/openstack-control-upgrade.groovy
index 8f1b612..21134a2 100644
--- a/openstack-control-upgrade.groovy
+++ b/openstack-control-upgrade.groovy
@@ -145,7 +145,7 @@
     salt.enforceState(pepperEnv, 'I@xtrabackup:server', 'xtrabackup')
     salt.enforceState(pepperEnv, 'I@xtrabackup:client', 'openssh.client')
     salt.cmdRun(pepperEnv, 'I@xtrabackup:client', "su root -c 'salt-call state.sls xtrabackup'")
-    salt.cmdRun(pepperEnv, 'I@xtrabackup:client', "su root -c '/usr/local/bin/innobackupex-runner.sh'")
+    salt.cmdRun(pepperEnv, 'I@xtrabackup:client', "su root -c '/usr/local/bin/innobackupex-runner.sh -f -s'")
 
     def databases = salt.cmdRun(pepperEnv, 'I@mysql:client','salt-call mysql.db_list | grep upgrade | awk \'/-/ {print \$2}\'')
     if(databases && databases != ""){
@@ -249,7 +249,7 @@
     }
 
     if ((OPERATING_SYSTEM_RELEASE_UPGRADE.toBoolean() == true) && (SKIP_VM_RELAUNCH.toBoolean() == false)) {
-        salt.cmdRun(pepperEnv, 'I@xtrabackup:client', "su root -c '/usr/local/bin/innobackupex-runner.sh'")
+        salt.cmdRun(pepperEnv, 'I@xtrabackup:client', "su root -c '/usr/local/bin/innobackupex-runner.sh -f -s'")
 
         salt.enforceState(pepperEnv, 'I@salt:control', 'salt.control')
 
diff --git a/restore-cassandra.groovy b/restore-cassandra.groovy
index ffc035a..10ec378 100644
--- a/restore-cassandra.groovy
+++ b/restore-cassandra.groovy
@@ -25,6 +25,16 @@
             stage('Ask for manual confirmation') {
                 input message: "Are you sure you have the correct backups ready? Do you really want to continue to restore Cassandra?"
             }
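+            // stop services that write through the Contrail API so that nothing
+            // touches Cassandra while the restore is in progress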
+            try {
+                salt.runSaltProcessStep(pepperEnv, 'I@neutron:server', 'service.stop', ['neutron-server'], null, true)
+            } catch (Exception er) {
+                common.warningMsg('neutron-server service already stopped')
+            }
+            try {
+                salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.stop', ['supervisor-config'], null, true)
+            } catch (Exception er) {
+                common.warningMsg('Supervisor-config service already stopped')
+            }
             // Cassandra restore section
             try {
                 salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.stop', ['supervisor-database'], null, true)
@@ -70,6 +80,7 @@
             sleep(5)
 
             salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.restart', ['supervisor-database'], null, true)
+            salt.runSaltProcessStep(pepperEnv, 'I@neutron:server', 'service.start', ['neutron-server'], null, true)
 
             // wait until contrail-status is up
             salt.commandStatus(pepperEnv, 'I@opencontrail:control', "contrail-status | grep -v == | grep -v \'disabled on boot\' | grep -v nodemgr | grep -v active | grep -v backup", null, false)
diff --git a/test-salt-model-node.groovy b/test-salt-model-node.groovy
index 3893066..7fc333d 100644
--- a/test-salt-model-node.groovy
+++ b/test-salt-model-node.groovy
@@ -10,6 +10,7 @@
  *  SYSTEM_GIT_URL
  *  SYSTEM_GIT_REF
  *  FORMULAS_SOURCE
+ *  RECLASS_VERSION
  *  MAX_CPU_PER_JOB
  *  LEGACY_TEST_MODE
  *  RECLASS_IGNORE_CLASS_NOTFOUND
@@ -63,7 +64,7 @@
             def workspace = common.getWorkspace()
             common.infoMsg("Running salt model test for node ${NODE_TARGET} in cluster ${CLUSTER_NAME}")
             try {
-              saltModelTesting.setupAndTestNode(NODE_TARGET, CLUSTER_NAME, EXTRA_FORMULAS, workspace, FORMULAS_SOURCE, FORMULAS_REVISION, MAX_CPU_PER_JOB.toInteger(), RECLASS_IGNORE_CLASS_NOTFOUND, LEGACY_TEST_MODE, APT_REPOSITORY, APT_REPOSITORY_GPG)
+              saltModelTesting.setupAndTestNode(NODE_TARGET, CLUSTER_NAME, EXTRA_FORMULAS, workspace, FORMULAS_SOURCE, FORMULAS_REVISION, RECLASS_VERSION, MAX_CPU_PER_JOB.toInteger(), RECLASS_IGNORE_CLASS_NOTFOUND, LEGACY_TEST_MODE, APT_REPOSITORY, APT_REPOSITORY_GPG)
             } catch (Exception e) {
               if (e.getMessage() == "script returned exit code 124") {
                 common.errorMsg("Impossible to test node due to timeout of salt-master, ABORTING BUILD")
diff --git a/test-salt-models-pipeline.groovy b/test-salt-models-pipeline.groovy
index ca07fc1..d6279f9 100644
--- a/test-salt-models-pipeline.groovy
+++ b/test-salt-models-pipeline.groovy
@@ -105,6 +105,7 @@
     [$class: 'StringParameterValue', name: 'FORMULAS_REVISION', value: FORMULAS_REVISION],
     [$class: 'StringParameterValue', name: 'CREDENTIALS_ID', value: CREDENTIALS_ID],
     [$class: 'StringParameterValue', name: 'SYSTEM_GIT_URL', value: SYSTEM_GIT_URL],
+    [$class: 'StringParameterValue', name: 'RECLASS_VERSION', value: RECLASS_VERSION],
     [$class: 'StringParameterValue', name: 'MAX_CPU_PER_JOB', value: MAX_CPU_PER_JOB],
     [$class: 'StringParameterValue', name: 'SYSTEM_GIT_REF', value: SYSTEM_GIT_REF],
     [$class: 'BooleanParameterValue', name: 'LEGACY_TEST_MODE', value: LEGACY_TEST_MODE.toBoolean()],