Merge "Refactor backupninja backup pipeline to support Dogtag backups"
diff --git a/ceph-add-osd-upmap.groovy b/ceph-add-osd-upmap.groovy
index 96ca29d..4bbb78d 100644
--- a/ceph-add-osd-upmap.groovy
+++ b/ceph-add-osd-upmap.groovy
@@ -64,7 +64,8 @@
 
         stage ("verify client versions")
         {
-          def nodes = salt.getMinions("pepperEnv", "I@ceph:common and not E@mon*")
+          // mon* nodes are the ones matching both I@docker:swarm and I@prometheus:server
+          def nodes = salt.getMinions("pepperEnv", "I@ceph:common and not ( I@docker:swarm and I@prometheus:server )")
           for ( node in nodes )
           {
             def versions = salt.cmdRun("pepperEnv", node, "ceph features --format json", checkResponse=true, batch=null, output=false).values()[0]
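The compound target above expresses "every ceph client except the monitors" through pillar data (the mon* nodes are the ones matching both I@docker:swarm and I@prometheus:server) instead of the old hostname glob E@mon*, so renamed monitor hosts are still excluded. A minimal sketch of how the per-node check that follows might consume the `ceph features --format json` output; the JSON shape and the expected release are illustration-only assumptions, not part of this change:

    import groovy.json.JsonSlurper

    // Sketch: true if any connected client group reports an unexpected release.
    def hasOldClients(String featuresJson, String expectedRelease = 'luminous') {
        def features = new JsonSlurper().parseText(featuresJson)
        return (features.client ?: []).any { group -> group.release != expectedRelease }
    }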
diff --git a/ceph-remove-osd.groovy b/ceph-remove-osd.groovy
index 169bbd0..66e9422 100644
--- a/ceph-remove-osd.groovy
+++ b/ceph-remove-osd.groovy
@@ -56,10 +56,14 @@
         }
     }
     if (partition?.trim()) {
-        // dev = /dev/sdi
+        def part_id
+        if (partition.contains("nvme")) {
+          part_id = partition.substring(partition.lastIndexOf("p")+1).replaceAll("[^0-9]+", "")
+        }
+        else {
+          part_id = partition.substring(partition.lastIndexOf("/")+1).replaceAll("[^0-9]+", "")
+        }
         def dev = partition.replaceAll('\\d+$', "")
-        // part_id = 2
-        def part_id = partition.substring(partition.lastIndexOf("/")+1).replaceAll("[^0-9]+", "")
         runCephCommand(master, target, "Ignore | parted ${dev} rm ${part_id}")
     }
     return
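The split above exists because the two kernel naming schemes embed the partition number differently: classic disks number partitions directly (/dev/sdi2), while NVMe devices insert a "p" separator (/dev/nvme0n1p2), so taking all digits after the last "/" would swallow the NVMe namespace digits. A worked example of the branches (device names assumed for illustration):

    // NVMe: digits after the partition separator "p" -> "2"
    assert "/dev/nvme0n1p2".substring("/dev/nvme0n1p2".lastIndexOf("p") + 1).replaceAll("[^0-9]+", "") == "2"
    // classic disk: digits after the last "/" -> "2"
    assert "/dev/sdi2".substring("/dev/sdi2".lastIndexOf("/") + 1).replaceAll("[^0-9]+", "") == "2"
    // why the special case is needed: the generic branch on an NVMe name -> "012"
    assert "/dev/nvme0n1p2".substring("/dev/nvme0n1p2".lastIndexOf("/") + 1).replaceAll("[^0-9]+", "") == "012"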
diff --git a/ceph-upgrade.groovy b/ceph-upgrade.groovy
index cc8a84d..a50c253 100644
--- a/ceph-upgrade.groovy
+++ b/ceph-upgrade.groovy
@@ -33,20 +33,27 @@
     return salt.cmdRun(master, target, cmd)
 }
 
-def waitForHealthy(master, count=0, attempts=300) {
+def waitForHealthy(master, flags, count=0, attempts=300) {
     // wait for healthy cluster
     while (count<attempts) {
         def health = runCephCommand(master, ADMIN_HOST, 'ceph health')['return'][0].values()[0]
         if (health.contains('HEALTH_OK')) {
             common.infoMsg('Cluster is healthy')
             break;
+        } else {
+          for (flag in flags) {
+            if (health.contains(flag + ' flag(s) set') && !(health.contains('down'))) {
+              common.infoMsg('Cluster is healthy: only expected maintenance flags are set')
+              return;
+            }
+          }
         }
         count++
         sleep(10)
     }
 }
 
-def backup(master, target) {
+def backup(master, flags, target) {
     stage("backup ${target}") {
 
         if (target == 'osd') {
@@ -72,7 +79,7 @@
                 def provider_pillar = salt.getPillar(master, "${kvm01}", "salt:control:cluster:internal:node:${minion_name}:provider")
                 def minionProvider = provider_pillar['return'][0].values()[0]
 
-                waitForHealthy(master)
+                waitForHealthy(master, flags)
                 try {
                     salt.cmdRun(master, "${minionProvider}", "[ ! -f ${BACKUP_DIR}/${minion_name}.${domain}.qcow2.bak ] && virsh destroy ${minion_name}.${domain}")
                 } catch (Exception e) {
@@ -89,14 +96,14 @@
                     common.warningMsg(e)
                 }
                 salt.minionsReachable(master, 'I@salt:master', "${minion_name}*")
-                waitForHealthy(master)
+                waitForHealthy(master, flags)
             }
         }
     }
     return
 }
 
-def upgrade(master, target) {
+def upgrade(master, target, flags) {
 
     stage("Change ${target} repos") {
         salt.runSaltProcessStep(master, "I@ceph:${target}", 'saltutil.refresh_pillar', [], null, true, 5)
@@ -127,13 +134,21 @@
             }
             // restart services
             stage("Restart ${target} services on ${minion}") {
-                runCephCommand(master, "${minion}", "systemctl restart ceph-${target}.target")
+                if (target == 'osd') {
+                  def osds = salt.getGrain(master, "${minion}", 'ceph:ceph_disk').values()[0]
+                  osds[0].values()[0].values()[0].each { osd, param ->
+                    runCephCommand(master, "${minion}", "systemctl restart ceph-${target}@${osd}")
+                    waitForHealthy(master, flags)
+                  }
+                } else {
+                  runCephCommand(master, "${minion}", "systemctl restart ceph-${target}.target")
+                  waitForHealthy(master, flags)
+                }
             }
 
             stage("Verify services for ${minion}") {
                 sleep(10)
                 runCephCommand(master, "${minion}", "systemctl status ceph-${target}.target")
-                waitForHealthy(master)
             }
 
             stage('Ask for manual confirmation') {
@@ -198,23 +213,23 @@
         }
 
         if (STAGE_UPGRADE_MON.toBoolean() == true) {
-            upgrade(pepperEnv, 'mon')
+            upgrade(pepperEnv, 'mon', flags)
         }
 
         if (STAGE_UPGRADE_MGR.toBoolean() == true) {
-            upgrade(pepperEnv, 'mgr')
+            upgrade(pepperEnv, 'mgr', flags)
         }
 
         if (STAGE_UPGRADE_OSD.toBoolean() == true) {
-            upgrade(pepperEnv, 'osd')
+            upgrade(pepperEnv, 'osd', flags)
         }
 
         if (STAGE_UPGRADE_RGW.toBoolean() == true) {
-            upgrade(pepperEnv, 'radosgw')
+            upgrade(pepperEnv, 'radosgw', flags)
         }
 
         if (STAGE_UPGRADE_CLIENT.toBoolean() == true) {
-            upgrade(pepperEnv, 'common')
+            upgrade(pepperEnv, 'common', flags)
         }
 
         // remove cluster flags
@@ -248,7 +263,7 @@
 
         // wait for healthy cluster
         if (WAIT_FOR_HEALTHY.toBoolean() == true) {
-            waitForHealthy(pepperEnv)
+            waitForHealthy(pepperEnv, flags)
         }
     }
 }
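The extra flags argument lets waitForHealthy() treat a cluster that is merely flagged for maintenance as healthy, as long as nothing is reported down, so the new per-OSD restart loop cannot dead-lock on the very flags the upgrade sets. A small sketch of the acceptance rule (flag and health strings assumed for illustration):

    def flags = ['noout', 'norebalance']                      // example values
    def health = 'HEALTH_WARN noout,norebalance flag(s) set'  // sample `ceph health` output
    // accepted: one of our flags explains the warning and nothing is reported down
    assert flags.any { health.contains(it + ' flag(s) set') && !health.contains('down') }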
diff --git a/cloud-deploy-pipeline.groovy b/cloud-deploy-pipeline.groovy
index 1ab46de..0713192 100644
--- a/cloud-deploy-pipeline.groovy
+++ b/cloud-deploy-pipeline.groovy
@@ -46,6 +46,10 @@
  *   SALT_VERSION               Version of Salt  which is going to be installed i.e. 'stable 2016.3' or 'stable 2017.7' etc.
  *
  *   EXTRA_TARGET               The value will be added to target nodes
+ *   BATCH_SIZE                 Use batching for states that may target a huge number of nodes. Format:
+ *                                - 10  - a fixed number of nodes
+ *                                - 10% - a percentage of all targeted nodes
+ *
  *
  * Test settings:
  *   TEST_K8S_API_SERVER     Kubernetes API address
@@ -105,6 +109,10 @@
 if (common.validInputParam('EXTRA_TARGET')) {
     extra_tgt = "${EXTRA_TARGET}"
 }
+def batch_size = ''
+if (common.validInputParam('BATCH_SIZE')) {
+    batch_size = "${BATCH_SIZE}"
+}
 
 timeout(time: 12, unit: 'HOURS') {
     node(slave_node) {
@@ -347,9 +355,15 @@
             //
             // Install
             //
+            if (!batch_size) {
+                def workerThreads = salt.getReturnValues(salt.getPillar(venvPepper, "I@salt:master", "salt:master:worker_threads", null)).toString()
+                if (workerThreads.isInteger() && workerThreads.toInteger() > 0) {
+                    batch_size = workerThreads
+                }
+            }
 
             // Check if all minions are reachable and ready
-            salt.checkTargetMinionsReady(['saltId': venvPepper, 'target': '*'])
+            salt.checkTargetMinionsReady(['saltId': venvPepper, 'target': '*', 'batch': batch_size])
 
             if (common.checkContains('STACK_INSTALL', 'core')) {
                 stage('Install core infrastructure') {
@@ -357,7 +371,7 @@
                     if (common.validInputParam('STATIC_MGMT_NETWORK')) {
                         staticMgmtNetwork = STATIC_MGMT_NETWORK.toBoolean()
                     }
-                    orchestrate.installFoundationInfra(venvPepper, staticMgmtNetwork, extra_tgt)
+                    orchestrate.installFoundationInfra(venvPepper, staticMgmtNetwork, extra_tgt, batch_size)
 
                     if (common.checkContains('STACK_INSTALL', 'kvm')) {
                         orchestrate.installInfraKvm(venvPepper, extra_tgt)
@@ -384,7 +398,7 @@
             // install k8s
             if (common.checkContains('STACK_INSTALL', 'k8s')) {
                 extra_tgt_bckp = extra_tgt
-                extra_tgt = 'and not kdt* and not cfg* ' + extra_tgt_bckp
+                extra_tgt = 'and not kdt* and not I@salt:master ' + extra_tgt_bckp
                 stage('Install Kubernetes infra') {
                     if (STACK_TYPE == 'aws') {
                         // configure kubernetes_control_address - save loadbalancer
@@ -509,7 +523,8 @@
                 // Workaround for PROD-17765 issue to prevent crashes of keystone.role_present state.
                 // More details: https://mirantis.jira.com/browse/PROD-17765
                 salt.restartSaltMinion(venvPepper, "I@keystone:client ${extra_tgt}")
-                salt.minionsReachable(venvPepper, "I@salt:master and *01* ${extra_tgt}", 'I@keystone:client', null, 10, 6)
+                // check that keystone:client minions are reachable via the Salt master
+                salt.minionsReachable(venvPepper, 'I@salt:master', "I@keystone:client ${extra_tgt}", null, 10, 6)
 
                 stage('Install OpenStack network') {
 
@@ -539,7 +554,7 @@
                 }
 
                 stage('Install OpenStack compute') {
-                    orchestrate.installOpenstackCompute(venvPepper, extra_tgt)
+                    orchestrate.installOpenstackCompute(venvPepper, extra_tgt, batch_size)
 
                     if (common.checkContains('STACK_INSTALL', 'contrail')) {
                         orchestrate.installContrailCompute(venvPepper, extra_tgt)
@@ -560,7 +575,7 @@
             if (common.checkContains('STACK_INSTALL', 'cicd')) {
                 stage('Install Cicd') {
                     extra_tgt_bckp = extra_tgt
-                    extra_tgt = 'and cid* ' + extra_tgt_bckp
+                    extra_tgt = 'and I@_param:drivetrain_role:cicd ' + extra_tgt_bckp
                     orchestrate.installInfra(venvPepper, extra_tgt)
                     orchestrate.installCicd(venvPepper, extra_tgt)
                     extra_tgt = extra_tgt_bckp
@@ -612,7 +627,7 @@
                         test.executeConformance(config)
                     } else {
                         def output_file = image.replaceAll('/', '-') + '.output'
-                        def target = "ctl01* ${extra_tgt}"
+                        def target = "I@keystone:server:role:primary ${extra_tgt}"
                         def conformance_output_file = 'conformance_test.tar'
 
                         // run image
@@ -642,7 +657,7 @@
                               "py.test --junit-xml=${report_dir}report.xml" +
                               " --html=${report_dir}report.html -v vapor/tests/ -k 'not destructive' "
 
-                    salt.runSaltProcessStep(venvPepper, 'cfg*', 'saltutil.refresh_pillar', [], null, true)
+                    salt.runSaltProcessStep(venvPepper, 'I@salt:master', 'saltutil.refresh_pillar', [], null, true)
                     salt.enforceState(venvPepper, 'I@opencontrail:test' , 'opencontrail.test' , true)
 
                     salt.cmdRun(venvPepper, 'I@opencontrail:test', cmd, false)
@@ -659,7 +674,7 @@
                     def gluster_compound = "I@glusterfs:server ${extra_tgt}"
                     def salt_ca_compound = "I@salt:minion:ca:salt_master_ca ${extra_tgt}"
                     // Enforce highstate asynchronous only on the nodes which are not glusterfs servers
-                    salt.enforceHighstate(venvPepper, '* and not ' + gluster_compound + ' and not ' + salt_ca_compound)
+                    salt.enforceHighstate(venvPepper, '* and not ' + gluster_compound + ' and not ' + salt_ca_compound, batch_size)
                     // Iterate over nonempty set of gluster servers and apply highstates one by one
                     // TODO: switch to batch once salt 2017.7+ would be used
                     def saltcaMinions = salt.getMinionsSorted(venvPepper, salt_ca_compound)
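BATCH_SIZE maps onto Salt's batched execution (the -b/batch option), which runs a state on at most N minions, or N% of the target set, at a time so a large environment cannot exhaust the master's worker threads. When the parameter is empty, the pipeline falls back to the master's own salt:master:worker_threads pillar, as sketched below (names mirror the code above):

    def batch_size = ''
    if (common.validInputParam('BATCH_SIZE')) {
        batch_size = "${BATCH_SIZE}"    // '10' = 10 nodes at a time, '10%' = 10% of the target
    }
    if (!batch_size) {
        // fall back: assume the master copes with one minion per worker thread
        def workerThreads = salt.getReturnValues(
            salt.getPillar(venvPepper, 'I@salt:master', 'salt:master:worker_threads', null)).toString()
        if (workerThreads.isInteger() && workerThreads.toInteger() > 0) {
            batch_size = workerThreads
        }
    }
    // batch_size is then threaded through enforceState()/runSaltProcessStep() as their batch argument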
diff --git a/deploy-virtual-edge-mom.groovy b/deploy-virtual-edge-mom.groovy
index 875195b..8d35d37 100644
--- a/deploy-virtual-edge-mom.groovy
+++ b/deploy-virtual-edge-mom.groovy
@@ -171,7 +171,7 @@
                     saltMasterURL = "http://${edgeBuildsInfra[ed_].description.tokenize(' ')[1]}:6969"
 
 
-                    enableSyndic(saltMasterURL, 'cfg01*', SALT_MASTER_CREDENTIALS, salt_mom_ip)
+                    enableSyndic(saltMasterURL, 'I@salt:master', SALT_MASTER_CREDENTIALS, salt_mom_ip)
 
                     props_ = edge_deploy_schemas[ed_]['properties']
                     deploy_job = edge_deploy_schemas[ed_]['deploy_job_name']
diff --git a/galera-cluster-verify-restore.groovy b/galera-cluster-verify-restore.groovy
index afec990..0962f88 100644
--- a/galera-cluster-verify-restore.groovy
+++ b/galera-cluster-verify-restore.groovy
@@ -20,6 +20,7 @@
 def restoreType = env.RESTORE_TYPE
 def runRestoreDb = false
 def runBackupDb = false
+def restartCluster = false
 
 askConfirmation = (env.getProperty('ASK_CONFIRMATION') ?: true).toBoolean()
 checkTimeSync = (env.getProperty('CHECK_TIME_SYNC') ?: true).toBoolean()
@@ -34,12 +35,17 @@
 if (restoreType.equals("BACKUP_AND_RESTORE")) {
     runBackupDb = true
 }
+if (restoreType.equals("RESTART_CLUSTER")) {
+    restartCluster = true
+}
 
 timeout(time: 12, unit: 'HOURS') {
     node() {
         stage('Setup virtualenv for Pepper') {
             python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
         }
+
+        def galeraStatus = [:]
         stage('Verify status') {
             def sysstatTargets = 'I@xtrabackup:client or I@xtrabackup:server'
             def sysstatTargetsNodes = salt.getMinions(pepperEnv, sysstatTargets)
@@ -57,50 +63,48 @@
                     input message: "Do you want to continue? Click to confirm"
                 }
             }
-            resultCode = galera.verifyGaleraStatus(pepperEnv, false, checkTimeSync)
-            if (resultCode == 128) {
-                common.errorMsg("Unable to connect to Galera Master. Trying slaves...")
-                resultCode = galera.verifyGaleraStatus(pepperEnv, true, checkTimeSync)
-                if (resultCode == 129) {
-                    common.errorMsg("Unable to obtain Galera slave minions list. Without fixing this issue, pipeline cannot continue in verification, backup and restoration. This may be caused by wrong Galera configuration or corrupted pillar data.")
+            galeraStatus = galera.verifyGaleraStatus(pepperEnv, checkTimeSync)
+
+            switch (galeraStatus.error) {
+                case 128:
+                    common.errorMsg("Unable to obtain Galera members minions list. Without fixing this issue, pipeline cannot continue in verification, backup and restoration. This may be caused by wrong Galera configuration or corrupted pillar data.")
                     currentBuild.result = "FAILURE"
                     return
-                } else if (resultCode == 130) {
+                case 130:
                     common.errorMsg("Neither master or slaves are reachable. Without fixing this issue, pipeline cannot continue in verification, backup and restoration. Is at least one member of the Galera cluster up and running?")
                     currentBuild.result = "FAILURE"
                     return
-                }
-            }
-            if (resultCode == 131) {
-                common.errorMsg("Time desynced - Please fix this issue and rerun the pipeline.")
-                currentBuild.result = "FAILURE"
-                return
-            }
-            if (resultCode == 140 || resultCode == 141) {
-                common.errorMsg("Disk utilization check failed - Please fix this issue and rerun the pipeline.")
-                currentBuild.result = "FAILURE"
-                return
-            }
-            if (resultCode == 1) {
-                if (askConfirmation) {
-                    input message: "There was a problem with parsing the status output or with determining it. Do you want to run a restore?"
-                } else {
-                    common.warningMsg("There was a problem with parsing the status output or with determining it. Try to restore.")
-                }
-            } else if (resultCode > 1) {
-                if (askConfirmation) {
-                    input message: "There's something wrong with the cluster, do you want to continue with backup and/or restore?"
-                } else {
-                    common.warningMsg("There's something wrong with the cluster, try to backup and/or restore.")
-                }
-            } else {
-                if (askConfirmation) {
-                    input message: "There seems to be everything alright with the cluster, do you still want to continue with backup and/or restore?"
-                } else {
-                    common.warningMsg("There seems to be everything alright with the cluster, no backup and no restoration will be done.")
-                    currentBuild.result = "SUCCESS"
+                case 131:
+                    common.errorMsg("Time desynced - Please fix this issue and rerun the pipeline.")
+                    currentBuild.result = "FAILURE"
                     return
-                }
+                case 140..141:
+                    common.errorMsg("Disk utilization check failed - Please fix this issue and rerun the pipeline.")
+                    currentBuild.result = "FAILURE"
+                    return
+                case 1:
+                    if (askConfirmation) {
+                        input message: "There was a problem with parsing the status output or with determining it. Do you want to run a next action: ${restoreType}?"
+                    } else {
+                        common.warningMsg("There was a problem with parsing the status output or with determining it. Trying to perform action: ${restoreType}.")
+                    }
+                    break
+                case 0:
+                    if (askConfirmation) {
+                        input message: "There seems to be everything alright with the cluster, do you still want to continue with next action: ${restoreType}?"
+                        break
+                    } else {
+                        common.warningMsg("There seems to be everything alright with the cluster, no backup and no restoration will be done.")
+                        currentBuild.result = "SUCCESS"
+                        return
+                    }
+                default:
+                    if (askConfirmation) {
+                        input message: "There's something wrong with the cluster, do you want to continue with action: ${restoreType}?"
+                    } else {
+                        common.warningMsg("There's something wrong with the cluster, trying to perform action: ${restoreType}")
+                    }
+                    break
             }
         }
         if (runBackupDb) {
@@ -116,24 +120,41 @@
                 )
             }
         }
-        if (runRestoreDb) {
-            stage('Restore') {
-                if (askConfirmation) {
-                    input message: "Are you sure you want to run a restore? Click to confirm"
-                }
-                try {
-                    if ((!askConfirmation && resultCode > 0) || askConfirmation) {
-                        galera.restoreGaleraCluster(pepperEnv, runRestoreDb)
+        if (runRestoreDb || restartCluster) {
+            if (runRestoreDb) {
+                stage('Restore') {
+                    if (askConfirmation) {
+                        input message: "Are you sure you want to run a restore? Click to confirm"
                     }
-                } catch (Exception e) {
-                    common.errorMsg("Restoration process has failed.")
-                    common.errorMsg(e.getMessage())
+                    try {
+                        if ((!askConfirmation && resultCode > 0) || askConfirmation) {
+                            galera.restoreGaleraCluster(pepperEnv, galeraStatus)
+                        }
+                    } catch (Exception e) {
+                        common.errorMsg("Restoration process has failed.")
+                        common.errorMsg(e.getMessage())
+                    }
+                }
+            }
+            if (restartCluster) {
+                stage('Restart cluster') {
+                    if (askConfirmation) {
+                        input message: "Are you sure you want to run a restart? Click to confirm"
+                    }
+                    try {
+                        if ((!askConfirmation && resultCode > 0) || askConfirmation) {
+                            galera.restoreGaleraCluster(pepperEnv, galeraStatus, false)
+                        }
+                    } catch (Exception e) {
+                        common.errorMsg("Restart process has failed.")
+                        common.errorMsg(e.getMessage())
+                    }
                 }
             }
             stage('Verify restoration result') {
                 common.retry(verificationRetries, 15) {
-                    exitCode = galera.verifyGaleraStatus(pepperEnv, false, false)
-                    if (exitCode >= 1) {
+                    def status = galera.verifyGaleraStatus(pepperEnv, false)
+                    if (status.error >= 1) {
                         error("Verification attempt finished with an error. This may be caused by cluster not having enough time to come up or to sync. Next verification attempt in 5 seconds.")
                     } else {
                         common.infoMsg("Restoration procedure seems to be successful. See verification report to be sure.")
diff --git a/generate-cookiecutter-products.groovy b/generate-cookiecutter-products.groovy
index 29f03fe..d8bfe3a 100644
--- a/generate-cookiecutter-products.groovy
+++ b/generate-cookiecutter-products.groovy
@@ -116,10 +116,13 @@
     }
 
     if (gitGuessedVersion == 'release/proposed/2019.2.0') {
+        def mcpSaltRepoUpdateVar = 'deb [arch=amd64] http://mirror.mirantis.com/update/proposed/salt-formulas/xenial xenial main'
+        if (context.get('offline_deployment', 'False').toBoolean()) {
+            mcpSaltRepoUpdateVar = "deb [arch=amd64] http://${context.get('aptly_server_deploy_address')}/update/proposed/salt-formulas/xenial xenial main".toString()
+        }
         // CFG node in 2019.2.X update has to be bootstrapped with update/proposed repository for salt formulas
         context['cloudinit_master_config'] = context.get('cloudinit_master_config', false) ?: [:]
-        context['cloudinit_master_config']['MCP_SALT_REPO_UPDATES'] = context['cloudinit_master_config'].get('MCP_SALT_REPO_UPDATES', false) ?:
-                'deb [arch=amd64] http://mirror.mirantis.com/update/proposed/salt-formulas/xenial xenial main'
+        context['cloudinit_master_config']['MCP_SALT_REPO_UPDATES'] = context['cloudinit_master_config'].get('MCP_SALT_REPO_UPDATES', false) ?: mcpSaltRepoUpdateVar
     }
 
     common.infoMsg("Using context:\n" + context)
diff --git a/opencontrail-upgrade.groovy b/opencontrail-upgrade.groovy
index a358222..7c761a0 100644
--- a/opencontrail-upgrade.groovy
+++ b/opencontrail-upgrade.groovy
@@ -66,7 +66,7 @@
 
             stage('Opencontrail controllers upgrade') {
 
-                oc_component_repo = salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control and *01*', 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
+                oc_component_repo = salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control:role:primary', 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
 
                 oc_component_repo = oc_component_repo['return'][0].values()[0]
 
@@ -103,22 +103,14 @@
                 args = 'apt install contrail-database -y;'
                 check = 'nodetool status'
 
-                // ntw01
-                runCommonCommands('I@opencontrail:control and *01*', command, args, check, salt, pepperEnv, common)
-                // ntw02
-                runCommonCommands('I@opencontrail:control and *02*', command, args, check, salt, pepperEnv, common)
-                // ntw03
-                runCommonCommands('I@opencontrail:control and *03*', command, args, check, salt, pepperEnv, common)
+                runCommonCommands('I@opencontrail:control:role:primary', command, args, check, salt, pepperEnv, common)
+                runCommonCommands('I@opencontrail:control:role:secondary', command, args, check, salt, pepperEnv, common)
 
                 args = "apt install -o Dpkg::Options::=\"--force-confold\" ${CONTROL_PKGS} -y --force-yes;"
                 check = 'contrail-status'
 
-                // ntw01
-                runCommonCommands('I@opencontrail:control and *01*', command, args, check, salt, pepperEnv, common)
-                // ntw02
-                runCommonCommands('I@opencontrail:control and *02*', command, args, check, salt, pepperEnv, common)
-                // ntw03
-                runCommonCommands('I@opencontrail:control and *03*', command, args, check, salt, pepperEnv, common)
+                runCommonCommands('I@opencontrail:control:role:primary', command, args, check, salt, pepperEnv, common)
+                runCommonCommands('I@opencontrail:control:role:secondary', command, args, check, salt, pepperEnv, common)
 
                 try {
                     salt.enforceState(pepperEnv, 'I@opencontrail:control', 'opencontrail')
@@ -144,7 +136,7 @@
 
             stage('Opencontrail analytics upgrade') {
 
-                oc_component_repo = salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector and *01*', 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
+                oc_component_repo = salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector:role:primary', 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
 
                 oc_component_repo = oc_component_repo['return'][0].values()[0]
 
@@ -161,22 +153,14 @@
                 args = 'apt install contrail-database -y;'
                 check = 'nodetool status'
 
-                // nal01
-                runCommonCommands('I@opencontrail:collector and *01*', command, args, check, salt, pepperEnv, common)
-                // nal02
-                runCommonCommands('I@opencontrail:collector and *02*', command, args, check, salt, pepperEnv, common)
-                // nal03
-                runCommonCommands('I@opencontrail:collector and *03*', command, args, check, salt, pepperEnv, common)
+                runCommonCommands('I@opencontrail:collector:role:primary', command, args, check, salt, pepperEnv, common)
+                runCommonCommands('I@opencontrail:collector:role:secondary', command, args, check, salt, pepperEnv, common)
 
                 args = "apt install -o Dpkg::Options::=\"--force-confold\" ${ANALYTIC_PKGS} -y --force-yes;"
                 check = 'contrail-status'
 
-                // nal01
-                runCommonCommands('I@opencontrail:collector and *01*', command, args, check, salt, pepperEnv, common)
-                // nal02
-                runCommonCommands('I@opencontrail:collector and *02*', command, args, check, salt, pepperEnv, common)
-                // nal03
-                runCommonCommands('I@opencontrail:collector and *03*', command, args, check, salt, pepperEnv, common)
+                runCommonCommands('I@opencontrail:collector:role:primary', command, args, check, salt, pepperEnv, common)
+                runCommonCommands('I@opencontrail:collector:role:secondary', command, args, check, salt, pepperEnv, common)
 
                 try {
                     salt.enforceState(pepperEnv, 'I@opencontrail:collector', 'opencontrail')
@@ -302,7 +286,7 @@
 
            stage('Opencontrail controllers rollback') {
 
-                oc_component_repo = salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control and *01*', 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
+                oc_component_repo = salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control:role:primary', 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
                 oc_component_repo = oc_component_repo['return'][0].values()[0]
 
                 try {
@@ -318,22 +302,14 @@
                 args = 'apt install contrail-database -y --force-yes;'
                 check = 'nodetool status'
 
-                // ntw01
-                runCommonCommands('I@opencontrail:control and *01*', command, args, check, salt, pepperEnv, common)
-                // ntw02
-                runCommonCommands('I@opencontrail:control and *02*', command, args, check, salt, pepperEnv, common)
-                // ntw03
-                runCommonCommands('I@opencontrail:control and *03*', command, args, check, salt, pepperEnv, common)
+                runCommonCommands('I@opencontrail:control:role:primary', command, args, check, salt, pepperEnv, common)
+                runCommonCommands('I@opencontrail:control:role:secondary', command, args, check, salt, pepperEnv, common)
 
                 args = "apt install -o Dpkg::Options::=\"--force-confold\" ${CONTROL_PKGS} -y --force-yes;"
                 check = 'contrail-status'
 
-                // ntw01
-                runCommonCommands('I@opencontrail:control and *01*', command, args, check, salt, pepperEnv, common)
-                // ntw02
-                runCommonCommands('I@opencontrail:control and *02*', command, args, check, salt, pepperEnv, common)
-                // ntw03
-                runCommonCommands('I@opencontrail:control and *03*', command, args, check, salt, pepperEnv, common)
+                runCommonCommands('I@opencontrail:control:role:primary', command, args, check, salt, pepperEnv, common)
+                runCommonCommands('I@opencontrail:control:role:secondary', command, args, check, salt, pepperEnv, common)
 
                 try {
                     salt.enforceState(pepperEnv, 'I@opencontrail:control', 'opencontrail')
@@ -361,7 +337,7 @@
 
             stage('Opencontrail analytics rollback') {
 
-                oc_component_repo = salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector and *01*', 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
+                oc_component_repo = salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector:role:primary', 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
                 oc_component_repo = oc_component_repo['return'][0].values()[0]
 
                 try {
@@ -377,22 +353,14 @@
                 args = 'apt install contrail-database -y --force-yes;'
                 check = 'nodetool status'
 
-                // nal01
-                runCommonCommands('I@opencontrail:collector and *01*', command, args, check, salt, pepperEnv, common)
-                // nal02
-                runCommonCommands('I@opencontrail:collector and *02*', command, args, check, salt, pepperEnv, common)
-                // nal03
-                runCommonCommands('I@opencontrail:collector and *03*', command, args, check, salt, pepperEnv, common)
+                runCommonCommands('I@opencontrail:collector:role:primary', command, args, check, salt, pepperEnv, common)
+                runCommonCommands('I@opencontrail:collector:role:secondary', command, args, check, salt, pepperEnv, common)
 
                 args = "apt install -o Dpkg::Options::=\"--force-confold\" ${ANALYTIC_PKGS} -y --force-yes;"
                 check = 'contrail-status'
 
-                // nal01
-                runCommonCommands('I@opencontrail:collector and *01*', command, args, check, salt, pepperEnv, common)
-                // nal02
-                runCommonCommands('I@opencontrail:collector and *02*', command, args, check, salt, pepperEnv, common)
-                // nal03
-                runCommonCommands('I@opencontrail:collector and *03*', command, args, check, salt, pepperEnv, common)
+                runCommonCommands('I@opencontrail:collector:role:primary', command, args, check, salt, pepperEnv, common)
+                runCommonCommands('I@opencontrail:collector:role:secondary', command, args, check, salt, pepperEnv, common)
 
                 try {
                     salt.enforceState(pepperEnv, 'I@opencontrail:collector', 'opencontrail')
diff --git a/openstack-compute-install.groovy b/openstack-compute-install.groovy
index 581168a..780beac 100644
--- a/openstack-compute-install.groovy
+++ b/openstack-compute-install.groovy
@@ -5,6 +5,7 @@
  *   SALT_MASTER_CREDENTIALS    Credentials to the Salt API.
  *   SALT_MASTER_URL            Full Salt API address [https://10.10.10.1:8000].
  *   TARGET_SERVERS             Salt compound target to match nodes to be updated [*, G@osfamily:debian].
+ *   BATCH_SIZE                 Use batching for a large number of target nodes
  *
 **/
 
@@ -18,6 +19,11 @@
 def command
 def commandKwargs
 
+def batch_size = ''
+if (common.validInputParam('BATCH_SIZE')) {
+    batch_size = "${BATCH_SIZE}"
+}
+
 timeout(time: 12, unit: 'HOURS') {
     node() {
         try {
@@ -43,76 +49,76 @@
                     common.infoMsg("First node %nodename% has trusty")
                     common.infoMsg("Assuming trusty on all cluster, running extra network states...")
                     common.infoMsg("Network iteration #1. Bonding")
-                    salt.enforceState(pepperEnv, targetLiveAll, 'linux.network', true)
+                    salt.enforceState(pepperEnv, targetLiveAll, 'linux.network', true, true, batch_size)
                     common.infoMsg("Network iteration #2. Vlan tagging and bridging")
-                    salt.enforceState(pepperEnv, targetLiveAll, 'linux.network', true)
+                    salt.enforceState(pepperEnv, targetLiveAll, 'linux.network', true, true, batch_size)
                 }
             }
 
             stage("Setup repositories") {
-                salt.enforceState(pepperEnv, targetLiveAll, 'linux.system.repo', true)
+                salt.enforceState(pepperEnv, targetLiveAll, 'linux.system.repo', true, true, batch_size)
             }
 
             stage("Upgrade packages") {
-                salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'pkg.upgrade', [], null, true)
+                salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'pkg.upgrade', [], batch_size, true)
             }
 
             stage("Update Hosts file") {
-                salt.enforceState(pepperEnv, "I@linux:system", 'linux.network.host', true)
+                salt.enforceState(pepperEnv, "I@linux:system", 'linux.network.host', true, true, batch_size)
             }
 
             stage("Setup networking") {
                 // Sync all of the modules from the salt master.
-                salt.syncAll(pepperEnv, targetLiveAll)
+                salt.syncAll(pepperEnv, targetLiveAll, batch_size)
 
                 // Apply state 'salt' to install python-psutil for network configuration without restarting salt-minion to avoid losing connection.
-                salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'state.apply',  ['salt', 'exclude=[{\'id\': \'salt_minion_service\'}, {\'id\': \'salt_minion_service_restart\'}, {\'id\': \'salt_minion_sync_all\'}]'], null, true)
+                salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'state.apply',  ['salt', 'exclude=[{\'id\': \'salt_minion_service\'}, {\'id\': \'salt_minion_service_restart\'}, {\'id\': \'salt_minion_sync_all\'}]'], batch_size, true)
 
                 // Restart salt-minion to take effect.
-                salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'service.restart', ['salt-minion'], null, true, 10)
+                salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'service.restart', ['salt-minion'], batch_size, true, 10)
 
                 // Configure networking excluding vhost0 interface.
-                salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'state.apply',  ['linux.network', 'exclude=[{\'id\': \'linux_interface_vhost0\'}]'], null, true)
+                salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'state.apply',  ['linux.network', 'exclude=[{\'id\': \'linux_interface_vhost0\'}]'], batch_size, true)
 
                 // Kill unnecessary processes ifup/ifdown which is stuck from previous state linux.network.
-                salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'ps.pkill', ['ifup'], null, false)
-                salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'ps.pkill', ['ifdown'], null, false)
+                salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'ps.pkill', ['ifup'], batch_size, false)
+                salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'ps.pkill', ['ifdown'], batch_size, false)
 
                 // Restart networking to bring UP all interfaces.
-                salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'service.restart', ['networking'], null, true, 300)
+                salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'service.restart', ['networking'], batch_size, true, 300)
             }
 
             stage("Highstate compute") {
                 // Execute highstate without state opencontrail.client.
                 common.retry(2){
-                    salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'state.highstate', ['exclude=opencontrail.client'], null, true)
+                    salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'state.highstate', ['exclude=opencontrail.client'], batch_size, true)
                 }
 
                 // Apply nova state to remove libvirt default bridge virbr0.
-                salt.enforceState(pepperEnv, targetLiveAll, 'nova', true)
+                salt.enforceState(pepperEnv, targetLiveAll, 'nova', true, true, batch_size)
 
                 // Execute highstate.
-                salt.enforceHighstate(pepperEnv, targetLiveAll, true)
+                salt.enforceHighstate(pepperEnv, targetLiveAll, true, true, batch_size)
 
                 // Restart supervisor-vrouter.
-                salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'service.restart', ['supervisor-vrouter'], null, true, 300)
+                salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'service.restart', ['supervisor-vrouter'], batch_size, true, 300)
 
                 // Apply salt and collectd if is present to update information about current network interfaces.
-                salt.enforceState(pepperEnv, targetLiveAll, 'salt', true)
+                salt.enforceState(pepperEnv, targetLiveAll, 'salt', true, true, batch_size)
                 if(!salt.getPillar(pepperEnv, minions[0], "collectd")['return'][0].values()[0].isEmpty()) {
-                    salt.enforceState(pepperEnv, targetLiveAll, 'collectd', true)
+                    salt.enforceState(pepperEnv, targetLiveAll, 'collectd', true, true, batch_size)
                 }
             }
 
         stage("Update/Install monitoring") {
             //Collect Grains
-            salt.enforceState(pepperEnv, targetLiveAll, 'salt.minion.grains')
-            salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'saltutil.refresh_modules')
-            salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'mine.update')
+            salt.enforceState(pepperEnv, targetLiveAll, 'salt.minion.grains', true, true, batch_size)
+            salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'saltutil.refresh_modules', [], batch_size)
+            salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'mine.update', [], batch_size)
             sleep(5)
 
-            salt.enforceState(pepperEnv, targetLiveAll, 'prometheus')
-            salt.enforceState(pepperEnv, 'I@prometheus:server', 'prometheus')
+            salt.enforceState(pepperEnv, targetLiveAll, 'prometheus', true, true, batch_size)
+            salt.enforceState(pepperEnv, 'I@prometheus:server', 'prometheus', true, true, batch_size)
         }
 
         } catch (Throwable e) {
diff --git a/restore-cassandra.groovy b/restore-cassandra.groovy
index 7285c40..f1964ab 100644
--- a/restore-cassandra.groovy
+++ b/restore-cassandra.groovy
@@ -91,8 +91,11 @@
                    common.errorMsg('An error occurred during cassandra db startup on I@opencontrail:control and not I@cassandra:backup:client nodes: ' + err.getMessage())
                     throw err
                 }
-                // another mantra, wait till all services are up
-                sleep(60)
+                // wait until the remaining cassandra dbs are up
+                common.retry(6, 20){
+                    common.infoMsg("Trying to connect to cassandra db on I@opencontrail:control and not I@cassandra:backup:client nodes ...")
+                    salt.cmdRun(pepperEnv, 'I@opencontrail:control and not I@cassandra:backup:client', "nc -v -z -w2 ${configDbIp} ${configDbPort}")
+                }
                 try {
                     common.infoMsg("Start analytics containers node")
                     salt.cmdRun(pepperEnv, 'I@opencontrail:collector', 'cd /etc/docker/compose/opencontrail/; docker-compose up -d')
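Replacing the fixed sleep(60) with a bounded probe makes the wait deterministic: common.retry(6, 20) re-runs the closure up to six times at twenty-second intervals, and nc -z -w2 only succeeds once the Cassandra port actually accepts connections. The same pattern in isolation (host and port values assumed; the pipeline resolves them from its own variables):

    def configDbIp = '10.0.0.10'   // assumed example; resolved from pillar in the pipeline
    def configDbPort = '9042'      // assumed example port
    common.retry(6, 20) {
        common.infoMsg("Probing cassandra db at ${configDbIp}:${configDbPort} ...")
        salt.cmdRun(pepperEnv, 'I@opencontrail:control', "nc -v -z -w2 ${configDbIp} ${configDbPort}")
    }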
diff --git a/update-package.groovy b/update-package.groovy
index df7655b..851c376 100644
--- a/update-package.groovy
+++ b/update-package.groovy
@@ -6,6 +6,7 @@
  *   SALT_MASTER_URL            Full Salt API address [https://10.10.10.1:8000].
  *   TARGET_SERVERS             Salt compound target to match nodes to be updated [*, G@osfamily:debian].
 *   TARGET_PACKAGES            Space-delimited list of packages to be updated [package1=version package2=version]; an empty string updates all packages to the latest version.
+ *   BATCH_SIZE                 Use batching for a large number of target nodes
  *
 **/
 
@@ -13,14 +14,19 @@
 salt = new com.mirantis.mk.Salt()
 common = new com.mirantis.mk.Common()
 
-def installSaltStack(target, pkgs, masterUpdate = false){
+def batch_size = ''
+if (common.validInputParam('BATCH_SIZE')) {
+    batch_size = "${BATCH_SIZE}"
+}
+
+def installSaltStack(target, pkgs, batch, masterUpdate = false){
     salt.cmdRun(pepperEnv, "I@salt:master", "salt -C '${target}' --async pkg.install force_yes=True pkgs='$pkgs'")
     def minions_reachable = target
     if (masterUpdate) {
         // in case of update Salt Master packages - check all minions are good
         minions_reachable = '*'
     }
-    salt.checkTargetMinionsReady(['saltId': pepperEnv, 'target': target, 'target_reachable': minions_reachable])
+    salt.checkTargetMinionsReady(['saltId': pepperEnv, 'target': target, 'target_reachable': minions_reachable, 'batch': batch])
 }
 
 timeout(time: 12, unit: 'HOURS') {
@@ -46,7 +52,7 @@
 
             stage("List package upgrades") {
                 common.infoMsg("Listing all the packages that have a new update available on nodes: ${targetLiveAll}")
-                salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'pkg.list_upgrades', [], null, true)
+                salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'pkg.list_upgrades', [], batch_size, true)
                 if (TARGET_PACKAGES != '' && TARGET_PACKAGES != '*') {
                     common.warningMsg("Note that only the \"${TARGET_PACKAGES}\" would be installed from the above list of available updates on the ${targetLiveAll}")
                     command = "pkg.install"
@@ -68,9 +74,9 @@
                     for (int i = 0; i < saltTargets.size(); i++ ) {
                         common.retry(10, 5) {
                             if (salt.getMinions(pepperEnv, "I@salt:master and ${saltTargets[i]}")) {
-                                installSaltStack("I@salt:master and ${saltTargets[i]}", '["salt-master", "salt-common", "salt-api", "salt-minion"]', true)
+                                installSaltStack("I@salt:master and ${saltTargets[i]}", '["salt-master", "salt-common", "salt-api", "salt-minion"]', null, true)
                             } else if (salt.getMinions(pepperEnv, "I@salt:minion and not I@salt:master and ${saltTargets[i]}")) {
-                                installSaltStack("I@salt:minion and not I@salt:master and ${saltTargets[i]}", '["salt-minion"]')
+                                installSaltStack("I@salt:minion and not I@salt:master and ${saltTargets[i]}", '["salt-minion"]', batch_size)
                             } else {
                                 error("Minion ${saltTargets[i]} is not reachable!")
                             }
@@ -78,7 +84,7 @@
                     }
                 }
                 common.infoMsg('Starting package upgrades...')
-                out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, packages, commandKwargs)
+                out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, batch_size, packages, commandKwargs)
                 salt.printSaltCommandResult(out)
                 for(value in out.get("return")[0].values()){
                     if (value.containsKey('result') && value.result == false) {
diff --git a/update-salt-environment.groovy b/update-salt-environment.groovy
index b91f385..2ae408f 100644
--- a/update-salt-environment.groovy
+++ b/update-salt-environment.groovy
@@ -27,7 +27,7 @@
                         'apt-get update && apt-get install -y salt-formula-*'
                     )
                     common.infoMsg("Running salt sync-all")
-                    salt.runSaltProcessStep(venvPepper, 'jma*', 'saltutil.sync_all', [], null, true)
+                    salt.runSaltProcessStep(venvPepper, '*', 'saltutil.sync_all', [], null, true)
                 }
             }
             stage("Update Reclass") {
diff --git a/upgrade-mcp-release.groovy b/upgrade-mcp-release.groovy
index 5cd74ea..be354cb 100644
--- a/upgrade-mcp-release.groovy
+++ b/upgrade-mcp-release.groovy
@@ -8,6 +8,7 @@
  *   DRIVE_TRAIN_PARAMS         Yaml, DriveTrain releated params:
  *     SALT_MASTER_URL            Salt API server location
  *     SALT_MASTER_CREDENTIALS    Credentials to the Salt API
+ *     BATCH_SIZE                 Use batch sizing during upgrade for large environments
  *     UPGRADE_SALTSTACK          Upgrade SaltStack packages to new version.
  *     UPDATE_CLUSTER_MODEL       Update MCP version parameter in cluster model
  *     UPDATE_PIPELINES           Update pipeline repositories on Gerrit
@@ -128,12 +129,9 @@
         common.infoMsg('Work-around for PROD-29155 already applied, nothing to do')
         return
     }
-    salt.fullRefresh(venvPepper, 'cfg*')
-    salt.fullRefresh(venvPepper, 'cmp*')
+    salt.fullRefresh(venvPepper, 'I@salt:master')
+    salt.fullRefresh(venvPepper, 'I@nova:compute')
     for (String minion in saltMinions) {
-        if (!minion.startsWith('cmp')) {
-            continue
-        }
         // First attempt, second will be performed in next validateReclassModel() stages
         try {
             salt.cmdRun(venvPepper, 'I@salt:master', "reclass -n ${minion}", true, null, false).get('return')[0].values()[0].replaceAll('Salt command execution success', '').trim()
@@ -208,6 +206,30 @@
     }
 }
 
+def wa32182(String cluster_name) {
+    if (salt.testTarget(venvPepper, 'I@opencontrail:control or I@opencontrail:collector')) {
+        def clusterModelPath = "/srv/salt/reclass/classes/cluster/${cluster_name}"
+        def fixFile = "${clusterModelPath}/opencontrail/common_wa32182.yml"
+        def usualFile = "${clusterModelPath}/opencontrail/common.yml"
+        def fixFileContent = "classes:\n- system.opencontrail.common\n"
+        salt.cmdRun(venvPepper, 'I@salt:master', "test -f ${fixFile} -o -f ${usualFile} || echo '${fixFileContent}' > ${fixFile}")
+        def contrailFiles = ['opencontrail/analytics.yml', 'opencontrail/control.yml', 'openstack/compute/init.yml']
+        if (salt.testTarget(venvPepper, "I@kubernetes:master")) {
+            contrailFiles.add('kubernetes/compute.yml')
+        }
+        for(String contrailFile in contrailFiles) {
+            contrailFile = "${clusterModelPath}/${contrailFile}"
+            def containsFix = salt.cmdRun(venvPepper, 'I@salt:master', "grep -E '^- cluster\\.${cluster_name}\\.opencontrail\\.common(_wa32182)?\$' ${contrailFile}", false, null, true).get('return')[0].values()[0].replaceAll('Salt command execution success', '').trim()
+            if (containsFix) {
+                continue
+            } else {
+                salt.cmdRun(venvPepper, 'I@salt:master', "grep -q -E '^parameters:' ${contrailFile} && sed -i '/^parameters:/i - cluster.${cluster_name}.opencontrail.common_wa32182' ${contrailFile} || " +
+                    "echo '- cluster.${cluster_name}.opencontrail.common_wa32182' >> ${contrailFile}")
+            }
+        }
+    }
+}
+
 def archiveReclassInventory(filename) {
     def _tmp_file = '/tmp/' + filename + UUID.randomUUID().toString().take(8)
     // jenkins may fail at overheap. Compress data with gzip like WA
@@ -283,19 +305,21 @@
             def updateLocalRepos = ''
             def reclassSystemBranch = ''
             def reclassSystemBranchDefault = gitTargetMcpVersion
+            def batchSize = ''
             if (gitTargetMcpVersion != 'proposed') {
                 reclassSystemBranchDefault = "origin/${gitTargetMcpVersion}"
             }
-            def driteTrainParamsYaml = env.getProperty('DRIVE_TRAIN_PARAMS')
-            if (driteTrainParamsYaml) {
-                def driteTrainParams = readYaml text: driteTrainParamsYaml
-                saltMastURL = driteTrainParams.get('SALT_MASTER_URL')
-                saltMastCreds = driteTrainParams.get('SALT_MASTER_CREDENTIALS')
-                upgradeSaltStack = driteTrainParams.get('UPGRADE_SALTSTACK', false).toBoolean()
-                updateClusterModel = driteTrainParams.get('UPDATE_CLUSTER_MODEL', false).toBoolean()
-                updatePipelines = driteTrainParams.get('UPDATE_PIPELINES', false).toBoolean()
-                updateLocalRepos = driteTrainParams.get('UPDATE_LOCAL_REPOS', false).toBoolean()
-                reclassSystemBranch = driteTrainParams.get('RECLASS_SYSTEM_BRANCH', reclassSystemBranchDefault)
+            def driveTrainParamsYaml = env.getProperty('DRIVE_TRAIN_PARAMS')
+            if (driveTrainParamsYaml) {
+                def driveTrainParams = readYaml text: driveTrainParamsYaml
+                saltMastURL = driveTrainParams.get('SALT_MASTER_URL')
+                saltMastCreds = driveTrainParams.get('SALT_MASTER_CREDENTIALS')
+                upgradeSaltStack = driveTrainParams.get('UPGRADE_SALTSTACK', false).toBoolean()
+                updateClusterModel = driveTrainParams.get('UPDATE_CLUSTER_MODEL', false).toBoolean()
+                updatePipelines = driveTrainParams.get('UPDATE_PIPELINES', false).toBoolean()
+                updateLocalRepos = driveTrainParams.get('UPDATE_LOCAL_REPOS', false).toBoolean()
+                reclassSystemBranch = driveTrainParams.get('RECLASS_SYSTEM_BRANCH', reclassSystemBranchDefault)
+                batchSize = driveTrainParams.get('BATCH_SIZE', '')
             } else {
                 // backward compatibility for 2018.11.0
                 saltMastURL = env.getProperty('SALT_MASTER_URL')
@@ -312,6 +336,12 @@
             if (cluster_name == '' || cluster_name == 'null' || cluster_name == null) {
                 error('Pillar data is broken for Salt master node! Please check it manually and re-run pipeline.')
             }
+            if (!batchSize) {
+                def workerThreads = salt.getReturnValues(salt.getPillar(venvPepper, "I@salt:master", "salt:master:worker_threads", null)).toString()
+                if (workerThreads.isInteger() && workerThreads.toInteger() > 0) {
+                    batchSize = workerThreads
+                }
+            }
 
             stage('Update Reclass and Salt-Formulas') {
                 common.infoMsg('Perform: Full salt sync')
@@ -401,6 +431,7 @@
                             }
                         }
                     }
+                    wa32182(cluster_name)
                     // Add new defaults
                     common.infoMsg("Add new defaults")
                     salt.cmdRun(venvPepper, 'I@salt:master', "grep '^    mcp_version: ' /srv/salt/reclass/classes/cluster/$cluster_name/infra/init.yml || " +
@@ -431,7 +462,8 @@
                 }
 
                 wa29352(minions, cluster_name)
-                wa29155(minions, cluster_name)
+                def computeMinions = salt.getMinions(venvPepper, 'I@nova:compute')
+                wa29155(computeMinions, cluster_name)
 
                 try {
                     common.infoMsg('Perform: UPDATE Reclass package')
@@ -514,7 +546,7 @@
                 if (upgradeSaltStack) {
                     updateSaltStack("I@salt:master", '["salt-master", "salt-common", "salt-api", "salt-minion"]')
 
-                    salt.enforceState(venvPepper, "I@linux:system", 'linux.system.repo', true)
+                    salt.enforceState(venvPepper, "I@linux:system", 'linux.system.repo', true, true, batchSize)
                     updateSaltStack("I@salt:minion and not I@salt:master", '["salt-minion"]')
                 }
 
@@ -525,13 +557,13 @@
                 }
 
                 // update minions certs
-                salt.enforceState(venvPepper, "I@salt:minion", 'salt.minion.cert', true)
+                salt.enforceState(venvPepper, "I@salt:minion", 'salt.minion.cert', true, true, batchSize)
 
                 // Retry needed only for rare race-condition in user appearance
                 common.infoMsg('Perform: updating users and keys')
-                salt.enforceState(venvPepper, "I@linux:system", 'linux.system.user', true)
+                salt.enforceState(venvPepper, "I@linux:system", 'linux.system.user', true, true, batchSize)
                 common.infoMsg('Perform: updating openssh')
-                salt.enforceState(venvPepper, "I@linux:system", 'openssh', true)
+                salt.enforceState(venvPepper, "I@linux:system", 'openssh', true, true, batchSize)
 
                 // Apply changes for HaProxy on CI/CD nodes
                 salt.enforceState(venvPepper, 'I@keepalived:cluster:instance:cicd_control_vip and I@haproxy:proxy', 'haproxy.proxy', true)
@@ -565,4 +597,4 @@
             throw e
         }
     }
-}
\ No newline at end of file
+}
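For context on the wa32182() helper introduced above: it drops a one-class shim into the cluster model and wires that shim into every OpenContrail-related file that does not already include the common class. The result, sketched for an assumed cluster name "mycluster":

    // created once, unless opencontrail/common.yml already exists:
    //   classes/cluster/mycluster/opencontrail/common_wa32182.yml
    //     classes:
    //     - system.opencontrail.common
    // and each checked contrail file gains, right above its `parameters:` key:
    //     - cluster.mycluster.opencontrail.common_wa32182
    wa32182('mycluster')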