Support Salt batch sizing for the most important pipelines

Add the BATCH_SIZE parameter to support Salt batch sizing when states
are applied to a large number of nodes.
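
BATCH_SIZE is read from the DRIVE_TRAIN_PARAMS YAML, next to the
existing Salt API settings. A minimal illustration (the values here
are made up):

    DRIVE_TRAIN_PARAMS:
      SALT_MASTER_URL: 'http://cfg01:6969'
      SALT_MASTER_CREDENTIALS: 'salt'
      BATCH_SIZE: '20'

If BATCH_SIZE is empty, the pipeline falls back to the Salt master's
salt:master:worker_threads pillar value, when that is a positive
integer.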

Change-Id: I1547df928990098a7969b5535ca611d7fb6cc581
Related: PROD-27850 (PROD:27850)
Related: PROD-32646 (PROD:32646)
diff --git a/upgrade-mcp-release.groovy b/upgrade-mcp-release.groovy
index 9b1c89d..32de91e 100644
--- a/upgrade-mcp-release.groovy
+++ b/upgrade-mcp-release.groovy
@@ -8,6 +8,7 @@
  *   DRIVE_TRAIN_PARAMS         Yaml, DriveTrain releated params:
  *     SALT_MASTER_URL            Salt API server location
  *     SALT_MASTER_CREDENTIALS    Credentials to the Salt API
+ *     BATCH_SIZE                 Use Salt batch sizing during upgrade of large environments
  *     UPGRADE_SALTSTACK          Upgrade SaltStack packages to new version.
  *     UPDATE_CLUSTER_MODEL       Update MCP version parameter in cluster model
  *     UPDATE_PIPELINES           Update pipeline repositories on Gerrit
@@ -280,6 +281,7 @@
             def updateLocalRepos = ''
             def reclassSystemBranch = ''
             def reclassSystemBranchDefault = gitTargetMcpVersion
+            def batchSize = ''
             if (gitTargetMcpVersion != 'proposed') {
                 reclassSystemBranchDefault = "origin/${gitTargetMcpVersion}"
             }
@@ -293,6 +295,7 @@
                 updatePipelines = driteTrainParams.get('UPDATE_PIPELINES', false).toBoolean()
                 updateLocalRepos = driteTrainParams.get('UPDATE_LOCAL_REPOS', false).toBoolean()
                 reclassSystemBranch = driteTrainParams.get('RECLASS_SYSTEM_BRANCH', reclassSystemBranchDefault)
+                batchSize = driteTrainParams.get('BATCH_SIZE', '')
             } else {
                 // backward compatibility for 2018.11.0
                 saltMastURL = env.getProperty('SALT_MASTER_URL')
@@ -309,6 +312,12 @@
             if (cluster_name == '' || cluster_name == 'null' || cluster_name == null) {
                 error('Pillar data is broken for Salt master node! Please check it manually and re-run pipeline.')
             }
+            if (!batchSize) {  // no explicit BATCH_SIZE given - fall back to salt-master worker_threads
+                def workerThreads = salt.getReturnValues(salt.getPillar(venvPepper, "I@salt:master", "salt:master:worker_threads", null))
+                if (workerThreads.isInteger() && workerThreads.toInteger() > 0) {
+                    batchSize = workerThreads
+                }
+            }
 
             stage('Update Reclass and Salt-Formulas') {
                 common.infoMsg('Perform: Full salt sync')
@@ -512,7 +521,7 @@
                 if (upgradeSaltStack) {
                     updateSaltStack("I@salt:master", '["salt-master", "salt-common", "salt-api", "salt-minion"]')
 
-                    salt.enforceState(venvPepper, "I@linux:system", 'linux.system.repo', true)
+                    salt.enforceState(venvPepper, "I@linux:system", 'linux.system.repo', true, true, batchSize)
                     updateSaltStack("I@salt:minion and not I@salt:master", '["salt-minion"]')
                 }
 
@@ -523,13 +532,13 @@
                 }
 
                 // update minions certs
-                salt.enforceState(venvPepper, "I@salt:minion", 'salt.minion.cert', true)
+                salt.enforceState(venvPepper, "I@salt:minion", 'salt.minion.cert', true, true, batchSize)
 
                 // Retry needed only for rare race-condition in user appearance
                 common.infoMsg('Perform: updating users and keys')
-                salt.enforceState(venvPepper, "I@linux:system", 'linux.system.user', true)
+                salt.enforceState(venvPepper, "I@linux:system", 'linux.system.user', true, true, batchSize)
                 common.infoMsg('Perform: updating openssh')
-                salt.enforceState(venvPepper, "I@linux:system", 'openssh', true)
+                salt.enforceState(venvPepper, "I@linux:system", 'openssh', true, true, batchSize)
 
                 // Apply changes for HaProxy on CI/CD nodes
                 salt.enforceState(venvPepper, 'I@keepalived:cluster:instance:cicd_control_vip and I@haproxy:proxy', 'haproxy.proxy', true)
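
Passing a non-empty batch value as the sixth enforceState argument runs
the state on at most that many minions at a time, analogous to Salt's
CLI batch mode. An illustrative CLI equivalent of the batched
linux.system.repo call above (batch size made up):

    salt --batch-size 20 -C 'I@linux:system' state.apply linux.system.repo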