Merge the tip of origin/release/proposed/2019.2.0 into origin/release/2019.2.0

89b2483 Pull docker images before running docker.client state
22390f6 Improve cluster model patching functions
24738da Fix flag set for upgrade to nautilus release
a5fff2c Switch CVP tests to tox
b8575e6 ceph-upgrade: enable msgr2 while upgrading to nautilus
9f6330b Commit common_wa32182 only if required
af258b0 Replace hardcoded "ceph_disk" literal with a variable based on the lvm_enabled pillar
0493d5a Add ceph mgr to update targets
199f4a9 Increase timeout to wait for computes after reboot
59f82a5 Do not disable apache proxy configuration for Octavia if http scheme used
5b62e9c Upgrade Galera slaves to include user definitions
aef13c7 Fix for incorrect cvp tests fail status
215acda Check network hosts classes during upgrade
311751a Run openssh state after compute added to add host fingerprints
cf96067 Ability to upgrade nodes before deployment start
96cc57d Add step to upgrade OS on cicd nodes
cfa1b2f Fetch only reclass-system as the only expected submodule
9c7c1a3 Update the keystone.server for opencontrail checks
5d2054c Update openstack-compute-install pipeline
7b098c3 Set batch size by default to 2/3 of available worker_threads

Change-Id: If656dd1aaaffb06c8a959022ae189323df01adf9
diff --git a/ceph-backend-migration.groovy b/ceph-backend-migration.groovy
index 676c236..a9bf720 100644
--- a/ceph-backend-migration.groovy
+++ b/ceph-backend-migration.groovy
@@ -91,13 +91,13 @@
             }
 
             def target_hosts = salt.getMinions(pepperEnv, TARGET)
-
+            def device_grain_name =  salt.getPillar(pepperEnv,"I@ceph:osd","ceph:osd:lvm_enabled")['return'].first().containsValue(true) ? "ceph_volume" : "ceph_disk"
             for (tgt in target_hosts) {
                 def osd_ids = []
 
                 // get list of osd disks of the tgt
                 salt.runSaltProcessStep(pepperEnv, tgt, 'saltutil.sync_grains', [], null, true, 5)
-                def ceph_disks = salt.getGrain(pepperEnv, tgt, 'ceph')['return'][0].values()[0].values()[0]['ceph_disk']
+                def ceph_disks = salt.getGrain(pepperEnv, tgt, 'ceph')['return'][0].values()[0].values()[0][device_grain_name]
 
                 for (i in ceph_disks) {
                     def osd_id = i.getKey().toString()
diff --git a/ceph-remove-node.groovy b/ceph-remove-node.groovy
index e1d6ce8..39ed07e 100644
--- a/ceph-remove-node.groovy
+++ b/ceph-remove-node.groovy
@@ -75,10 +75,10 @@
             }
         } else if (HOST_TYPE.toLowerCase() == 'osd') {
             def osd_ids = []
-
+            def device_grain_name =  salt.getPillar(pepperEnv,"I@ceph:osd","ceph:osd:lvm_enabled")['return'].first().containsValue(true) ? "ceph_volume" : "ceph_disk"
             // get list of osd disks of the host
             salt.runSaltProcessStep(pepperEnv, HOST, 'saltutil.sync_grains', [], null, true, 5)
-            def ceph_disks = salt.getGrain(pepperEnv, HOST, 'ceph')['return'][0].values()[0].values()[0]['ceph_disk']
+            def ceph_disks = salt.getGrain(pepperEnv, HOST, 'ceph')['return'][0].values()[0].values()[0][device_grain_name]
 
             for (i in ceph_disks) {
                 def osd_id = i.getKey().toString()
diff --git a/ceph-remove-osd.groovy b/ceph-remove-osd.groovy
index e643017..40409cd 100644
--- a/ceph-remove-osd.groovy
+++ b/ceph-remove-osd.groovy
@@ -47,7 +47,8 @@
             throw new Exception("Ceph salt grain cannot be found!")
         }
         common.print(cephGrain)
-        def ceph_disks = cephGrain['return'][0].values()[0].values()[0]['ceph_disk']
+        def device_grain_name =  salt.getPillar(pepperEnv,"I@ceph:osd","ceph:osd:lvm_enabled")['return'].first().containsValue(true) ? "ceph_volume" : "ceph_disk"
+        def ceph_disks = cephGrain['return'][0].values()[0].values()[0][device_grain_name]
         common.prettyPrint(ceph_disks)
 
         for (i in ceph_disks) {
@@ -153,9 +154,20 @@
                 def data_partition_uuid = ""
                 def block_partition_uuid = ""
                 def lockbox_partition_uuid = ""
+                def osd_fsid = ""
+                def lvm = ""
+                def lvm_enabled= salt.getPillar(pepperEnv,"I@ceph:osd","ceph:osd:lvm_enabled")['return'].first().containsValue(true)
                 try {
-                    data_partition_uuid = salt.cmdRun(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/fsid")['return'][0].values()[0].split("\n")[0]
-                    common.print(data_partition_uuid)
+                    osd_fsid = salt.cmdRun(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/fsid")['return'][0].values()[0].split("\n")[0]
+                    if (lvm_enabled) {
+                        lvm = salt.runSaltCommand(pepperEnv, 'local', ['expression': HOST, 'type': 'compound'], 'cmd.run', null, "salt-call lvm.lvdisplay --output json -l quiet")['return'][0].values()[0]
+                        lvm = new groovy.json.JsonSlurperClassic().parseText(lvm)
+                        lvm["local"].each { lv, params ->
+                            if (params["Logical Volume Name"].contains(osd_fsid)) {
+                                data_partition_uuid = params["Logical Volume Name"].minus("/dev/")
+                            }
+                        }
+                    }
                 } catch (Exception e) {
                     common.infoMsg(e)
                 }
@@ -193,4 +205,4 @@
             }
         }
     }
-}
+}
\ No newline at end of file
diff --git a/ceph-upgrade.groovy b/ceph-upgrade.groovy
index 297feaf..1e769ad 100644
--- a/ceph-upgrade.groovy
+++ b/ceph-upgrade.groovy
@@ -112,8 +112,9 @@
             // restart services
             stage("Restart ${target} services on ${minion}") {
                 if (target == 'osd') {
-                    def osds = salt.getGrain(master, "${minion}", 'ceph:ceph_disk').values()[0]
-                    osds[0].values()[0].values()[0].each { osd, param ->
+                    def device_grain_name =  salt.getPillar(master,"I@ceph:osd","ceph:osd:lvm_enabled")['return'].first().containsValue(true) ? "ceph_volume" : "ceph_disk"
+                    def ceph_disks = salt.getGrain(master, minion, 'ceph')['return'][0].values()[0].values()[0][device_grain_name]
+                    ceph_disks.each { osd, param ->
                         salt.cmdRun(master, "${minion}", "systemctl restart ceph-${target}@${osd}")
                         ceph.waitForHealthy(master, ADMIN_HOST, flags)
                     }
@@ -186,11 +187,18 @@
                 for (flag in flags) {
                     salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd set ' + flag)
                 }
+                if (ORIGIN_RELEASE == 'jewel') {
+                    salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd set sortbitwise')
+                }
             }
         }
 
         if (STAGE_UPGRADE_MON.toBoolean() == true) {
             upgrade(pepperEnv, 'mon')
+
+            if (TARGET_RELEASE == 'nautilus' ) {
+                salt.cmdRun(pepperEnv, ADMIN_HOST, "ceph mon enable-msgr2")
+            }
         }
 
         if (STAGE_UPGRADE_MGR.toBoolean() == true) {
@@ -217,7 +225,6 @@
                         common.infoMsg('Removing flag ' + flag)
                         salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd unset ' + flag)
                     }
-
                 }
             }
         }
diff --git a/cloud-deploy-pipeline.groovy b/cloud-deploy-pipeline.groovy
index 1471acf..9143b97 100644
--- a/cloud-deploy-pipeline.groovy
+++ b/cloud-deploy-pipeline.groovy
@@ -49,6 +49,7 @@
  *   BATCH_SIZE                 Use batching for states, which may be targeted for huge amount of nodes. Format:
                                 - 10 - number of nodes
                                 - 10% - percentage of all targeted nodes
+ *   DIST_UPGRADE_NODES         Whether to run "apt-get dist-upgrade" on all nodes in cluster before deployment
 
  *
  * Test settings:
@@ -80,6 +81,7 @@
 python = new com.mirantis.mk.Python()
 salt = new com.mirantis.mk.Salt()
 test = new com.mirantis.mk.Test()
+debian = new com.mirantis.mk.Debian()
 
 _MAX_PERMITTED_STACKS = 2
 overwriteFile = "/srv/salt/reclass/classes/cluster/override.yml"
@@ -112,6 +114,10 @@
 if (common.validInputParam('BATCH_SIZE')) {
     batch_size = "${BATCH_SIZE}"
 }
+def upgrade_nodes = false
+if (common.validInputParam('DIST_UPGRADE_NODES')) {
+    upgrade_nodes = "${DIST_UPGRADE_NODES}".toBoolean()
+}
 
 timeout(time: 12, unit: 'HOURS') {
     node(slave_node) {
@@ -342,7 +348,11 @@
             // Install
             //
             if (!batch_size) {
-                batch_size = salt.getWorkerThreads(venvPepper)
+                // if no batch size is provided, get the current worker threads and set the batch size to 2/3 of it to avoid
+                // the 'SaltReqTimeoutError: Message timed out' issue on Salt targets with a large number of nodes
+                // do not use toDouble/Double as it requires an additional approved method
+                def workerThreads = salt.getWorkerThreads(venvPepper).toInteger()
+                batch_size = (workerThreads * 2 / 3).toString().tokenize('.')[0]
             }
 
             // Check if all minions are reachable and ready
@@ -357,12 +367,20 @@
                     orchestrate.installFoundationInfra(venvPepper, staticMgmtNetwork, extra_tgt, batch_size)
 
                     if (common.checkContains('STACK_INSTALL', 'kvm')) {
+                        if (upgrade_nodes) {
+                            debian.osUpgradeNode(venvPepper, 'I@salt:control', 'dist-upgrade', 30, 20, batch_size)
+                            salt.checkTargetMinionsReady(['saltId': venvPepper, 'target': 'I@salt:control', wait: 60, timeout: 10])
+                        }
                         orchestrate.installInfraKvm(venvPepper, extra_tgt)
-                        orchestrate.installFoundationInfra(venvPepper, staticMgmtNetwork, extra_tgt)
+                        orchestrate.installFoundationInfra(venvPepper, staticMgmtNetwork, extra_tgt, batch_size)
                     }
 
                     orchestrate.validateFoundationInfra(venvPepper, extra_tgt)
                 }
+                if (upgrade_nodes) {
+                    debian.osUpgradeNode(venvPepper, 'not ( I@salt:master or I@salt:control )', 'dist-upgrade', false, 30, 10, batch_size)
+                    salt.checkTargetMinionsReady(['saltId': venvPepper, 'target': 'not ( I@salt:master or I@salt:control )', wait: 60, timeout: 10])
+                }
             }
 
             stage('Install infra') {
diff --git a/cloud-update.groovy b/cloud-update.groovy
index 9945d33..f45e4ec 100644
--- a/cloud-update.groovy
+++ b/cloud-update.groovy
@@ -897,6 +897,7 @@
     def salt = new com.mirantis.mk.Salt()
     def common = new com.mirantis.mk.Common()
     def targetHosts = salt.getMinionsSorted(pepperEnv, target)
+    def device_grain_name =  salt.getPillar(pepperEnv,"I@ceph:osd","ceph:osd:lvm_enabled")['return'].first().containsValue(true) ? "ceph_volume" : "ceph_disk"
     for (t in targetHosts) {
         def osd_ids = []
         // get list of osd disks of the host
@@ -906,7 +907,7 @@
             throw new Exception("Ceph salt grain cannot be found!")
         }
         common.print(cephGrain)
-        def ceph_disks = cephGrain['return'][0].values()[0].values()[0]['ceph_disk']
+        def ceph_disks = cephGrain['return'][0].values()[0].values()[0][device_grain_name]
         for (i in ceph_disks) {
             def osd_id = i.getKey().toString()
             osd_ids.add('osd.' + osd_id)
diff --git a/cvp-runner.groovy b/cvp-runner.groovy
index 13f41ea..f0e17b4 100644
--- a/cvp-runner.groovy
+++ b/cvp-runner.groovy
@@ -47,14 +47,13 @@
         withEnv(env_vars) {
             stage('Initialization') {
                 def container_workdir = '/var/lib'
-                def workdir = "${container_workdir}/${test_suite_name}"
                 def tests_set = (env.getProperty('tests_set')) ?: ''
                 def script = "pytest --junitxml ${container_workdir}/${artifacts_dir}/${xml_file} -vv ${tests_set}"
 
                 sh "mkdir -p ${artifacts_dir}"
 
                 // Enrichment for docker commands
-                def commands = EXTRA_PARAMS.get("commands") ?: ['010_start_tests': "cd ${workdir} && with_venv.sh ${script}"]
+                def commands = EXTRA_PARAMS.get("commands") ?: ['010_start_tests': "tox -e ${test_suite_name} -- ${script}"]
                 def commands_list = commands.collectEntries{ [ (it.key) : { sh("${it.value}") } ] }
 
                 // Enrichment for env variables
@@ -121,6 +120,18 @@
                         url: '',
                         xpath: '/testsuite/testcase[@classname="tests.test_vm2vm"]/properties/property']]
                 }
+                try {
+                    sh """
+                        for i in ${artifacts_dir}/*.xml; do
+                            grep 'failures="0"' \$i
+                            grep 'errors="0"' \$i
+                        done
+                    """
+                } catch(err) {
+                    currentBuild.result = "FAILURE"
+                    common.errorMsg("[ERROR] Failures or errors are not zero in ${artifacts_dir}/*.xml")
+                    throw err
+                }
             }
         }
     }
diff --git a/opencontrail40-upgrade.groovy b/opencontrail40-upgrade.groovy
index 2f89659..c80b53b 100644
--- a/opencontrail40-upgrade.groovy
+++ b/opencontrail40-upgrade.groovy
@@ -128,7 +128,7 @@
                 }
 
                 // Make sure that dedicated opencontrail user is created
-                salt.enforceState(pepperEnv, 'I@keystone:server:role:primary', 'keystone.client.server')
+                salt.enforceState(pepperEnv, 'I@keystone:server:role:primary', 'keystone.client.resources.v3')
 
                 try {
                     controllerImage = getValueForPillarKey(pepperEnv, "I@opencontrail:control:role:primary", "docker:client:compose:opencontrail:service:controller:image")
diff --git a/openstack-compute-install.groovy b/openstack-compute-install.groovy
index 780beac..c4db64b 100644
--- a/openstack-compute-install.groovy
+++ b/openstack-compute-install.groovy
@@ -1,5 +1,5 @@
 /**
- * Update packages on given nodes
+ * Deploy OpenStack compute node
  *
  * Expected parameters:
  *   SALT_MASTER_CREDENTIALS    Credentials to the Salt API.
@@ -44,6 +44,11 @@
                 common.infoMsg("Selected nodes: ${targetLiveAll}")
             }
 
+            stage('Sync modules') {
+                // Sync all of the modules from the salt master.
+                salt.syncAll(pepperEnv, targetLiveAll, batch_size)
+            }
+
             stage("Trusty workaround") {
                 if(salt.getGrain(pepperEnv, minions[0], "oscodename")['return'][0].values()[0]["oscodename"] == "trusty") {
                     common.infoMsg("First node %nodename% has trusty")
@@ -63,14 +68,7 @@
                 salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'pkg.upgrade', [], batch_size, true)
             }
 
-            stage("Update Hosts file") {
-                salt.enforceState(pepperEnv, "I@linux:system", 'linux.network.host', true, true, batch_size)
-            }
-
             stage("Setup networking") {
-                // Sync all of the modules from the salt master.
-                salt.syncAll(pepperEnv, targetLiveAll, batch_size)
-
                 // Apply state 'salt' to install python-psutil for network configuration without restarting salt-minion to avoid losing connection.
                 salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'state.apply',  ['salt', 'exclude=[{\'id\': \'salt_minion_service\'}, {\'id\': \'salt_minion_service_restart\'}, {\'id\': \'salt_minion_sync_all\'}]'], batch_size, true)
 
@@ -84,8 +82,8 @@
                 salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'ps.pkill', ['ifup'], batch_size, false)
                 salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'ps.pkill', ['ifdown'], batch_size, false)
 
-                // Restart networking to bring UP all interfaces.
-                salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'service.restart', ['networking'], batch_size, true, 300)
+                // Restart networking to bring UP all interfaces and restart minion to catch network changes.
+                salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'cmd.shell', ["salt-call service.restart networking; salt-call service.restart salt-minion"], batch_size, true, 300)
             }
 
             stage("Highstate compute") {
@@ -100,9 +98,6 @@
                 // Execute highstate.
                 salt.enforceHighstate(pepperEnv, targetLiveAll, true, true, batch_size)
 
-                // Restart supervisor-vrouter.
-                salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'service.restart', ['supervisor-vrouter'], batch_size, true, 300)
-
                 // Apply salt and collectd if is present to update information about current network interfaces.
                 salt.enforceState(pepperEnv, targetLiveAll, 'salt', true, true, batch_size)
                 if(!salt.getPillar(pepperEnv, minions[0], "collectd")['return'][0].values()[0].isEmpty()) {
@@ -110,16 +105,34 @@
                 }
             }
 
-        stage("Update/Install monitoring") {
-            //Collect Grains
-            salt.enforceState(pepperEnv, targetLiveAll, 'salt.minion.grains', true, true, batch_size)
-            salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'saltutil.refresh_modules', [], batch_size)
-            salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'mine.update', [], batch_size)
-            sleep(5)
+            // host records and fingerprints for compute nodes are generated dynamically - so apply state after node setup
+            stage('Update Hosts file and fingerprints') {
+                salt.enforceState(pepperEnv, "I@linux:network:host", 'linux.network.host', true, true, batch_size)
+                salt.enforceState(pepperEnv, "I@linux:system", 'openssh', true, true, batch_size)
+            }
 
-            salt.enforceState(pepperEnv, targetLiveAll, 'prometheus', true, true, batch_size)
-            salt.enforceState(pepperEnv, 'I@prometheus:server', 'prometheus', true, true, batch_size)
-        }
+            // discover added compute hosts
+            stage('Discover compute hosts') {
+                salt.runSaltProcessStep(pepperEnv, 'I@nova:controller:role:primary', 'state.sls_id', ['nova_controller_discover_hosts', 'nova.controller'], batch_size, true)
+            }
+
+            stage("Update/Install monitoring") {
+                def slaServers = 'I@prometheus:server'
+                def slaMinions = salt.getMinions(pepperEnv, slaServers)
+
+                if (slaMinions.isEmpty()) {
+                    common.infoMsg('Monitoring is not enabled on environment, skipping...')
+                } else {
+                    //Collect Grains
+                    salt.enforceState(pepperEnv, targetLiveAll, 'salt.minion.grains', true, true, batch_size)
+                    salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'saltutil.refresh_modules', [], batch_size)
+                    salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'mine.update', [], batch_size)
+                    sleep(5)
+
+                    salt.enforceState(pepperEnv, targetLiveAll, 'prometheus', true, true, batch_size)
+                    salt.enforceState(pepperEnv, 'I@prometheus:server', 'prometheus', true, true, batch_size)
+                }
+            }
 
         } catch (Throwable e) {
             // If there was an error or exception thrown, the build failed
diff --git a/openstack-data-upgrade.groovy b/openstack-data-upgrade.groovy
index e768564..d2161bc 100644
--- a/openstack-data-upgrade.groovy
+++ b/openstack-data-upgrade.groovy
@@ -160,8 +160,9 @@
           upgrade_mode = 'upgrade'
         }
         if (OS_DIST_UPGRADE.toBoolean() == true || OS_UPGRADE.toBoolean() == true) {
-          debian.osUpgradeNode(env, target, upgrade_mode, false)
+          debian.osUpgradeNode(env, target, upgrade_mode, false, 60, 10)
         }
+        salt.checkTargetMinionsReady(['saltId': env, 'target': target, wait: 60, timeout: 10])
         // Workaround for PROD-31413, install python-tornado from latest release if available and
         // restart minion to apply new code.
         salt.upgradePackageAndRestartSaltMinion(env, target, 'python-tornado')
diff --git a/update-ceph.groovy b/update-ceph.groovy
index 55407f5..00c16b3 100644
--- a/update-ceph.groovy
+++ b/update-ceph.groovy
@@ -46,6 +46,12 @@
                     salt.cmdRun(pepperEnv, tgt, "systemctl restart ceph-mon.target")
                     ceph.waitForHealthy(pepperEnv, tgt, flags)
                 }
+                selMinions = salt.getMinions(pepperEnv, "I@ceph:mgr")
+                for (tgt in selMinions) {
+                    // runSaltProcessStep 'service.restart' does not work for these services
+                    salt.cmdRun(pepperEnv, tgt, "systemctl restart ceph-mgr.target")
+                    ceph.waitForHealthy(pepperEnv, tgt, flags)
+                }
                 selMinions = salt.getMinions(pepperEnv, "I@ceph:radosgw")
                 for (tgt in selMinions) {
                     salt.cmdRun(pepperEnv, tgt, "systemctl restart ceph-radosgw.target")
@@ -54,11 +60,11 @@
             }
 
             stage('Restart OSDs') {
-
+                def device_grain_name =  salt.getPillar(pepperEnv,"I@ceph:osd","ceph:osd:lvm_enabled")['return'].first().containsValue(true) ? "ceph_volume" : "ceph_disk"
                 selMinions = salt.getMinions(pepperEnv, "I@ceph:osd")
                 for (tgt in selMinions) {
                     salt.runSaltProcessStep(pepperEnv, tgt, 'saltutil.sync_grains', [], null, true, 5)
-                    def ceph_disks = salt.getGrain(pepperEnv, tgt, 'ceph')['return'][0].values()[0].values()[0]['ceph_disk']
+                    def ceph_disks = salt.getGrain(pepperEnv, tgt, 'ceph')['return'][0].values()[0].values()[0][device_grain_name]
 
                     def osd_ids = []
                     for (i in ceph_disks) {
diff --git a/upgrade-mcp-release.groovy b/upgrade-mcp-release.groovy
index 9e65fca..9a87c94 100644
--- a/upgrade-mcp-release.groovy
+++ b/upgrade-mcp-release.groovy
@@ -13,6 +13,9 @@
  *     UPDATE_CLUSTER_MODEL       Update MCP version parameter in cluster model
  *     UPDATE_PIPELINES           Update pipeline repositories on Gerrit
  *     UPDATE_LOCAL_REPOS         Update local repositories
+ *     OS_UPGRADE                 Run apt-get upgrade on Drivetrain nodes
+ *     OS_DIST_UPGRADE            Run apt-get dist-upgrade on Drivetrain nodes and reboot to apply changes
+ *     APPLY_MODEL_WORKAROUNDS    Whether to apply cluster model workarounds from the pipeline
  */
 
 salt = new com.mirantis.mk.Salt()
@@ -24,6 +27,8 @@
 workspace = ""
 def saltMastURL = ''
 def saltMastCreds = ''
+def packageUpgradeMode = ''
+batchSize = ''
 
 def triggerMirrorJob(String jobName, String reclassSystemBranch) {
     params = jenkinsUtils.getJobParameters(jobName)
@@ -89,7 +94,7 @@
     def wa29352SecretsFile = "/srv/salt/reclass/classes/cluster/${cname}/infra/secrets.yml"
     def _tempFile = '/tmp/wa29352_' + UUID.randomUUID().toString().take(8)
     try {
-        salt.cmdRun(venvPepper, 'I@salt:master', "grep -qiv root_private_key ${wa29352SecretsFile}", true, null, false)
+        salt.cmdRun(venvPepper, 'I@salt:master', "! grep -qi root_private_key: ${wa29352SecretsFile}", true, null, false)
         salt.cmdRun(venvPepper, 'I@salt:master', "test ! -f ${wa29352File}", true, null, false)
     }
     catch (Exception ex) {
@@ -130,16 +135,25 @@
 def wa29155(ArrayList saltMinions, String cname) {
     // WA for PROD-29155. Issue cause due patch https://gerrit.mcp.mirantis.com/#/c/37932/
     // CHeck for existence cmp nodes, and try to render it. Is failed, apply ssh-key wa
-    def ret = ''
     def patched = false
     def wa29155ClassName = 'cluster.' + cname + '.infra.secrets_nova_wa29155'
     def wa29155File = "/srv/salt/reclass/classes/cluster/${cname}/infra/secrets_nova_wa29155.yml"
 
     try {
         salt.cmdRun(venvPepper, 'I@salt:master', "test ! -f ${wa29155File}", true, null, false)
+        def patch_required = false
+        for (String minion in saltMinions) {
+            def nova_key = salt.getPillar(venvPepper, minion, '_param:nova_compute_ssh_private').get("return")[0].values()[0]
+            if (nova_key == '' || nova_key == 'null' || nova_key == null) {
+                patch_required = true
+                break // no exception, proceeding to apply the patch
+            }
+        }
+        if (!patch_required) {
+            error('No need to apply work-around for PROD-29155')
+        }
     }
     catch (Exception ex) {
-        common.infoMsg('Work-around for PROD-29155 already apply, nothing todo')
         return
     }
     salt.fullRefresh(venvPepper, 'I@salt:master')
@@ -150,12 +164,7 @@
         } catch (Exception e) {
             common.errorMsg(e.toString())
             if (patched) {
-                error("Node: ${minion} failed to render after reclass-system upgrade!WA29155 probably didn't help.")
-            }
-            // check, that failed exactly by our case,  by key-length check.
-            def missed_key = salt.getPillar(venvPepper, minion, '_param:nova_compute_ssh_private').get("return")[0].values()[0]
-            if (missed_key != '') {
-                error("Node: ${minion} failed to render after reclass-system upgrade!")
+                error("Node: ${minion} failed to render after reclass-system upgrade! WA29155 probably didn't help.")
             }
             common.warningMsg('Perform: Attempt to apply WA for PROD-29155\n' +
                 'See https://gerrit.mcp.mirantis.com/#/c/37932/ for more info')
@@ -198,22 +207,23 @@
 
 }
 
-def wa32284(String clusterName) {
+def wa32284(String cluster_name) {
     def clientGluster = salt.getPillar(venvPepper, 'I@salt:master', "glusterfs:client:enabled").get("return")[0].values()[0]
     def pkiGluster = salt.getPillar(venvPepper, 'I@salt:master', "glusterfs:client:volumes:salt_pki").get("return")[0].values()[0]
     def nginxEnabledAtMaster = salt.getPillar(venvPepper, 'I@salt:master', 'nginx:server:enabled').get('return')[0].values()[0]
     if (nginxEnabledAtMaster.toString().toLowerCase() == 'true' && clientGluster.toString().toLowerCase() == 'true' && pkiGluster) {
         def nginxRequires = salt.getPillar(venvPepper, 'I@salt:master', 'nginx:server:wait_for_service').get('return')[0].values()[0]
         if (nginxRequires.isEmpty()) {
-            def nginxRequiresClassName = "cluster.${clusterName}.infra.config.nginx_requires_wa32284"
-            def nginxRequiresClassFile = "/srv/salt/reclass/classes/cluster/${clusterName}/infra/config/nginx_requires_wa32284.yml"
+            def nginxRequiresClassName = "cluster.${cluster_name}.infra.config.nginx_requires_wa32284"
+            def nginxRequiresClassFile = "/srv/salt/reclass/classes/cluster/${cluster_name}/infra/config/nginx_requires_wa32284.yml"
             def nginxRequiresBlock = ['parameters': ['nginx': ['server': ['wait_for_service': ['srv-salt-pki.mount'] ] ] ] ]
             def _tempFile = '/tmp/wa32284_' + UUID.randomUUID().toString().take(8)
             writeYaml file: _tempFile , data: nginxRequiresBlock
             def nginxRequiresBlockString = sh(script: "cat ${_tempFile}", returnStdout: true).trim()
-            salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/${clusterName} && " +
+            salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/${cluster_name} && " +
                 "sed -i '/^parameters:/i - ${nginxRequiresClassName}' infra/config/init.yml")
             salt.cmdRun(venvPepper, 'I@salt:master', "echo '${nginxRequiresBlockString}'  > ${nginxRequiresClassFile}", false, null, false)
+            salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/${cluster_name} && git status && git add ${nginxRequiresClassFile}")
         }
     }
 }
@@ -229,16 +239,17 @@
         if (salt.testTarget(venvPepper, "I@kubernetes:master")) {
             contrailFiles.add('kubernetes/compute.yml')
         }
-        for(String contrailFile in contrailFiles) {
+        for (String contrailFile in contrailFiles) {
             contrailFile = "${clusterModelPath}/${contrailFile}"
             def containsFix = salt.cmdRun(venvPepper, 'I@salt:master', "grep -E '^- cluster\\.${cluster_name}\\.opencontrail\\.common(_wa32182)?\$' ${contrailFile}", false, null, true).get('return')[0].values()[0].replaceAll('Salt command execution success', '').trim()
             if (containsFix) {
                 continue
             } else {
                 salt.cmdRun(venvPepper, 'I@salt:master', "grep -q -E '^parameters:' ${contrailFile} && sed -i '/^parameters:/i - cluster.${cluster_name}.opencontrail.common_wa32182' ${contrailFile} || " +
-                    "echo '- cluster.${cluster_name}.opencontrail.common_wa32182' >> ${contrailFile}")
+                        "echo '- cluster.${cluster_name}.opencontrail.common_wa32182' >> ${contrailFile}")
             }
         }
+        salt.cmdRun(venvPepper, 'I@salt:master', "test -f ${fixFile} && cd ${clusterModelPath} && git status && git add ${fixFile} || true")
     }
 }
 
@@ -254,23 +265,31 @@
 }
 
 def wa33771(String cluster_name) {
-    def octaviaEnabled = salt.getMinions(venvPepper, 'I@octavia:api:enabled')
-    def octaviaWSGI = salt.getMinions(venvPepper, 'I@apache:server:site:octavia_api')
-    if (octaviaEnabled && ! octaviaWSGI) {
-        def openstackControl = "/srv/salt/reclass/classes/cluster/${cluster_name}/openstack/control.yml"
-        def octaviaFile = "/srv/salt/reclass/classes/cluster/${cluster_name}/openstack/octavia_wa33771.yml"
-        def octaviaContext = [
-            'classes': [ 'system.apache.server.site.octavia' ],
-            'parameters': [
-                '_param': [ 'apache_octavia_api_address' : '${_param:cluster_local_address}' ],
-                'apache': [ 'server': [ 'site': [ 'apache_proxy_openstack_api_octavia': [ 'enabled': false ] ] ] ]
+    if (salt.getMinions(venvPepper, 'I@_param:openstack_node_role and I@apache:server')) {
+        def octaviaEnabled = salt.getMinions(venvPepper, 'I@octavia:api:enabled')
+        def octaviaWSGI = salt.getMinions(venvPepper, 'I@apache:server:site:octavia_api')
+        if (octaviaEnabled && !octaviaWSGI) {
+            def openstackControl = "/srv/salt/reclass/classes/cluster/${cluster_name}/openstack/control.yml"
+            def octaviaFile = "/srv/salt/reclass/classes/cluster/${cluster_name}/openstack/octavia_wa33771.yml"
+            def octaviaContext = [
+                    'classes'   : ['system.apache.server.site.octavia'],
+                    'parameters': [
+                            '_param': ['apache_octavia_api_address': '${_param:cluster_local_address}'],
+                    ]
             ]
-        ]
-        def _tempFile = '/tmp/wa33771' + UUID.randomUUID().toString().take(8)
-        writeYaml file: _tempFile , data: octaviaContext
-        def octaviaFileContent = sh(script: "cat ${_tempFile} | base64", returnStdout: true).trim()
-        salt.cmdRun(venvPepper, 'I@salt:master', "sed -i '/^parameters:/i - cluster.${cluster_name}.openstack.octavia_wa33771' ${openstackControl}")
-        salt.cmdRun(venvPepper, 'I@salt:master', "echo '${octaviaFileContent}' | base64 -d > ${octaviaFile}", false, null, false)
+            def openstackHTTPSEnabled = salt.getPillar(venvPepper, 'I@salt:master', "_param:cluster_internal_protocol").get("return")[0].values()[0]
+            if (openstackHTTPSEnabled == 'https') {
+                octaviaContext['parameters'] << ['apache': ['server': ['site': ['apache_proxy_openstack_api_octavia': ['enabled': false]]]]]
+            }
+            def _tempFile = '/tmp/wa33771' + UUID.randomUUID().toString().take(8)
+            writeYaml file: _tempFile, data: octaviaContext
+            def octaviaFileContent = sh(script: "cat ${_tempFile} | base64", returnStdout: true).trim()
+            salt.cmdRun(venvPepper, 'I@salt:master', "sed -i '/^parameters:/i - cluster.${cluster_name}.openstack.octavia_wa33771' ${openstackControl}")
+            salt.cmdRun(venvPepper, 'I@salt:master', "echo '${octaviaFileContent}' | base64 -d > ${octaviaFile}", false, null, false)
+            salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/${cluster_name} && git status && git add ${octaviaFile}")
+        }
+    } else {
+        common.warningMsg("Apache server is not defined on controller nodes. Skipping Octavia WSGI workaround");
     }
 }
 
@@ -280,26 +299,186 @@
     def fixFile = "/srv/salt/reclass/classes/cluster/${cluster_name}/openstack/${fixName}.yml"
     def containsFix = salt.cmdRun(venvPepper, 'I@salt:master', "grep -E '^- cluster\\.${cluster_name}\\.openstack\\.${fixName}\$' ${openstackControlFile}", false, null, true).get('return')[0].values()[0].replaceAll('Salt command execution success', '').trim()
     if (! containsFix) {
-        def fixContext = [
-            'classes': [ 'service.nova.client', 'service.glance.client', 'service.neutron.client' ]
-        ]
+        def fixContext = [ 'classes': [ ] ]
+        def novaControllerNodes = salt.getMinions(venvPepper, 'I@nova:controller')
+        for (novaController in novaControllerNodes) {
+            def novaClientPillar = salt.getPillar(venvPepper, novaController, "nova:client").get("return")[0].values()[0]
+            if (novaClientPillar == '' || novaClientPillar == 'null' || novaClientPillar == null) {
+                fixContext['classes'] << 'service.nova.client'
+                break
+            }
+        }
+        def glanceServerNodes = salt.getMinions(venvPepper, 'I@glance:server')
+        for (glanceServer in glanceServerNodes) {
+            def glanceClientPillar = salt.getPillar(venvPepper, glanceServer, "glance:client").get("return")[0].values()[0]
+            if (glanceClientPillar == '' || glanceClientPillar == 'null' || glanceClientPillar == null) {
+                fixContext['classes'] << 'service.glance.client'
+                break
+            }
+        }
+        def neutronServerNodes = salt.getMinions(venvPepper, 'I@neutron:server')
+        for (neutronServer in neutronServerNodes) {
+            def neutronServerPillar = salt.getPillar(venvPepper, neutronServer, "neutron:client").get("return")[0].values()[0]
+            if (neutronServerPillar == '' || neutronServerPillar == 'null' || neutronServerPillar == null) {
+                fixContext['classes'] << 'service.neutron.client'
+                break
+            }
+        }
         if (salt.getMinions(venvPepper, 'I@manila:api:enabled')) {
-            fixContext['classes'] << 'service.manila.client'
+            def manilaApiNodes = salt.getMinions(venvPepper, 'I@manila:api')
+            for (manilaNode in manilaApiNodes) {
+                def manilaNodePillar = salt.getPillar(venvPepper, manilaNode, "manila:client").get("return")[0].values()[0]
+                if (manilaNodePillar == '' || manilaNodePillar == 'null' || manilaNodePillar == null) {
+                    fixContext['classes'] << 'service.manila.client'
+                    break
+                }
+            }
         }
         if (salt.getMinions(venvPepper, 'I@ironic:api:enabled')) {
-            fixContext['classes'] << 'service.ironic.client'
+            def ironicApiNodes = salt.getMinions(venvPepper, 'I@ironic:api')
+            for (ironicNode in ironicApiNodes) {
+                def ironicNodePillar = salt.getPillar(venvPepper, ironicNode, "ironic:client").get("return")[0].values()[0]
+                if (ironicNodePillar == '' || ironicNodePillar == 'null' || ironicNodePillar == null) {
+                    fixContext['classes'] << 'service.ironic.client'
+                    break
+                }
+            }
         }
         if (salt.getMinions(venvPepper, 'I@gnocchi:server:enabled')) {
-            fixContext['classes'] << 'service.gnocchi.client'
+            def gnocchiServerNodes = salt.getMinions(venvPepper, 'I@gnocchi:server')
+            for (gnocchiNode in gnocchiServerNodes) {
+                def gnocchiNodePillar = salt.getPillar(venvPepper, gnocchiNode, "gnocchi:client").get("return")[0].values()[0]
+                if (gnocchiNodePillar == '' || gnocchiNodePillar == 'null' || gnocchiNodePillar == null) {
+                    fixContext['classes'] << 'service.gnocchi.client'
+                    break
+                }
+            }
         }
+
         if (salt.getMinions(venvPepper, 'I@barbican:server:enabled')) {
-            fixContext['classes'] << 'service.barbican.client.single'
+            def barbicanServerNodes = salt.getMinions(venvPepper, 'I@barbican:server')
+            for (barbicanNode in barbicanServerNodes) {
+                def barbicanNodePillar = salt.getPillar(venvPepper, barbicanNode, "barbican:client").get("return")[0].values()[0]
+                if (barbicanNodePillar == '' || barbicanNodePillar == 'null' || barbicanNodePillar == null) {
+                    fixContext['classes'] << 'service.barbican.client.single'
+                    break
+                }
+            }
         }
-        def _tempFile = '/tmp/wa33930_33931' + UUID.randomUUID().toString().take(8)
-        writeYaml file: _tempFile , data: fixContext
-        def fixFileContent = sh(script: "cat ${_tempFile} | base64", returnStdout: true).trim()
-        salt.cmdRun(venvPepper, 'I@salt:master', "echo '${fixFileContent}' | base64 -d > ${fixFile}", false, null, false)
-        salt.cmdRun(venvPepper, 'I@salt:master', "sed -i '/^parameters:/i - cluster.${cluster_name}.openstack.${fixName}' ${openstackControlFile}")
+        if (fixContext['classes'] != []) {
+            def _tempFile = '/tmp/wa33930_33931' + UUID.randomUUID().toString().take(8)
+            writeYaml file: _tempFile, data: fixContext
+            def fixFileContent = sh(script: "cat ${_tempFile} | base64", returnStdout: true).trim()
+            salt.cmdRun(venvPepper, 'I@salt:master', "echo '${fixFileContent}' | base64 -d > ${fixFile}", false, null, false)
+            salt.cmdRun(venvPepper, 'I@salt:master', "sed -i '/^parameters:/i - cluster.${cluster_name}.openstack.${fixName}' ${openstackControlFile}")
+            salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/${cluster_name} && git status && git add ${fixFile}")
+        }
+    }
+}
+
+def wa34245(cluster_name) {
+    def infraInitFile = "/srv/salt/reclass/classes/cluster/${cluster_name}/infra/init.yml"
+    def fixName = 'hosts_wa34245'
+    def fixFile = "/srv/salt/reclass/classes/cluster/${cluster_name}/infra/${fixName}.yml"
+    if (salt.testTarget(venvPepper, 'I@keystone:server')) {
+        def fixApplied = salt.cmdRun(venvPepper, 'I@salt:master', "grep -E '^- cluster.${cluster_name}.infra.${fixName}\$' ${infraInitFile}", false, null, true).get('return')[0].values()[0].replaceAll('Salt command execution success', '').trim()
+        if (!fixApplied) {
+            def fixFileContent = []
+            def containsFix = salt.cmdRun(venvPepper, 'I@salt:master', "grep -E '^- system\\.linux\\.network\\.hosts\\.openstack\$' ${infraInitFile}", false, null, true).get('return')[0].values()[0].replaceAll('Salt command execution success', '').trim()
+            if (!containsFix) {
+                fixFileContent << '- system.linux.network.hosts.openstack'
+            }
+            if (salt.testTarget(venvPepper, 'I@gnocchi:server')) {
+                containsFix = salt.cmdRun(venvPepper, 'I@salt:master', "grep -E '^- system\\.linux\\.network\\.hosts\\.openstack\\.telemetry\$' ${infraInitFile}", false, null, true).get('return')[0].values()[0].replaceAll('Salt command execution success', '').trim()
+                if (!containsFix) {
+                    fixFileContent << '- system.linux.network.hosts.openstack.telemetry'
+                }
+            }
+            if (salt.testTarget(venvPepper, 'I@manila:api')) {
+                containsFix = salt.cmdRun(venvPepper, 'I@salt:master', "grep -E '^- system\\.linux\\.network\\.hosts\\.openstack\\.share\$' ${infraInitFile}", false, null, true).get('return')[0].values()[0].replaceAll('Salt command execution success', '').trim()
+                if (!containsFix) {
+                    fixFileContent << '- system.linux.network.hosts.openstack.share'
+                }
+            }
+            if (salt.testTarget(venvPepper, 'I@barbican:server')) {
+                containsFix = salt.cmdRun(venvPepper, 'I@salt:master', "grep -E '^- system\\.linux\\.network\\.hosts\\.openstack\\.kmn\$' ${infraInitFile}", false, null, true).get('return')[0].values()[0].replaceAll('Salt command execution success', '').trim()
+                if (!containsFix) {
+                    fixFileContent << '- system.linux.network.hosts.openstack.kmn'
+                }
+            }
+            if (fixFileContent) {
+                salt.cmdRun(venvPepper, 'I@salt:master', "echo 'classes:\n${fixFileContent.join('\n')}' > ${fixFile}")
+                salt.cmdRun(venvPepper, 'I@salt:master', "sed -i '/^parameters:/i - cluster.${cluster_name}.infra.${fixName}' ${infraInitFile}")
+                salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/${cluster_name} && git status && git add ${fixFile}")
+            }
+        }
+    }
+}
+
+def wa34528(String cluster_name) {
+    // Mysql users have to be defined on each Galera node
+    if(salt.getMinions(venvPepper, 'I@galera:master').isEmpty()) {
+        common.errorMsg('No Galera master found in cluster. Skipping')
+        return
+    }
+    def mysqlUsersMasterPillar = salt.getPillar(venvPepper, 'I@galera:master', 'mysql:server:database').get("return")[0].values()[0]
+    if (mysqlUsersMasterPillar == '' || mysqlUsersMasterPillar == 'null' || mysqlUsersMasterPillar == null) {
+        common.errorMsg('Pillar data is broken for Galera master node!')
+        input message: 'Do you want to ignore and continue without Galera pillar patch?'
+        return
+    }
+    def fileToPatch = salt.cmdRun(venvPepper, 'I@salt:master', "ls /srv/salt/reclass/classes/cluster/${cluster_name}/openstack/database/init.yml || " +
+            "ls /srv/salt/reclass/classes/cluster/${cluster_name}/openstack/database/slave.yml || echo 'File not found'", true, null, false).get('return')[0].values()[0].replaceAll('Salt command execution success', '').trim()
+    if (fileToPatch == 'File not found') {
+        common.errorMsg('Cluster model is old and cannot be patched for PROD-34528. Patching is possible for 2019.2.x cluster models only')
+        return
+    }
+    def patchRequired = false
+    def mysqlUsersSlavePillar = ''
+    def galeraSlaveNodes = salt.getMinions(venvPepper, 'I@galera:slave')
+    if (!galeraSlaveNodes.isEmpty()) {
+        for (galeraSlave in galeraSlaveNodes) {
+            mysqlUsersSlavePillar = salt.getPillar(venvPepper, galeraSlave, 'mysql:server:database').get("return")[0].values()[0]
+            if (mysqlUsersSlavePillar == '' || mysqlUsersSlavePillar == 'null' || mysqlUsersSlavePillar == null) {
+                common.errorMsg('Mysql users data is not defined for Galera slave nodes. Fixing...')
+                patchRequired = true
+                break
+            }
+        }
+        if (patchRequired) {
+            def fixFileContent = []
+            def fixName = 'db_wa34528'
+            def fixFile = "/srv/salt/reclass/classes/cluster/${cluster_name}/openstack/database/${fixName}.yml"
+            for (dbName in mysqlUsersMasterPillar.keySet()) {
+                def classIncluded = salt.cmdRun(venvPepper, 'I@salt:master', "grep -E '^- system\\.galera\\.server\\.database\\.${dbName}\$'" +
+                        " /srv/salt/reclass/classes/cluster/${cluster_name}/openstack/database/master.yml", false, null, true).get('return')[0].values()[0].replaceAll('Salt command execution success', '').trim()
+                if(classIncluded) {
+                    fixFileContent << "- system.galera.server.database.${dbName}"
+                }
+                def sslClassIncluded = salt.cmdRun(venvPepper, 'I@salt:master', "grep -E '^- system\\.galera\\.server\\.database\\.x509\\.${dbName}\$'" +
+                        " /srv/salt/reclass/classes/cluster/${cluster_name}/openstack/database/master.yml", false, null, true).get('return')[0].values()[0].replaceAll('Salt command execution success', '').trim()
+                if(sslClassIncluded) {
+                    fixFileContent << "- system.galera.server.database.x509.${dbName}"
+                }
+            }
+            if (fixFileContent) {
+                salt.cmdRun(venvPepper, 'I@salt:master', "echo 'classes:\n${fixFileContent.join('\n')}' > ${fixFile}")
+                salt.cmdRun(venvPepper, 'I@salt:master', "sed -i '/^parameters:/i - cluster.${cluster_name}.openstack.database.${fixName}' ${fileToPatch}")
+            }
+            salt.fullRefresh(venvPepper, 'I@galera:slave')
+            // Verify
+            for (galeraSlave in galeraSlaveNodes) {
+                mysqlUsersSlavePillar = salt.getPillar(venvPepper, galeraSlave, 'mysql:server:database').get("return")[0].values()[0]
+                if (mysqlUsersSlavePillar == '' || mysqlUsersSlavePillar == 'null' || mysqlUsersSlavePillar == null || mysqlUsersSlavePillar.keySet() != mysqlUsersMasterPillar.keySet()) {
+                    common.errorMsg("Mysql user data is different on master and slave node ${galeraSlave}.")
+                    input message: 'Do you want to ignore and continue?'
+                }
+            }
+            salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/${cluster_name} && git status && git add ${fixFile}")
+            common.infoMsg('Galera slaves patching is done')
+        } else {
+            common.infoMsg('Galera slaves patching is not required')
+        }
     }
 }
 
@@ -353,6 +532,18 @@
     }
 }
 
+def checkCICDDocker() {
+    common.infoMsg('Perform: Checking if Docker containers are up')
+    try {
+        common.retry(10, 30) {
+            salt.cmdRun(venvPepper, 'I@jenkins:client and I@docker:client', "! docker service ls | tail -n +2 | grep -v -E '\\s([0-9])/\\1\\s'")
+        }
+    }
+    catch (Exception ex) {
+        error("Docker containers for CI/CD services are having troubles with starting.")
+    }
+}
+
 if (common.validInputParam('PIPELINE_TIMEOUT')) {
     try {
         pipelineTimeout = env.PIPELINE_TIMEOUT.toInteger()
@@ -391,11 +582,11 @@
             common.warningMsg("gitTargetMcpVersion has been changed to:${gitTargetMcpVersion}")
             def upgradeSaltStack = ''
             def updateClusterModel = ''
+            def applyWorkarounds = true
             def updatePipelines = ''
             def updateLocalRepos = ''
             def reclassSystemBranch = ''
             def reclassSystemBranchDefault = gitTargetMcpVersion
-            def batchSize = ''
             if (gitTargetMcpVersion ==~ /^\d\d\d\d\.\d\d?\.\d+$/) {
                 reclassSystemBranchDefault = "tags/${gitTargetMcpVersion}"
             } else if (gitTargetMcpVersion != 'proposed') {
@@ -412,6 +603,12 @@
                 updateLocalRepos = driveTrainParams.get('UPDATE_LOCAL_REPOS', false).toBoolean()
                 reclassSystemBranch = driveTrainParams.get('RECLASS_SYSTEM_BRANCH', reclassSystemBranchDefault)
                 batchSize = driveTrainParams.get('BATCH_SIZE', '')
+                if (driveTrainParams.get('OS_DIST_UPGRADE', false).toBoolean() == true) {
+                    packageUpgradeMode = 'dist-upgrade'
+                } else if (driveTrainParams.get('OS_UPGRADE', false).toBoolean() == true) {
+                    packageUpgradeMode = 'upgrade'
+                }
+                applyWorkarounds = driveTrainParams.get('APPLY_MODEL_WORKAROUNDS', true).toBoolean()
             } else {
                 // backward compatibility for 2018.11.0
                 saltMastURL = env.getProperty('SALT_MASTER_URL')
@@ -429,7 +626,11 @@
                 error('Pillar data is broken for Salt master node! Please check it manually and re-run pipeline.')
             }
             if (!batchSize) {
-                batchSize = getWorkerThreads(venvPepper)
+                // if no batch size is provided, get the current worker threads and set the batch size to 2/3 of it to avoid
+                // the 'SaltReqTimeoutError: Message timed out' issue on Salt targets with a large number of nodes
+                // do not use toDouble/Double as it requires an additional approved method
+                def workerThreads = getWorkerThreads(venvPepper).toInteger()
+                batchSize = (workerThreads * 2 / 3).toString().tokenize('.')[0]
             }
             def computeMinions = salt.getMinions(venvPepper, 'I@nova:compute')
 
@@ -452,7 +653,7 @@
                 if (updateClusterModel) {
                     common.infoMsg('Perform: UPDATE_CLUSTER_MODEL')
                     def dateTime = common.getDatetime()
-                    salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/ && git submodule foreach git fetch")
+                    salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/system && git fetch")
                     salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && " +
                         "grep -r --exclude-dir=aptly -l 'mcp_version: .*' * | xargs --no-run-if-empty sed -i 's|mcp_version: .*|mcp_version: \"$targetMcpVersion\"|g'")
                     // Do the same, for deprecated variable-duplicate
@@ -495,7 +696,10 @@
                             "grep -r --exclude-dir=aptly -l 'jenkins_security_ldap_server: .*' * | xargs --no-run-if-empty sed -i 's|jenkins_security_ldap_server: .*|jenkins_security_ldap_server: \"ldaps://${jenkinsldapURI}\"|g'")
                     }
 
-                    wa32284(cluster_name)
+                    if (applyWorkarounds) {
+                        wa32284(cluster_name)
+                        wa34245(cluster_name)
+                    }
 
                     salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/system && git checkout ${reclassSystemBranch}")
                     // Add kubernetes-extra repo
@@ -525,10 +729,13 @@
                             }
                         }
                     }
-                    wa32182(cluster_name)
-                    wa33771(cluster_name)
-                    wa33867(cluster_name)
-                    wa33930_33931(cluster_name)
+                    if (applyWorkarounds) {
+                        wa32182(cluster_name)
+                        wa33771(cluster_name)
+                        wa33867(cluster_name)
+                        wa33930_33931(cluster_name)
+                        wa34528(cluster_name)
+                    }
                     // Add new defaults
                     common.infoMsg("Add new defaults")
                     salt.cmdRun(venvPepper, 'I@salt:master', "grep '^    mcp_version: ' /srv/salt/reclass/classes/cluster/$cluster_name/infra/init.yml || " +
@@ -557,9 +764,10 @@
                     common.warningMsg('Failed to update Salt Formulas repos/packages. Check current available documentation on https://docs.mirantis.com/mcp/latest/, how to update packages.')
                     input message: 'Continue anyway?'
                 }
-
-                wa29352(cluster_name)
-                wa29155(computeMinions, cluster_name)
+                if (applyWorkarounds) {
+                    wa29352(cluster_name)
+                    wa29155(computeMinions, cluster_name)
+                }
 
                 try {
                     common.infoMsg('Perform: UPDATE Reclass package')
@@ -662,7 +870,7 @@
                 salt.enforceState(venvPepper, 'I@salt:minion', 'salt.minion.cert', true, true, batchSize, false, 60, 2)
 
                 // run `salt.minion` to refresh all minion configs (for example _keystone.conf)
-                salt.enforceState(venvPepper, 'I@salt:minion', 'salt.minion', true, true, null, false, 60, 2)
+                salt.enforceState(venvPepper, 'I@salt:minion', 'salt.minion', true, true, batchSize, false, 60, 2)
                 // Retry needed only for rare race-condition in user appearance
                 common.infoMsg('Perform: updating users and keys')
                 salt.enforceState(venvPepper, 'I@linux:system', 'linux.system.user', true, true, batchSize, false, 60, 2)
@@ -680,6 +888,7 @@
                 def wrongPluginJarName = "${gerritGlusterPath}/plugins/project-download-commands.jar"
                 salt.cmdRun(venvPepper, 'I@gerrit:client', "test -f ${wrongPluginJarName} && rm ${wrongPluginJarName} || true")
 
+                salt.enforceStateWithTest(venvPepper, 'I@jenkins:client and I@docker:client:images and not I@salt:master', 'docker.client.images', "", true, true, null, true, 60, 2)
                 salt.cmdRun(venvPepper, "I@salt:master", "salt -C 'I@jenkins:client and I@docker:client and not I@salt:master' state.sls docker.client --async")
             }
         }
@@ -692,19 +901,12 @@
     // docker.client state may trigger change of jenkins master or jenkins slave services,
     // so we need wait for slave to reconnect and continue pipeline
     sleep(180)
+    def cidNodes = []
     node('python') {
         try {
             stage('Update Drivetrain: Phase 2') {
                 python.setupPepperVirtualenv(venvPepper, saltMastURL, saltMastCreds)
-                common.infoMsg('Perform: Checking if Docker containers are up')
-                try {
-                    common.retry(20, 30) {
-                        salt.cmdRun(venvPepper, 'I@jenkins:client and I@docker:client', "! docker service ls | tail -n +2 | grep -v -E '\\s([0-9])/\\1\\s'")
-                    }
-                }
-                catch (Exception ex) {
-                    error("Docker containers for CI/CD services are having troubles with starting.")
-                }
+                checkCICDDocker()
 
                 // Apply changes for HaProxy on CI/CD nodes
                 salt.enforceState(venvPepper, 'I@keepalived:cluster:instance:cicd_control_vip and I@haproxy:proxy', 'haproxy.proxy', true)
@@ -716,6 +918,9 @@
                     salt.enforceState(venvPepper, 'I@nginx:server:site:nginx_proxy_jenkins and I@nginx:server:site:nginx_proxy_gerrit', 'nginx.server', true, true, null, false, 60, 2)
                 }
             }
+            if (packageUpgradeMode) {
+                cidNodes = salt.getMinions(venvPepper, 'I@_param:drivetrain_role:cicd')
+            }
         }
         catch (Throwable e) {
             // If there was an error or exception thrown, the build failed
@@ -723,4 +928,31 @@
             throw e
         }
     }
+
+    stage('Upgrade OS') {
+        if (packageUpgradeMode) {
+            def debian = new com.mirantis.mk.Debian()
+            def statusFile = '/tmp/rebooted_during_upgrade'
+            for(cidNode in cidNodes) {
+                node('python') {
+                    python.setupPepperVirtualenv(venvPepper, saltMastURL, saltMastCreds)
+                    // cmd.run async to prevent connection close in case of slave shutdown, give 5 seconds to handle request response
+                    salt.cmdRun(venvPepper, "I@salt:master", "salt -C '${cidNode}' cmd.run 'sleep 5; touch ${statusFile}; salt-call service.stop docker' --async")
+                }
+                sleep(30)
+                node('python') {
+                    python.setupPepperVirtualenv(venvPepper, saltMastURL, saltMastCreds)
+                    debian.osUpgradeNode(venvPepper, cidNode, packageUpgradeMode, false, 60)
+                    salt.checkTargetMinionsReady(['saltId': venvPepper, 'target': cidNode, wait: 60, timeout: 10])
+                    if (salt.runSaltProcessStep(venvPepper, cidNode, 'file.file_exists', [statusFile], null, true, 5)['return'][0].values()[0].toBoolean()) {
+                        salt.cmdRun(venvPepper, "I@salt:master", "salt -C '${cidNode}' cmd.run 'rm ${statusFile} && salt-call service.start docker'") // in case the node was not rebooted
+                        sleep(10)
+                    }
+                    checkCICDDocker()
+                }
+            }
+        } else {
+            common.infoMsg('Upgrade OS skipped...')
+        }
+    }
 }