Merge "replaced hardcoded "ceph_disk" literal with variable based on the lvm_enabled pillar"
diff --git a/cloud-deploy-pipeline.groovy b/cloud-deploy-pipeline.groovy
index bfed52d..144f604 100644
--- a/cloud-deploy-pipeline.groovy
+++ b/cloud-deploy-pipeline.groovy
@@ -49,6 +49,7 @@
  *   BATCH_SIZE                 Use batching for states, which may be targeted for huge amount of nodes. Format:
                                 - 10 - number of nodes
                                 - 10% - percentage of all targeted nodes
+ *   DIST_UPGRADE_NODES         Whether to run "apt-get dist-upgrade" on all nodes in the cluster before deployment
 
  *
  * Test settings:
@@ -80,6 +81,7 @@
 python = new com.mirantis.mk.Python()
 salt = new com.mirantis.mk.Salt()
 test = new com.mirantis.mk.Test()
+debian = new com.mirantis.mk.Debian()
 
 _MAX_PERMITTED_STACKS = 2
 overwriteFile = "/srv/salt/reclass/classes/cluster/override.yml"
@@ -113,6 +115,10 @@
 if (common.validInputParam('BATCH_SIZE')) {
     batch_size = "${BATCH_SIZE}"
 }
+def upgrade_nodes = false
+if (common.validInputParam('DIST_UPGRADE_NODES')) {
+    upgrade_nodes = "${DIST_UPGRADE_NODES}".toBoolean()
+}
 
 timeout(time: 12, unit: 'HOURS') {
     node(slave_node) {
@@ -356,7 +362,11 @@
             // Install
             //
             if (!batch_size) {
-                batch_size = salt.getWorkerThreads(venvPepper)
+                // if no batch size is provided, get the current number of worker threads and set the batch size to 2/3 of it
+                // to avoid the 'SaltReqTimeoutError: Message timed out' issue on Salt targets with a large number of nodes;
+                // do not use toDouble/Double as that requires an additional approved method
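+                // e.g. 24 worker threads -> 24*2/3 == 16 -> '16'; 25 -> 16.666... -> '16' (the decimal part is dropped by splitting the string on '.')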
+                def workerThreads = salt.getWorkerThreads(venvPepper).toInteger()
+                batch_size = (workerThreads * 2 / 3).toString().tokenize('.')[0]
             }
 
             // Check if all minions are reachable and ready
@@ -371,12 +381,20 @@
                     orchestrate.installFoundationInfra(venvPepper, staticMgmtNetwork, extra_tgt, batch_size)
 
                     if (common.checkContains('STACK_INSTALL', 'kvm')) {
+                        if (upgrade_nodes) {
+                            debian.osUpgradeNode(venvPepper, 'I@salt:control', 'dist-upgrade', 30, 20, batch_size)
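+                            // wait for the minions to report back in case salt-minion was restarted or the nodes were rebooted during the dist-upgrade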
+                            salt.checkTargetMinionsReady(['saltId': venvPepper, 'target': 'I@salt:control', wait: 60, timeout: 10])
+                        }
                         orchestrate.installInfraKvm(venvPepper, extra_tgt)
-                        orchestrate.installFoundationInfra(venvPepper, staticMgmtNetwork, extra_tgt)
+                        orchestrate.installFoundationInfra(venvPepper, staticMgmtNetwork, extra_tgt, batch_size)
                     }
 
                     orchestrate.validateFoundationInfra(venvPepper, extra_tgt)
                 }
+                if (upgrade_nodes) {
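+                    // upgrade every remaining node, i.e. everything except the Salt master and the KVM hosts handled above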
+                    debian.osUpgradeNode(venvPepper, 'not ( I@salt:master or I@salt:control )', 'dist-upgrade', false, 30, 10, batch_size)
+                    salt.checkTargetMinionsReady(['saltId': venvPepper, 'target': 'not ( I@salt:master or I@salt:control )', wait: 60, timeout: 10])
+                }
             }
 
             stage('Install infra') {
diff --git a/cvp-runner.groovy b/cvp-runner.groovy
index 13f41ea..f6ec27b 100644
--- a/cvp-runner.groovy
+++ b/cvp-runner.groovy
@@ -121,6 +121,18 @@
                         url: '',
                         xpath: '/testsuite/testcase[@classname="tests.test_vm2vm"]/properties/property']]
                 }
+                try {
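+                    // each JUnit XML report carries failures="N" and errors="N" attributes; grep exits non-zero if any report does not show a zero count, which fails the build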
+                    sh """
+                        for i in ${artifacts_dir}/*.xml; do
+                            grep 'failures="0"' \$i
+                            grep 'errors="0"' \$i
+                        done
+                    """
+                } catch(err) {
+                    currentBuild.result = "FAILURE"
+                    common.errorMsg("[ERROR] Failures or errors count is not zero in ${artifacts_dir}/*.xml")
+                    throw err
+                }
             }
         }
     }
diff --git a/opencontrail40-upgrade.groovy b/opencontrail40-upgrade.groovy
index 2f89659..c80b53b 100644
--- a/opencontrail40-upgrade.groovy
+++ b/opencontrail40-upgrade.groovy
@@ -128,7 +128,7 @@
                 }
 
                 // Make sure that dedicated opencontrail user is created
-                salt.enforceState(pepperEnv, 'I@keystone:server:role:primary', 'keystone.client.server')
+                salt.enforceState(pepperEnv, 'I@keystone:server:role:primary', 'keystone.client.resources.v3')
 
                 try {
                     controllerImage = getValueForPillarKey(pepperEnv, "I@opencontrail:control:role:primary", "docker:client:compose:opencontrail:service:controller:image")
diff --git a/openstack-compute-install.groovy b/openstack-compute-install.groovy
index 780beac..c4db64b 100644
--- a/openstack-compute-install.groovy
+++ b/openstack-compute-install.groovy
@@ -1,5 +1,5 @@
 /**
- * Update packages on given nodes
+ * Deploy OpenStack compute node
  *
  * Expected parameters:
  *   SALT_MASTER_CREDENTIALS    Credentials to the Salt API.
@@ -44,6 +44,11 @@
                 common.infoMsg("Selected nodes: ${targetLiveAll}")
             }
 
+            stage('Sync modules') {
+                // Sync all of the modules from the salt master.
+                salt.syncAll(pepperEnv, targetLiveAll, batch_size)
+            }
+
             stage("Trusty workaround") {
                 if(salt.getGrain(pepperEnv, minions[0], "oscodename")['return'][0].values()[0]["oscodename"] == "trusty") {
                     common.infoMsg("First node %nodename% has trusty")
@@ -63,14 +68,7 @@
                 salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'pkg.upgrade', [], batch_size, true)
             }
 
-            stage("Update Hosts file") {
-                salt.enforceState(pepperEnv, "I@linux:system", 'linux.network.host', true, true, batch_size)
-            }
-
             stage("Setup networking") {
-                // Sync all of the modules from the salt master.
-                salt.syncAll(pepperEnv, targetLiveAll, batch_size)
-
                 // Apply state 'salt' to install python-psutil for network configuration without restarting salt-minion to avoid losing connection.
                 salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'state.apply',  ['salt', 'exclude=[{\'id\': \'salt_minion_service\'}, {\'id\': \'salt_minion_service_restart\'}, {\'id\': \'salt_minion_sync_all\'}]'], batch_size, true)
 
@@ -84,8 +82,8 @@
                 salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'ps.pkill', ['ifup'], batch_size, false)
                 salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'ps.pkill', ['ifdown'], batch_size, false)
 
-                // Restart networking to bring UP all interfaces.
-                salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'service.restart', ['networking'], batch_size, true, 300)
+                // Restart networking to bring UP all interfaces and restart the minion to pick up the network changes.
+                salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'cmd.shell', ["salt-call service.restart networking; salt-call service.restart salt-minion"], batch_size, true, 300)
             }
 
             stage("Highstate compute") {
@@ -100,9 +98,6 @@
                 // Execute highstate.
                 salt.enforceHighstate(pepperEnv, targetLiveAll, true, true, batch_size)
 
-                // Restart supervisor-vrouter.
-                salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'service.restart', ['supervisor-vrouter'], batch_size, true, 300)
-
                 // Apply salt and collectd if is present to update information about current network interfaces.
                 salt.enforceState(pepperEnv, targetLiveAll, 'salt', true, true, batch_size)
                 if(!salt.getPillar(pepperEnv, minions[0], "collectd")['return'][0].values()[0].isEmpty()) {
@@ -110,16 +105,34 @@
                 }
             }
 
-        stage("Update/Install monitoring") {
-            //Collect Grains
-            salt.enforceState(pepperEnv, targetLiveAll, 'salt.minion.grains', true, true, batch_size)
-            salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'saltutil.refresh_modules', [], batch_size)
-            salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'mine.update', [], batch_size)
-            sleep(5)
+            // Host records and SSH fingerprints for compute nodes are generated dynamically, so apply these states after node setup
+            stage('Update Hosts file and fingerprints') {
+                salt.enforceState(pepperEnv, "I@linux:network:host", 'linux.network.host', true, true, batch_size)
+                salt.enforceState(pepperEnv, "I@linux:system", 'openssh', true, true, batch_size)
+            }
 
-            salt.enforceState(pepperEnv, targetLiveAll, 'prometheus', true, true, batch_size)
-            salt.enforceState(pepperEnv, 'I@prometheus:server', 'prometheus', true, true, batch_size)
-        }
+            // discover added compute hosts
+            stage('Discover compute hosts') {
+                salt.runSaltProcessStep(pepperEnv, 'I@nova:controller:role:primary', 'state.sls_id', ['nova_controller_discover_hosts', 'nova.controller'], batch_size, true)
+            }
+
+            stage("Update/Install monitoring") {
+                def slaServers = 'I@prometheus:server'
+                def slaMinions = salt.getMinions(pepperEnv, slaServers)
+
+                if (slaMinions.isEmpty()) {
+                    common.infoMsg('Monitoring is not enabled in the environment, skipping...')
+                } else {
+                    //Collect Grains
+                    salt.enforceState(pepperEnv, targetLiveAll, 'salt.minion.grains', true, true, batch_size)
+                    salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'saltutil.refresh_modules', [], batch_size)
+                    salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'mine.update', [], batch_size)
+                    sleep(5)
+
+                    salt.enforceState(pepperEnv, targetLiveAll, 'prometheus', true, true, batch_size)
+                    salt.enforceState(pepperEnv, 'I@prometheus:server', 'prometheus', true, true, batch_size)
+                }
+            }
 
         } catch (Throwable e) {
             // If there was an error or exception thrown, the build failed
diff --git a/openstack-data-upgrade.groovy b/openstack-data-upgrade.groovy
index e768564..d2161bc 100644
--- a/openstack-data-upgrade.groovy
+++ b/openstack-data-upgrade.groovy
@@ -160,8 +160,9 @@
           upgrade_mode = 'upgrade'
         }
         if (OS_DIST_UPGRADE.toBoolean() == true || OS_UPGRADE.toBoolean() == true) {
-          debian.osUpgradeNode(env, target, upgrade_mode, false)
+          debian.osUpgradeNode(env, target, upgrade_mode, false, 60, 10)
         }
+        salt.checkTargetMinionsReady(['saltId': env, 'target': target, wait: 60, timeout: 10])
         // Workaround for PROD-31413, install python-tornado from latest release if available and
         // restart minion to apply new code.
         salt.upgradePackageAndRestartSaltMinion(env, target, 'python-tornado')
diff --git a/update-ceph.groovy b/update-ceph.groovy
index e5c7c8e..5953ae6 100644
--- a/update-ceph.groovy
+++ b/update-ceph.groovy
@@ -46,6 +46,12 @@
                     salt.cmdRun(pepperEnv, tgt, "systemctl restart ceph-mon.target")
                     ceph.waitForHealthy(pepperEnv, tgt, flags)
                 }
+                selMinions = salt.getMinions(pepperEnv, "I@ceph:mgr")
+                for (tgt in selMinions) {
+                    // runSaltProcessStep 'service.restart' does not work for these services
+                    salt.cmdRun(pepperEnv, tgt, "systemctl restart ceph-mgr.target")
+                    ceph.waitForHealthy(pepperEnv, tgt, flags)
+                }
                 selMinions = salt.getMinions(pepperEnv, "I@ceph:radosgw")
                 for (tgt in selMinions) {
                     salt.cmdRun(pepperEnv, tgt, "systemctl restart ceph-radosgw.target")
diff --git a/upgrade-mcp-release.groovy b/upgrade-mcp-release.groovy
index 7695159..8010850 100644
--- a/upgrade-mcp-release.groovy
+++ b/upgrade-mcp-release.groovy
@@ -24,6 +24,7 @@
 workspace = ""
 def saltMastURL = ''
 def saltMastCreds = ''
+def packageUpgradeMode = ''
 
 def triggerMirrorJob(String jobName, String reclassSystemBranch) {
     params = jenkinsUtils.getJobParameters(jobName)
@@ -198,22 +199,23 @@
 
 }
 
-def wa32284(String clusterName) {
+def wa32284(String cluster_name) {
     def clientGluster = salt.getPillar(venvPepper, 'I@salt:master', "glusterfs:client:enabled").get("return")[0].values()[0]
     def pkiGluster = salt.getPillar(venvPepper, 'I@salt:master', "glusterfs:client:volumes:salt_pki").get("return")[0].values()[0]
     def nginxEnabledAtMaster = salt.getPillar(venvPepper, 'I@salt:master', 'nginx:server:enabled').get('return')[0].values()[0]
     if (nginxEnabledAtMaster.toString().toLowerCase() == 'true' && clientGluster.toString().toLowerCase() == 'true' && pkiGluster) {
         def nginxRequires = salt.getPillar(venvPepper, 'I@salt:master', 'nginx:server:wait_for_service').get('return')[0].values()[0]
         if (nginxRequires.isEmpty()) {
-            def nginxRequiresClassName = "cluster.${clusterName}.infra.config.nginx_requires_wa32284"
-            def nginxRequiresClassFile = "/srv/salt/reclass/classes/cluster/${clusterName}/infra/config/nginx_requires_wa32284.yml"
+            def nginxRequiresClassName = "cluster.${cluster_name}.infra.config.nginx_requires_wa32284"
+            def nginxRequiresClassFile = "/srv/salt/reclass/classes/cluster/${cluster_name}/infra/config/nginx_requires_wa32284.yml"
             def nginxRequiresBlock = ['parameters': ['nginx': ['server': ['wait_for_service': ['srv-salt-pki.mount'] ] ] ] ]
             def _tempFile = '/tmp/wa32284_' + UUID.randomUUID().toString().take(8)
             writeYaml file: _tempFile , data: nginxRequiresBlock
             def nginxRequiresBlockString = sh(script: "cat ${_tempFile}", returnStdout: true).trim()
-            salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/${clusterName} && " +
+            salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/${cluster_name} && " +
                 "sed -i '/^parameters:/i - ${nginxRequiresClassName}' infra/config/init.yml")
             salt.cmdRun(venvPepper, 'I@salt:master', "echo '${nginxRequiresBlockString}' > ${nginxRequiresClassFile}", false, null, false)
+            salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/${cluster_name} && git status && git add ${nginxRequiresClassFile}")
         }
     }
 }
@@ -239,6 +241,7 @@
                     "echo '- cluster.${cluster_name}.opencontrail.common_wa32182' >> ${contrailFile}")
             }
         }
+        salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/${cluster_name} && git status && git add ${fixFile}")
     }
 }
 
@@ -263,14 +266,18 @@
             'classes': [ 'system.apache.server.site.octavia' ],
             'parameters': [
                 '_param': [ 'apache_octavia_api_address' : '${_param:cluster_local_address}' ],
-                'apache': [ 'server': [ 'site': [ 'apache_proxy_openstack_api_octavia': [ 'enabled': false ] ] ] ]
             ]
         ]
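+        // the apache_proxy_openstack_api_octavia site is disabled only when the cluster uses HTTPS internally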
+        def openstackHTTPSEnabled = salt.getPillar(venvPepper, 'I@salt:master', "_param:cluster_internal_protocol").get("return")[0].values()[0]
+        if (openstackHTTPSEnabled == 'https') {
+            octaviaContext['parameters'] << [ 'apache': [ 'server': [ 'site': [ 'apache_proxy_openstack_api_octavia': [ 'enabled': false ] ] ] ] ]
+        }
         def _tempFile = '/tmp/wa33771' + UUID.randomUUID().toString().take(8)
         writeYaml file: _tempFile , data: octaviaContext
         def octaviaFileContent = sh(script: "cat ${_tempFile} | base64", returnStdout: true).trim()
         salt.cmdRun(venvPepper, 'I@salt:master', "sed -i '/^parameters:/i - cluster.${cluster_name}.openstack.octavia_wa33771' ${openstackControl}")
         salt.cmdRun(venvPepper, 'I@salt:master', "echo '${octaviaFileContent}' | base64 -d > ${octaviaFile}", false, null, false)
+        salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/${cluster_name} && git status && git add ${octaviaFile}")
     }
 }
 
@@ -300,6 +307,113 @@
         def fixFileContent = sh(script: "cat ${_tempFile} | base64", returnStdout: true).trim()
         salt.cmdRun(venvPepper, 'I@salt:master', "echo '${fixFileContent}' | base64 -d > ${fixFile}", false, null, false)
         salt.cmdRun(venvPepper, 'I@salt:master', "sed -i '/^parameters:/i - cluster.${cluster_name}.openstack.${fixName}' ${openstackControlFile}")
+        salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/${cluster_name} && git status && git add ${fixFile}")
+    }
+}
+
+def wa34245(cluster_name) {
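+    // ensure the system.linux.network.hosts.openstack* classes are included in infra/init.yml for the deployed services, so the OpenStack host records are present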
+    def infraInitFile = "/srv/salt/reclass/classes/cluster/${cluster_name}/infra/init.yml"
+    def fixName = 'hosts_wa34245'
+    def fixFile = "/srv/salt/reclass/classes/cluster/${cluster_name}/infra/${fixName}.yml"
+    if (salt.testTarget(venvPepper, 'I@keystone:server')) {
+        def fixApplied = salt.cmdRun(venvPepper, 'I@salt:master', "grep -E '^- cluster.${cluster_name}.infra.${fixName}\$' ${infraInitFile}", false, null, true).get('return')[0].values()[0].replaceAll('Salt command execution success', '').trim()
+        if (!fixApplied) {
+            def fixFileContent = []
+            def containsFix = salt.cmdRun(venvPepper, 'I@salt:master', "grep -E '^- system\\.linux\\.network\\.hosts\\.openstack\$' ${infraInitFile}", false, null, true).get('return')[0].values()[0].replaceAll('Salt command execution success', '').trim()
+            if (!containsFix) {
+                fixFileContent << '- system.linux.network.hosts.openstack'
+            }
+            if (salt.testTarget(venvPepper, 'I@gnocchi:server')) {
+                containsFix = salt.cmdRun(venvPepper, 'I@salt:master', "grep -E '^- system\\.linux\\.network\\.hosts\\.openstack\\.telemetry\$' ${infraInitFile}", false, null, true).get('return')[0].values()[0].replaceAll('Salt command execution success', '').trim()
+                if (!containsFix) {
+                    fixFileContent << '- system.linux.network.hosts.openstack.telemetry'
+                }
+            }
+            if (salt.testTarget(venvPepper, 'I@manila:api')) {
+                containsFix = salt.cmdRun(venvPepper, 'I@salt:master', "grep -E '^- system\\.linux\\.network\\.hosts\\.openstack\\.share\$' ${infraInitFile}", false, null, true).get('return')[0].values()[0].replaceAll('Salt command execution success', '').trim()
+                if (!containsFix) {
+                    fixFileContent << '- system.linux.network.hosts.openstack.share'
+                }
+            }
+            if (salt.testTarget(venvPepper, 'I@barbican:server')) {
+                containsFix = salt.cmdRun(venvPepper, 'I@salt:master', "grep -E '^- system\\.linux\\.network\\.hosts\\.openstack\\.kmn\$' ${infraInitFile}", false, null, true).get('return')[0].values()[0].replaceAll('Salt command execution success', '').trim()
+                if (!containsFix) {
+                    fixFileContent << '- system.linux.network.hosts.openstack.kmn'
+                }
+            }
+            if (fixFileContent) {
+                salt.cmdRun(venvPepper, 'I@salt:master', "echo 'classes:\n${fixFileContent.join('\n')}' > ${fixFile}")
+                salt.cmdRun(venvPepper, 'I@salt:master', "sed -i '/^parameters:/i - cluster.${cluster_name}.infra.${fixName}' ${infraInitFile}")
+                salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/${cluster_name} && git status && git add ${fixFile}")
+            }
+        }
+    }
+}
+
+def wa34528(String cluster_name) {
+    // MySQL users have to be defined on each Galera node
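+    // the fix collects the galera database class includes from master.yml into a dedicated class so slave nodes get the same mysql:server:database pillar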
+    if(salt.getMinions(venvPepper, 'I@galera:master').isEmpty()) {
+        common.errorMsg('No Galera master found in cluster. Skipping')
+        return
+    }
+    def mysqlUsersMasterPillar = salt.getPillar(venvPepper, 'I@galera:master', 'mysql:server:database').get("return")[0].values()[0]
+    if (mysqlUsersMasterPillar == '' || mysqlUsersMasterPillar == 'null' || mysqlUsersMasterPillar == null) {
+        common.errorMsg('Pillar data is broken for Galera master node!')
+        input message: 'Do you want to ignore and continue without Galera pillar patch?'
+        return
+    }
+    def fileToPatch = salt.cmdRun(venvPepper, 'I@salt:master', "ls /srv/salt/reclass/classes/cluster/${cluster_name}/openstack/database/init.yml || " +
+            "ls /srv/salt/reclass/classes/cluster/${cluster_name}/openstack/database/slave.yml || echo 'File not found'", true, null, false).get('return')[0].values()[0].replaceAll('Salt command execution success', '').trim()
+    if (fileToPatch == 'File not found') {
+        common.errorMsg('Cluster model is old and cannot be patched for PROD-34528. Patching is possible for 2019.2.x cluster models only')
+        return
+    }
+    def patchRequired = false
+    def mysqlUsersSlavePillar = ''
+    def galeraSlaveNodes = salt.getMinions(venvPepper, 'I@galera:slave')
+    if (!galeraSlaveNodes.isEmpty()) {
+        for (galeraSlave in galeraSlaveNodes) {
+            mysqlUsersSlavePillar = salt.getPillar(venvPepper, galeraSlave, 'mysql:server:database').get("return")[0].values()[0]
+            if (mysqlUsersSlavePillar == '' || mysqlUsersSlavePillar == 'null' || mysqlUsersSlavePillar == null) {
+                common.errorMsg('MySQL user data is not defined for Galera slave nodes. Fixing...')
+                patchRequired = true
+                break
+            }
+        }
+        if (patchRequired) {
+            def fixFileContent = []
+            def fixName = 'db_wa34528'
+            def fixFile = "/srv/salt/reclass/classes/cluster/${cluster_name}/openstack/database/${fixName}.yml"
+            for (dbName in mysqlUsersMasterPillar.keySet()) {
+                def classIncluded = salt.cmdRun(venvPepper, 'I@salt:master', "grep -E '^- system\\.galera\\.server\\.database\\.${dbName}\$'" +
+                        " /srv/salt/reclass/classes/cluster/${cluster_name}/openstack/database/master.yml", false, null, true).get('return')[0].values()[0].replaceAll('Salt command execution success', '').trim()
+                if(classIncluded) {
+                    fixFileContent << "- system.galera.server.database.${dbName}"
+                }
+                def sslClassIncluded = salt.cmdRun(venvPepper, 'I@salt:master', "grep -E '^- system\\.galera\\.server\\.database\\.x509\\.${dbName}\$'" +
+                        " /srv/salt/reclass/classes/cluster/${cluster_name}/openstack/database/master.yml", false, null, true).get('return')[0].values()[0].replaceAll('Salt command execution success', '').trim()
+                if(sslClassIncluded) {
+                    fixFileContent << "- system.galera.server.database.x509.${dbName}"
+                }
+            }
+            if (fixFileContent) {
+                salt.cmdRun(venvPepper, 'I@salt:master', "echo 'classes:\n${fixFileContent.join('\n')}' > ${fixFile}")
+                salt.cmdRun(venvPepper, 'I@salt:master', "sed -i '/^parameters:/i - cluster.${cluster_name}.openstack.database.${fixName}' ${fileToPatch}")
+            }
+            salt.fullRefresh(venvPepper, 'I@galera:slave')
+            // Verify
+            for (galeraSlave in galeraSlaveNodes) {
+                mysqlUsersSlavePillar = salt.getPillar(venvPepper, galeraSlave, 'mysql:server:database').get("return")[0].values()[0]
+                if (mysqlUsersSlavePillar == '' || mysqlUsersSlavePillar == 'null' || mysqlUsersSlavePillar == null || mysqlUsersSlavePillar.keySet() != mysqlUsersMasterPillar.keySet()) {
+                    common.errorMsg("MySQL user data differs between the master and slave node ${galeraSlave}.")
+                    input message: 'Do you want to ignore and continue?'
+                }
+            }
+            salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/${cluster_name} && git status && git add ${fixFile}")
+            common.infoMsg('Galera slaves patching is done')
+        } else {
+            common.infoMsg('Galera slaves patching is not required')
+        }
     }
 }
 
@@ -334,6 +448,18 @@
     }
 }
 
+def checkCICDDocker() {
+    common.infoMsg('Perform: Checking if Docker containers are up')
+    try {
+        common.retry(10, 30) {
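+            // 'docker service ls' shows a REPLICAS column like '3/3'; grep -v drops fully replicated services, so the negated command succeeds only when every service is fully up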
+            salt.cmdRun(venvPepper, 'I@jenkins:client and I@docker:client', "! docker service ls | tail -n +2 | grep -v -E '\\s([0-9])/\\1\\s'")
+        }
+    }
+    catch (Exception ex) {
+        error("Docker containers for CI/CD services are having trouble starting.")
+    }
+}
+
 if (common.validInputParam('PIPELINE_TIMEOUT')) {
     try {
         pipelineTimeout = env.PIPELINE_TIMEOUT.toInteger()
@@ -393,6 +519,11 @@
                 updateLocalRepos = driveTrainParams.get('UPDATE_LOCAL_REPOS', false).toBoolean()
                 reclassSystemBranch = driveTrainParams.get('RECLASS_SYSTEM_BRANCH', reclassSystemBranchDefault)
                 batchSize = driveTrainParams.get('BATCH_SIZE', '')
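+                // OS_DIST_UPGRADE takes precedence over OS_UPGRADE when both are set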
+                if (driveTrainParams.get('OS_DIST_UPGRADE', false).toBoolean() == true) {
+                    packageUpgradeMode = 'dist-upgrade'
+                } else if (driveTrainParams.get('OS_UPGRADE', false).toBoolean() == true) {
+                    packageUpgradeMode = 'upgrade'
+                }
             } else {
                 // backward compatibility for 2018.11.0
                 saltMastURL = env.getProperty('SALT_MASTER_URL')
@@ -410,7 +541,11 @@
                 error('Pillar data is broken for Salt master node! Please check it manually and re-run pipeline.')
             }
             if (!batchSize) {
-                batchSize = getWorkerThreads(venvPepper)
+                // if no batch size is provided, get the current number of worker threads and set the batch size to 2/3 of it
+                // to avoid the 'SaltReqTimeoutError: Message timed out' issue on Salt targets with a large number of nodes;
+                // do not use toDouble/Double as that requires an additional approved method
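+                // e.g. 24 worker threads -> 24*2/3 == 16 -> '16'; 25 -> 16.666... -> '16' (the decimal part is dropped by splitting the string on '.')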
+                def workerThreads = getWorkerThreads(venvPepper).toInteger()
+                batchSize = (workerThreads * 2 / 3).toString().tokenize('.')[0]
             }
             def computeMinions = salt.getMinions(venvPepper, 'I@nova:compute')
 
@@ -432,7 +567,7 @@
                 if (updateClusterModel) {
                     common.infoMsg('Perform: UPDATE_CLUSTER_MODEL')
                     def dateTime = common.getDatetime()
-                    salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/ && git submodule foreach git fetch")
+                    salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/system && git fetch")
                     salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && " +
                         "grep -r --exclude-dir=aptly -l 'mcp_version: .*' * | xargs --no-run-if-empty sed -i 's|mcp_version: .*|mcp_version: \"$targetMcpVersion\"|g'")
                     // Do the same, for deprecated variable-duplicate
@@ -476,6 +611,7 @@
                     }
 
                     wa32284(cluster_name)
+                    wa34245(cluster_name)
 
                     salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/system && git checkout ${reclassSystemBranch}")
                     // Add kubernetes-extra repo
@@ -509,6 +645,7 @@
                     wa33771(cluster_name)
                     wa33867(cluster_name)
                     wa33930_33931(cluster_name)
+                    wa34528(cluster_name)
                     // Add new defaults
                     common.infoMsg("Add new defaults")
                     salt.cmdRun(venvPepper, 'I@salt:master', "grep '^    mcp_version: ' /srv/salt/reclass/classes/cluster/$cluster_name/infra/init.yml || " +
@@ -660,19 +797,12 @@
     // docker.client state may trigger change of jenkins master or jenkins slave services,
     // so we need wait for slave to reconnect and continue pipeline
     sleep(180)
+    def cidNodes = []
     node('python') {
         try {
             stage('Update Drivetrain: Phase 2') {
                 python.setupPepperVirtualenv(venvPepper, saltMastURL, saltMastCreds)
-                common.infoMsg('Perform: Checking if Docker containers are up')
-                try {
-                    common.retry(10, 30) {
-                        salt.cmdRun(venvPepper, 'I@jenkins:client and I@docker:client', "! docker service ls | tail -n +2 | grep -v -E '\\s([0-9])/\\1\\s'")
-                    }
-                }
-                catch (Exception ex) {
-                    error("Docker containers for CI/CD services are having troubles with starting.")
-                }
+                checkCICDDocker()
 
                 // Apply changes for HaProxy on CI/CD nodes
                 salt.enforceState(venvPepper, 'I@keepalived:cluster:instance:cicd_control_vip and I@haproxy:proxy', 'haproxy.proxy', true)
@@ -684,6 +814,9 @@
                     salt.enforceState(venvPepper, 'I@nginx:server:site:nginx_proxy_jenkins and I@nginx:server:site:nginx_proxy_gerrit', 'nginx.server', true, true, null, false, 60, 2)
                 }
             }
+            if (packageUpgradeMode) {
+                cidNodes = salt.getMinions(venvPepper, 'I@_param:drivetrain_role:cicd')
+            }
         }
         catch (Throwable e) {
             // If there was an error or exception thrown, the build failed
@@ -691,4 +824,31 @@
             throw e
         }
     }
+
+    stage('Upgrade OS') {
+        if (packageUpgradeMode) {
+            def debian = new com.mirantis.mk.Debian()
+            def statusFile = '/tmp/rebooted_during_upgrade'
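+            // the status file is created right before docker is stopped; if it still exists after the upgrade, the node was not rebooted and docker has to be started again manually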
+            for(cidNode in cidNodes) {
+                node('python') {
+                    python.setupPepperVirtualenv(venvPepper, saltMastURL, saltMastCreds)
+                    // run cmd.run asynchronously to prevent the connection from closing when the slave shuts down; give it 5 seconds to handle the request response
+                    salt.cmdRun(venvPepper, "I@salt:master", "salt -C '${cidNode}' cmd.run 'sleep 5; touch ${statusFile}; salt-call service.stop docker' --async")
+                }
+                sleep(30)
+                node('python') {
+                    python.setupPepperVirtualenv(venvPepper, saltMastURL, saltMastCreds)
+                    debian.osUpgradeNode(venvPepper, cidNode, packageUpgradeMode, false, 60)
+                    salt.checkTargetMinionsReady(['saltId': venvPepper, 'target': cidNode, wait: 60, timeout: 10])
+                    if (salt.runSaltProcessStep(venvPepper, cidNode, 'file.file_exists', [statusFile], null, true, 5)['return'][0].values()[0].toBoolean()) {
+                        salt.cmdRun(venvPepper, "I@salt:master", "salt -C '${cidNode}' cmd.run 'rm ${statusFile} && salt-call service.start docker'") // in case the node was not rebooted
+                        sleep(10)
+                    }
+                    checkCICDDocker()
+                }
+            }
+        } else {
+            common.infoMsg('OS upgrade skipped...')
+        }
+    }
 }