Merge the tip of origin/release/proposed/2019.2.0 into origin/release/2019.2.0

34035bf Save file permissions before backup if possible
8b90317 Fix pillar for getting maas node
63b40fc Increase verbosity of galera restore pipeline
820e786 Do not raise error in case of initial health-check failure
b7a0ad5 Restart contrail-control service after DB rollback
ddd0cbb Use proper method for salt-minion restart
1b11d23 Add nginx wait-for-service block for waiting srv-salt-pki.mount service
b6d18d5 Install sysstat package from pipeline if missed
6ef32f0 Use input where ask for confirmation is required
c304b4f Fix issues in pipeline for backupninja backup
37f41db Add step for checking needed version of postgresql before maasdb backup
312bcd4 Update Nginx proxy settings for Gerrit/Jenkins during upgrade
fe1ea5c Fix job name for galera backup
1116744 [CVP] Add force_pull parameter for cvp sanity/spt/stacklight jobs
2219bf4 Several fixes for backupninja pipelines
2935612 [CVP] Fix skiplist functionality in cvp-tempest
9c4a84d Add 'Galera DB backup' pipeline
7f3c386 Refactoring for using minion names instead of pillars and some standard functions instead of cmds Related-Prod: #PROD-30065 (PROD:30065) Related-Prod: #PROD-29949 (PROD:29949) Cherry-picked from commit: f99b1165574a761743226148d843b91f3e4e2cc9
13bb136 Add openstack-galera-upgrade.groovy pipeline
54e4553 [CVP] Disable docker pull for cvp-sanity/spt/stacklight jobs
d662e5c Apply 'linux.system.repo' state before dist-upgrade operation.
323ffde Add handling of resultCodes for disk i/o utilization check
0102826 [CVP] Define runtest_tempest_cfg_dir in Initialization stage
b8b50b3 [CVP] Fix DEBUG_MODE for cvp-tempest
915b8f8 Cassandra repair pipeline fixes:
278179b Install python-tornado with OS CP/DP upgrade
ce96717 Do not create db backup in Cassandra restore pipeline
79a2d59 Cassandra repair: fix cassandra data cleanup procedure
21c78be Switch Jenkins/Gerrit to TLS/SSL auth in LDAP during upgrade
7ba3359 Add RESTORE_TYPE to galera restore pipeline
106efe7 Increase timeout for ES to become green
8484431 Use source credentials to clone git sources if set
bc44032 Apply Haproxy state during DriveTrain upgrade on cid nodes
ea0ba7d Use old style for enforseState function without Map parameters
2c681d6 add backupninja backup pipeline
fbcee41 [Upgrade] Add check for cluster name availability
ade2248 add cfg01 restore pipeline
71a08db Update classes to new Galera.groovy class

Change-Id: I6c386adabe21708681b566cb4a5b467267efb424
diff --git a/backupninja-backup-pipeline.groovy b/backupninja-backup-pipeline.groovy
new file mode 100644
index 0000000..4410ea9
--- /dev/null
+++ b/backupninja-backup-pipeline.groovy
@@ -0,0 +1,126 @@
+def common = new com.mirantis.mk.Common()
+def salt = new com.mirantis.mk.Salt()
+def python = new com.mirantis.mk.Python()
+def pepperEnv = "pepperEnv"
+def askConfirmation = (env.getProperty('ASK_CONFIRMATION') ?: true).toBoolean()
+
+timeout(time: 12, unit: 'HOURS') {
+    node() {
+        def backupNode = ''
+        def backupServer = ''
+        stage('Setup virtualenv for Pepper') {
+            python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+        }
+        stage('Verify pillar for backups') {
+            try {
+                def masterPillar = salt.getPillar(pepperEnv, "I@salt:master", 'salt:master:initial_data')
+                if (masterPillar['return'].isEmpty()) {
+                    throw new Exception('Problem with salt-master pillar.')
+                }
+                def minionPillar = salt.getPillar(pepperEnv, "I@salt:master", 'salt:minion:initial_data')
+                if (minionPillar['return'].isEmpty()) {
+                    throw new Exception('Problem with salt-minion pillar.')
+                }
+            }
+            catch (Exception e) {
+                common.errorMsg(e.getMessage())
+                common.errorMsg('Please fix your pillar. For more information check docs: https://docs.mirantis.com/mcp/latest/mcp-operations-guide/backup-restore/salt-master.html')
+                // NOTE(review): 'return' only exits this stage closure; mark the
+                // build as failed explicitly, as the other failure paths do.
+                currentBuild.result = "FAILURE"
+                return
+            }
+        }
+        stage('Check backup location') {
+            try {
+                backupNode = salt.getMinions(pepperEnv, "I@backupninja:client")[0]
+                salt.minionsReachable(pepperEnv, "I@salt:master", backupNode)
+            }
+            catch (Exception e) {
+                common.errorMsg(e.getMessage())
+                common.errorMsg("Pipeline wasn't able to detect backupninja:client pillar or the minion is not reachable")
+                currentBuild.result = "FAILURE"
+                return
+            }
+
+            def postgresqlMajorVersion = salt.getPillar(pepperEnv, 'I@salt:master', '_param:postgresql_major_version').get('return')[0].values()[0]
+            if (! postgresqlMajorVersion) {
+                input message: "Can't get _param:postgresql_major_version parameter, which is required to determine postgresql-client version. Is it defined in pillar? Confirm to proceed anyway."
+            } else {
+                def postgresqlClientPackage = "postgresql-client-${postgresqlMajorVersion}"
+                try {
+                    if (!salt.isPackageInstalled(['saltId': pepperEnv, 'target': backupNode, 'packageName': postgresqlClientPackage, 'output': false])) {
+                        if (askConfirmation) {
+                            input message: "Do you want to install ${postgresqlClientPackage} package on targeted nodes: ${backupNode}? It's required to make backup. Click to confirm"
+                        }
+                        // update also common fake package
+                        salt.runSaltProcessStep(pepperEnv, backupNode, 'pkg.install', ["postgresql-client,${postgresqlClientPackage}"])
+                    }
+                } catch (Exception e) {
+                    common.errorMsg("Unable to determine status of ${postgresqlClientPackage} packages on target nodes: ${backupNode}.")
+                    if (askConfirmation) {
+                        input message: "Do you want to continue? Click to confirm"
+                    }
+                }
+            }
+
+            try {
+                backupServer = salt.getMinions(pepperEnv, "I@backupninja:server")[0]
+                salt.minionsReachable(pepperEnv, "I@salt:master", backupServer)
+            }
+            catch (Exception e) {
+                common.errorMsg(e.getMessage())
+                common.errorMsg("Pipeline wasn't able to detect backupninja:server pillar or the minion is not reachable")
+                currentBuild.result = "FAILURE"
+                return
+            }
+        }
+        stage('Prepare for backup') {
+            salt.enforceState(['saltId': pepperEnv, 'target': 'I@backupninja:server', 'state': 'backupninja'])
+            salt.enforceState(['saltId': pepperEnv, 'target': 'I@backupninja:client', 'state': 'backupninja'])
+            def backupMasterSource = salt.getReturnValues(salt.getPillar(pepperEnv, backupNode, 'salt:master:initial_data:source'))
+            def backupMinionSource = salt.getReturnValues(salt.getPillar(pepperEnv, backupNode, 'salt:minion:initial_data:source'))
+            [backupServer, backupMasterSource, backupMinionSource].unique().each {
+                salt.cmdRun(pepperEnv, backupNode, "ssh-keygen -F ${it} || ssh-keyscan -H ${it} >> /root/.ssh/known_hosts")
+            }
+            def maasNodes = salt.getMinions(pepperEnv, 'I@maas:region')
+            if (!maasNodes.isEmpty()) {
+                common.infoMsg("Trying to save maas file permissions on ${maasNodes} if possible")
+                salt.cmdRun(pepperEnv, 'I@maas:region', 'which getfacl && getfacl -pR /var/lib/maas/ > /var/lib/maas/file_permissions.txt || true')
+            }
+        }
+        stage('Backup') {
+            // The summary is the second line from the end of backupninja output,
+            // e.g. "Info: FINISHED: 1 actions run. 0 fatal. 0 error. 0 warning."
+            def output = salt.getReturnValues(salt.cmdRun(pepperEnv, backupNode, "su root -c 'backupninja --now -d'")).readLines()[-2]
+            def outputPattern = java.util.regex.Pattern.compile("\\d+")
+            def outputMatcher = outputPattern.matcher(output)
+            // Expected matches: [0] actions run, [1] fatal, [2] error, [3] warning
+            def result = []
+            if (outputMatcher.find()) {
+                try {
+                    result = outputMatcher.getAt([0, 1, 2, 3])
+                }
+                catch (Exception e) {
+                    common.errorMsg(e.getMessage())
+                    common.errorMsg("Parsing failed.")
+                    currentBuild.result = "FAILURE"
+                    return
+                }
+            } else {
+                // Previously 'result' was left undefined on this path, which made the
+                // check below crash with MissingPropertyException instead of failing cleanly.
+                common.errorMsg("Parsing failed.")
+                currentBuild.result = "FAILURE"
+                return
+            }
+            if (result[1] != null && result[1] instanceof String && result[1].isInteger() && (result[1].toInteger() < 1)) {
+                common.successMsg("Backup successfully finished " + result[1] + " fatals, " + result[2] + " errors " + result[3] + " warnings.")
+            } else {
+                common.errorMsg("Backup failed. Found " + result[1] + " fatals, " + result[2] + " errors " + result[3] + " warnings.")
+                currentBuild.result = "FAILURE"
+                return
+            }
+        }
+    }
+}
diff --git a/backupninja-restore-pipeline.groovy b/backupninja-restore-pipeline.groovy
new file mode 100644
index 0000000..b58756e
--- /dev/null
+++ b/backupninja-restore-pipeline.groovy
@@ -0,0 +1,64 @@
+def common = new com.mirantis.mk.Common()
+def salt = new com.mirantis.mk.Salt()
+def python = new com.mirantis.mk.Python()
+def pepperEnv = "pepperEnv"
+// Initialize to an empty list: if the restore stage bails out before assigning
+// it, the 'maasNodes.isEmpty()' check below would otherwise NPE on null.
+def maasNodes = []
+
+timeout(time: 12, unit: 'HOURS') {
+    node() {
+        stage('Setup virtualenv for Pepper') {
+            python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+        }
+        stage('Salt-Master restore') {
+            common.infoMsg('Verify pillar for salt-master backups')
+            try {
+                def masterPillar = salt.getPillar(pepperEnv, "I@salt:master", 'salt:master:initial_data')
+                if(masterPillar['return'].isEmpty()) {
+                    throw new Exception('Problem with salt-master pillar.')
+                }
+                def minionPillar = salt.getPillar(pepperEnv, "I@salt:master", 'salt:minion:initial_data')
+                if(minionPillar['return'].isEmpty()) {
+                    throw new Exception('Problem with salt-minion pillar.')
+                }
+            }
+            catch (Exception e){
+                common.errorMsg(e.getMessage())
+                common.errorMsg('Please fix your pillar. For more information check docs: https://docs.mirantis.com/mcp/latest/mcp-operations-guide/backup-restore/salt-master/salt-master-restore.html')
+                // 'return' only exits this stage closure - fail the build explicitly.
+                currentBuild.result = "FAILURE"
+                return
+            }
+            maasNodes = salt.getMinions(pepperEnv, 'I@maas:region')
+            common.infoMsg('Performing restore')
+            salt.enforceState(['saltId': pepperEnv, 'target': 'I@salt:master', 'state': 'salt.master.restore'])
+            salt.enforceState(['saltId': pepperEnv, 'target': 'I@salt:master', 'state': 'salt.minion.restore'])
+            salt.fullRefresh(pepperEnv, '*')
+
+            common.infoMsg('Validating output')
+            common.infoMsg('Salt-Keys')
+            salt.cmdRun(pepperEnv, 'I@salt:master', "salt-key")
+            common.infoMsg('Salt-master CA')
+            salt.cmdRun(pepperEnv, 'I@salt:master', "ls -la /etc/pki/ca/salt_master_ca/")
+        }
+        if (!maasNodes.isEmpty()) {
+            stage('MAAS Restore') {
+                common.infoMsg('Verify pillar for MaaS backup')
+                try {
+                    def maaSPillar = salt.getPillar(pepperEnv, "I@maas:region", 'maas:region:database:initial_data')
+                    if (maaSPillar['return'].isEmpty()) {
+                        throw new Exception('Problem with MaaS pillar.')
+                    }
+                }
+                catch (Exception e) {
+                    common.errorMsg(e.getMessage())
+                    common.errorMsg('Please fix your pillar. For more information check docs: https://docs.mirantis.com/mcp/latest/mcp-operations-guide/backup-restore/backupninja-postgresql/backupninja-postgresql-restore.html')
+                    // Mark the build as failed - 'return' alone would leave it SUCCESS.
+                    currentBuild.result = "FAILURE"
+                    return
+                }
+                salt.enforceState(['saltId': pepperEnv, 'target': 'I@maas:region', 'state': 'maas.region'])
+            }
+        }
+    }
+}
diff --git a/cloud-deploy-pipeline.groovy b/cloud-deploy-pipeline.groovy
index 08ebbd1..aa1a644 100644
--- a/cloud-deploy-pipeline.groovy
+++ b/cloud-deploy-pipeline.groovy
@@ -466,7 +466,7 @@
 
                 // Workaround for PROD-17765 issue to prevent crashes of keystone.role_present state.
                 // More details: https://mirantis.jira.com/browse/PROD-17765
-                salt.runSaltProcessStep(venvPepper, "I@keystone:client ${extra_tgt}", 'service.restart', ['salt-minion'])
+                salt.restartSaltMinion(venvPepper, "I@keystone:client ${extra_tgt}")
                 salt.minionsReachable(venvPepper, "I@salt:master and *01* ${extra_tgt}", 'I@keystone:client', null, 10, 6)
 
                 stage('Install OpenStack network') {
diff --git a/cloud-update.groovy b/cloud-update.groovy
index 99c661c..0ea5fea 100644
--- a/cloud-update.groovy
+++ b/cloud-update.groovy
@@ -667,9 +667,8 @@
 def restoreGalera(pepperEnv) {
     def salt = new com.mirantis.mk.Salt()
     def common = new com.mirantis.mk.Common()
-    def openstack = new com.mirantis.mk.Openstack()
-    salt.cmdRun(pepperEnv, 'I@xtrabackup:client', "rm -rf /var/lib/mysql/*")
-    openstack.restoreGaleraDb(pepperEnv)
+    def galera = new com.mirantis.mk.Galera()
+    galera.restoreGaleraDb(pepperEnv)
 }
 
 def backupZookeeper(pepperEnv) {
diff --git a/cvp-runner.groovy b/cvp-runner.groovy
index b9649d5..4a4a8b6 100644
--- a/cvp-runner.groovy
+++ b/cvp-runner.groovy
@@ -66,8 +66,10 @@
                     ] + env_vars
 
                 // Generating final config
+                def force_pull = (env.getProperty('force_pull')) ?: false
                 configRun = [
                     'image': IMAGE,
+                    'dockerPull': force_pull.toBoolean(),
                     'baseRepoPreConfig': false,
                     'dockerMaxCpus': 2,
                     'dockerExtraOpts' : [
diff --git a/cvp-tempest.groovy b/cvp-tempest.groovy
index c311186..c4351b9 100644
--- a/cvp-tempest.groovy
+++ b/cvp-tempest.groovy
@@ -74,6 +74,11 @@
         tempest_node=salt.getPillar(saltMaster, SERVICE_NODE, '_param:tempest_test_target')['return'][0].values()[0] ?: default_node+'*'
         // TARGET_NODE will always override any settings above
         TARGET_NODE = (env.TARGET_NODE) ?: tempest_node
+        // default is /root/test/
+        runtest_tempest_cfg_dir = (env.runtest_tempest_cfg_dir) ?: salt.getPillar(saltMaster, SERVICE_NODE, '_param:runtest_tempest_cfg_dir')['return'][0].values()[0]
+        // default is tempest_generated.conf
+        runtest_tempest_cfg_name = (env.runtest_tempest_cfg_name) ?: salt.getPillar(saltMaster, SERVICE_NODE, '_param:runtest_tempest_cfg_name')['return'][0].values()[0]
+        common.infoMsg("runtest_tempest_cfg is ${runtest_tempest_cfg_dir}/${runtest_tempest_cfg_name}")
     }
     stage('Preparing resources') {
         if ( PREPARE_RESOURCES.toBoolean() ) {
@@ -92,11 +97,6 @@
     }
     stage('Generate config') {
         if ( GENERATE_CONFIG.toBoolean() ) {
-            // default is /root/test/
-            runtest_tempest_cfg_dir = (env.runtest_tempest_cfg_dir) ?: salt.getPillar(saltMaster, SERVICE_NODE, '_param:runtest_tempest_cfg_dir')['return'][0].values()[0]
-            // default is tempest_generated.conf
-            runtest_tempest_cfg_name = (env.runtest_tempest_cfg_name) ?: salt.getPillar(saltMaster, SERVICE_NODE, '_param:runtest_tempest_cfg_name')['return'][0].values()[0]
-            common.infoMsg("runtest_tempest_cfg is ${runtest_tempest_cfg_dir}/${runtest_tempest_cfg_name}")
             salt.runSaltProcessStep(saltMaster, SERVICE_NODE, 'file.remove', ["${runtest_tempest_cfg_dir}"])
             salt.runSaltProcessStep(saltMaster, SERVICE_NODE, 'file.mkdir', ["${runtest_tempest_cfg_dir}"])
             fullnodename = salt.getMinions(saltMaster, SERVICE_NODE).get(0)
@@ -128,9 +128,8 @@
             }
             SKIP_LIST_PATH = (env.SKIP_LIST_PATH) ?: salt.getPillar(saltMaster, SERVICE_NODE, '_param:tempest_skip_list_path')['return'][0].values()[0]
             if (SKIP_LIST_PATH) {
-                mounts = ["${runtest_tempest_cfg_dir}/skip.list": "/root/tempest/skip.list"]
+                mounts = ["${runtest_tempest_cfg_dir}/skip.list": "/var/lib/tempest/skiplists/skip.list"]
                 salt.cmdRun(saltMaster, SERVICE_NODE, "salt-cp ${TARGET_NODE} ${SKIP_LIST_PATH} ${runtest_tempest_cfg_dir}/skip.list")
-                args += ' --blacklist-file /root/tempest/skip.list '
             }
         }
         else {
@@ -171,7 +170,7 @@
             junit "${report_prefix}.xml"
         }
     } finally {
-        if (DEBUG_MODE == 'false') {
+        if ( ! DEBUG_MODE.toBoolean() ) {
             validate.runCleanup(saltMaster, TARGET_NODE, container_name)
         }
     }
diff --git a/galera-cluster-verify-restore.groovy b/galera-cluster-verify-restore.groovy
index e65257a..3faedc7 100644
--- a/galera-cluster-verify-restore.groovy
+++ b/galera-cluster-verify-restore.groovy
@@ -7,15 +7,19 @@
  *   ASK_CONFIRMATION           Ask confirmation for restore
  *   VERIFICATION_RETRIES       Number of restries to verify the restoration.
  *   CHECK_TIME_SYNC            Set to true to check time synchronization accross selected nodes.
+ *   RESTORE_TYPE               Sets restoration method
  *
 **/
 
 def common = new com.mirantis.mk.Common()
 def salt = new com.mirantis.mk.Salt()
-def openstack = new com.mirantis.mk.Openstack()
+def galera = new com.mirantis.mk.Galera()
 def python = new com.mirantis.mk.Python()
 def pepperEnv = "pepperEnv"
 def resultCode = 99
+def restoreType = env.RESTORE_TYPE
+def runRestoreDb = false
+def runBackupDb = false
 
 askConfirmation = (env.getProperty('ASK_CONFIRMATION') ?: true).toBoolean()
 checkTimeSync = (env.getProperty('CHECK_TIME_SYNC') ?: true).toBoolean()
@@ -25,70 +29,117 @@
 } else {
     verificationRetries = 5
 }
+if (restoreType.equals("BACKUP_AND_RESTORE") || restoreType.equals("ONLY_RESTORE")) {
+    runRestoreDb = true
+}
+if (restoreType.equals("BACKUP_AND_RESTORE")) {
+    runBackupDb = true
+}
 
 timeout(time: 12, unit: 'HOURS') {
     node() {
         stage('Setup virtualenv for Pepper') {
             python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
         }
-        stage('Verify status')
-            resultCode = openstack.verifyGaleraStatus(pepperEnv, false, checkTimeSync)
-        stage('Restore') {
+        stage('Verify status') {
+            def sysstatTargets = 'I@xtrabackup:client or I@xtrabackup:server'
+            def sysstatTargetsNodes = salt.getMinions(pepperEnv, sysstatTargets)
+            try {
+                if (!salt.isPackageInstalled(['saltId': pepperEnv, 'target': sysstatTargets, 'packageName': 'sysstat', 'output': false])) {
+                    if (askConfirmation) {
+                        input message: "Do you want to install 'sysstat' package on targeted nodes: ${sysstatTargetsNodes}? Click to confirm"
+                    }
+                    salt.runSaltProcessStep(pepperEnv, sysstatTargets, 'pkg.install', ['sysstat'])
+                }
+            } catch (Exception e) {
+                common.errorMsg("Unable to determine status of sysstat package on target nodes: ${sysstatTargetsNodes}.")
+                common.errorMsg(e.getMessage())
+                if (askConfirmation) {
+                    input message: "Do you want to continue? Click to confirm"
+                }
+            }
+            resultCode = galera.verifyGaleraStatus(pepperEnv, false, checkTimeSync)
             if (resultCode == 128) {
                 common.errorMsg("Unable to connect to Galera Master. Trying slaves...")
-                resultCode = openstack.verifyGaleraStatus(pepperEnv, true, checkTimeSync)
+                resultCode = galera.verifyGaleraStatus(pepperEnv, true, checkTimeSync)
                 if (resultCode == 129) {
-                    common.errorMsg("Unable to obtain Galera slave minions list". "Without fixing this issue, pipeline cannot continue in verification and restoration.")
+                    common.errorMsg("Unable to obtain Galera slave minions list. Without fixing this issue, pipeline cannot continue in verification, backup and restoration. This may be caused by wrong Galera configuration or corrupted pillar data.")
                     currentBuild.result = "FAILURE"
                     return
                 } else if (resultCode == 130) {
-                    common.errorMsg("Neither master or slaves are reachable. Without fixing this issue, pipeline cannot continue in verification and restoration.")
+                    common.errorMsg("Neither master or slaves are reachable. Without fixing this issue, pipeline cannot continue in verification, backup and restoration. Is at least one member of the Galera cluster up and running?")
                     currentBuild.result = "FAILURE"
                     return
                 }
             }
             if (resultCode == 131) {
-                common.errorMsg("Time desynced - Click proceed when the issue is fixed or abort.")
+                common.errorMsg("Time desynced - Please fix this issue and rerun the pipeline.")
                 currentBuild.result = "FAILURE"
+                return
+            }
+            if (resultCode == 140 || resultCode == 141) {
+                common.errorMsg("Disk utilization check failed - Please fix this issue and rerun the pipeline.")
+                currentBuild.result = "FAILURE"
+                return
             }
             if (resultCode == 1) {
-                if(askConfirmation){
-                    common.warningMsg("There was a problem with parsing the status output or with determining it. Do you want to run a restore?")
+                if (askConfirmation) {
+                    input message: "There was a problem with parsing the status output or with determining it. Do you want to run a restore?"
                 } else {
                     common.warningMsg("There was a problem with parsing the status output or with determining it. Try to restore.")
                 }
             } else if (resultCode > 1) {
-                if(askConfirmation){
-                    common.warningMsg("There's something wrong with the cluster, do you want to run a restore?")
+                if (askConfirmation) {
+                    input message: "There's something wrong with the cluster, do you want to continue with backup and/or restore?"
                 } else {
-                    common.warningMsg("There's something wrong with the cluster, try to restore.")
+                    common.warningMsg("There's something wrong with the cluster, try to backup and/or restore.")
                 }
             } else {
-                if(askConfirmation){
-                  common.warningMsg("There seems to be everything alright with the cluster, do you still want to run a restore?")
+                if (askConfirmation) {
+                    input message: "There seems to be everything alright with the cluster, do you still want to continue with backup and/or restore?"
                 } else {
-                  common.warningMsg("There seems to be everything alright with the cluster, do nothing")
+                    common.warningMsg("There seems to be everything alright with the cluster, no backup and no restoration will be done.")
+                    currentBuild.result = "SUCCESS"
+                    return
                 }
             }
-            if(askConfirmation){
-              input message: "Are you sure you want to run a restore? Click to confirm"
-            }
-            try {
-                if((!askConfirmation && resultCode > 0) || askConfirmation){
-                  openstack.restoreGaleraDb(pepperEnv)
-                }
-            } catch (Exception e) {
-                common.errorMsg("Restoration process has failed.")
-            }
         }
-        stage('Verify restoration result') {
-            common.retry(verificationRetries, 15) {
-                exitCode = openstack.verifyGaleraStatus(pepperEnv, false)
-                if (exitCode >= 1) {
-                    error("Verification attempt finished with an error. This may be caused by cluster not having enough time to come up or to sync. Next verification attempt in 5 seconds.")
-                } else {
-                    common.infoMsg("Restoration procedure seems to be successful. See verification report to be sure.")
-                    currentBuild.result = "SUCCESS"
+        if (runBackupDb) {
+            if (askConfirmation) {
+                input message: "Are you sure you want to run a backup? Click to confirm"
+            }
+            stage('Backup') {
+                deployBuild = build(job: 'galera_backup_database', parameters: [
+                        [$class: 'StringParameterValue', name: 'SALT_MASTER_URL', value: SALT_MASTER_URL],
+                        [$class: 'StringParameterValue', name: 'SALT_MASTER_CREDENTIALS', value: SALT_MASTER_CREDENTIALS],
+                        [$class: 'StringParameterValue', name: 'OVERRIDE_BACKUP_NODE', value: "none"],
+                ]
+                )
+            }
+        }
+        if (runRestoreDb) {
+            stage('Restore') {
+                if (askConfirmation) {
+                    input message: "Are you sure you want to run a restore? Click to confirm"
+                }
+                try {
+                    if ((!askConfirmation && resultCode > 0) || askConfirmation) {
+                        galera.restoreGaleraCluster(pepperEnv, runRestoreDb)
+                    }
+                } catch (Exception e) {
+                    common.errorMsg("Restoration process has failed.")
+                    common.errorMsg(e.getMessage())
+                }
+            }
+            stage('Verify restoration result') {
+                common.retry(verificationRetries, 15) {
+                    exitCode = galera.verifyGaleraStatus(pepperEnv, false, false)
+                    if (exitCode >= 1) {
+                        error("Verification attempt finished with an error. This may be caused by cluster not having enough time to come up or to sync. Next verification attempt in 5 seconds.")
+                    } else {
+                        common.infoMsg("Restoration procedure seems to be successful. See verification report to be sure.")
+                        currentBuild.result = "SUCCESS"
+                    }
                 }
             }
         }
diff --git a/galera-database-backup-pipeline.groovy b/galera-database-backup-pipeline.groovy
new file mode 100644
index 0000000..a6d0af5
--- /dev/null
+++ b/galera-database-backup-pipeline.groovy
@@ -0,0 +1,67 @@
+def common = new com.mirantis.mk.Common()
+def salt = new com.mirantis.mk.Salt()
+def galera = new com.mirantis.mk.Galera()
+def python = new com.mirantis.mk.Python()
+def pepperEnv = "pepperEnv"
+
+backupNode = "none"
+primaryNodes = []
+syncedNodes = []
+galeraMembers = []
+
+if (common.validInputParam('OVERRIDE_BACKUP_NODE')) {
+    backupNode = OVERRIDE_BACKUP_NODE
+}
+
+timeout(time: 12, unit: 'HOURS') {
+    node() {
+        stage('Setup virtualenv for Pepper') {
+            python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+        }
+        if (backupNode.equals("none")) {
+            stage('Locate Primary component') {
+                galeraMembers = salt.getMinions(pepperEnv, "I@galera:master or I@galera:slave")
+                for (member in galeraMembers) {          // STEP 1 - Locate all nodes that belong to Primary component
+                    try {
+                        salt.minionsReachable(pepperEnv, "I@salt:master", member)
+                        memberStatus = galera.getWsrepParameters(pepperEnv, member, "wsrep_cluster_status", false)
+                        if (memberStatus.get('wsrep_cluster_status').equals("Primary")) {
+                            primaryNodes.add(member)
+                            common.infoMsg("Adding ${member} as a member of a Primary component.")
+                        } else {
+                            common.warningMsg("Ignoring ${member} node, because it's not part of a Primary component.")
+                        }
+                    } catch (Exception e) {
+                        common.warningMsg("Minion '${member}' is not reachable or is not possible to determine its status.")
+                    }
+                }
+            }
+            stage('Choose backup node') {
+                // Fail fast instead of selecting 'null' when no Primary member was found.
+                if (primaryNodes.isEmpty()) {
+                    error("No Galera node in the Primary component is reachable, cannot choose a backup node.")
+                }
+                backupNode = primaryNodes.sort()[0]                      // STEP 2 - Use node with lowest hostname number (last option if everything previous fails)
+            }
+        } else {
+            stage('Choose backup node') {
+                common.infoMsg("Backup node was overridden to ${backupNode}.")
+            }
+        }
+        stage ('Prepare for backup') {
+            salt.enforceState(pepperEnv, 'I@xtrabackup:server', ['linux.system.repo', 'xtrabackup'])
+            salt.enforceState(pepperEnv, 'I@xtrabackup:client', ['linux.system.repo', 'openssh.client'])
+        }
+        stage('Backup') {
+            common.infoMsg("Node ${backupNode} was selected as a backup node.")
+            // 'input:' (with a colon) was a Groovy label followed by a bare string
+            // expression - a no-op. Call the input step so the operator actually confirms.
+            input message: "Please check selected backup node and confirm to run the backup procedure."
+            salt.cmdRun(pepperEnv, backupNode, "su root -c 'salt-call state.sls xtrabackup'")
+            salt.cmdRun(pepperEnv, backupNode, "su root -c '/usr/local/bin/innobackupex-runner.sh -s'")
+        }
+        stage('Clean-up') {
+            salt.cmdRun(pepperEnv, backupNode, "su root -c '/usr/local/bin/innobackupex-runner.sh -c'")
+        }
+    }
+}
diff --git a/git-mirror-pipeline.groovy b/git-mirror-pipeline.groovy
index fa49bbc..6f14866 100644
--- a/git-mirror-pipeline.groovy
+++ b/git-mirror-pipeline.groovy
@@ -5,6 +5,22 @@
     timeout(time: 12, unit: 'HOURS') {
         node() {
             try {
+                def sourceCreds = env.SOURCE_CREDENTIALS
+                if (sourceCreds && common.getCredentialsById(sourceCreds, 'password')) {
+                    withCredentials([
+                            [$class          : 'UsernamePasswordMultiBinding',
+                             credentialsId   : sourceCreds,
+                             passwordVariable: 'GIT_PASS',
+                             usernameVariable: 'GIT_USER']
+                    ]) {
+                        sh """
+                            set +x
+                            git config --global credential.${SOURCE_URL}.username \${GIT_USER}
+                            echo "echo \${GIT_PASS}" > askpass.sh && chmod +x askpass.sh
+                        """
+                        env.GIT_ASKPASS = "${env.WORKSPACE}/askpass.sh"
+                    }
+                }
                 if (BRANCHES == '*' || BRANCHES.contains('*')) {
                     branches = git.getBranchesForGitRepo(SOURCE_URL, BRANCHES)
                 } else {
@@ -18,7 +34,8 @@
                 dir('source') {
                     checkout changelog: true, poll: true,
                         scm: [$class    : 'GitSCM', branches: pollBranches, doGenerateSubmoduleConfigurations: false,
-                              extensions: [[$class: 'CleanCheckout']], submoduleCfg: [], userRemoteConfigs: [[credentialsId: CREDENTIALS_ID, url: SOURCE_URL]]]
+                              extensions: [[$class: 'CleanCheckout']], submoduleCfg: [],
+                              userRemoteConfigs: [[credentialsId: sourceCreds, url: SOURCE_URL]]]
                     git.mirrorGit(SOURCE_URL, TARGET_URL, CREDENTIALS_ID, branches, true)
                 }
             } catch (Throwable e) {
@@ -26,6 +43,9 @@
                 currentBuild.result = 'FAILURE'
                 currentBuild.description = currentBuild.description ? e.message + '' + currentBuild.description : e.message
                 throw e
+            } finally {
+                sh "git config --global --unset credential.${SOURCE_URL}.username || true"
+                deleteDir()
             }
         }
     }
diff --git a/openstack-control-upgrade.groovy b/openstack-control-upgrade.groovy
index f2dd78c..5929390 100644
--- a/openstack-control-upgrade.groovy
+++ b/openstack-control-upgrade.groovy
@@ -32,6 +32,8 @@
  * No service downtime
  * No workload downtime''',
     'Launched actions': '''
+ * Refresh pillars on the target nodes.
+ * Apply the 'linux.system.repo' state on the target nodes.
  * Verify API, perform basic CRUD operations for services.
  * Verify that compute/neutron agents on hosts are up.
  * Run some service built in checkers like keystone-manage doctor or nova-status upgrade.''',
@@ -153,6 +155,8 @@
     for (target in upgradeTargets){
       common.stageWrapper(upgradeStageMap, "Pre upgrade", target, interactive) {
         openstack.runOpenStackUpgradePhase(env, target, 'pre')
+        salt.runSaltProcessStep(env, target, 'saltutil.refresh_pillar', [], null, true)
+        salt.enforceState(env, target, 'linux.system.repo')
         openstack.runOpenStackUpgradePhase(env, target, 'verify')
       }
     }
@@ -173,6 +177,9 @@
         if (OS_DIST_UPGRADE.toBoolean() == true || OS_UPGRADE.toBoolean() == true) {
           debian.osUpgradeNode(env, target, upgrade_mode, false)
         }
+        // Workaround for PROD-31413, install python-tornado from latest release if available and
+        // restart minion to apply new code.
+        salt.upgradePackageAndRestartSaltMinion(env, target, 'python-tornado')
       }
 
       common.stageWrapper(upgradeStageMap, "Upgrade OpenStack", target, interactive) {
diff --git a/openstack-data-upgrade.groovy b/openstack-data-upgrade.groovy
index 7458a27..e768564 100644
--- a/openstack-data-upgrade.groovy
+++ b/openstack-data-upgrade.groovy
@@ -31,6 +31,8 @@
  * No service downtime
  * No workload downtime''',
     'Launched actions': '''
+ * Refresh pillars on the target nodes.
+ * Apply the 'linux.system.repo' state on the target nodes.
  * Verify API, perform basic CRUD operations for services.
  * Verify that compute/neutron agents on hosts are up.
  * Run some service built in checkers like keystone-manage doctor or nova-status upgrade.''',
@@ -138,6 +140,8 @@
     for (target in targetNodes){
       common.stageWrapper(upgradeStageMap, "Pre upgrade", target, interactive) {
         openstack.runOpenStackUpgradePhase(env, target, 'pre')
+        salt.runSaltProcessStep(env, target, 'saltutil.refresh_pillar', [], null, true)
+        salt.enforceState(env, target, 'linux.system.repo')
         openstack.runOpenStackUpgradePhase(env, target, 'verify')
       }
 
@@ -158,6 +162,9 @@
         if (OS_DIST_UPGRADE.toBoolean() == true || OS_UPGRADE.toBoolean() == true) {
           debian.osUpgradeNode(env, target, upgrade_mode, false)
         }
+        // Workaround for PROD-31413, install python-tornado from latest release if available and
+        // restart minion to apply new code.
+        salt.upgradePackageAndRestartSaltMinion(env, target, 'python-tornado')
       }
 
       common.stageWrapper(upgradeStageMap, "Upgrade OpenStack", target, interactive) {
diff --git a/openstack-galera-upgrade.groovy b/openstack-galera-upgrade.groovy
new file mode 100644
index 0000000..f124051
--- /dev/null
+++ b/openstack-galera-upgrade.groovy
@@ -0,0 +1,206 @@
+/**
+ * Upgrade MySQL and Galera packages on dbs nodes.
+ * Update packages on given nodes
+ *
+ * Expected parameters:
+ *   SALT_MASTER_CREDENTIALS            Credentials to the Salt API.
+ *   SALT_MASTER_URL                    Full Salt API address [http://10.10.10.15:6969].
+ *   SHUTDOWN_CLUSTER                   Shutdown all mysql instances on target nodes at the same time.
+ *   OS_DIST_UPGRADE                    Upgrade system packages including kernel (apt-get dist-upgrade).
+ *   OS_UPGRADE                         Upgrade all installed applications (apt-get upgrade)
+ *   TARGET_SERVERS                     Comma separated list of salt compound definitions to upgrade.
+ *   INTERACTIVE                        Ask interactive questions during pipeline run (bool).
+ *
+**/
+
+def common = new com.mirantis.mk.Common()
+def salt = new com.mirantis.mk.Salt()
+def python = new com.mirantis.mk.Python()
+def debian = new com.mirantis.mk.Debian()
+def openstack = new com.mirantis.mk.Openstack()
+def galera = new com.mirantis.mk.Galera()
+def shutdownCluster = SHUTDOWN_CLUSTER.toBoolean()
+def interactive = INTERACTIVE.toBoolean()
+def LinkedHashMap upgradeStageMap = [:]
+
+upgradeStageMap.put('Pre upgrade',
+  [
+    'Description': 'Only non destructive actions will be applied during this phase. Basic service verification will be performed.',
+    'Status': 'NOT_LAUNCHED',
+    'Expected behaviors': '''
+ * No service downtime
+ * No workload downtime''',
+    'Launched actions': '''
+ * Verify API, perform basic CRUD operations for services.
+ * Verify MySQL is running and Galera cluster is operational.''',
+    'State result': 'Basic checks around wsrep Galera status are passed.'
+  ])
+
+upgradeStageMap.put('Stop MySQL service',
+  [
+    'Description': 'All MySQL services will be stopped on all TARGET_SERVERS nodes.',
+    'Status': 'NOT_LAUNCHED',
+    'Expected behaviors': '''
+ * MySQL services are stopped.
+ * OpenStack APIs are not accessible from this point.
+ * No workload downtime''',
+    'Launched actions': '''
+ * Stop MySQL services''',
+    'State result': 'MySQL service is stopped',
+  ])
+
+upgradeStageMap.put('Upgrade OS',
+  [
+    'Description': 'Optional step. OS packages will be upgraded during this phase, depending on the job parameters dist-upgrade might be called. And reboot of node executed.',
+    'Status': 'NOT_LAUNCHED',
+    'Expected behaviors': '''
+ * No workload downtime
+ * The nodes might be rebooted''',
+    'Launched actions': '''
+ * Install new version of system packages
+ * If doing dist-upgrade new kernel might be installed and node rebooted
+ * System packages are updated
+ * Node might be rebooted
+'''
+  ])
+
+upgradeStageMap.put('Upgrade MySQL server',
+   [
+    'Description': 'MySQL and Galera packages will be upgraded during this stage. No workload downtime is expected.',
+    'Status': 'NOT_LAUNCHED',
+    'Expected behaviors': '''
+ * OpenStack services lose connection to MySQL server
+ * No workload downtime''',
+    'Launched actions': '''
+ * Install new version of MySQL and Galera packages
+ * Render version of configs''',
+    'State result': '''
+ * MySQL packages are upgraded''',
+  ])
+
+upgradeStageMap.put('Start MySQL service',
+   [
+    'Description': 'All MySQL services will be running on all TARGET_SERVERS nodes.',
+    'Status': 'NOT_LAUNCHED',
+    'Expected behaviors': '''
+ * MySQL service is running.
+ * OpenStack APIs are accessible from this point.
+ * No workload downtime''',
+    'Launched actions': '''
+ * Start MySQL service''',
+    'State result': 'MySQL service is running',
+  ])
+
+def env = "env"
+timeout(time: 12, unit: 'HOURS') {
+  node() {
+
+    stage('Setup virtualenv for Pepper') {
+      python.setupPepperVirtualenv(env, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+    }
+
+    def upgradeTargets = salt.getMinionsSorted(env, TARGET_SERVERS)
+
+    if (upgradeTargets.isEmpty()) {
+      error("No servers for upgrade matched by ${TARGET_SERVERS}")
+    }
+
+    def targetSecMapping = [:]
+    def secNoList = []
+    def out
+    def stopTargets = upgradeTargets.reverse()
+    common.printStageMap(upgradeStageMap)
+
+    if (interactive){
+      input message: common.getColorizedString(
+        "Above you can find detailed info this pipeline will execute.\nThe info provides brief description of each stage, actions that will be performed and service/workload impact during each stage.\nPlease read it carefully.", "yellow")
+    }
+
+    for (target in upgradeTargets) {
+      salt.runSaltProcessStep(env, target, 'saltutil.refresh_pillar', [], null, true)
+      salt.enforceState(env, target, ['linux.system.repo'])
+      common.stageWrapper(upgradeStageMap, "Pre upgrade", target, interactive) {
+        openstack.runOpenStackUpgradePhase(env, target, 'pre')
+        openstack.runOpenStackUpgradePhase(env, target, 'verify')
+      }
+    }
+
+    if (shutdownCluster){
+      for (target in stopTargets) {
+        common.stageWrapper(upgradeStageMap, "Stop MySQL service", target, interactive) {
+          openstack.runOpenStackUpgradePhase(env, target, 'service_stopped')
+        }
+      }
+    }
+
+    for (target in upgradeTargets) {
+         out = salt.cmdRun(env, target,  'cat /var/lib/mysql/grastate.dat | grep "seqno" | cut -d ":" -f2', true, null, false).get('return')[0].values()[0].replaceAll('Salt command execution success', '').trim()
+         common.infoMsg("Get seqno: ${out} for node ${target}")
+         if (!out.isNumber()){
+             out = -2
+         }
+        targetSecMapping[out.toInteger()] = target
+        secNoList.add(out.toInteger())
+    }
+
+    def masterNode = targetSecMapping[secNoList.max()]
+    common.infoMsg("Master node is: ${masterNode}")
+
+    // Make sure we start upgrade always from master node
+    upgradeTargets.remove(masterNode)
+    upgradeTargets = [masterNode] + upgradeTargets
+    common.infoMsg("Upgrade targets are: ${upgradeTargets}")
+
+    for (target in upgradeTargets) {
+
+        common.stageWrapper(upgradeStageMap, "Stop MySQL service", target, interactive) {
+          openstack.runOpenStackUpgradePhase(env, target, 'service_stopped')
+        }
+
+        common.stageWrapper(upgradeStageMap, "Upgrade OS", target, interactive) {
+          if (OS_DIST_UPGRADE.toBoolean() == true){
+            upgrade_mode = 'dist-upgrade'
+          } else if (OS_UPGRADE.toBoolean() == true){
+            upgrade_mode = 'upgrade'
+          }
+          if (OS_DIST_UPGRADE.toBoolean() == true || OS_UPGRADE.toBoolean() == true) {
+            debian.osUpgradeNode(env, target, upgrade_mode, false)
+          }
+        }
+
+        common.stageWrapper(upgradeStageMap, "Upgrade MySQL server", target, interactive) {
+          openstack.runOpenStackUpgradePhase(env, target, 'pkgs_latest')
+          openstack.runOpenStackUpgradePhase(env, target, 'render_config')
+        }
+
+        if (shutdownCluster && target == masterNode){
+          //Start first node.
+          common.stageWrapper(upgradeStageMap, "Start MySQL service", target, interactive) {
+            galera.startFirstNode(env, target)
+          }
+        }
+
+        common.stageWrapper(upgradeStageMap, "Start MySQL service", target, interactive) {
+          openstack.runOpenStackUpgradePhase(env, target, 'service_running')
+          openstack.runOpenStackUpgradePhase(env, target, 'verify')
+        }
+    }
+
+    // restart first node by applying state.
+
+    if (shutdownCluster) {
+      openstack.runOpenStackUpgradePhase(env, masterNode, 'render_config')
+      salt.cmdRun(env, masterNode, "service mysql reload")
+      openstack.runOpenStackUpgradePhase(env, masterNode, 'verify')
+    }
+
+    for (target in upgradeTargets) {
+      ensureClusterState = galera.getWsrepParameters(env, target, 'wsrep_evs_state')
+      if (ensureClusterState['wsrep_evs_state'] == 'OPERATIONAL') {
+        common.infoMsg('Node is in OPERATIONAL state.')
+      } else {
+        throw new Exception("Node is NOT in OPERATIONAL state.")
+      }
+    }
+  }
+}
diff --git a/openstack-rabbitmq-upgrade.groovy b/openstack-rabbitmq-upgrade.groovy
index aabdafc..bc252da 100644
--- a/openstack-rabbitmq-upgrade.groovy
+++ b/openstack-rabbitmq-upgrade.groovy
@@ -29,6 +29,8 @@
  * No service downtime
  * No workload downtime''',
     'Launched actions': '''
+ * Refresh pillars on the target nodes.
+ * Apply the 'linux.system.repo' state on the target nodes.
  * Verify API, perform basic CRUD operations for services.
  * Verify rabbitmq is running and operational.''',
     'State result': 'Basic checks around services API are passed.'
@@ -114,6 +116,8 @@
     for (target in upgradeTargets){
       common.stageWrapper(upgradeStageMap, "Pre upgrade", target, interactive) {
         openstack.runOpenStackUpgradePhase(env, target, 'pre')
+        salt.runSaltProcessStep(env, target, 'saltutil.refresh_pillar', [], null, true)
+        salt.enforceState(env, target, 'linux.system.repo')
         openstack.runOpenStackUpgradePhase(env, target, 'verify')
       }
     }
diff --git a/restore-cassandra.groovy b/restore-cassandra.groovy
index b585e7e..7285c40 100644
--- a/restore-cassandra.groovy
+++ b/restore-cassandra.groovy
@@ -7,11 +7,20 @@
  *
 **/
 
-def common = new com.mirantis.mk.Common()
-def salt = new com.mirantis.mk.Salt()
-def python = new com.mirantis.mk.Python()
+common = new com.mirantis.mk.Common()
+salt = new com.mirantis.mk.Salt()
+python = new com.mirantis.mk.Python()
 
 def pepperEnv = "pepperEnv"
+
+def getValueForPillarKey(pepperEnv, target, pillarKey) {
+    def out = salt.getReturnValues(salt.getPillar(pepperEnv, target, pillarKey))
+    if (out == '') {
+        throw new Exception("Cannot get value for ${pillarKey} key on ${target} target")
+    }
+    return out.toString()
+}
+
 timeout(time: 12, unit: 'HOURS') {
     node() {
 
@@ -23,59 +32,83 @@
             try {
                 salt.enforceState(pepperEnv, 'I@opencontrail:control or I@opencontrail:collector', 'opencontrail.upgrade.verify', true, true)
             } catch (Exception er) {
-                common.errorMsg("Opencontrail controllers health check stage found issues with services. Please take a look at the logs above.")
-                throw er
+                common.errorMsg("Opencontrail controllers health check stage found issues with currently running services.")
             }
         }
 
-        stage('Backup') {
-            salt.cmdRun(pepperEnv, 'I@cassandra:backup:client', 'bash /usr/local/bin/cassandra-backup-runner-call.sh')
-        }
-
         stage('Restore') {
+            // stop neutron-server to prevent CRUD api calls to contrail-api service
+            try {
+                salt.runSaltProcessStep(pepperEnv, 'I@neutron:server', 'service.stop', ['neutron-server'], null, true)
+            } catch (Exception er) {
+                common.warningMsg('neutron-server service already stopped')
+            }
             // get opencontrail version
-            def _pillar = salt.getPillar(pepperEnv, "I@opencontrail:control", '_param:opencontrail_version')
-            def contrailVersion = _pillar['return'][0].values()[0]
-            common.infoMsg("Contrail version is ${contrailVersion}")
-            if (contrailVersion >= 4) {
-                common.infoMsg("There will be steps for OC4.0 restore")
+            def contrailVersion = getValueForPillarKey(pepperEnv, "I@opencontrail:control:role:primary", "_param:opencontrail_version")
+            def configDbIp = getValueForPillarKey(pepperEnv, "I@opencontrail:control:role:primary", "opencontrail:database:bind:host")
+            def configDbPort = getValueForPillarKey(pepperEnv, "I@opencontrail:control:role:primary", "opencontrail:database:bind:port_configdb")
+            common.infoMsg("OpenContrail version is ${contrailVersion}")
+            if (contrailVersion.startsWith('4')) {
+                controllerImage = getValueForPillarKey(pepperEnv, "I@opencontrail:control:role:primary",
+                        "docker:client:compose:opencontrail:service:controller:container_name")
+                common.infoMsg("Applying db restore procedure for OpenContrail 4.X version")
                 try {
-                    salt.cmdRun(pepperEnv, 'I@opencontrail:control', 'doctrail controller systemctl stop contrail-database' )
+                    common.infoMsg("Stop contrail control plane containers")
+                    salt.cmdRun(pepperEnv, 'I@opencontrail:control or I@opencontrail:collector', 'cd /etc/docker/compose/opencontrail/; docker-compose down')
                 } catch (Exception err) {
-                    common.warningMsg('contrail-database already stopped? ' + err.getMessage())
+                    common.errorMsg('An error has occurred during contrail containers shutdown: ' + err.getMessage())
+                    throw err
                 }
                 try {
-                    salt.cmdRun(pepperEnv, 'I@opencontrail:control', 'doctrail controller bash -c "for f in $(ls /var/lib/cassandra/); do rm -r /var/lib/cassandra/$f; done"')
+                    common.infoMsg("Cleanup cassandra data")
+                    salt.cmdRun(pepperEnv, 'I@opencontrail:control', 'for f in $(ls /var/lib/configdb/); do rm -r /var/lib/configdb/$f; done')
                 } catch (Exception err) {
-                    common.warningMsg('cassandra data already removed? ' + err.getMessage())
+                    common.errorMsg('Cannot cleanup cassandra data on control nodes: ' + err.getMessage())
+                    throw err
                 }
                 try {
-                    salt.cmdRun(pepperEnv, 'I@cassandra:backup:client', 'doctrail controller systemctl start contrail-database' )
+                    common.infoMsg("Start cassandra db on I@cassandra:backup:client node")
+                    salt.cmdRun(pepperEnv, 'I@cassandra:backup:client', 'cd /etc/docker/compose/opencontrail/; docker-compose up -d')
                 } catch (Exception err) {
-                    common.warningMsg('contrail-database already started? ' + err.getMessage())
+                    common.errorMsg('An error has occurred during cassandra db startup on I@cassandra:backup:client node: ' + err.getMessage())
+                    throw err
                 }
-                // remove restore-already-happenned file if any is present
+                // wait for cassandra to be online
+                common.retry(6, 20){
+                    common.infoMsg("Trying to connect to cassandra db on I@cassandra:backup:client node ...")
+                    salt.cmdRun(pepperEnv, 'I@cassandra:backup:client', "nc -v -z -w2 ${configDbIp} ${configDbPort}")
+                }
+                // remove restore-already-happened file if any is present
                 try {
-                    salt.cmdRun(pepperEnv, 'I@cassandra:backup:client', 'rm  /var/backups/cassandra/dbrestored')
+                    salt.cmdRun(pepperEnv, 'I@cassandra:backup:client', 'rm /var/backups/cassandra/dbrestored')
                 } catch (Exception err) {
                     common.warningMsg('/var/backups/cassandra/dbrestored not present? ' + err.getMessage())
                 }
-                // perform actual backup
                 salt.enforceState(pepperEnv, 'I@cassandra:backup:client', "cassandra")
-                salt.runSaltProcessStep(pepperEnv, 'I@cassandra:backup:client', 'system.reboot', null, [], true, 5)
-                sleep(5)
-                salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control and not I@cassandra:backup:client', 'system.reboot', null, [], true, 5)
-                // the lovely wait-60-seconds mantra before restarting supervisor-database service
-                sleep(60)
-                salt.cmdRun(pepperEnv, 'I@opencontrail:control', "doctrail controller systemctl restart contrail-database")
+                try {
+                    salt.cmdRun(pepperEnv, 'I@opencontrail:control and not I@cassandra:backup:client', 'cd /etc/docker/compose/opencontrail/; docker-compose up -d')
+                } catch (Exception err) {
+                    common.errorMsg('An error has occurred during cassandra db startup on I@opencontrail:control and not I@cassandra:backup:client nodes: ' + err.getMessage())
+                    throw err
+                }
                 // another mantra, wait till all services are up
                 sleep(60)
-            } else {
                 try {
-                    salt.runSaltProcessStep(pepperEnv, 'I@neutron:server', 'service.stop', ['neutron-server'], null, true)
-                } catch (Exception er) {
-                    common.warningMsg('neutron-server service already stopped')
+                    common.infoMsg("Start analytics containers on collector nodes")
+                    salt.cmdRun(pepperEnv, 'I@opencontrail:collector', 'cd /etc/docker/compose/opencontrail/; docker-compose up -d')
+                } catch (Exception err) {
+                    common.errorMsg('An error has occurred during analytics containers startup: ' + err.getMessage())
+                    throw err
                 }
+                // contrail-control service needs to be restart after db sync to re-initialize with recovered data
+                try {
+                    common.infoMsg("Restart contrail-control services on control nodes")
+                    salt.cmdRun(pepperEnv, 'I@opencontrail:control', 'doctrail controller service contrail-control restart')
+                } catch (Exception err) {
+                    common.errorMsg('An error has occurred during contrail-control services restart: ' + err.getMessage())
+                    throw err
+                }
+            } else {
                 try {
                     salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.stop', ['supervisor-config'], null, true)
                 } catch (Exception er) {
@@ -104,8 +137,7 @@
                     common.warningMsg('Directory already empty')
                 }
 
-                _pillar = salt.getPillar(pepperEnv, "I@cassandra:backup:client", 'cassandra:backup:backup_dir')
-                def backupDir = _pillar['return'][0].values()[0] ?: '/var/backups/cassandra'
+                def backupDir = getValueForPillarKey(pepperEnv, "I@cassandra:backup:client", "cassandra:backup:backup_dir")
                 common.infoMsg("Backup directory is ${backupDir}")
                 salt.runSaltProcessStep(pepperEnv, 'I@cassandra:backup:client', 'file.remove', ["${backupDir}/dbrestored"], null, true)
 
@@ -127,7 +159,6 @@
                 sleep(5)
 
                 salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.restart', ['supervisor-database'], null, true)
-                salt.runSaltProcessStep(pepperEnv, 'I@neutron:server', 'service.start', ['neutron-server'], null, true)
 
                 // wait until contrail-status is up
                 salt.commandStatus(pepperEnv, 'I@opencontrail:control', "contrail-status | grep -v == | grep -v \'disabled on boot\' | grep -v nodemgr | grep -v active | grep -v backup", null, false)
@@ -135,11 +166,12 @@
                 salt.cmdRun(pepperEnv, 'I@opencontrail:control', "nodetool status")
                 salt.cmdRun(pepperEnv, 'I@opencontrail:control', "contrail-status")
             }
+
+            salt.runSaltProcessStep(pepperEnv, 'I@neutron:server', 'service.start', ['neutron-server'], null, true)
         }
 
         stage('Opencontrail controllers health check') {
-            common.retry(3, 20){
-                salt.cmdRun(pepperEnv, 'I@opencontrail:control', "doctrail controller contrail-status")
+            common.retry(9, 20){
                 salt.enforceState(pepperEnv, 'I@opencontrail:control or I@opencontrail:collector', 'opencontrail.upgrade.verify', true, true)
             }
         }
diff --git a/stacklight-upgrade.groovy b/stacklight-upgrade.groovy
index 8d8e487..78765bb 100644
--- a/stacklight-upgrade.groovy
+++ b/stacklight-upgrade.groovy
@@ -49,8 +49,8 @@
 def verify_es_is_green(master) {
     common.infoMsg('Verify that the Elasticsearch cluster status is green')
     try {
-        def retries_wait = 20
-        def retries = 15
+        def retries_wait = 120
+        def retries = 60
 
         def elasticsearch_vip
         def pillar = salt.getReturnValues(salt.getPillar(master, "I@elasticsearch:client", 'elasticsearch:client:server:host'))
diff --git a/update-ceph.groovy b/update-ceph.groovy
index 72ac2d5..c26c229 100644
--- a/update-ceph.groovy
+++ b/update-ceph.groovy
@@ -1,10 +1,9 @@
 /**
- * Update packages on given nodes
+ * Update packages
  *
  * Expected parameters:
  *   SALT_MASTER_CREDENTIALS    Credentials to the Salt API.
  *   SALT_MASTER_URL            Full Salt API address [https://10.10.10.1:8000].
- *   TARGET_SERVERS             Salt compound target to match nodes to be updated [*, G@osfamily:debian].
  */
 
 pepperEnv = "pepperEnv"
@@ -19,12 +18,13 @@
 def command
 def commandKwargs
 def selMinions = []
+def check_mon
 
 def runCephCommand(master, target, cmd) {
     return salt.cmdRun(master, target, cmd)
 }
 
-def waitForHealthy(master, tgt, count=0, attempts=100) {
+def waitForHealthy(master, tgt, count = 0, attempts=100) {
     // wait for healthy cluster
     common = new com.mirantis.mk.Common()
     while (count<attempts) {
@@ -42,87 +42,62 @@
     node() {
         try {
 
+            def targets = ["common": "ceph-common", "osd": "ceph-osd", "mon": "ceph-mon",
+                          "mgr":"ceph-mgr", "radosgw": "radosgw"]
+
             stage('Setup virtualenv for Pepper') {
                 python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
             }
 
-            stage('List target servers') {
-                minions = salt.getMinions(pepperEnv, TARGET_SERVERS)
-
-                if (minions.isEmpty()) {
-                    throw new Exception("No minion was targeted")
-                }
-
-                for (m  in minions) {
-                    if (m.startsWith("osd") || m.startsWith("cmn") || m.startsWith("rgw")) {
-                        selMinions.add(m)
-                    }
-                }
-            }
-
-
-
             stage('Apply package upgrades on all nodes') {
 
-                for (tgt in selMinions) {
-                    try {
-                        if (tgt.startsWith("osd")) {
-                            out = runCephCommand(pepperEnv, tgt, "apt install --only-upgrade ceph-osd -y")
-                            salt.printSaltCommandResult(out)
-                        } else if (tgt.startsWith("cmn")) {
-                            out = runCephCommand(pepperEnv, tgt, "apt install --only-upgrade ceph-mon -y")
-                            salt.printSaltCommandResult(out)
-                        } else if (tgt.startsWith("rgw")) {
-                            out = runCephCommand(pepperEnv, tgt, "apt install --only-upgrade radosgw -y")
-                            salt.printSaltCommandResult(out)
-                        }
-                    } catch (Throwable e) {
-                        if (e.message.contains("Unmet dependencies")) {
-                            out = runCephCommand(pepperEnv, tgt, "apt -f install -y")
-                            salt.printSaltCommandResult(out)
-                        } else {
-                            throw (e)
-                        }
-                    }
+                targets.each { key, value ->
+                   // try {
+                        command = "pkg.install"
+                        packages = value
+                        commandKwargs = ['only_upgrade': 'true','force_yes': 'true']
+                        target = "I@ceph:${key}"
+                        out = salt.runSaltCommand(pepperEnv, 'local', ['expression': target, 'type': 'compound'], command, true, packages, commandKwargs)
+                        salt.printSaltCommandResult(out)
                 }
             }
 
             stage("Restart MONs and RGWs") {
+                selMinions = salt.getMinions(pepperEnv, "I@ceph:mon")
                 for (tgt in selMinions) {
-                    if (tgt.contains("cmn")) {
-                        runCephCommand(pepperEnv, tgt, "systemctl restart ceph-mon.target")
-                        waitForHealthy(pepperEnv, tgt)
-                    } else if (tgt.contains("rgw")) {
-                        runCephCommand(pepperEnv, tgt, "systemctl restart ceph-radosgw.target")
-                        waitForHealthy(pepperEnv, tgt)
-                    }
+                    // runSaltProcessStep 'service.restart' doesn't work for these services
+                    runCephCommand(pepperEnv, tgt, "systemctl restart ceph-mon.target")
+                    waitForHealthy(pepperEnv, tgt)
+                }
+                selMinions = salt.getMinions(pepperEnv, "I@ceph:radosgw")
+                for (tgt in selMinions) {
+                    runCephCommand(pepperEnv, tgt, "systemctl restart ceph-radosgw.target")
+                    waitForHealthy(pepperEnv, tgt)
                 }
             }
 
             stage('Restart OSDs') {
 
+                selMinions = salt.getMinions(pepperEnv, "I@ceph:osd")
                 for (tgt in selMinions) {
-                    if (tgt.contains("osd")) {
-                        salt.runSaltProcessStep(pepperEnv, tgt, 'saltutil.sync_grains', [], null, true, 5)
-                        def ceph_disks = salt.getGrain(pepperEnv, tgt, 'ceph')['return'][0].values()[0].values()[0]['ceph_disk']
+                    salt.runSaltProcessStep(pepperEnv, tgt, 'saltutil.sync_grains', [], null, true, 5)
+                    def ceph_disks = salt.getGrain(pepperEnv, tgt, 'ceph')['return'][0].values()[0].values()[0]['ceph_disk']
 
-                        def osd_ids = []
-                        for (i in ceph_disks) {
-                            def osd_id = i.getKey().toString()
-                            osd_ids.add('osd.' + osd_id)
-                        }
-
-                        runCephCommand(pepperEnv, tgt, 'ceph osd set noout')
-
-                        for (i in osd_ids) {
-
-                            salt.runSaltProcessStep(pepperEnv, tgt, 'service.restart', ['ceph-osd@' + i.replaceAll('osd.', '')],  null, true)
-                            // wait for healthy cluster
-                            waitForHealthy(pepperEnv, tgt)
-                        }
-
-                        runCephCommand(pepperEnv, tgt, 'ceph osd unset noout')
+                    def osd_ids = []
+                    for (i in ceph_disks) {
+                        def osd_id = i.getKey().toString()
+                        osd_ids.add('osd.' + osd_id)
                     }
+
+                    runCephCommand(pepperEnv, tgt, 'ceph osd set noout')
+
+                    for (i in osd_ids) {
+                        salt.runSaltProcessStep(pepperEnv, tgt, 'service.restart', ['ceph-osd@' + i.replaceAll('osd.', '')],  null, true)
+                        // wait for healthy cluster
+                        waitForHealthy(pepperEnv, tgt)
+                    }
+
+                    runCephCommand(pepperEnv, tgt, 'ceph osd unset noout')
                 }
             }
 
@@ -134,4 +109,4 @@
             throw e
         }
     }
-}
\ No newline at end of file
+}
diff --git a/upgrade-mcp-release.groovy b/upgrade-mcp-release.groovy
index b42ec7e..a809ba5 100644
--- a/upgrade-mcp-release.groovy
+++ b/upgrade-mcp-release.groovy
@@ -188,6 +188,26 @@
 
 }
 
+def wa32284(String clusterName) {
+    def clientGluster = salt.getPillar(venvPepper, 'I@salt:master', "glusterfs:client:enabled").get("return")[0].values()[0]
+    def pkiGluster = salt.getPillar(venvPepper, 'I@salt:master', "glusterfs:client:volumes:salt_pki").get("return")[0].values()[0]
+    def nginxEnabledAtMaster = salt.getPillar(venvPepper, 'I@salt:master', 'nginx:server:enabled').get('return')[0].values()[0]
+    if (nginxEnabledAtMaster.toString().toLowerCase() == 'true' && clientGluster.toString().toLowerCase() == 'true' && pkiGluster) {
+        def nginxRequires = salt.getPillar(venvPepper, 'I@salt:master', 'nginx:server:wait_for_service').get('return')[0].values()[0]
+        if (nginxRequires.isEmpty()) {
+            def nginxRequiresClassName = "cluster.${clusterName}.infra.config.nginx_requires_wa32284"
+            def nginxRequiresClassFile = "/srv/salt/reclass/classes/cluster/${clusterName}/infra/config/nginx_requires_wa32284.yml"
+            def nginxRequiresBlock = ['parameters': ['nginx': ['server': ['wait_for_service': ['srv-salt-pki.mount'] ] ] ] ]
+            def _tempFile = '/tmp/wa32284_' + UUID.randomUUID().toString().take(8)
+            writeYaml file: _tempFile , data: nginxRequiresBlock
+            def nginxRequiresBlockString = sh(script: "cat ${_tempFile}", returnStdout: true).trim()
+            salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/${clusterName} && " +
+                "sed -i '/^parameters:/i - ${nginxRequiresClassName}' infra/config/init.yml")
+            salt.cmdRun(venvPepper, 'I@salt:master', "echo '${nginxRequiresBlockString}'  > ${nginxRequiresClassFile}", false, null, false)
+        }
+    }
+}
+
 def archiveReclassInventory(filename) {
     def _tmp_file = '/tmp/' + filename + UUID.randomUUID().toString().take(8)
     // jenkins may fail at overheap. Compress data with gzip like WA
@@ -307,6 +327,10 @@
             }
             python.setupPepperVirtualenv(venvPepper, saltMastURL, saltMastCreds)
             def minions = salt.getMinions(venvPepper, '*')
+            def cluster_name = salt.getPillar(venvPepper, 'I@salt:master', "_param:cluster_name").get("return")[0].values()[0]
+            if (cluster_name == '' || cluster_name == 'null' || cluster_name == null) {
+                error('Pillar data is broken for Salt master node! Please check it manually and re-run pipeline.')
+            }
 
             stage('Update Reclass and Salt-Formulas') {
                 common.infoMsg('Perform: Full salt sync')
@@ -317,7 +341,6 @@
                 common.infoMsg('Perform: archiveReclassInventory before upgrade')
                 archiveReclassInventory(inventoryBeforeFilename)
 
-                def cluster_name = salt.getPillar(venvPepper, 'I@salt:master', '_param:cluster_name').get('return')[0].values()[0]
                 try {
                     salt.cmdRun(venvPepper, 'I@salt:master', 'cd /srv/salt/reclass/ && git status && git diff-index --quiet HEAD --')
                 }
@@ -347,6 +370,29 @@
                         "grep -r --exclude-dir=aptly -l 'system.linux.system.repo.mcp.updates' * | xargs --no-run-if-empty sed -i 's/system.linux.system.repo.mcp.updates/system.linux.system.repo.mcp.apt_mirantis.update/g'")
                     salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && " +
                         "grep -r --exclude-dir=aptly -l 'system.linux.system.repo.mcp.extra' * | xargs --no-run-if-empty sed -i 's/system.linux.system.repo.mcp.extra/system.linux.system.repo.mcp.apt_mirantis.extra/g'")
+
+                    // Switch Jenkins/Gerrit to use LDAP SSL/TLS
+                    def gerritldapURI = salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && " +
+                        "grep -r --exclude-dir=aptly 'gerrit_ldap_server: .*' * | grep -Po 'gerrit_ldap_server: \\K.*' | tr -d '\"'", true, null, false).get('return')[0].values()[0].replaceAll('Salt command execution success', '').trim()
+                    if (gerritldapURI.startsWith('ldap://')) {
+                        salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && " +
+                            "grep -r --exclude-dir=aptly -l 'gerrit_ldap_server: .*' * | xargs --no-run-if-empty sed -i 's|ldap://|ldaps://|g'")
+                    } else if (! gerritldapURI.startsWith('ldaps://')) {
+                        salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && " +
+                            "grep -r --exclude-dir=aptly -l 'gerrit_ldap_server: .*' * | xargs --no-run-if-empty sed -i 's|gerrit_ldap_server: .*|gerrit_ldap_server: \"ldaps://${gerritldapURI}\"|g'")
+                    }
+                    def jenkinsldapURI = salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && " +
+                        "grep -r --exclude-dir=aptly 'jenkins_security_ldap_server: .*' * | grep -Po 'jenkins_security_ldap_server: \\K.*' | tr -d '\"'", true, null, false).get('return')[0].values()[0].replaceAll('Salt command execution success', '').trim()
+                    if (jenkinsldapURI.startsWith('ldap://')) {
+                        salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && " +
+                            "grep -r --exclude-dir=aptly -l 'jenkins_security_ldap_server: .*' * | xargs --no-run-if-empty sed -i 's|ldap://|ldaps://|g'")
+                    } else if (! jenkinsldapURI.startsWith('ldaps://')) {
+                        salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && " +
+                            "grep -r --exclude-dir=aptly -l 'jenkins_security_ldap_server: .*' * | xargs --no-run-if-empty sed -i 's|jenkins_security_ldap_server: .*|jenkins_security_ldap_server: \"ldaps://${jenkinsldapURI}\"|g'")
+                    }
+
+                    wa32284(cluster_name)
+
                     salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/system && git checkout ${reclassSystemBranch}")
                     // Add kubernetes-extra repo
                     if (salt.testTarget(venvPepper, "I@kubernetes:master")) {
@@ -394,7 +440,7 @@
                 try {
                     common.infoMsg('Perform: UPDATE Salt Formulas')
                     salt.fullRefresh(venvPepper, '*')
-                    salt.enforceState([saltId: venvPepper, target: 'I@salt:master', state: ['linux.system.repo'], read_timeout: 60, retries: 2])
+                    salt.enforceState(venvPepper, 'I@salt:master', 'linux.system.repo', true, true, null, false, 60, 2)
                     def saltEnv = salt.getPillar(venvPepper, 'I@salt:master', "_param:salt_master_base_environment").get("return")[0].values()[0]
                     salt.runSaltProcessStep(venvPepper, 'I@salt:master', 'state.sls_id', ["salt_master_${saltEnv}_pkg_formulas", 'salt.master.env'])
                     salt.fullRefresh(venvPepper, '*')
@@ -417,9 +463,9 @@
                 }
 
                 salt.fullRefresh(venvPepper, 'I@salt:master')
-                salt.enforceState([saltId: venvPepper, target: 'I@salt:master', state: ['reclass.storage'], read_timeout: 60, retries: 2])
+                salt.enforceState(venvPepper, 'I@salt:master', 'reclass.storage', true, true, null, false, 60, 2)
                 try {
-                    salt.enforceState([saltId: venvPepper, target: 'I@salt:master', state: ['reclass'], read_timeout: 60, retries: 2])
+                    salt.enforceState(venvPepper, 'I@salt:master', 'reclass', true, true, null, false, 60, 2)
                 }
                 catch (Exception ex) {
                     common.errorMsg(ex.toString())
@@ -489,7 +535,7 @@
                 if (upgradeSaltStack) {
                     updateSaltStack('I@salt:master', '["salt-master", "salt-common", "salt-api", "salt-minion"]')
 
-                    salt.enforceState([saltId: venvPepper, target: 'I@linux:system', state: ['linux.system.repo'], read_timeout: 60, retries: 2])
+                    salt.enforceState(venvPepper, 'I@linux:system', 'linux.system.repo', true, true, null, false, 60, 2)
                     updateSaltStack('I@salt:minion and not I@salt:master', '["salt-minion"]')
                 }
 
@@ -502,25 +548,27 @@
                 // update minions certs
                 // call for `salt.minion.ca` state on related nodes to make sure
                 // mine was updated with required data after salt-minion/salt-master restart salt:minion:ca
-                salt.enforceState([saltId: venvPepper, target: 'I@salt:minion:ca', state: ['salt.minion.ca'], read_timeout: 60, retries: 2])
-                salt.enforceState([saltId: venvPepper, target: 'I@salt:minion', state: ['salt.minion.cert'], read_timeout: 60, retries: 2])
+                salt.enforceState(venvPepper, 'I@salt:minion:ca', 'salt.minion.ca', true, true, null, false, 60, 2)
+                salt.enforceState(venvPepper, 'I@salt:minion', 'salt.minion.cert', true, true, null, false, 60, 2)
 
                 // run `salt.minion` to refresh all minion configs (for example _keystone.conf)
-                salt.enforceState([saltId: venvPepper, target: 'I@salt:minion', state: ['salt.minion'], read_timeout: 60, retries: 2])
+                salt.enforceState(venvPepper, 'I@salt:minion', 'salt.minion', true, true, null, false, 60, 2)
                 // Retry needed only for rare race-condition in user appearance
                 common.infoMsg('Perform: updating users and keys')
-                salt.enforceState([saltId: venvPepper, target: 'I@linux:system', state: ['linux.system.user'], read_timeout: 60, retries: 2])
+                salt.enforceState(venvPepper, 'I@linux:system', 'linux.system.user', true, true, null, false, 60, 2)
                 common.infoMsg('Perform: updating openssh')
-                salt.enforceState([saltId: venvPepper, target: 'I@linux:system', state: ['openssh'], read_timeout: 60, retries: 2])
+                salt.enforceState(venvPepper, 'I@linux:system', 'openssh', true, true, null, false, 60, 2)
 
                 // apply salt API TLS if needed
                 def nginxAtMaster = salt.getPillar(venvPepper, 'I@salt:master', 'nginx:server:enabled').get('return')[0].values()[0]
                 if (nginxAtMaster.toString().toLowerCase() == 'true') {
-                    salt.enforceState([saltId: venvPepper, target: 'I@salt:master', state: ['nginx'], read_timeout: 60, retries: 2])
+                    salt.enforceState(venvPepper, 'I@salt:master', 'nginx', true, true, null, false, 60, 2)
                 }
 
-                salt.enforceState([saltId: venvPepper, target: 'I@jenkins:client and not I@salt:master', state: ['jenkins.client'], read_timeout: 60, retries: 2])
-                salt.cmdRun(venvPepper, 'I@salt:master', "salt -C 'I@jenkins:client and I@docker:client and not I@salt:master' state.sls docker.client --async")
+                // Apply changes for HaProxy on CI/CD nodes
+                salt.enforceState(venvPepper, 'I@keepalived:cluster:instance:cicd_control_vip and I@haproxy:proxy', 'haproxy.proxy', true)
+
+                salt.cmdRun(venvPepper, "I@salt:master", "salt -C 'I@jenkins:client and I@docker:client and not I@salt:master' state.sls docker.client --async")
 
                 sleep(180)
 
@@ -534,6 +582,13 @@
                 catch (Exception ex) {
                     error("Docker containers for CI/CD services are having troubles with starting.")
                 }
+
+                salt.enforceState(venvPepper, 'I@jenkins:client and not I@salt:master', 'jenkins.client', true, true, null, false, 60, 2)
+
+                // update Nginx proxy settings for Jenkins/Gerrit if needed
+                if (salt.testTarget(venvPepper, 'I@nginx:server:site:nginx_proxy_jenkins and I@nginx:server:site:nginx_proxy_gerrit')) {
+                    salt.enforceState(venvPepper, 'I@nginx:server:site:nginx_proxy_jenkins and I@nginx:server:site:nginx_proxy_gerrit', 'nginx.server', true, true, null, false, 60, 2)
+                }
             }
         }
         catch (Throwable e) {