Merge "update cluster healthchecks during ceph upgrade process"
diff --git a/backupninja-backup-pipeline.groovy b/backupninja-backup-pipeline.groovy
index 45812a4..80467d4 100644
--- a/backupninja-backup-pipeline.groovy
+++ b/backupninja-backup-pipeline.groovy
@@ -35,7 +35,7 @@
                 salt.enforceState(['saltId': pepperEnv, 'target': 'I@backupninja:client', 'state': 'backupninja'])
         }
         stage('Backup') {
-            output = salt.getReturnValues(salt.cmdRun(pepperEnv, backupNode, "su root -c 'backupninja --now -d'")).readLines()[-2]
+            def output = salt.getReturnValues(salt.cmdRun(pepperEnv, backupNode, "su root -c 'backupninja --now -d'")).readLines()[-2]
             def outputPattern = java.util.regex.Pattern.compile("\\d+")
             def outputMatcher = outputPattern.matcher(output)
               if (outputMatcher.find()) {
@@ -49,13 +49,13 @@
                     return
                   }
             }
-            if (result[1] == 0 || result == ""){
-                common.errorMsg("Backup failed.")
-                currentBuild.result = "FAILURE"
-                return
+            if (result[1] != null && result[1] instanceof String && result[1].isInteger() && (result[1].toInteger() < 1)){
+              common.successMsg("Backup successfully finished " + result[1] + " fatals, " + result[2] + " errors " + result[3] +" warnings.")
             }
             else {
-              common.successMsg("Backup successfully finished " + result[1] + " fatals, " + result[2] + " errors " + result[3] +" warnings")
+                common.errorMsg("Backup failed. Found " + result[1] + " fatals, " + result[2] + " errors " + result[3] +" warnings.")
+                currentBuild.result = "FAILURE"
+                return
             }
         }
     }
diff --git a/cvp-runner.groovy b/cvp-runner.groovy
index b362f01..1b1d5e0 100644
--- a/cvp-runner.groovy
+++ b/cvp-runner.groovy
@@ -49,7 +49,7 @@
                 def workdir = "${container_workdir}/${test_suite_name}"
                 def xml_file = "${test_suite_name}_report.xml"
                 def tests_set = (env.getProperty('tests_set')) ?: ''
-                def script = "pytest --junitxml ${container_workdir}/${artifacts_dir}/${xml_file} --tb=short -sv -vv ${tests_set}"
+                def script = "pytest --junitxml ${container_workdir}/${artifacts_dir}/${xml_file} --tb=short -vv ${tests_set}"
 
                 sh "mkdir -p ${artifacts_dir}"
 
diff --git a/cvp-tempest.groovy b/cvp-tempest.groovy
index c6fca0a..e8eb286 100644
--- a/cvp-tempest.groovy
+++ b/cvp-tempest.groovy
@@ -33,9 +33,9 @@
 if (extraYamlContext) {
     common.mergeEnv(env, extraYamlContext) }
 def SALT_MASTER_CREDENTIALS=(env.SALT_MASTER_CREDENTIALS) ?: 'salt'
-def VERBOSE = (env.VERBOSE) ?: true
+def VERBOSE = (env.VERBOSE) ? env.VERBOSE.toBoolean() : true
 def DEBUG_MODE = (env.DEBUG_MODE) ?: false
-def STOP_ON_ERROR = (env.STOP_ON_ERROR) ?: false
+def STOP_ON_ERROR = (env.STOP_ON_ERROR) ? env.STOP_ON_ERROR.toBoolean() : false
 def GENERATE_CONFIG = (env.GENERATE_CONFIG) ?: true
 def remote_artifacts_dir = (env.remote_artifacts_dir) ?: '/root/test/'
 def report_prefix = (env.report_prefix) ?: ''
@@ -89,7 +89,7 @@
             }
         }
         stage('Generate config') {
-            if ( GENERATE_CONFIG ) {
+            if ( GENERATE_CONFIG.toBoolean() ) {
                 salt.runSaltProcessStep(saltMaster, SERVICE_NODE, 'file.remove', ["${remote_artifacts_dir}"])
                 salt.runSaltProcessStep(saltMaster, SERVICE_NODE, 'file.mkdir', ["${remote_artifacts_dir}"])
                 fullnodename = salt.getMinions(saltMaster, SERVICE_NODE).get(0)
@@ -127,9 +127,9 @@
         }
 
         stage('Run Tempest tests') {
-            // parameters: master, target, dockerImageLink, name, env_var, entrypoint, tempestConfLocalPath
-            validate.runContainer(saltMaster, TARGET_NODE, TEST_IMAGE, 'cvp', [], true,
-                                  '/root/test/tempest_generated.conf')
+            mounts = ['/root/test/tempest_generated.conf': '/etc/tempest/tempest.conf']
+            validate.runContainer(master: saltMaster, target: TARGET_NODE, dockerImageLink: TEST_IMAGE,
+                                  mounts: mounts)
             report_prefix += 'tempest_'
             if (env.concurrency) {
                 args += ' -w ' + env.concurrency
diff --git a/gating-pipeline.groovy b/gating-pipeline.groovy
index e7887f9..7cbdfa0 100644
--- a/gating-pipeline.groovy
+++ b/gating-pipeline.groovy
@@ -31,7 +31,6 @@
     giveVerify = true
 }
 
-
 timeout(time: 12, unit: 'HOURS') {
     node(slaveNode) {
         try {
@@ -42,59 +41,58 @@
             def gerritChange = gerrit.getGerritChange(GERRIT_NAME, GERRIT_HOST, GERRIT_CHANGE_NUMBER, CREDENTIALS_ID, true)
             def doSubmit = false
             def skipProjectsVerify = ['mk/docker-jnlp-slave']
+
             stage("test") {
-                if (gerritChange.status != "MERGED" && !env.SKIP_TEST.toBoolean()) {
-                    // test max CodeReview
-                    if (gerrit.patchsetHasApproval(gerritChange.currentPatchSet, "Code-Review", "+")) {
-                        doSubmit = true
-                        def gerritProjectArray = GERRIT_PROJECT.tokenize("/")
-                        def gerritProject = gerritProjectArray[gerritProjectArray.size() - 1]
-                        if (gerritProject in skipProjectsVerify) {
-                            common.successMsg("Project ${gerritProject} doesn't require verify, skipping...")
-                            giveVerify = true
+                //check Code-Review
+                if (!gerrit.patchsetHasApproval(gerritChange.currentPatchSet, "Code-Review", "+")) {
+                    throw new Exception('Change don\'t have a CodeReview+1, reject gate')
+                }
+                //check Verify
+                if (!gerrit.patchsetHasApproval(gerritChange.currentPatchSet, "Verified", "+")) {
+                    throw new Exception('Change don\'t have initial Verify+1, reject gate')
+                } else if (gerritChange.status != "MERGED" && !env.SKIP_TEST.toBoolean()) {
+                    //Verify-label off
+                    ssh.agentSh(String.format("ssh -p %s %s@%s gerrit review %s,%s --verified 0", defGerritPort, GERRIT_NAME, GERRIT_HOST, GERRIT_CHANGE_NUMBER, GERRIT_PATCHSET_NUMBER))
+                    //Do stage (test)
+                    doSubmit = true
+                    def gerritProjectArray = GERRIT_PROJECT.tokenize("/")
+                    def gerritProject = gerritProjectArray[gerritProjectArray.size() - 1]
+                    if (gerritProject in skipProjectsVerify) {
+                        common.successMsg("Project ${gerritProject} doesn't require verify, skipping...")
+                        giveVerify = true
+                    } else {
+                        def jobsNamespace = JOBS_NAMESPACE
+                        def plural_namespaces = ['salt-formulas', 'salt-models']
+                        // remove plural s on the end of job namespace
+                        if (JOBS_NAMESPACE in plural_namespaces) {
+                            jobsNamespace = JOBS_NAMESPACE.substring(0, JOBS_NAMESPACE.length() - 1)
+                        }
+                        // salt-formulas tests have -latest on end of the name
+                        if (JOBS_NAMESPACE.equals("salt-formulas")) {
+                            gerritProject = gerritProject + "-latest"
+                        }
+                        def testJob = String.format("test-%s-%s", jobsNamespace, gerritProject)
+                        if (env.GERRIT_PROJECT == 'mk/cookiecutter-templates' || env.GERRIT_PROJECT == 'salt-models/reclass-system') {
+                            callJobWithExtraVars('test-salt-model-ci-wrapper')
                         } else {
-                            def jobsNamespace = JOBS_NAMESPACE
-                            def plural_namespaces = ['salt-formulas', 'salt-models']
-                            // remove plural s on the end of job namespace
-                            if (JOBS_NAMESPACE in plural_namespaces) {
-                                jobsNamespace = JOBS_NAMESPACE.substring(0, JOBS_NAMESPACE.length() - 1)
-                            }
-                            // salt-formulas tests have -latest on end of the name
-                            if (JOBS_NAMESPACE.equals("salt-formulas")) {
-                                gerritProject = gerritProject + "-latest"
-                            }
-                            def testJob = String.format("test-%s-%s", jobsNamespace, gerritProject)
-                            if (env.GERRIT_PROJECT == 'mk/cookiecutter-templates' || env.GERRIT_PROJECT == 'salt-models/reclass-system') {
-                                callJobWithExtraVars('test-salt-model-ci-wrapper')
+                            if (isJobExists(testJob)) {
+                                common.infoMsg("Test job ${testJob} found, running")
+                                def patchsetVerified = gerrit.patchsetHasApproval(gerritChange.currentPatchSet, "Verified", "+")
+                                build job: testJob, parameters: [
+                                    [$class: 'StringParameterValue', name: 'DEFAULT_GIT_URL', value: "${GERRIT_SCHEME}://${GERRIT_NAME}@${GERRIT_HOST}:${GERRIT_PORT}/${GERRIT_PROJECT}"],
+                                    [$class: 'StringParameterValue', name: 'DEFAULT_GIT_REF', value: GERRIT_REFSPEC]
+                                ]
+                                giveVerify = true
                             } else {
-                                if (isJobExists(testJob)) {
-                                    common.infoMsg("Test job ${testJob} found, running")
-                                    def patchsetVerified = gerrit.patchsetHasApproval(gerritChange.currentPatchSet, "Verified", "+")
-                                    if (JOBS_NAMESPACE.equals("salt-formulas")) {
-                                        build job: testJob, parameters: [
-                                            [$class: 'StringParameterValue', name: 'DEFAULT_GIT_URL', value: "${GERRIT_SCHEME}://${GERRIT_NAME}@${GERRIT_HOST}:${GERRIT_PORT}/${GERRIT_PROJECT}"],
-                                            [$class: 'StringParameterValue', name: 'DEFAULT_GIT_REF', value: GERRIT_REFSPEC],
-                                            [$class: 'StringParameterValue', name: 'GATING_GERRIT_BRANCH', value: GERRIT_BRANCH]
-                                        ]
-                                    } else {
-                                        build job: testJob, parameters: [
-                                            [$class: 'StringParameterValue', name: 'DEFAULT_GIT_URL', value: "${GERRIT_SCHEME}://${GERRIT_NAME}@${GERRIT_HOST}:${GERRIT_PORT}/${GERRIT_PROJECT}"],
-                                            [$class: 'StringParameterValue', name: 'DEFAULT_GIT_REF', value: GERRIT_REFSPEC]
-                                        ]
-                                    }
-                                    giveVerify = true
-                                } else {
-                                    common.infoMsg("Test job ${testJob} not found")
-                                }
+                                common.infoMsg("Test job ${testJob} not found")
                             }
                         }
-                    } else {
-                        common.errorMsg("Change don't have a CodeReview, skipping gate")
                     }
                 } else {
-                    common.infoMsg("Test job skipped")
+                    common.infoMsg('Test job skipped')
                 }
             }
+
             stage("submit review") {
                 if (gerritChange.status == "MERGED") {
                     common.successMsg("Change ${GERRIT_CHANGE_NUMBER} is already merged, no need to gate them")
diff --git a/generate-cookiecutter-products.groovy b/generate-cookiecutter-products.groovy
index 54e9853..3783331 100644
--- a/generate-cookiecutter-products.groovy
+++ b/generate-cookiecutter-products.groovy
@@ -15,6 +15,7 @@
 git = new com.mirantis.mk.Git()
 python = new com.mirantis.mk.Python()
 saltModelTesting = new com.mirantis.mk.SaltModelTesting()
+updateSaltFormulasDuringTest = true
 
 slaveNode = env.getProperty('SLAVE_NODE') ?: 'virtual'
 gerritCredentials = env.getProperty('CREDENTIALS_ID') ?: 'gerrit'
@@ -22,6 +23,49 @@
 distribRevision = 'proposed'
 gitGuessedVersion = false
 
+def GenerateModelToxDocker(Map params) {
+    def ccRoot = params['ccRoot']
+    def context = params['context']
+    def outDir = params['outDir']
+    def envOpts = params['envOpts']
+    def tempContextFile = new File(ccRoot, 'tempContext.yaml_' + UUID.randomUUID().toString()).toString()
+    writeFile file: tempContextFile, text: context
+    // Get Jenkins user UID and GID
+    def jenkinsUID = sh(script: 'id -u', returnStdout: true).trim()
+    def jenkinsGID = sh(script: 'id -g', returnStdout: true).trim()
+    /*
+        by default, process in image operates via root user
+        Otherwise, gpg key for model and all files managed by jenkins user
+        To make it compatible, install requirements from root user, but generate model via jenkins
+        for build use upstream Ubuntu Bionic image
+    */
+    def configRun = ['distribRevision': 'nightly',
+                     'envOpts'        : envOpts + ["CONFIG_FILE=$tempContextFile",
+                                                   "OUTPUT_DIR=${outDir}"
+                     ],
+                     'image': 'docker-prod-local.artifactory.mirantis.com/mirantis/cicd/jnlp-slave',
+                     'runCommands'    : [
+                         '001_prepare_generate_auto_reqs': {
+                            sh('''
+                                pip install tox
+                                ''')
+                         },
+                         // user & group can be different on host and in docker
+                         '002_set_jenkins_id': {
+                            sh("""
+                                usermod -u ${jenkinsUID} jenkins
+                                groupmod -g ${jenkinsGID} jenkins
+                                """)
+                         },
+                         '003_run_generate_auto': {
+                             print('[Cookiecutter build] Result:\n' +
+                                 sh(returnStdout: true, script: 'cd ' + ccRoot + '; su jenkins -c "tox -ve generate_auto" '))
+                         }
+                     ]
+    ]
+
+    saltModelTesting.setupDockerAndTest(configRun)
+}
 
 def globalVariatorsUpdate() {
     def templateContext = readYaml text: env.COOKIECUTTER_TEMPLATE_CONTEXT
@@ -81,8 +125,11 @@
         common.warningMsg('Apply WA for PROD-25732')
         context.cookiecutter_template_url = 'ssh://gerrit.mcp.mirantis.com:29418/mk/cookiecutter-templates.git'
     }
-    common.warningMsg("Fetching:\n" +
-        "DISTRIB_REVISION from ${distribRevision}")
+    // check, if we are going to test clear release version, w\o any updates and patches
+    if (!gitGuessedVersion && (distribRevision == context.mcp_version)) {
+        updateSaltFormulasDuringTest = false
+    }
+
     common.infoMsg("Using context:\n" + context)
     print prettyPrint(toJson(context))
     return context
@@ -138,17 +185,22 @@
             stage('Generate model') {
                 // GNUPGHOME environment variable is required for all gpg commands
                 // and for python.generateModel execution
-                withEnv(["GNUPGHOME=${env.WORKSPACE}/gpghome"]) {
+                def envOpts = ["GNUPGHOME=${env.WORKSPACE}/gpghome"]
+                withEnv(envOpts) {
                     if (context['secrets_encryption_enabled'] == 'True') {
                         sh "mkdir gpghome; chmod 700 gpghome"
                         def secretKeyID = RequesterEmail ?: "salt@${context['cluster_domain']}".toString()
                         if (!context.get('secrets_encryption_private_key')) {
                             def batchData = """
+                                %echo Generating a basic OpenPGP key for Salt-Master
+                                %no-protection
                                 Key-Type: 1
                                 Key-Length: 4096
                                 Expire-Date: 0
                                 Name-Real: ${context['salt_master_hostname']}.${context['cluster_domain']}
                                 Name-Email: ${secretKeyID}
+                                %commit
+                                %echo done
                             """.stripIndent()
                             writeFile file: 'gpg-batch.txt', text: batchData
                             sh "gpg --gen-key --batch < gpg-batch.txt"
@@ -156,7 +208,7 @@
                         } else {
                             writeFile file: 'gpgkey.asc', text: context['secrets_encryption_private_key']
                             sh "gpg --import gpgkey.asc"
-                            secretKeyID = sh(returnStdout: true, script: 'gpg --list-secret-keys --with-colons | awk -F: -e "/^sec/{print \\$5; exit}"').trim()
+                            secretKeyID = sh(returnStdout: true, script: 'gpg --list-secret-keys --with-colons | grep -E "^sec" | awk -F: \'{print \$5}\'').trim()
                         }
                         context['secrets_encryption_key_id'] = secretKeyID
                     }
@@ -176,7 +228,10 @@
                         // still expect only lower lvl of project, aka model/classes/cluster/XXX/. So,lets dump result into
                         // temp dir, and then copy it over initial structure.
                         reclassTempRootDir = sh(script: "mktemp -d -p ${env.WORKSPACE}", returnStdout: true).trim()
-                        python.generateModel(common2.dumpYAML(['default_context': context]), 'default_context', context['salt_master_hostname'], cutterEnv, reclassTempRootDir, templateEnv, false)
+                        GenerateModelToxDocker(['context': common2.dumpYAML(['default_context': context]),
+                                                'ccRoot' : templateEnv,
+                                                'outDir' : reclassTempRootDir,
+                                                'envOpts': envOpts])
                         dir(modelEnv) {
                             common.warningMsg('Forming reclass-root structure...')
                             sh("cp -ra ${reclassTempRootDir}/reclass/* .")
@@ -194,7 +249,9 @@
                         sh("cp -v gpgkey.asc ${testEnv}/salt_master_pillar.asc")
                     }
                     def DockerCName = "${env.JOB_NAME.toLowerCase()}_${env.BUILD_TAG.toLowerCase()}"
-                    common.infoMsg("Attempt to run test against distribRevision: ${distribRevision}")
+                    common.warningMsg("Attempt to run test against:\n" +
+                        "DISTRIB_REVISION from ${distribRevision}\n" +
+                        "updateSaltFormulasDuringTest = ${updateSaltFormulasDuringTest}")
                     try {
                         def config = [
                             'dockerHostname'     : "${context['salt_master_hostname']}",
@@ -203,7 +260,8 @@
                             'distribRevision'    : distribRevision,
                             'dockerContainerName': DockerCName,
                             'testContext'        : 'salt-model-node',
-                            'dockerExtraOpts'    : ['--memory=3g']
+                            'dockerExtraOpts'    : ['--memory=3g'],
+                            'updateSaltFormulas' : updateSaltFormulasDuringTest
                         ]
                         testResult = saltModelTesting.testNode(config)
                         common.infoMsg("Test finished: SUCCESS")
diff --git a/openstack-rabbitmq-upgrade.groovy b/openstack-rabbitmq-upgrade.groovy
new file mode 100644
index 0000000..aabdafc
--- /dev/null
+++ b/openstack-rabbitmq-upgrade.groovy
@@ -0,0 +1,155 @@
+/**
+ * Upgrade RabbitMQ packages on msg nodes.
+ * Update packages on given nodes
+ *
+ * Expected parameters:
+ *   SALT_MASTER_CREDENTIALS            Credentials to the Salt API.
+ *   SALT_MASTER_URL                    Full Salt API address [http://10.10.10.15:6969].
+ *   OS_DIST_UPGRADE                    Upgrade system packages including kernel (apt-get dist-upgrade)
+ *   OS_UPGRADE                         Upgrade all installed applications (apt-get upgrade)
+ *   TARGET_SERVERS                     Comma separated list of salt compound definitions to upgrade.
+ *   INTERACTIVE                        Ask interactive questions during pipeline run (bool).
+ *
+**/
+
+def common = new com.mirantis.mk.Common()
+def salt = new com.mirantis.mk.Salt()
+def python = new com.mirantis.mk.Python()
+def debian = new com.mirantis.mk.Debian()
+def openstack = new com.mirantis.mk.Openstack()
+
+def interactive = INTERACTIVE.toBoolean()
+def LinkedHashMap upgradeStageMap = [:]
+
+upgradeStageMap.put('Pre upgrade',
+  [
+    'Description': 'Only non destructive actions will be applied during this phase. Basic service verification will be performed.',
+    'Status': 'NOT_LAUNCHED',
+    'Expected behaviors': '''
+ * No service downtime
+ * No workload downtime''',
+    'Launched actions': '''
+ * Verify API, perform basic CRUD operations for services.
+ * Verify rabbitmq is running and operational.''',
+    'State result': 'Basic checks around services API are passed.'
+  ])
+
+upgradeStageMap.put('Stop RabbitMQ service',
+  [
+    'Description': 'All rabbitmq services will be stopped on All TARGET_SERVERS nodes.',
+    'Status': 'NOT_LAUNCHED',
+    'Expected behaviors': '''
+ * RabbitMQ services are stopped.
+ * OpenStack APIs are not accessible from this point.
+ * No workload downtime''',
+    'Launched actions': '''
+ * Stop RabbitMQ services''',
+    'State result': 'RabbitMQ service is stopped',
+  ])
+
+upgradeStageMap.put('Upgrade OS',
+  [
+    'Description': 'Optional step. OS packages will be upgraded during this phase, depending on the job parameters dist-upgrade might be called. And reboot of node executed.',
+    'Status': 'NOT_LAUNCHED',
+    'Expected behaviors': '''
+ * No workload downtime
+ * The nodes might be rebooted''',
+    'Launched actions': '''
+ * Install new version of system packages
+ * If doing dist-upgrade new kernel might be installed and node rebooted
+ * System packages are updated
+ * Node might be rebooted
+'''
+  ])
+
+upgradeStageMap.put('Upgrade RabbitMQ server',
+   [
+    'Description': 'RabbitMQ and Erlang code will be upgraded during this stage. No workload downtime is expected.',
+    'Status': 'NOT_LAUNCHED',
+    'Expected behaviors': '''
+ * OpenStack services lose connection to rabbitmq-server
+ * No workload downtime''',
+    'Launched actions': '''
+ * Install new version of RabbitMQ and Erlang packages
+ * Render version of configs''',
+    'State result': '''
+ * RabbitMQ packages are upgraded''',
+  ])
+
+upgradeStageMap.put('Start RabbitMQ service',
+   [
+    'Description': 'All rabbitmq services will be running on All TARGET_SERVERS nodes.',
+    'Status': 'NOT_LAUNCHED',
+    'Expected behaviors': '''
+ * RabbitMQ service is running.
+ * OpenStack API are accessible from this point.
+ * No workload downtime''',
+    'Launched actions': '''
+ * Start RabbitMQ service''',
+    'State result': 'RabbitMQ service is running',
+  ])
+
+def env = "env"
+timeout(time: 12, unit: 'HOURS') {
+  node() {
+
+    stage('Setup virtualenv for Pepper') {
+      python.setupPepperVirtualenv(env, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+    }
+
+    def upgradeTargets = salt.getMinionsSorted(env, TARGET_SERVERS)
+
+    if (upgradeTargets.isEmpty()) {
+      error("No servers for upgrade matched by ${TARGET_SERVERS}")
+    }
+
+    def stopTargets = upgradeTargets.reverse()
+
+    common.printStageMap(upgradeStageMap)
+    if (interactive){
+      input message: common.getColorizedString(
+        "Above you can find detailed info this pipeline will execute.\nThe info provides brief description of each stage, actions that will be performed and service/workload impact during each stage.\nPlease read it carefully.", "yellow")
+    }
+
+    for (target in upgradeTargets){
+      common.stageWrapper(upgradeStageMap, "Pre upgrade", target, interactive) {
+        openstack.runOpenStackUpgradePhase(env, target, 'pre')
+        openstack.runOpenStackUpgradePhase(env, target, 'verify')
+      }
+    }
+
+    for (target in stopTargets) {
+      common.stageWrapper(upgradeStageMap, "Stop RabbitMQ service", target, interactive) {
+        openstack.runOpenStackUpgradePhase(env, target, 'service_stopped')
+      }
+    }
+
+    for (target in upgradeTargets) {
+      common.stageWrapper(upgradeStageMap, "Upgrade OS", target, interactive) {
+        if (OS_DIST_UPGRADE.toBoolean() == true){
+          upgrade_mode = 'dist-upgrade'
+        } else if (OS_UPGRADE.toBoolean() == true){
+          upgrade_mode = 'upgrade'
+        }
+        if (OS_DIST_UPGRADE.toBoolean() == true || OS_UPGRADE.toBoolean() == true) {
+          debian.osUpgradeNode(env, target, upgrade_mode, false)
+        }
+      }
+    }
+
+    for (target in upgradeTargets) {
+      common.stageWrapper(upgradeStageMap, "Upgrade RabbitMQ server", target, interactive) {
+        openstack.runOpenStackUpgradePhase(env, target, 'pkgs_latest')
+        openstack.runOpenStackUpgradePhase(env, target, 'render_config')
+      }
+    }
+
+    for (target in upgradeTargets) {
+      common.stageWrapper(upgradeStageMap, "Start RabbitMQ service", target, interactive) {
+        openstack.runOpenStackUpgradePhase(env, target, 'service_running')
+        openstack.applyOpenstackAppsStates(env, target)
+        openstack.runOpenStackUpgradePhase(env, target, 'verify')
+      }
+    }
+  }
+}
diff --git a/upgrade-mcp-release.groovy b/upgrade-mcp-release.groovy
index e389937..d1614eb 100644
--- a/upgrade-mcp-release.groovy
+++ b/upgrade-mcp-release.groovy
@@ -455,6 +455,8 @@
                 // mine was updated with required data after salt-minion/salt-master restart salt:minion:ca
                 salt.enforceState(venvPepper, "I@salt:minion:ca", 'salt.minion.ca', true)
                 salt.enforceState(venvPepper, "I@salt:minion", 'salt.minion.cert', true)
+                // run `salt.minion` to refresh all minion configs (for example _keystone.conf)
+                salt.enforceState([saltId: venvPepper, target: "I@salt:minion ${extra_tgt}", state: ['salt.minion'], read_timeout: 60, retries: 2])
                 // updating users and keys
                 salt.enforceState(venvPepper, "I@linux:system", 'linux.system.user', true)
                 salt.enforceState(venvPepper, "I@linux:system", 'openssh', true)