Merge "Add openstack-galera-upgrade.groovy pipeline"
diff --git a/cvp-runner.groovy b/cvp-runner.groovy
index d64e057..b9649d5 100644
--- a/cvp-runner.groovy
+++ b/cvp-runner.groovy
@@ -23,7 +23,6 @@
 /*
 YAML example
 =====
-
 # commands is a map of commands which looks like step_name: shell_command
 commands:
   001_prepare: rm /var/lib/g.txt
@@ -35,19 +34,19 @@
   - SALT_USERNAME=admin
   - SALT_PASSWORD=password
   - drivetrain_version=testing
-
 */
 
 node (SLAVE_NODE) {
     def artifacts_dir = 'validation_artifacts'
+    def test_suite_name = "${env.JOB_NAME}"
+    def xml_file = "${test_suite_name}_report.xml"
+
     def configRun = [:]
     try {
         withEnv(env_vars) {
             stage('Initialization') {
                 def container_workdir = '/var/lib'
-                def test_suite_name = "${env.JOB_NAME}"
                 def workdir = "${container_workdir}/${test_suite_name}"
-                def xml_file = "${test_suite_name}_report.xml"
                 def tests_set = (env.getProperty('tests_set')) ?: ''
                 def script = "pytest --junitxml ${container_workdir}/${artifacts_dir}/${xml_file} --tb=short -vv ${tests_set}"
 
@@ -95,28 +94,28 @@
                         style: 'line',
                         title: 'SPT Glance results',
                         xmlSeries: [[
-                        file: "${env.JOB_NAME}_report.xml",
+                        file: "${artifacts_dir}/${xml_file}",
                         nodeType: 'NODESET',
                         url: '',
-                        xpath: '/testsuite/testcase[@name="test_speed_glance"]/properties/property']]
+                        xpath: '/testsuite/testcase[@classname="tests.test_glance"]/properties/property']]
                     plot csvFileName: 'plot-hw2hw.csv',
                         group: 'SPT',
                         style: 'line',
                         title: 'SPT HW2HW results',
                         xmlSeries: [[
-                        file: "${env.JOB_NAME}_report.xml",
+                        file: "${artifacts_dir}/${xml_file}",
                         nodeType: 'NODESET',
                         url: '',
-                        xpath: '/testsuite/testcase[@classname="cvp_spt.tests.test_hw2hw"]/properties/property']]
+                        xpath: '/testsuite/testcase[@classname="tests.test_hw2hw"]/properties/property']]
                     plot csvFileName: 'plot-vm2vm.csv',
                         group: 'SPT',
                         style: 'line',
                         title: 'SPT VM2VM results',
                         xmlSeries: [[
-                        file: "${env.JOB_NAME}_report.xml",
+                        file: "${artifacts_dir}/${xml_file}",
                         nodeType: 'NODESET',
                         url: '',
-                        xpath: '/testsuite/testcase[@classname="cvp_spt.tests.test_vm2vm"]/properties/property']]
+                        xpath: '/testsuite/testcase[@classname="tests.test_vm2vm"]/properties/property']]
                 }
             }
         }
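
The change above hoists test_suite_name and xml_file out of the Initialization stage so the pytest invocation and all three plot steps derive the report path from a single definition. A minimal sketch of the resulting pattern (illustrative node label and stage names; assumes the Jenkins Plot plugin's plot step, which this pipeline already uses):

    // Minimal sketch, not the full pipeline: one definition of the report
    // path, consumed by both the test run and the plot configuration.
    node('jenkins-slave') {                             // illustrative label
        def artifacts_dir = 'validation_artifacts'
        def test_suite_name = "${env.JOB_NAME}"
        def xml_file = "${test_suite_name}_report.xml"  // single source of truth

        stage('Run tests') {
            sh "pytest --junitxml ${artifacts_dir}/${xml_file} --tb=short -vv"
        }
        stage('Plot') {
            plot csvFileName: 'plot-sample.csv', group: 'SPT', style: 'line',
                title: 'Sample results',
                xmlSeries: [[file: "${artifacts_dir}/${xml_file}",
                             nodeType: 'NODESET', url: '',
                             xpath: '/testsuite/testcase[@classname="tests.test_glance"]/properties/property']]
        }
    }
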
diff --git a/cvp-tempest.groovy b/cvp-tempest.groovy
index b0e12e9..d8087b3 100644
--- a/cvp-tempest.groovy
+++ b/cvp-tempest.groovy
@@ -19,6 +19,8 @@
  *   TEMPEST_ENDPOINT_TYPE       Type of OS endpoint to use during test run (not in use right now)
  *   concurrency                 Number of threads to use for Tempest test run
  *   remote_artifacts_dir        Folder to use for artifacts on remote node
+ *   runtest_tempest_cfg_dir     Folder in which to generate and store tempest.conf
+ *   runtest_tempest_cfg_name    Tempest config file name
  *   report_prefix               Some prefix to put to report name
  *
  */
@@ -37,91 +39,109 @@
 def DEBUG_MODE = (env.DEBUG_MODE) ?: false
 def STOP_ON_ERROR = (env.STOP_ON_ERROR) ? env.STOP_ON_ERROR.toBoolean() : false
 def GENERATE_CONFIG = (env.GENERATE_CONFIG) ?: true
+// do not change unless you know what you're doing
 def remote_artifacts_dir = (env.remote_artifacts_dir) ?: '/root/test/'
 def report_prefix = (env.report_prefix) ?: ''
 def args = ''
+def mounts = [:]
 node() {
-    try{
-        stage('Initialization') {
-            deleteDir()
-            saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
-            container_name = "${env.JOB_NAME}"
-            cluster_name=salt.getPillar(saltMaster, 'I@salt:master', '_param:cluster_name')['return'][0].values()[0]
-            os_version=salt.getPillar(saltMaster, 'I@salt:master', '_param:openstack_version')['return'][0].values()[0]
-            if (os_version == '') {
-                throw new Exception("Openstack is not found on this env. Exiting")
-            }
-            TEST_IMAGE = (env.TEST_IMAGE) ?: "docker-prod-virtual.docker.mirantis.net/mirantis/cicd/ci-tempest:${os_version}"
-            runtest_node = salt.runSaltProcessStep(saltMaster, 'I@runtest:*', 'test.ping')['return'][0]
-            if (runtest_node.values()[0]) {
-                // Let's use Service node that was defined in reclass. If several nodes are defined
-                // we will use the first from salt output
-                common.infoMsg("Service node ${runtest_node.keySet()[0]} is defined in reclass")
-                SERVICE_NODE = runtest_node.keySet()[0]
-            }
-            else {
-                throw new Exception("Runtest config is not found in reclass. Please create runtest.yml and include it " +
-                                    "into reclass. Check documentation for more details")
-            }
-            common.infoMsg('Refreshing pillars on service node')
-            salt.runSaltProcessStep(saltMaster, SERVICE_NODE, 'saltutil.refresh_pillar', [], null, VERBOSE)
-            tempest_node=salt.getPillar(saltMaster, SERVICE_NODE, '_param:tempest_test_target')['return'][0].values()[0] ?: 'I@gerrit:client'
+    stage('Initialization') {
+        deleteDir()
+        saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+        container_name = "${env.JOB_NAME}"
+        cluster_name=salt.getPillar(saltMaster, 'I@salt:master', '_param:cluster_name')['return'][0].values()[0]
+        os_version=salt.getPillar(saltMaster, 'I@salt:master', '_param:openstack_version')['return'][0].values()[0]
+        if (!os_version) {
+            throw new Exception("Openstack is not found on this env. Exiting")
         }
-        stage('Preparing resources') {
-            if ( PREPARE_RESOURCES.toBoolean() ) {
-                common.infoMsg('Running salt.minion state on service node')
-                salt.enforceState(saltMaster, SERVICE_NODE, ['salt.minion'], VERBOSE, STOP_ON_ERROR, null, false, 300, 2, true, [], 60)
-                common.infoMsg('Running keystone.client on service node')
-                salt.enforceState(saltMaster, SERVICE_NODE, 'keystone.client', VERBOSE, STOP_ON_ERROR)
-                common.infoMsg('Running glance.client on service node')
-                salt.enforceState(saltMaster, SERVICE_NODE, 'glance.client', VERBOSE, STOP_ON_ERROR)
-                common.infoMsg('Running nova.client on service node')
-                salt.enforceState(saltMaster, SERVICE_NODE, 'nova.client', VERBOSE, STOP_ON_ERROR)
-            }
-            else {
-                common.infoMsg('Skipping resources preparation')
-            }
+        TEST_IMAGE = (env.TEST_IMAGE) ?: "docker-prod-virtual.docker.mirantis.net/mirantis/cicd/ci-tempest:${os_version}"
+        runtest_node = salt.runSaltProcessStep(saltMaster, 'I@runtest:*', 'test.ping')['return'][0]
+        if (runtest_node.values()[0]) {
+            // Use the service node defined in reclass. If several nodes are
+            // defined, we will use the first one from the Salt output
+            common.infoMsg("Service node ${runtest_node.keySet()[0]} is defined in reclass")
+            SERVICE_NODE = runtest_node.keySet()[0]
         }
-        stage('Generate config') {
-            if ( GENERATE_CONFIG.toBoolean() ) {
-                salt.runSaltProcessStep(saltMaster, SERVICE_NODE, 'file.remove', ["${remote_artifacts_dir}"])
-                salt.runSaltProcessStep(saltMaster, SERVICE_NODE, 'file.mkdir', ["${remote_artifacts_dir}"])
-                fullnodename = salt.getMinions(saltMaster, SERVICE_NODE).get(0)
-                TARGET_NODE = (env.TARGET_NODE) ?: tempest_node
-                if (TARGET_NODE != tempest_node) {
-                    common.infoMsg("TARGET_NODE is defined in Jenkins")
-                    def params_to_update = ['tempest_test_target': "${TARGET_NODE}"]
-                    common.infoMsg("Overriding default ${tempest_node} value of tempest_test_target parameter")
-                    result = salt.runSaltCommand(saltMaster, 'local', ['expression': SERVICE_NODE, 'type': 'compound'], 'reclass.node_update',
-                                                 null, null, ['name': fullnodename, 'parameters': ['tempest_test_target': "${TARGET_NODE}"]])
-                    salt.checkResult(result)
-                }
-                common.infoMsg("TARGET_NODE is ${TARGET_NODE}")
-                salt.runSaltProcessStep(saltMaster, TARGET_NODE, 'file.remove', ["${remote_artifacts_dir}"])
-                salt.runSaltProcessStep(saltMaster, TARGET_NODE, 'file.mkdir', ["${remote_artifacts_dir}"])
+        else {
+            throw new Exception("Runtest config is not found in reclass. Please create runtest.yml and include it " +
+                                "into reclass. Check documentation for more details")
+        }
+        common.infoMsg('Refreshing pillars on service node')
+        salt.runSaltProcessStep(saltMaster, SERVICE_NODE, 'saltutil.refresh_pillar', [], null, VERBOSE)
+        // default node is cid01 (preferably) or cfg01
+        default_node=salt.getPillar(saltMaster, 'I@salt:master', '_param:cicd_control_node01_hostname')['return'][0].values()[0] ?: 'cfg01'
+        // fetch tempest_test_target from runtest.yaml, otherwise fall back to default_node
+        tempest_node=salt.getPillar(saltMaster, SERVICE_NODE, '_param:tempest_test_target')['return'][0].values()[0] ?: default_node+'*'
+        // TARGET_NODE will always override any settings above
+        TARGET_NODE = (env.TARGET_NODE) ?: tempest_node
+        // default is /root/test/
+        runtest_tempest_cfg_dir = (env.runtest_tempest_cfg_dir) ?: salt.getPillar(saltMaster, SERVICE_NODE, '_param:runtest_tempest_cfg_dir')['return'][0].values()[0]
+        // default is tempest_generated.conf
+        runtest_tempest_cfg_name = (env.runtest_tempest_cfg_name) ?: salt.getPillar(saltMaster, SERVICE_NODE, '_param:runtest_tempest_cfg_name')['return'][0].values()[0]
+        common.infoMsg("runtest_tempest_cfg is ${runtest_tempest_cfg_dir}/${runtest_tempest_cfg_name}")
+    }
+    stage('Preparing resources') {
+        if ( PREPARE_RESOURCES.toBoolean() ) {
+            common.infoMsg('Running salt.minion state on service node')
+            salt.enforceState(saltMaster, SERVICE_NODE, ['salt.minion'], VERBOSE, STOP_ON_ERROR, null, false, 300, 2, true, [], 60)
+            common.infoMsg('Running keystone.client on service node')
+            salt.enforceState(saltMaster, SERVICE_NODE, 'keystone.client', VERBOSE, STOP_ON_ERROR)
+            common.infoMsg('Running glance.client on service node')
+            salt.enforceState(saltMaster, SERVICE_NODE, 'glance.client', VERBOSE, STOP_ON_ERROR)
+            common.infoMsg('Running nova.client on service node')
+            salt.enforceState(saltMaster, SERVICE_NODE, 'nova.client', VERBOSE, STOP_ON_ERROR)
+        }
+        else {
+            common.infoMsg('Skipping resources preparation')
+        }
+    }
+    stage('Generate config') {
+        if ( GENERATE_CONFIG.toBoolean() ) {
+            salt.runSaltProcessStep(saltMaster, SERVICE_NODE, 'file.remove', ["${runtest_tempest_cfg_dir}"])
+            salt.runSaltProcessStep(saltMaster, SERVICE_NODE, 'file.mkdir', ["${runtest_tempest_cfg_dir}"])
+            fullnodename = salt.getMinions(saltMaster, SERVICE_NODE).get(0)
+            if (TARGET_NODE != tempest_node) {
+                common.infoMsg("TARGET_NODE is defined in Jenkins")
+                def params_to_update = ['tempest_test_target': "${TARGET_NODE}"]
+                common.infoMsg("Overriding default ${tempest_node} value of tempest_test_target parameter")
+                result = salt.runSaltCommand(saltMaster, 'local', ['expression': SERVICE_NODE, 'type': 'compound'], 'reclass.node_update',
+                                             null, null, ['name': fullnodename, 'parameters': ['tempest_test_target': "${TARGET_NODE}"]])
+                salt.checkResult(result)
+            }
+            common.infoMsg("TARGET_NODE is ${TARGET_NODE}")
+            salt.runSaltProcessStep(saltMaster, TARGET_NODE, 'file.remove', ["${remote_artifacts_dir}"])
+            salt.runSaltProcessStep(saltMaster, TARGET_NODE, 'file.mkdir', ["${remote_artifacts_dir}"])
+            // the runtest state hangs if tempest_test_target is cfg01*,
+            // so run only runtest.generate_tempest_config in that case
+            if (TARGET_NODE == 'cfg01*') {
+                common.warningMsg("It is not recommended to run Tempest container on cfg node, but.. proceeding")
+                salt.enforceState(saltMaster, SERVICE_NODE, 'runtest.generate_tempest_config', VERBOSE, STOP_ON_ERROR)
+            } else {
                 salt.enforceState(saltMaster, SERVICE_NODE, 'runtest', VERBOSE, STOP_ON_ERROR)
-                // we need to refresh pillars on target node after runtest state
-                salt.runSaltProcessStep(saltMaster, TARGET_NODE, 'saltutil.refresh_pillar', [], null, VERBOSE)
-                if (TARGET_NODE != tempest_node) {
-                    common.infoMsg("Reverting tempest_test_target parameter")
-                    result = salt.runSaltCommand(saltMaster, 'local', ['expression': SERVICE_NODE, 'type': 'compound'], 'reclass.node_update',
-                                                 null, null, ['name': fullnodename, 'parameters': ['tempest_test_target': "${tempest_node}"]])
-                }
-                SKIP_LIST_PATH = (env.SKIP_LIST_PATH) ?: salt.getPillar(saltMaster, SERVICE_NODE, '_param:tempest_skip_list_path')['return'][0].values()[0]
-                runtest_tempest_cfg_dir = salt.getPillar(saltMaster, SERVICE_NODE, '_param:runtest_tempest_cfg_dir')['return'][0].values()[0] ?: '/root/test/'
-                if (SKIP_LIST_PATH) {
-                    salt.cmdRun(saltMaster, SERVICE_NODE, "salt-cp ${TARGET_NODE} ${SKIP_LIST_PATH} ${runtest_tempest_cfg_dir}/skip.list")
-                    args += ' --blacklist-file /root/tempest/skip.list '
-                }
             }
-            else {
-                common.infoMsg('Skipping Tempest config generation')
-                salt.cmdRun(saltMaster, TARGET_NODE, "rm -rf ${remote_artifacts_dir}/reports")
+            // we need to refresh pillars on the target node after the runtest state
+            salt.runSaltProcessStep(saltMaster, TARGET_NODE, 'saltutil.refresh_pillar', [], null, VERBOSE)
+            if (TARGET_NODE != tempest_node) {
+                common.infoMsg("Reverting tempest_test_target parameter")
+                result = salt.runSaltCommand(saltMaster, 'local', ['expression': SERVICE_NODE, 'type': 'compound'], 'reclass.node_update',
+                                             null, null, ['name': fullnodename, 'parameters': ['tempest_test_target': "${tempest_node}"]])
+            }
+            SKIP_LIST_PATH = (env.SKIP_LIST_PATH) ?: salt.getPillar(saltMaster, SERVICE_NODE, '_param:tempest_skip_list_path')['return'][0].values()[0]
+            if (SKIP_LIST_PATH) {
+                mounts = ["${runtest_tempest_cfg_dir}/skip.list": "/root/tempest/skip.list"]
+                salt.cmdRun(saltMaster, SERVICE_NODE, "salt-cp ${TARGET_NODE} ${SKIP_LIST_PATH} ${runtest_tempest_cfg_dir}/skip.list")
+                args += ' --blacklist-file /root/tempest/skip.list '
             }
         }
+        else {
+            common.infoMsg('Skipping Tempest config generation')
+            salt.cmdRun(saltMaster, TARGET_NODE, "rm -rf ${remote_artifacts_dir}/reports")
+        }
+    }
 
+    try{
         stage('Run Tempest tests') {
-            mounts = ['/root/test/tempest_generated.conf': '/etc/tempest/tempest.conf']
+            mounts = mounts + ["${runtest_tempest_cfg_dir}/${runtest_tempest_cfg_name}": "/etc/tempest/tempest.conf"]
             validate.runContainer(master: saltMaster, target: TARGET_NODE, dockerImageLink: TEST_IMAGE,
                                   mounts: mounts, name: container_name)
             report_prefix += 'tempest_'
@@ -135,7 +155,7 @@
             else {
                 if (TEMPEST_TEST_PATTERN != 'set=full') {
                     args += " -r ${TEMPEST_TEST_PATTERN} "
-                    report_prefix += 'full'
+                    report_prefix += 'custom'
                 }
             }
             salt.cmdRun(saltMaster, TARGET_NODE, "docker exec -e ARGS=\'${args}\' ${container_name} /bin/bash -c 'run-tempest'")
@@ -150,12 +170,8 @@
             archiveArtifacts artifacts: "${report_prefix}.*"
             junit "${report_prefix}.xml"
         }
-    } catch (Throwable e) {
-        // If there was an error or exception thrown, the build failed
-        currentBuild.result = "FAILURE"
-        throw e
     } finally {
-        if (DEBUG_MODE == 'false') {
+        if ( ! DEBUG_MODE.toBoolean() ) {
             validate.runCleanup(saltMaster, TARGET_NODE, container_name)
         }
     }
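
The mounts map above starts empty, optionally gains the skip-list bind when SKIP_LIST_PATH is set, and receives the tempest.conf bind right before the container starts. Conceptually each entry is a host-path-to-container-path bind mount; the sketch below renders such a map into docker -v flags for illustration only — the actual translation happens inside validate.runContainer, which is not part of this diff:

    // Illustrative rendering of the mounts map into docker -v flags; the
    // real logic lives in the validate shared library.
    def mounts = [:]
    mounts['/root/test/skip.list'] = '/root/tempest/skip.list'
    mounts['/root/test/tempest_generated.conf'] = '/etc/tempest/tempest.conf'

    def volumeFlags = mounts.collect { host, container -> "-v ${host}:${container}" }.join(' ')
    assert volumeFlags == '-v /root/test/skip.list:/root/tempest/skip.list ' +
                          '-v /root/test/tempest_generated.conf:/etc/tempest/tempest.conf'
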
diff --git a/generate-cookiecutter-products.groovy b/generate-cookiecutter-products.groovy
index 63d7b52..29f03fe 100644
--- a/generate-cookiecutter-products.groovy
+++ b/generate-cookiecutter-products.groovy
@@ -347,7 +347,7 @@
                 }
 
                 for (i in common.entries(smc)) {
-                    sh "sed -i 's,export ${i[0]}=.*,export ${i[0]}=\"${i[1]}\",' user_data"
+                    sh "sed -i 's,export ${i[0]}=.*,export ${i[0]}=\${${i[0]}:-\"${i[1]}\"},' user_data"
                 }
 
                 // calculate netmask
@@ -416,10 +416,31 @@
                 archiveArtifacts artifacts: "${context['cluster_name']}.tar.gz"
 
                 if (RequesterEmail != '' && !RequesterEmail.contains('example')) {
-                    emailext(to: RequesterEmail,
-                        attachmentsPattern: "output-${context['cluster_name']}/*",
-                        body: "Mirantis Jenkins\n\nRequested reclass model ${context['cluster_name']} has been created and attached to this email.\nEnjoy!\n\nMirantis",
-                        subject: "Your Salt model ${context['cluster_name']}")
+                    def mailSubject = "Your Salt model ${context['cluster_name']}"
+                    if (context.get('send_method') == 'gcs') {
+                        def gcs = new com.mirantis.mk.GoogleCloudStorage()
+                        def uploadIsos = [ "output-${context['cluster_name']}/${context['salt_master_hostname']}.${context['cluster_domain']}-config.iso" ]
+                        if (context['local_repositories'] == 'True') {
+                            uploadIsos << "output-${context['cluster_name']}/${aptlyServerHostname}.${context['cluster_domain']}-config.iso"
+                        }
+                        // generate a random hash so the link to the file is unique and unpredictable
+                        def randHash = common.generateRandomHashString(64)
+                        def config = [
+                            'creds': context['gcs_creds'],
+                            'project': context['gcs_project'],
+                            'dest': "gs://${context['gcs_bucket']}/${randHash}",
+                            'sources': uploadIsos
+                        ]
+                        def fileURLs = gcs.uploadArtifactToGoogleStorageBucket(config).join(' ').replace('gs://', 'https://storage.googleapis.com/')
+                        emailext(to: RequesterEmail,
+                            body: "Mirantis Jenkins\n\nRequested reclass model ${context['cluster_name']} has been created and available to download via next URL: ${fileURLs} within 7 days.\nEnjoy!\n\nMirantis",
+                            subject: mailSubject)
+                    } else {
+                        emailext(to: RequesterEmail,
+                            attachmentsPattern: "output-${context['cluster_name']}/*",
+                            body: "Mirantis Jenkins\n\nRequested reclass model ${context['cluster_name']} has been created and attached to this email.\nEnjoy!\n\nMirantis",
+                            subject: mailSubject)
+                    }
                 }
                 dir("output-${context['cluster_name']}") {
                     deleteDir()
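
The modified sed expression in this file wraps every generated default in the shell ${VAR:-default} form, so a value already exported in the boot environment takes precedence over the generated one. A worked example with a hypothetical variable name:

    // Before the change, user_data pinned the value unconditionally:
    //   export SALT_MASTER_DEPLOY_IP="10.0.0.15"
    // After the change, the generated value is only a fallback:
    //   export SALT_MASTER_DEPLOY_IP=${SALT_MASTER_DEPLOY_IP:-"10.0.0.15"}
    def smc = ['SALT_MASTER_DEPLOY_IP': '10.0.0.15']   // hypothetical entry
    for (i in common.entries(smc)) {
        sh "sed -i 's,export ${i[0]}=.*,export ${i[0]}=\${${i[0]}:-\"${i[1]}\"},' user_data"
    }
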
diff --git a/git-mirror-pipeline.groovy b/git-mirror-pipeline.groovy
index fa49bbc..6f14866 100644
--- a/git-mirror-pipeline.groovy
+++ b/git-mirror-pipeline.groovy
@@ -5,6 +5,22 @@
     timeout(time: 12, unit: 'HOURS') {
         node() {
             try {
+                def sourceCreds = env.SOURCE_CREDENTIALS
+                if (sourceCreds && common.getCredentialsById(sourceCreds, 'password')) {
+                    withCredentials([
+                            [$class          : 'UsernamePasswordMultiBinding',
+                             credentialsId   : sourceCreds,
+                             passwordVariable: 'GIT_PASS',
+                             usernameVariable: 'GIT_USER']
+                    ]) {
+                        sh """
+                            set +x
+                            git config --global credential.${SOURCE_URL}.username \${GIT_USER}
+                            echo "echo \${GIT_PASS}" > askpass.sh && chmod +x askpass.sh
+                        """
+                        env.GIT_ASKPASS = "${env.WORKSPACE}/askpass.sh"
+                    }
+                }
                 if (BRANCHES == '*' || BRANCHES.contains('*')) {
                     branches = git.getBranchesForGitRepo(SOURCE_URL, BRANCHES)
                 } else {
@@ -18,7 +34,8 @@
                 dir('source') {
                     checkout changelog: true, poll: true,
                         scm: [$class    : 'GitSCM', branches: pollBranches, doGenerateSubmoduleConfigurations: false,
-                              extensions: [[$class: 'CleanCheckout']], submoduleCfg: [], userRemoteConfigs: [[credentialsId: CREDENTIALS_ID, url: SOURCE_URL]]]
+                              extensions: [[$class: 'CleanCheckout']], submoduleCfg: [],
+                              userRemoteConfigs: [[credentialsId: sourceCreds, url: SOURCE_URL]]]
                     git.mirrorGit(SOURCE_URL, TARGET_URL, CREDENTIALS_ID, branches, true)
                 }
             } catch (Throwable e) {
@@ -26,6 +43,9 @@
                 currentBuild.result = 'FAILURE'
                 currentBuild.description = currentBuild.description ? e.message + '' + currentBuild.description : e.message
                 throw e
+            } finally {
+                sh "git config --global --unset credential.${SOURCE_URL}.username || true"
+                deleteDir()
             }
         }
     }
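
The new block relies on git's askpass mechanism: when GIT_ASKPASS points at an executable, git runs it and reads the credential from its stdout instead of prompting. Because the script is written inside withCredentials, the shell expands ${GIT_PASS} at write time, so askpass.sh carries the literal secret for later git invocations. A minimal sketch (credentials ID and remote URL are illustrative):

    withCredentials([usernamePassword(credentialsId: 'mirror-creds',   // illustrative
                                      usernameVariable: 'GIT_USER',
                                      passwordVariable: 'GIT_PASS')]) {
        sh '''
            set +x
            # ${GIT_PASS} is expanded here, inside the credentials scope,
            # so the helper script ends up echoing the actual password.
            echo "echo ${GIT_PASS}" > askpass.sh && chmod +x askpass.sh
        '''
    }
    env.GIT_ASKPASS = "${env.WORKSPACE}/askpass.sh"
    // Any later fetch against the authenticated remote is now non-interactive:
    sh 'git ls-remote https://git.example.com/repo.git'
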
diff --git a/openstack-control-upgrade.groovy b/openstack-control-upgrade.groovy
index f2dd78c..5929390 100644
--- a/openstack-control-upgrade.groovy
+++ b/openstack-control-upgrade.groovy
@@ -32,6 +32,8 @@
  * No service downtime
  * No workload downtime''',
     'Launched actions': '''
+ * Refresh pillars on the target nodes.
+ * Apply the 'linux.system.repo' state on the target nodes.
  * Verify API, perform basic CRUD operations for services.
  * Verify that compute/neutron agents on hosts are up.
  * Run some service built in checkers like keystone-manage doctor or nova-status upgrade.''',
@@ -153,6 +155,8 @@
     for (target in upgradeTargets){
       common.stageWrapper(upgradeStageMap, "Pre upgrade", target, interactive) {
         openstack.runOpenStackUpgradePhase(env, target, 'pre')
+        salt.runSaltProcessStep(env, target, 'saltutil.refresh_pillar', [], null, true)
+        salt.enforceState(env, target, 'linux.system.repo')
         openstack.runOpenStackUpgradePhase(env, target, 'verify')
       }
     }
@@ -173,6 +177,9 @@
         if (OS_DIST_UPGRADE.toBoolean() == true || OS_UPGRADE.toBoolean() == true) {
           debian.osUpgradeNode(env, target, upgrade_mode, false)
         }
+        // Workaround for PROD-31413: install python-tornado from the latest release if available and
+        // restart the minion to apply the new code.
+        salt.upgradePackageAndRestartSaltMinion(env, target, 'python-tornado')
       }
 
       common.stageWrapper(upgradeStageMap, "Upgrade OpenStack", target, interactive) {
diff --git a/openstack-data-upgrade.groovy b/openstack-data-upgrade.groovy
index 7458a27..e768564 100644
--- a/openstack-data-upgrade.groovy
+++ b/openstack-data-upgrade.groovy
@@ -31,6 +31,8 @@
  * No service downtime
  * No workload downtime''',
     'Launched actions': '''
+ * Refresh pillars on the target nodes.
+ * Apply the 'linux.system.repo' state on the target nodes.
  * Verify API, perform basic CRUD operations for services.
  * Verify that compute/neutron agents on hosts are up.
  * Run some service built in checkers like keystone-manage doctor or nova-status upgrade.''',
@@ -138,6 +140,8 @@
     for (target in targetNodes){
       common.stageWrapper(upgradeStageMap, "Pre upgrade", target, interactive) {
         openstack.runOpenStackUpgradePhase(env, target, 'pre')
+        salt.runSaltProcessStep(env, target, 'saltutil.refresh_pillar', [], null, true)
+        salt.enforceState(env, target, 'linux.system.repo')
         openstack.runOpenStackUpgradePhase(env, target, 'verify')
       }
 
@@ -158,6 +162,9 @@
         if (OS_DIST_UPGRADE.toBoolean() == true || OS_UPGRADE.toBoolean() == true) {
           debian.osUpgradeNode(env, target, upgrade_mode, false)
         }
+        // Workaround for PROD-31413: install python-tornado from the latest release if available and
+        // restart the minion to apply the new code.
+        salt.upgradePackageAndRestartSaltMinion(env, target, 'python-tornado')
       }
 
       common.stageWrapper(upgradeStageMap, "Upgrade OpenStack", target, interactive) {
diff --git a/openstack-rabbitmq-upgrade.groovy b/openstack-rabbitmq-upgrade.groovy
index aabdafc..bc252da 100644
--- a/openstack-rabbitmq-upgrade.groovy
+++ b/openstack-rabbitmq-upgrade.groovy
@@ -29,6 +29,8 @@
  * No service downtime
  * No workload downtime''',
     'Launched actions': '''
+ * Refresh pillars on the target nodes.
+ * Apply the 'linux.system.repo' state on the target nodes.
  * Verify API, perform basic CRUD operations for services.
  * Verify rabbitmq is running and operational.''',
     'State result': 'Basic checks around services API are passed.'
@@ -114,6 +116,8 @@
     for (target in upgradeTargets){
       common.stageWrapper(upgradeStageMap, "Pre upgrade", target, interactive) {
         openstack.runOpenStackUpgradePhase(env, target, 'pre')
+        salt.runSaltProcessStep(env, target, 'saltutil.refresh_pillar', [], null, true)
+        salt.enforceState(env, target, 'linux.system.repo')
         openstack.runOpenStackUpgradePhase(env, target, 'verify')
       }
     }
diff --git a/restore-cassandra.groovy b/restore-cassandra.groovy
index b80f8ae..fb1259f 100644
--- a/restore-cassandra.groovy
+++ b/restore-cassandra.groovy
@@ -37,58 +37,71 @@
             }
         }
 
-        stage('Backup') {
-            salt.cmdRun(pepperEnv, 'I@cassandra:backup:client', 'bash /usr/local/bin/cassandra-backup-runner-call.sh')
-        }
-
         stage('Restore') {
+            // stop neutron-server to prevent CRUD API calls to the contrail-api service
+            try {
+                salt.runSaltProcessStep(pepperEnv, 'I@neutron:server', 'service.stop', ['neutron-server'], null, true)
+            } catch (Exception er) {
+                common.warningMsg('neutron-server service already stopped')
+            }
             // get opencontrail version
             def contrailVersion = getValueForPillarKey(pepperEnv, "I@opencontrail:control:role:primary", "_param:opencontrail_version")
+            def configDbIp = getValueForPillarKey(pepperEnv, "I@opencontrail:control:role:primary", "opencontrail:database:bind:host")
+            def configDbPort = getValueForPillarKey(pepperEnv, "I@opencontrail:control:role:primary", "opencontrail:database:bind:port_configdb")
             common.infoMsg("OpenContrail version is ${contrailVersion}")
             if (contrailVersion.startsWith('4')) {
                 controllerImage = getValueForPillarKey(pepperEnv, "I@opencontrail:control:role:primary",
                         "docker:client:compose:opencontrail:service:controller:container_name")
                 common.infoMsg("Applying db restore procedure for OpenContrail 4.X version")
                 try {
-                    salt.cmdRun(pepperEnv, 'I@opencontrail:control', 'doctrail controller systemctl stop contrail-database' )
+                    common.infoMsg("Stop contrail control plane containers")
+                    salt.cmdRun(pepperEnv, 'I@opencontrail:control or I@opencontrail:collector', 'cd /etc/docker/compose/opencontrail/; docker-compose down')
                 } catch (Exception err) {
-                    common.errorMsg('An error has been occurred during cassandra db shutdown: ' + err.getMessage())
+                    common.errorMsg('An error occurred during Contrail containers shutdown: ' + err.getMessage())
                     throw err
                 }
                 try {
-                    salt.cmdRun(pepperEnv, 'I@opencontrail:control', "docker exec ${controllerImage} bash -c 'for f in \$(ls /var/lib/cassandra/); do rm -r /var/lib/cassandra/\$f; done'")
+                    common.infoMsg("Cleanup cassandra data")
+                    salt.cmdRun(pepperEnv, 'I@opencontrail:control', 'for f in $(ls /var/lib/configdb/); do rm -r /var/lib/configdb/$f; done')
                 } catch (Exception err) {
-                    common.errorMsg('Cannot cleanup cassandra data: ' + err.getMessage())
+                    common.errorMsg('Cannot clean up cassandra data on control nodes: ' + err.getMessage())
                     throw err
                 }
                 try {
-                    salt.cmdRun(pepperEnv, 'I@cassandra:backup:client', 'doctrail controller systemctl start contrail-database' )
+                    common.infoMsg("Start cassandra db on I@cassandra:backup:client node")
+                    salt.cmdRun(pepperEnv, 'I@cassandra:backup:client', 'cd /etc/docker/compose/opencontrail/; docker-compose up -d')
                 } catch (Exception err) {
-                    common.errorMsg('An error has been occurred during cassandra db startup: ' + err.getMessage())
+                    common.errorMsg('An error occurred during cassandra db startup on the I@cassandra:backup:client node: ' + err.getMessage())
                     throw err
                 }
-                // remove restore-already-happenned file if any is present
+                // wait for cassandra to be online
+                common.retry(6, 20){
+                    common.infoMsg("Trying to connect to casandra db on I@cassandra:backup:client node ...")
+                    salt.cmdRun(pepperEnv, 'I@cassandra:backup:client', "nc -v -z -w2 ${configDbIp} ${configDbPort}")
+                }
+                // remove restore-already-happened file if any is present
                 try {
                     salt.cmdRun(pepperEnv, 'I@cassandra:backup:client', 'rm /var/backups/cassandra/dbrestored')
                 } catch (Exception err) {
                     common.warningMsg('/var/backups/cassandra/dbrestored not present? ' + err.getMessage())
                 }
-                // perform actual backup
                 salt.enforceState(pepperEnv, 'I@cassandra:backup:client', "cassandra")
-                salt.runSaltProcessStep(pepperEnv, 'I@cassandra:backup:client', 'system.reboot', null, [], true, 5)
-                sleep(5)
-                salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control and not I@cassandra:backup:client', 'system.reboot', null, [], true, 5)
-                // the lovely wait-60-seconds mantra before restarting supervisor-database service
-                sleep(60)
-                salt.cmdRun(pepperEnv, 'I@opencontrail:control', "doctrail controller systemctl restart contrail-database")
+                try {
+                    salt.cmdRun(pepperEnv, 'I@opencontrail:control and not I@cassandra:backup:client', 'cd /etc/docker/compose/opencontrail/; docker-compose up -d')
+                } catch (Exception err) {
+                    common.errorMsg('An error occurred during cassandra db startup on I@opencontrail:control and not I@cassandra:backup:client nodes: ' + err.getMessage())
+                    throw err
+                }
                 // another mantra, wait till all services are up
                 sleep(60)
-            } else {
                 try {
-                    salt.runSaltProcessStep(pepperEnv, 'I@neutron:server', 'service.stop', ['neutron-server'], null, true)
-                } catch (Exception er) {
-                    common.warningMsg('neutron-server service already stopped')
+                    common.infoMsg("Start analytics containers node")
+                    salt.cmdRun(pepperEnv, 'I@opencontrail:collector', 'cd /etc/docker/compose/opencontrail/; docker-compose up -d')
+                } catch (Exception err) {
+                    common.errorMsg('An error occurred during analytics containers startup: ' + err.getMessage())
+                    throw err
                 }
+            } else {
                 try {
                     salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.stop', ['supervisor-config'], null, true)
                 } catch (Exception er) {
@@ -139,7 +152,6 @@
                 sleep(5)
 
                 salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.restart', ['supervisor-database'], null, true)
-                salt.runSaltProcessStep(pepperEnv, 'I@neutron:server', 'service.start', ['neutron-server'], null, true)
 
                 // wait until contrail-status is up
                 salt.commandStatus(pepperEnv, 'I@opencontrail:control', "contrail-status | grep -v == | grep -v \'disabled on boot\' | grep -v nodemgr | grep -v active | grep -v backup", null, false)
@@ -147,10 +159,12 @@
                 salt.cmdRun(pepperEnv, 'I@opencontrail:control', "nodetool status")
                 salt.cmdRun(pepperEnv, 'I@opencontrail:control', "contrail-status")
             }
+
+            salt.runSaltProcessStep(pepperEnv, 'I@neutron:server', 'service.start', ['neutron-server'], null, true)
         }
 
         stage('Opencontrail controllers health check') {
-            common.retry(3, 20){
+            common.retry(9, 20){
                 salt.enforceState(pepperEnv, 'I@opencontrail:control or I@opencontrail:collector', 'opencontrail.upgrade.verify', true, true)
             }
         }
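
The restore path now waits for cassandra to accept connections before re-enforcing the backup state: nc -z exits non-zero until the port is open, which makes the closure throw and triggers the next attempt. Assuming common.retry(attempts, delaySeconds) — the argument order these pipelines appear to use — the check gets roughly a two-minute budget:

    def configDbIp = '10.0.0.21'   // illustrative; the pipeline reads both
    def configDbPort = '9161'      // values from the opencontrail pillar
    common.retry(6, 20) {
        // fails (and retries) until the config DB port accepts connections
        salt.cmdRun(pepperEnv, 'I@cassandra:backup:client',
                    "nc -v -z -w2 ${configDbIp} ${configDbPort}")
    }
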
diff --git a/stacklight-upgrade.groovy b/stacklight-upgrade.groovy
index 9eef811..7554530 100644
--- a/stacklight-upgrade.groovy
+++ b/stacklight-upgrade.groovy
@@ -49,8 +49,8 @@
 def verify_es_is_green(master) {
     common.infoMsg('Verify that the Elasticsearch cluster status is green')
     try {
-        def retries_wait = 20
-        def retries = 15
+        def retries_wait = 120
+        def retries = 60
 
         def elasticsearch_vip
         def pillar = salt.getReturnValues(salt.getPillar(master, "I@elasticsearch:client", 'elasticsearch:client:server:host'))
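
The effect of the new values is a much larger polling budget for the Elasticsearch green check: 15 attempts at 20 s apart allowed about 5 minutes, while 60 attempts at 120 s apart allow up to 2 hours.
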
diff --git a/test-model-generator.groovy b/test-model-generator.groovy
index 144f760..8c08493 100644
--- a/test-model-generator.groovy
+++ b/test-model-generator.groovy
@@ -47,6 +47,15 @@
         sh "mkdir -p reports ${apiProject} ${uiProject}"
         def testImage = docker.image(cvpImageName)
         def testImageOptions = "-u root:root --network=host -v ${env.WORKSPACE}/reports:/var/lib/qa_reports --entrypoint=''"
+        withCredentials([
+          [$class          : 'UsernamePasswordMultiBinding',
+          credentialsId   : 'scale-ci',
+          passwordVariable: 'JENKINS_PASSWORD',
+          usernameVariable: 'JENKINS_USER']
+          ]) {
+            env.JENKINS_USER = JENKINS_USER
+            env.JENKINS_PASSWORD = JENKINS_PASSWORD
+        }
         try {
             stage("checkout") {
                 if (event) {
@@ -140,7 +149,7 @@
 
                 dir(apiProject) {
                     python.runVirtualenvCommand("${env.WORKSPACE}/venv",
-                            "export IMAGE=${apiImage.id}; ./bootstrap_env.sh up")
+                            "export IMAGE=${apiImage.id}; export DOCKER_COMPOSE=docker-compose-test.yml; ./bootstrap_env.sh up")
                     common.retry(5, 20) {
                         sh 'curl -v http://127.0.0.1:8001/api/v1 > /dev/null'
                     }
@@ -161,7 +170,7 @@
                         export TEST_PASSWORD=default
                         export TEST_MODELD_URL=127.0.0.1
                         export TEST_MODELD_PORT=3000
-                        export TEST_TIMEOUT=30
+                        export TEST_TIMEOUT=15
                         cd /var/lib/trymcp-tests
                         pytest ${component}
                     """
diff --git a/upgrade-mcp-release.groovy b/upgrade-mcp-release.groovy
index 8e5bcdc..3a55011 100644
--- a/upgrade-mcp-release.groovy
+++ b/upgrade-mcp-release.groovy
@@ -330,6 +330,27 @@
                         "grep -r --exclude-dir=aptly -l 'system.linux.system.repo.mcp.updates' * | xargs --no-run-if-empty sed -i 's/system.linux.system.repo.mcp.updates/system.linux.system.repo.mcp.apt_mirantis.update/g'")
                     salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && " +
                         "grep -r --exclude-dir=aptly -l 'system.linux.system.repo.mcp.extra' * | xargs --no-run-if-empty sed -i 's/system.linux.system.repo.mcp.extra/system.linux.system.repo.mcp.apt_mirantis.extra/g'")
+
+                    // Switch Jenkins/Gerrit to use LDAP SSL/TLS
+                    def gerritldapURI = salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && " +
+                        "grep -r --exclude-dir=aptly 'gerrit_ldap_server: .*' * | grep -Po 'gerrit_ldap_server: \\K.*' | tr -d '\"'", true, null, false).get('return')[0].values()[0].replaceAll('Salt command execution success', '').trim()
+                    if (gerritldapURI.startsWith('ldap://')) {
+                        salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && " +
+                            "grep -r --exclude-dir=aptly -l 'gerrit_ldap_server: .*' * | xargs --no-run-if-empty sed -i 's|ldap://|ldaps://|g'")
+                    } else if (! gerritldapURI.startsWith('ldaps://')) {
+                        salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && " +
+                            "grep -r --exclude-dir=aptly -l 'gerrit_ldap_server: .*' * | xargs --no-run-if-empty sed -i 's|gerrit_ldap_server: .*|gerrit_ldap_server: \"ldaps://${gerritldapURI}\"|g'")
+                    }
+                    def jenkinsldapURI = salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && " +
+                        "grep -r --exclude-dir=aptly 'jenkins_security_ldap_server: .*' * | grep -Po 'jenkins_security_ldap_server: \\K.*' | tr -d '\"'", true, null, false).get('return')[0].values()[0].replaceAll('Salt command execution success', '').trim()
+                    if (jenkinsldapURI.startsWith('ldap://')) {
+                        salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && " +
+                            "grep -r --exclude-dir=aptly -l 'jenkins_security_ldap_server: .*' * | xargs --no-run-if-empty sed -i 's|ldap://|ldaps://|g'")
+                    } else if (! jenkinsldapURI.startsWith('ldaps://')) {
+                        salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && " +
+                            "grep -r --exclude-dir=aptly -l 'jenkins_security_ldap_server: .*' * | xargs --no-run-if-empty sed -i 's|jenkins_security_ldap_server: .*|jenkins_security_ldap_server: \"ldaps://${jenkinsldapURI}\"|g'")
+                    }
+
                     salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/system && git checkout ${reclassSystemBranch}")
                     // Add kubernetes-extra repo
                     if (salt.testTarget(venvPepper, "I@kubernetes:master")) {
@@ -490,7 +511,9 @@
                 common.infoMsg('Perform: updating openssh')
                 salt.enforceState(venvPepper, "I@linux:system", 'openssh', true)
 
-                salt.enforceState(venvPepper, 'I@jenkins:client and not I@salt:master', 'jenkins.client', true)
+                // Apply changes for HAProxy on CI/CD nodes
+                salt.enforceState(venvPepper, 'I@keepalived:cluster:instance:cicd_control_vip and I@haproxy:proxy', 'haproxy.proxy', true)
+
                 salt.cmdRun(venvPepper, "I@salt:master", "salt -C 'I@jenkins:client and I@docker:client and not I@salt:master' state.sls docker.client --async")
 
                 sleep(180)
@@ -505,6 +528,8 @@
                 catch (Exception ex) {
                     error("Docker containers for CI/CD services are having troubles with starting.")
                 }
+
+                salt.enforceState(venvPepper, 'I@jenkins:client and not I@salt:master', 'jenkins.client', true)
             }
         }
         catch (Throwable e) {
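
The LDAP normalization added above has three cases: a plain ldap:// prefix is rewritten in place, a bare host gets ldaps:// prepended, and values already using ldaps:// fall through untouched. The same decision logic, extracted as a self-contained sketch (hostnames illustrative):

    def normalizeLdapUri = { String uri ->
        if (uri.startsWith('ldap://')) {
            return uri.replaceFirst('ldap://', 'ldaps://')
        }
        if (!uri.startsWith('ldaps://')) {
            return 'ldaps://' + uri
        }
        return uri
    }
    assert normalizeLdapUri('ldap://ldap.example.local') == 'ldaps://ldap.example.local'
    assert normalizeLdapUri('ldap.example.local') == 'ldaps://ldap.example.local'
    assert normalizeLdapUri('ldaps://ldap.example.local') == 'ldaps://ldap.example.local'
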