Merge "Rename PATH variable to avoid collision with system vars" into release/proposed/2019.2.0
diff --git a/cloud-deploy-pipeline.groovy b/cloud-deploy-pipeline.groovy
index 6b6ec4e..08ebbd1 100644
--- a/cloud-deploy-pipeline.groovy
+++ b/cloud-deploy-pipeline.groovy
@@ -334,6 +334,9 @@
             // Install
             //
 
+            // Check if all minions are reachable and ready
+            salt.checkTargetMinionsReady(['saltId': venvPepper, 'target': '*'])
+
             if (common.checkContains('STACK_INSTALL', 'core')) {
                 stage('Install core infrastructure') {
                     def staticMgmtNetwork = false
diff --git a/cvp-func.groovy b/cvp-func.groovy
index 4a231dc..80160ab 100644
--- a/cvp-func.groovy
+++ b/cvp-func.groovy
@@ -36,6 +36,10 @@
               TARGET_NODE = "I@gerrit:client"
             }
             saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+            os_version=salt.getPillar(saltMaster, 'I@salt:master', '_param:openstack_version')['return'][0].values()[0]
+            if (!os_version) {
+                throw new Exception("Openstack is not found on this env. Exiting")
+            }
             salt.cmdRun(saltMaster, TARGET_NODE, "rm -rf ${remote_artifacts_dir}")
             salt.cmdRun(saltMaster, TARGET_NODE, "mkdir -p ${remote_artifacts_dir}")
             keystone_creds = validate._get_keystone_creds_v3(saltMaster)
diff --git a/cvp-ha.groovy b/cvp-ha.groovy
index e96a34c..e933984 100644
--- a/cvp-ha.groovy
+++ b/cvp-ha.groovy
@@ -42,6 +42,10 @@
                   TEMPEST_TARGET_NODE = "I@gerrit:client"
                 }
                 saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+                os_version=salt.getPillar(saltMaster, 'I@salt:master', '_param:openstack_version')['return'][0].values()[0]
+                if (!os_version) {
+                    throw new Exception("Openstack is not found on this env. Exiting")
+                }
                 salt.cmdRun(saltMaster, TEMPEST_TARGET_NODE, "rm -rf ${remote_artifacts_dir}")
                 salt.cmdRun(saltMaster, TEMPEST_TARGET_NODE, "mkdir -p ${remote_artifacts_dir}")
                 keystone_creds = validate._get_keystone_creds_v3(saltMaster)
diff --git a/cvp-perf.groovy b/cvp-perf.groovy
index 62f5226..ebb7987 100644
--- a/cvp-perf.groovy
+++ b/cvp-perf.groovy
@@ -32,19 +32,24 @@
               TARGET_NODE = "I@gerrit:client"
             }
             saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+            os_version=salt.getPillar(saltMaster, 'I@salt:master', '_param:openstack_version')['return'][0].values()[0]
+            if (!os_version) {
+                throw new Exception("Openstack is not found on this env. Exiting")
+            }
+            container_name = "${env.JOB_NAME}"
             salt.cmdRun(saltMaster, TARGET_NODE, "rm -rf ${remote_artifacts_dir}")
             salt.cmdRun(saltMaster, TARGET_NODE, "mkdir -p ${remote_artifacts_dir}")
             keystone_creds = validate._get_keystone_creds_v3(saltMaster)
             if (!keystone_creds) {
                 keystone_creds = validate._get_keystone_creds_v2(saltMaster)
             }
-            validate.runContainer(saltMaster, TARGET_NODE, TEST_IMAGE, 'cvp', keystone_creds)
-            validate.configureContainer(saltMaster, TARGET_NODE, PROXY, TOOLS_REPO, "")
+            validate.runContainer(saltMaster, TARGET_NODE, TEST_IMAGE, container_name, keystone_creds)
+            validate.configureContainer(saltMaster, TARGET_NODE, PROXY, TOOLS_REPO, "", "internalURL", "", "", [], container_name)
         }
 
         stage('Run Rally tests') {
             sh "mkdir -p ${artifacts_dir}"
-            validate.runCVPrally(saltMaster, TARGET_NODE, RALLY_SCENARIO_FILE, remote_artifacts_dir)
+            validate.runCVPrally(saltMaster, TARGET_NODE, RALLY_SCENARIO_FILE, remote_artifacts_dir, "docker-rally", container_name)
         }
 
         stage('Collect results') {
@@ -59,7 +64,8 @@
         throw e
     } finally {
         if (DEBUG_MODE == 'false') {
-            validate.runCleanup(saltMaster, TARGET_NODE)
+            validate.openstack_cleanup(saltMaster, TARGET_NODE, container_name)
+            validate.runCleanup(saltMaster, TARGET_NODE, container_name)
             salt.cmdRun(saltMaster, TARGET_NODE, "rm -rf ${remote_artifacts_dir}")
         }
     }
diff --git a/cvp-runner.groovy b/cvp-runner.groovy
index 22aa9d0..b646130 100644
--- a/cvp-runner.groovy
+++ b/cvp-runner.groovy
@@ -5,148 +5,123 @@
  * Expected parameters:
  *   SALT_MASTER_URL                 URL of Salt master
  *   SALT_MASTER_CREDENTIALS         Credentials to the Salt API
+ *   IMAGE                           Docker image link to use for running a container with the test framework.
+ *   EXTRA_PARAMS                    YAML context which contains additional settings for the job
  *
- *   TESTS_SET                       Leave empty for full run or choose a file (test)
- *   TESTS_REPO                      Repo to clone
- *   TESTS_SETTINGS                  Additional environment varibales to apply
- *   PROXY                           Proxy to use for cloning repo or for pip
- *   IMAGE                           Docker image to use for running container with test framework.
- *   DEBUG_MODE                      If you need to debug (keep container after test), please enabled this
- *  To launch tests from docker images need to set IMAGE and left TESTS_REPO empty
  */
 
 common = new com.mirantis.mk.Common()
 validate = new com.mirantis.mcp.Validate()
-salt = new com.mirantis.mk.Salt()
 salt_testing = new com.mirantis.mk.SaltModelTesting()
-def artifacts_dir = "validation_artifacts"
-def remote_dir = '/root/qa_results'
-def container_workdir = '/var/lib'
-def container_name = "${env.JOB_NAME}"
-def xml_file = "${container_name}_report.xml"
-def TARGET_NODE = "I@gerrit:client"
-def reinstall_env = false
 
-def saltMaster
-def settings
+def EXTRA_PARAMS = readYaml(text: env.getProperty('EXTRA_PARAMS')) ?: [:]
+def env_vars = EXTRA_PARAMS.get("envs") ?: []
 
-slaveNode = (env.getProperty('SLAVE_NODE')) ?: 'docker'
-imageName = (env.getProperty('IMAGE')) ?: 'docker-prod-local.docker.mirantis.net/mirantis/cvp/cvp-spt:stable'
+def IMAGE = (env.getProperty('IMAGE')) ?: 'docker-prod-local.docker.mirantis.net/mirantis/cvp/cvp-sanity-checks:stable'
+def SLAVE_NODE = (env.getProperty('SLAVE_NODE')) ?: 'docker'
 
-node(slaveNode) {
-    try{
-        stage('Initialization') {
-            sh "rm -rf ${artifacts_dir}"
-            // TODO collaps TESTS_SETTINGS flow into EXTRA variables map
-            if ( TESTS_SETTINGS != "" ) {
-                for (var in TESTS_SETTINGS.tokenize(";")) {
-                    key = var.tokenize("=")[0].trim()
-                    value = var.tokenize("=")[1].trim()
-                    if (key == 'TARGET_NODE') {
-                        TARGET_NODE = value
-                        common.infoMsg("Node for container is set to ${TARGET_NODE}")
-                    }
-                    if (key == 'REINSTALL_ENV') {
-                        reinstall_env = value.toBoolean()
-                    }
-                }
-            }
-            if ( IMAGE == "" ) {
-                common.infoMsg("Env for tests will be built on Jenkins slave")
-                TARGET_NODE = ""
-                validate.prepareVenv(TESTS_REPO, PROXY)
-            } else {
-                saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
-                salt.cmdRun(saltMaster, TARGET_NODE, "rm -rf ${remote_dir}/")
-                salt.cmdRun(saltMaster, TARGET_NODE, "mkdir -p ${remote_dir}/")
-                validate.runContainer(saltMaster, TARGET_NODE, IMAGE, container_name)
-                if ( TESTS_REPO != "") {
-                    salt.cmdRun(saltMaster, TARGET_NODE, "docker exec ${container_name} rm -rf ${container_workdir}/${container_name}")
-                    salt.cmdRun(saltMaster, TARGET_NODE, "docker exec ${container_name} git clone ${TESTS_REPO} ${container_workdir}/${container_name}")
-                    TESTS_SET = container_workdir + '/' + container_name + '/' + TESTS_SET
-                    if ( reinstall_env ) {
-                        common.infoMsg("Pip packages in container will be reinstalled based on requirements.txt from ${TESTS_REPO}")
-                        salt.cmdRun(saltMaster, TARGET_NODE, "docker exec ${container_name} pip install --force-reinstall -r ${container_workdir}/${container_name}/requirements.txt")
-                    }
-                }
-            }
-        }
+/*
+YAML example
+=====
+# commands is a map of commands which looks like step_name: shell_command
+commands:
+  001_prepare: rm /var/lib/g.txt
+  002_prepare: git clone http://repo_with_tests.git
+  003_test: cd repo_with_tests && pytest /var/lib/ --collect-only
+  004_collect: cp cvp-spt /var/lib/validation_artifacts/
+# envs is a list of new environment variables
+envs:
+  - SALT_USERNAME=admin
+  - SALT_PASSWORD=password
+  - drivetrain_version=testing
+*/
 
-        stage('Run Tests') {
-            def creds = common.getCredentials(SALT_MASTER_CREDENTIALS)
-            def username = creds.username
-            def password = creds.password
-            def script = "pytest --junitxml ${container_workdir}/${artifacts_dir}/${xml_file} --tb=short -sv ${container_workdir}/${TESTS_SET} -vv"
+node (SLAVE_NODE) {
+    def artifacts_dir = 'validation_artifacts'
+    def configRun = [:]
+    try {
+        withEnv(env_vars) {
+            stage('Initialization') {
+                def container_workdir = '/var/lib'
+                def test_suite_name = "${env.JOB_NAME}"
+                def workdir = "${container_workdir}/${test_suite_name}"
+                def xml_file = "${test_suite_name}_report.xml"
+                def tests_set = (env.getProperty('tests_set')) ?: ''
+                def script = "pytest --junitxml ${container_workdir}/${artifacts_dir}/${xml_file} --tb=short -vv ${tests_set}"
 
-            sh "mkdir -p ${artifacts_dir}"
+                sh "mkdir -p ${artifacts_dir}"
 
-            def configRun = [
-                'image': imageName,
-                'baseRepoPreConfig': false,
-                'dockerMaxCpus': 2,
-                'dockerExtraOpts' : [
-                    "--network=host",
-                    "-v /root/qa_results/:/root/qa_results/",
-                    "-v ${env.WORKSPACE}/${artifacts_dir}/:${container_workdir}/${artifacts_dir}/",
-                    // TODO remove if all docker images with tests (like cvp-spt) will be transferred into new architucture (like cvp-sanity)
-                    "--entrypoint=''",  // to override ENTRYPOINT=/bin/bash in Dockerfile of image
-                ],
+                // Enrichment for docker commands
+                def commands = EXTRA_PARAMS.get("commands") ?: ['010_start_tests': "cd ${workdir} && with_venv.sh ${script}"]
+                def commands_list = commands.collectEntries{ [ (it.key) : { sh("${it.value}") } ] }
 
-                'envOpts'         : [
-                    "SALT_USERNAME=${username}",
-                    "SALT_PASSWORD=${password}",
+                // Enrichment for env variables
+                def creds = common.getCredentials(SALT_MASTER_CREDENTIALS)
+                def env_vars_list  =  [
+                    "SALT_USERNAME=${creds.username}",
+                    "SALT_PASSWORD=${creds.password}",
                     "SALT_URL=${SALT_MASTER_URL}"
-                ] + TESTS_SETTINGS.replaceAll('\\"', '').tokenize(";"),
-                'runCommands'     : [
-                      '010_start_tests'    : {
-                          sh("cd ${container_workdir} && ${script}")
-                      }
-                  ]
-                ]
-            salt_testing.setupDockerAndTest(configRun)
-        }
+                    ] + env_vars
 
-        stage ('Publish results') {
-            archiveArtifacts artifacts: "${artifacts_dir}/*"
-            junit "${artifacts_dir}/*.xml"
-            if (env.JOB_NAME.contains("cvp-spt")) {
-                plot csvFileName: 'plot-8634d2fe-dc48-4713-99f9-b69a381483aa.csv',
-                     group: 'SPT',
-                     style: 'line',
-                     title: 'SPT Glance results',
-                     xmlSeries: [[
-                     file: "${env.JOB_NAME}_report.xml",
-                     nodeType: 'NODESET',
-                     url: '',
-                     xpath: '/testsuite/testcase[@name="test_speed_glance"]/properties/property']]
-                plot csvFileName: 'plot-8634d2fe-dc48-4713-99f9-b69a381483bb.csv',
-                     group: 'SPT',
-                     style: 'line',
-                     title: 'SPT HW2HW results',
-                     xmlSeries: [[
-                     file: "${env.JOB_NAME}_report.xml",
-                     nodeType: 'NODESET',
-                     url: '',
-                     xpath: '/testsuite/testcase[@classname="cvp_spt.tests.test_hw2hw"]/properties/property']]
-                plot csvFileName: 'plot-8634d2fe-dc48-4713-99f9-b69a381483bc.csv',
-                     group: 'SPT',
-                     style: 'line',
-                     title: 'SPT VM2VM results',
-                     xmlSeries: [[
-                     file: "${env.JOB_NAME}_report.xml",
-                     nodeType: 'NODESET',
-                     url: '',
-                     xpath: '/testsuite/testcase[@classname="cvp_spt.tests.test_vm2vm"]/properties/property']]
+                // Generating final config
+                configRun = [
+                    'image': IMAGE,
+                    'baseRepoPreConfig': false,
+                    'dockerMaxCpus': 2,
+                    'dockerExtraOpts' : [
+                        "--network=host",
+                        "-v /root/qa_results/:/root/qa_results/",
+                        "-v ${env.WORKSPACE}/${artifacts_dir}/:${container_workdir}/${artifacts_dir}/",
+                    ],
+                    'envOpts'         : env_vars_list,
+                    'runCommands'     : commands_list
+                ]
+            }
+
+            stage('Run Tests') {
+                salt_testing.setupDockerAndTest(configRun)
+            }
+
+            stage ('Publish results') {
+                archiveArtifacts artifacts: "${artifacts_dir}/*"
+                junit "${artifacts_dir}/*.xml"
+                if (env.JOB_NAME.contains("cvp-spt")) {
+                    plot csvFileName: 'plot-glance.csv',
+                        group: 'SPT',
+                        style: 'line',
+                        title: 'SPT Glance results',
+                        xmlSeries: [[
+                        file: "${env.JOB_NAME}_report.xml",
+                        nodeType: 'NODESET',
+                        url: '',
+                        xpath: '/testsuite/testcase[@name="test_speed_glance"]/properties/property']]
+                    plot csvFileName: 'plot-hw2hw.csv',
+                        group: 'SPT',
+                        style: 'line',
+                        title: 'SPT HW2HW results',
+                        xmlSeries: [[
+                        file: "${env.JOB_NAME}_report.xml",
+                        nodeType: 'NODESET',
+                        url: '',
+                        xpath: '/testsuite/testcase[@classname="cvp_spt.tests.test_hw2hw"]/properties/property']]
+                    plot csvFileName: 'plot-vm2vm.csv',
+                        group: 'SPT',
+                        style: 'line',
+                        title: 'SPT VM2VM results',
+                        xmlSeries: [[
+                        file: "${env.JOB_NAME}_report.xml",
+                        nodeType: 'NODESET',
+                        url: '',
+                        xpath: '/testsuite/testcase[@classname="cvp_spt.tests.test_vm2vm"]/properties/property']]
+                }
             }
         }
-    } catch (Throwable e) {
-        // If there was an error or exception thrown, the build failed
+    }
+    catch (Throwable e) {
         currentBuild.result = "FAILURE"
         throw e
-    } finally {
-        if (DEBUG_MODE == 'false') {
-            validate.runCleanup(saltMaster, TARGET_NODE, container_name)
-            salt.cmdRun(saltMaster, TARGET_NODE, "rm -rf ${remote_dir}")
-        }
+    }
+    finally {
+        sh "rm -rf ${artifacts_dir}"
     }
 }
diff --git a/cvp-runner_old.groovy b/cvp-runner_old.groovy
new file mode 100644
index 0000000..22aa9d0
--- /dev/null
+++ b/cvp-runner_old.groovy
@@ -0,0 +1,152 @@
+/**
+ *
+ * Launch pytest frameworks in Jenkins
+ *
+ * Expected parameters:
+ *   SALT_MASTER_URL                 URL of Salt master
+ *   SALT_MASTER_CREDENTIALS         Credentials to the Salt API
+ *
+ *   TESTS_SET                       Leave empty for full run or choose a file (test)
+ *   TESTS_REPO                      Repo to clone
+ *   TESTS_SETTINGS                  Additional environment varibales to apply
+ *   PROXY                           Proxy to use for cloning repo or for pip
+ *   IMAGE                           Docker image to use for running container with test framework.
+ *   DEBUG_MODE                      If you need to debug (keep container after test), please enabled this
+ *  To launch tests from docker images need to set IMAGE and left TESTS_REPO empty
+ */
+
+common = new com.mirantis.mk.Common()
+validate = new com.mirantis.mcp.Validate()
+salt = new com.mirantis.mk.Salt()
+salt_testing = new com.mirantis.mk.SaltModelTesting()
+def artifacts_dir = "validation_artifacts"
+def remote_dir = '/root/qa_results'
+def container_workdir = '/var/lib'
+def container_name = "${env.JOB_NAME}"
+def xml_file = "${container_name}_report.xml"
+def TARGET_NODE = "I@gerrit:client"
+def reinstall_env = false
+
+def saltMaster
+def settings
+
+slaveNode = (env.getProperty('SLAVE_NODE')) ?: 'docker'
+imageName = (env.getProperty('IMAGE')) ?: 'docker-prod-local.docker.mirantis.net/mirantis/cvp/cvp-spt:stable'
+
+node(slaveNode) {
+    try{
+        stage('Initialization') {
+            sh "rm -rf ${artifacts_dir}"
+            // TODO collaps TESTS_SETTINGS flow into EXTRA variables map
+            if ( TESTS_SETTINGS != "" ) {
+                for (var in TESTS_SETTINGS.tokenize(";")) {
+                    key = var.tokenize("=")[0].trim()
+                    value = var.tokenize("=")[1].trim()
+                    if (key == 'TARGET_NODE') {
+                        TARGET_NODE = value
+                        common.infoMsg("Node for container is set to ${TARGET_NODE}")
+                    }
+                    if (key == 'REINSTALL_ENV') {
+                        reinstall_env = value.toBoolean()
+                    }
+                }
+            }
+            if ( IMAGE == "" ) {
+                common.infoMsg("Env for tests will be built on Jenkins slave")
+                TARGET_NODE = ""
+                validate.prepareVenv(TESTS_REPO, PROXY)
+            } else {
+                saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+                salt.cmdRun(saltMaster, TARGET_NODE, "rm -rf ${remote_dir}/")
+                salt.cmdRun(saltMaster, TARGET_NODE, "mkdir -p ${remote_dir}/")
+                validate.runContainer(saltMaster, TARGET_NODE, IMAGE, container_name)
+                if ( TESTS_REPO != "") {
+                    salt.cmdRun(saltMaster, TARGET_NODE, "docker exec ${container_name} rm -rf ${container_workdir}/${container_name}")
+                    salt.cmdRun(saltMaster, TARGET_NODE, "docker exec ${container_name} git clone ${TESTS_REPO} ${container_workdir}/${container_name}")
+                    TESTS_SET = container_workdir + '/' + container_name + '/' + TESTS_SET
+                    if ( reinstall_env ) {
+                        common.infoMsg("Pip packages in container will be reinstalled based on requirements.txt from ${TESTS_REPO}")
+                        salt.cmdRun(saltMaster, TARGET_NODE, "docker exec ${container_name} pip install --force-reinstall -r ${container_workdir}/${container_name}/requirements.txt")
+                    }
+                }
+            }
+        }
+
+        stage('Run Tests') {
+            def creds = common.getCredentials(SALT_MASTER_CREDENTIALS)
+            def username = creds.username
+            def password = creds.password
+            def script = "pytest --junitxml ${container_workdir}/${artifacts_dir}/${xml_file} --tb=short -sv ${container_workdir}/${TESTS_SET} -vv"
+
+            sh "mkdir -p ${artifacts_dir}"
+
+            def configRun = [
+                'image': imageName,
+                'baseRepoPreConfig': false,
+                'dockerMaxCpus': 2,
+                'dockerExtraOpts' : [
+                    "--network=host",
+                    "-v /root/qa_results/:/root/qa_results/",
+                    "-v ${env.WORKSPACE}/${artifacts_dir}/:${container_workdir}/${artifacts_dir}/",
+                    // TODO remove if all docker images with tests (like cvp-spt) will be transferred into new architucture (like cvp-sanity)
+                    "--entrypoint=''",  // to override ENTRYPOINT=/bin/bash in Dockerfile of image
+                ],
+
+                'envOpts'         : [
+                    "SALT_USERNAME=${username}",
+                    "SALT_PASSWORD=${password}",
+                    "SALT_URL=${SALT_MASTER_URL}"
+                ] + TESTS_SETTINGS.replaceAll('\\"', '').tokenize(";"),
+                'runCommands'     : [
+                      '010_start_tests'    : {
+                          sh("cd ${container_workdir} && ${script}")
+                      }
+                  ]
+                ]
+            salt_testing.setupDockerAndTest(configRun)
+        }
+
+        stage ('Publish results') {
+            archiveArtifacts artifacts: "${artifacts_dir}/*"
+            junit "${artifacts_dir}/*.xml"
+            if (env.JOB_NAME.contains("cvp-spt")) {
+                plot csvFileName: 'plot-8634d2fe-dc48-4713-99f9-b69a381483aa.csv',
+                     group: 'SPT',
+                     style: 'line',
+                     title: 'SPT Glance results',
+                     xmlSeries: [[
+                     file: "${env.JOB_NAME}_report.xml",
+                     nodeType: 'NODESET',
+                     url: '',
+                     xpath: '/testsuite/testcase[@name="test_speed_glance"]/properties/property']]
+                plot csvFileName: 'plot-8634d2fe-dc48-4713-99f9-b69a381483bb.csv',
+                     group: 'SPT',
+                     style: 'line',
+                     title: 'SPT HW2HW results',
+                     xmlSeries: [[
+                     file: "${env.JOB_NAME}_report.xml",
+                     nodeType: 'NODESET',
+                     url: '',
+                     xpath: '/testsuite/testcase[@classname="cvp_spt.tests.test_hw2hw"]/properties/property']]
+                plot csvFileName: 'plot-8634d2fe-dc48-4713-99f9-b69a381483bc.csv',
+                     group: 'SPT',
+                     style: 'line',
+                     title: 'SPT VM2VM results',
+                     xmlSeries: [[
+                     file: "${env.JOB_NAME}_report.xml",
+                     nodeType: 'NODESET',
+                     url: '',
+                     xpath: '/testsuite/testcase[@classname="cvp_spt.tests.test_vm2vm"]/properties/property']]
+            }
+        }
+    } catch (Throwable e) {
+        // If there was an error or exception thrown, the build failed
+        currentBuild.result = "FAILURE"
+        throw e
+    } finally {
+        if (DEBUG_MODE == 'false') {
+            validate.runCleanup(saltMaster, TARGET_NODE, container_name)
+            salt.cmdRun(saltMaster, TARGET_NODE, "rm -rf ${remote_dir}")
+        }
+    }
+}
diff --git a/cvp-tempest.groovy b/cvp-tempest.groovy
new file mode 100644
index 0000000..b169ab5
--- /dev/null
+++ b/cvp-tempest.groovy
@@ -0,0 +1,162 @@
+/**
+ *
+ * Launch CVP Tempest verification of the cloud
+ *
+ * Expected parameters:
+
+ *   SALT_MASTER_URL             URL of Salt master
+ *   SALT_MASTER_CREDENTIALS     Credentials that are used in this Jenkins for accessing Salt master (usually "salt")
+ *   SERVICE_NODE                Node where the runtest formula and some other states will be executed
+ *   VERBOSE                     Show salt output in Jenkins console
+ *   DEBUG_MODE                  Whether to keep the container after the test run (for debugging) or remove it
+ *   STOP_ON_ERROR               Stop pipeline if error during salt run occurs
+ *   GENERATE_CONFIG             Run runtest formula / generate Tempest config
+ *   SKIP_LIST_PATH              Path to skip list (not in use right now)
+ *   TEST_IMAGE                  Docker image link to use for running a container with testing tools.
+ *   TARGET_NODE                 Node to run container with Tempest/Rally
+ *   PREPARE_RESOURCES           Prepare Openstack resources before test run
+ *   TEMPEST_TEST_PATTERN        Tests to run
+ *   TEMPEST_ENDPOINT_TYPE       Type of OS endpoint to use during test run (not in use right now)
+ *   concurrency                 Number of threads to use for Tempest test run
+ *   remote_artifacts_dir        Folder to use for artifacts on remote node
+ *   report_prefix               Some prefix to put to report name
+ *
+ */
+
+
+common = new com.mirantis.mk.Common()
+salt = new com.mirantis.mk.Salt()
+validate = new com.mirantis.mcp.Validate()
+
+def saltMaster
+extraYamlContext = env.getProperty('EXTRA_PARAMS')
+if (extraYamlContext) {
+    common.mergeEnv(env, extraYamlContext) }
+def SALT_MASTER_CREDENTIALS=(env.SALT_MASTER_CREDENTIALS) ?: 'salt'
+def VERBOSE = (env.VERBOSE) ? env.VERBOSE.toBoolean() : true
+def DEBUG_MODE = (env.DEBUG_MODE) ?: false
+def STOP_ON_ERROR = (env.STOP_ON_ERROR) ? env.STOP_ON_ERROR.toBoolean() : false
+def GENERATE_CONFIG = (env.GENERATE_CONFIG) ?: true
+def remote_artifacts_dir = (env.remote_artifacts_dir) ?: '/root/test/'
+def report_prefix = (env.report_prefix) ?: ''
+def args = ''
+node() {
+    try{
+        stage('Initialization') {
+            deleteDir()
+            saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+            container_name = "${env.JOB_NAME}"
+            cluster_name=salt.getPillar(saltMaster, 'I@salt:master', '_param:cluster_name')['return'][0].values()[0]
+            os_version=salt.getPillar(saltMaster, 'I@salt:master', '_param:openstack_version')['return'][0].values()[0]
+            if (!os_version) {
+                throw new Exception("Openstack is not found on this env. Exiting")
+            }
+            TEST_IMAGE = (env.TEST_IMAGE) ?: "docker-prod-virtual.docker.mirantis.net/mirantis/cicd/ci-tempest:${os_version}"
+            runtest_node = salt.runSaltProcessStep(saltMaster, 'I@runtest:*', 'test.ping')['return'][0]
+            if (runtest_node.values()[0]) {
+                // Let's use Service node that was defined in reclass. If several nodes are defined
+                // we will use the first from salt output
+                common.infoMsg("Service node ${runtest_node.keySet()[0]} is defined in reclass")
+                SERVICE_NODE = runtest_node.keySet()[0]
+            }
+            else {
+                throw new Exception("Runtest config is not found in reclass. Please create runtest.yml and include it " +
+                                    "into reclass. Check documentation for more details")
+            }
+            common.infoMsg('Refreshing pillars on service node')
+            salt.runSaltProcessStep(saltMaster, SERVICE_NODE, 'saltutil.refresh_pillar', [], null, VERBOSE)
+            tempest_node=salt.getPillar(saltMaster, SERVICE_NODE, '_param:tempest_test_target')['return'][0].values()[0] ?: 'I@gerrit:client'
+        }
+        stage('Preparing resources') {
+            if ( PREPARE_RESOURCES.toBoolean() ) {
+                common.infoMsg('Running salt.minion state on service node')
+                salt.enforceState(saltMaster, SERVICE_NODE, ['salt.minion'], VERBOSE, STOP_ON_ERROR, null, false, 300, 2, true, [], 60)
+                common.infoMsg('Running keystone.client on service node')
+                salt.enforceState(saltMaster, SERVICE_NODE, 'keystone.client', VERBOSE, STOP_ON_ERROR)
+                common.infoMsg('Running glance.client on service node')
+                salt.enforceState(saltMaster, SERVICE_NODE, 'glance.client', VERBOSE, STOP_ON_ERROR)
+                common.infoMsg('Running nova.client on service node')
+                salt.enforceState(saltMaster, SERVICE_NODE, 'nova.client', VERBOSE, STOP_ON_ERROR)
+            }
+            else {
+                common.infoMsg('Skipping resources preparation')
+            }
+        }
+        stage('Generate config') {
+            if ( GENERATE_CONFIG.toBoolean() ) {
+                salt.runSaltProcessStep(saltMaster, SERVICE_NODE, 'file.remove', ["${remote_artifacts_dir}"])
+                salt.runSaltProcessStep(saltMaster, SERVICE_NODE, 'file.mkdir', ["${remote_artifacts_dir}"])
+                fullnodename = salt.getMinions(saltMaster, SERVICE_NODE).get(0)
+                TARGET_NODE = (env.TARGET_NODE) ?: tempest_node
+                if (TARGET_NODE != tempest_node) {
+                    common.infoMsg("TARGET_NODE is defined in Jenkins")
+                    def params_to_update = ['tempest_test_target': "${TARGET_NODE}"]
+                    common.infoMsg("Overriding default ${tempest_node} value of tempest_test_target parameter")
+                    result = salt.runSaltCommand(saltMaster, 'local', ['expression': SERVICE_NODE, 'type': 'compound'], 'reclass.node_update',
+                                                 null, null, ['name': fullnodename, 'parameters': ['tempest_test_target': "${TARGET_NODE}"]])
+                    salt.checkResult(result)
+                }
+                common.infoMsg("TARGET_NODE is ${TARGET_NODE}")
+                salt.runSaltProcessStep(saltMaster, TARGET_NODE, 'file.remove', ["${remote_artifacts_dir}"])
+                salt.runSaltProcessStep(saltMaster, TARGET_NODE, 'file.mkdir', ["${remote_artifacts_dir}"])
+                salt.enforceState(saltMaster, SERVICE_NODE, 'runtest', VERBOSE, STOP_ON_ERROR)
+                // we need to refresh pillars on target node after runtest state
+                salt.runSaltProcessStep(saltMaster, TARGET_NODE, 'saltutil.refresh_pillar', [], null, VERBOSE)
+                if (TARGET_NODE != tempest_node) {
+                    common.infoMsg("Reverting tempest_test_target parameter")
+                    result = salt.runSaltCommand(saltMaster, 'local', ['expression': SERVICE_NODE, 'type': 'compound'], 'reclass.node_update',
+                                                 null, null, ['name': fullnodename, 'parameters': ['tempest_test_target': "${tempest_node}"]])
+                }
+                SKIP_LIST_PATH = (env.SKIP_LIST_PATH) ?: salt.getPillar(saltMaster, SERVICE_NODE, '_param:tempest_skip_list_path')['return'][0].values()[0]
+                runtest_tempest_cfg_dir = salt.getPillar(saltMaster, SERVICE_NODE, '_param:runtest_tempest_cfg_dir')['return'][0].values()[0] ?: '/root/test/'
+                if (SKIP_LIST_PATH) {
+                    salt.cmdRun(saltMaster, SERVICE_NODE, "salt-cp ${TARGET_NODE} ${SKIP_LIST_PATH} ${runtest_tempest_cfg_dir}/skip.list")
+                    args += ' --blacklist-file /root/tempest/skip.list '
+                }
+            }
+            else {
+                common.infoMsg('Skipping Tempest config generation')
+                salt.cmdRun(saltMaster, TARGET_NODE, "rm -rf ${remote_artifacts_dir}/reports")
+            }
+        }
+
+        stage('Run Tempest tests') {
+            mounts = ['/root/test/tempest_generated.conf': '/etc/tempest/tempest.conf']
+            validate.runContainer(master: saltMaster, target: TARGET_NODE, dockerImageLink: TEST_IMAGE,
+                                  mounts: mounts, name: container_name)
+            report_prefix += 'tempest_'
+            if (env.concurrency) {
+                args += ' -w ' + env.concurrency
+            }
+            if (TEMPEST_TEST_PATTERN == 'set=smoke') {
+                args += ' -s '
+                report_prefix += 'smoke'
+            }
+            else {
+                if (TEMPEST_TEST_PATTERN != 'set=full') {
+                    args += " -r ${TEMPEST_TEST_PATTERN} "
+                    report_prefix += 'full'
+                }
+            }
+            salt.cmdRun(saltMaster, TARGET_NODE, "docker exec -e ARGS=\'${args}\' ${container_name} /bin/bash -c 'run-tempest'")
+        }
+        stage('Collect results') {
+            report_prefix += "_report_${env.BUILD_NUMBER}"
+            // will be removed after changing runtest-formula logic
+            salt.cmdRun(saltMaster, TARGET_NODE, "mkdir -p ${remote_artifacts_dir}/reports; mv ${remote_artifacts_dir}/report_* ${remote_artifacts_dir}/reports")
+            validate.addFiles(saltMaster, TARGET_NODE, "${remote_artifacts_dir}/reports", '')
+            sh "mv report_*.xml ${report_prefix}.xml"
+            sh "mv report_*.log ${report_prefix}.log"
+            archiveArtifacts artifacts: "${report_prefix}.*"
+            junit "${report_prefix}.xml"
+        }
+    } catch (Throwable e) {
+        // If there was an error or exception thrown, the build failed
+        currentBuild.result = "FAILURE"
+        throw e
+    } finally {
+        if (DEBUG_MODE == 'false') {
+            validate.runCleanup(saltMaster, TARGET_NODE, container_name)
+        }
+    }
+}
diff --git a/generate-cookiecutter-products.groovy b/generate-cookiecutter-products.groovy
index 124f96b..62a6e00 100644
--- a/generate-cookiecutter-products.groovy
+++ b/generate-cookiecutter-products.groovy
@@ -140,11 +140,15 @@
                         def secretKeyID = RequesterEmail ?: "salt@${context['cluster_domain']}".toString()
                         if (!context.get('secrets_encryption_private_key')) {
                             def batchData = """
+                                %echo Generating a basic OpenPGP key for Salt-Master
+                                %no-protection
                                 Key-Type: 1
                                 Key-Length: 4096
                                 Expire-Date: 0
                                 Name-Real: ${context['salt_master_hostname']}.${context['cluster_domain']}
                                 Name-Email: ${secretKeyID}
+                                %commit
+                                %echo done
                             """.stripIndent()
                             writeFile file:'gpg-batch.txt', text:batchData
                             sh "gpg --gen-key --batch < gpg-batch.txt"
@@ -152,7 +156,7 @@
                         } else {
                             writeFile file:'gpgkey.asc', text:context['secrets_encryption_private_key']
                             sh "gpg --import gpgkey.asc"
-                            secretKeyID = sh(returnStdout: true, script: 'gpg --list-secret-keys --with-colons | awk -F: -e "/^sec/{print \\$5; exit}"').trim()
+                            secretKeyID = sh(returnStdout: true, script: 'gpg --list-secret-keys --with-colons | grep -E "^sec" | awk -F: \'{print \$5}\'').trim()
                         }
                         context['secrets_encryption_key_id'] = secretKeyID
                     }
diff --git a/openstack-rabbitmq-upgrade.groovy b/openstack-rabbitmq-upgrade.groovy
new file mode 100644
index 0000000..aabdafc
--- /dev/null
+++ b/openstack-rabbitmq-upgrade.groovy
@@ -0,0 +1,155 @@
+/**
+ * Upgrade RabbitMQ packages on msg nodes.
+ * Update packages on given nodes
+ *
+ * Expected parameters:
+ *   SALT_MASTER_CREDENTIALS            Credentials to the Salt API.
+ *   SALT_MASTER_URL                    Full Salt API address [http://10.10.10.15:6969].
+ *   OS_DIST_UPGRADE                    Upgrade system packages including kernel (apt-get dist-upgrade)
+ *   OS_UPGRADE                         Upgrade all installed applications (apt-get upgrade)
+ *   TARGET_SERVERS                     Comma separated list of salt compound definitions to upgrade.
+ *   INTERACTIVE                        Ask interactive questions during pipeline run (bool).
+ *
+**/
+
+def common = new com.mirantis.mk.Common()
+def salt = new com.mirantis.mk.Salt()
+def python = new com.mirantis.mk.Python()
+def debian = new com.mirantis.mk.Debian()
+def openstack = new com.mirantis.mk.Openstack()
+
+def interactive = INTERACTIVE.toBoolean()
+def LinkedHashMap upgradeStageMap = [:]
+
+upgradeStageMap.put('Pre upgrade',
+  [
+    'Description': 'Only non destructive actions will be applied during this phase. Basic service verification will be performed.',
+    'Status': 'NOT_LAUNCHED',
+    'Expected behaviors': '''
+ * No service downtime
+ * No workload downtime''',
+    'Launched actions': '''
+ * Verify API, perform basic CRUD operations for services.
+ * Verify rabbitmq is running and operational.''',
+    'State result': 'Basic checks around services API are passed.'
+  ])
+
+upgradeStageMap.put('Stop RabbitMQ service',
+  [
+    'Description': 'All rabbitmq services will be stopped on All TARGET_SERVERS nodes.',
+    'Status': 'NOT_LAUNCHED',
+    'Expected behaviors': '''
+ * RabbitMQ services are stopped.
+ * OpenStack APIs are not accessible from this point.
+ * No workload downtime''',
+    'Launched actions': '''
+ * Stop RabbitMQ services''',
+    'State result': 'RabbitMQ service is stopped',
+  ])
+
+upgradeStageMap.put('Upgrade OS',
+  [
+    'Description': 'Optional step. OS packages will be upgraded during this phase, depending on the job parameters dist-upgrade might be called. And reboot of node executed.',
+    'Status': 'NOT_LAUNCHED',
+    'Expected behaviors': '''
+ * No workload downtime
+ * The nodes might be rebooted''',
+    'Launched actions': '''
+ * Install new version of system packages
+ * If doing dist-upgrade new kernel might be installed and node rebooted''',
+    'State result': '''
+ * System packages are updated
+ * Node might be rebooted'''
+  ])
+
+upgradeStageMap.put('Upgrade RabbitMQ server',
+   [
+    'Description': 'RabbitMQ and Erlang code will be upgraded during this stage. No workload downtime is expected.',
+    'Status': 'NOT_LAUNCHED',
+    'Expected behaviors': '''
+ * OpenStack services lose connection to rabbitmq-server
+ * No workload downtime''',
+    'Launched actions': '''
+ * Install new version of RabbitMQ and Erlang packages
+ * Render version of configs''',
+    'State result': '''
+ * RabbitMQ packages are upgraded''',
+  ])
+
+upgradeStageMap.put('Start RabbitMQ service',
+   [
+    'Description': 'All rabbitmq services will be running on All TARGET_SERVERS nodes.',
+    'Status': 'NOT_LAUNCHED',
+    'Expected behaviors': '''
+ * RabbitMQ service is running.
+ * OpenStack API are accessible from this point.
+ * No workload downtime''',
+    'Launched actions': '''
+ * Start RabbitMQ service''',
+    'State result': 'RabbitMQ service is running',
+  ])
+
+def env = "env"
+timeout(time: 12, unit: 'HOURS') {
+  node() {
+
+    stage('Setup virtualenv for Pepper') {
+      python.setupPepperVirtualenv(env, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+    }
+
+    def upgradeTargets = salt.getMinionsSorted(env, TARGET_SERVERS)
+
+    if (upgradeTargets.isEmpty()) {
+      error("No servers for upgrade matched by ${TARGET_SERVERS}")
+    }
+
+    def stopTargets = upgradeTargets.reverse()
+
+    common.printStageMap(upgradeStageMap)
+    if (interactive){
+      input message: common.getColorizedString(
+        "Above you can find detailed info this pipeline will execute.\nThe info provides brief description of each stage, actions that will be performed and service/workload impact during each stage.\nPlease read it carefully.", "yellow")
+    }
+
+    for (target in upgradeTargets){
+      common.stageWrapper(upgradeStageMap, "Pre upgrade", target, interactive) {
+        openstack.runOpenStackUpgradePhase(env, target, 'pre')
+        openstack.runOpenStackUpgradePhase(env, target, 'verify')
+      }
+    }
+
+    for (target in stopTargets) {
+      common.stageWrapper(upgradeStageMap, "Stop RabbitMQ service", target, interactive) {
+        openstack.runOpenStackUpgradePhase(env, target, 'service_stopped')
+      }
+    }
+
+    for (target in upgradeTargets) {
+      common.stageWrapper(upgradeStageMap, "Upgrade OS", target, interactive) {
+        // Choose the apt mode once; 'def' keeps it out of the global script binding
+        def upgrade_mode = null
+        if (OS_DIST_UPGRADE.toBoolean()) {
+          upgrade_mode = 'dist-upgrade'
+        } else if (OS_UPGRADE.toBoolean()) {
+          upgrade_mode = 'upgrade'
+        }
+        if (upgrade_mode) { debian.osUpgradeNode(env, target, upgrade_mode, false) }
+      }
+    }
+
+    for (target in upgradeTargets) {
+      common.stageWrapper(upgradeStageMap, "Upgrade RabbitMQ server", target, interactive) {
+        openstack.runOpenStackUpgradePhase(env, target, 'pkgs_latest')
+        openstack.runOpenStackUpgradePhase(env, target, 'render_config')
+      }
+    }
+
+    for (target in upgradeTargets) {
+      common.stageWrapper(upgradeStageMap, "Start RabbitMQ service", target, interactive) {
+        openstack.runOpenStackUpgradePhase(env, target, 'service_running')
+        openstack.applyOpenstackAppsStates(env, target)
+        openstack.runOpenStackUpgradePhase(env, target, 'verify')
+      }
+    }
+  }
+}
diff --git a/update-ceph.groovy b/update-ceph.groovy
index 59c616e..cde1401 100644
--- a/update-ceph.groovy
+++ b/update-ceph.groovy
@@ -27,15 +27,19 @@
 def waitForHealthy(master, tgt, attempts=100, timeout=10) {
     // wait for healthy cluster
     common = new com.mirantis.mk.Common()
-    common.retry(attempts, timeout){
-        def health = runCephCommand(master, tgt, 'ceph health')['return'][0].values()[0]
+    def count = 0
+    while (count < attempts) {
+        // NOTE(review): health is queried on ADMIN_HOST, not on 'tgt' -- confirm this is intended
+        def health = runCephCommand(master, ADMIN_HOST, 'ceph health')['return'][0].values()[0]
         if (health.contains('HEALTH_OK') || health.contains('HEALTH_WARN noout flag(s) set\n')) {
             common.infoMsg('Cluster is healthy')
-            return 0
-        } else {
-            common.infoMsg(health)
-            throw new Exception()
+            return
         }
+        common.infoMsg(health)
+        count++
+        sleep(timeout)
     }
+    // Fail fast instead of silently continuing the update on an unhealthy cluster
+    throw new Exception("Ceph cluster is still unhealthy after ${attempts} attempts")
 }
 
diff --git a/upgrade-mcp-release.groovy b/upgrade-mcp-release.groovy
index c34215d..bff2589 100644
--- a/upgrade-mcp-release.groovy
+++ b/upgrade-mcp-release.groovy
@@ -479,8 +479,13 @@
                 }
 
                 // update minions certs
+                // call for `salt.minion.ca` state on related nodes to make sure
+                // mine was updated with required data after salt-minion/salt-master restart
+                salt.enforceState(venvPepper, "I@salt:minion:ca", 'salt.minion.ca', true)
                 salt.enforceState(venvPepper, "I@salt:minion", 'salt.minion.cert', true)
 
+                // run `salt.minion` to refresh all minion configs (for example _keystone.conf)
+                salt.enforceState([saltId: venvPepper, target: "I@salt:minion ${extra_tgt}", state: ['salt.minion'], read_timeout: 60, retries: 2])
                 // Retry needed only for rare race-condition in user appearance
                 common.infoMsg('Perform: updating users and keys')
                 salt.enforceState(venvPepper, "I@linux:system", 'linux.system.user', true)