Merge "Add handling of resultCodes for disk i/o utilization check"
diff --git a/cloud-deploy-pipeline.groovy b/cloud-deploy-pipeline.groovy
index 0e0b106..de8d8fd 100644
--- a/cloud-deploy-pipeline.groovy
+++ b/cloud-deploy-pipeline.groovy
@@ -199,6 +199,12 @@
                             common.infoMsg("Property STACK_RECLASS_BRANCH or STACK_RECLASS_ADDRESS not found! Using default values from template.")
                         }
 
+                        // put reclass-system repo to branch/tag/ref. If empty, the reclass commit pinned in the AIO model will be used.
+                        if (common.validInputParam('RECLASS_SYSTEM_BRANCH')) {
+                            common.infoMsg("Setting reclass-system repo to ${RECLASS_SYSTEM_BRANCH} branch/tag/ref")
+                            envParams.put('cfg_reclass_system_branch', RECLASS_SYSTEM_BRANCH)
+                        }
+
                         // put formulas revision - stable, testing or nightly
                         if (common.validInputParam('FORMULA_PKG_REVISION')) {
                             common.infoMsg("Setting formulas revision to ${FORMULA_PKG_REVISION}")
@@ -364,8 +370,7 @@
 
             stage('Install infra') {
                 if (common.checkContains('STACK_INSTALL', 'core') ||
-                    common.checkContains('STACK_INSTALL', 'openstack') ||
-                    common.checkContains('STACK_INSTALL', 'oss')) {
+                    common.checkContains('STACK_INSTALL', 'openstack')) {
                         orchestrate.installInfra(venvPepper, extra_tgt)
                 }
             }
@@ -553,12 +558,6 @@
                 }
             }
 
-            if (common.checkContains('STACK_INSTALL', 'oss')) {
-              stage('Install Oss infra') {
-                orchestrate.installOssInfra(venvPepper, extra_tgt)
-              }
-            }
-
             if (common.checkContains('STACK_INSTALL', 'cicd')) {
                 stage('Install Cicd') {
                     extra_tgt_bckp = extra_tgt
@@ -592,16 +591,6 @@
                 }
             }
 
-            if (common.checkContains('STACK_INSTALL', 'oss')) {
-              stage('Install OSS') {
-                if (!common.checkContains('STACK_INSTALL', 'stacklight')) {
-                  // In case if StackLightv2 enabled containers already started
-                  salt.enforceState(venvPepper, "I@docker:swarm:role:master and I@devops_portal:config ${extra_tgt}", 'docker.client', true)
-                }
-                orchestrate.installOss(venvPepper, extra_tgt)
-              }
-            }
-
             //
             // Test
             //
@@ -646,47 +635,6 @@
                 }
             }
 
-            if (common.checkContains('STACK_TEST', 'openstack')) {
-                if (common.checkContains('TEST_DOCKER_INSTALL', 'true')) {
-                    test.install_docker(venvPepper, TEST_TEMPEST_TARGET)
-                }
-                stage('Run OpenStack tests') {
-                    test.runTempestTests(venvPepper, TEST_TEMPEST_IMAGE, TEST_TEMPEST_TARGET, TEST_TEMPEST_PATTERN)
-                }
-
-                stage('Copy Tempest results to config node') {
-                    test.copyTempestResults(venvPepper, TEST_TEMPEST_TARGET)
-                }
-
-                stage('Archive rally artifacts') {
-                    test.archiveRallyArtifacts(venvPepper, TEST_TEMPEST_TARGET)
-                }
-
-                if (common.validInputParam('TESTRAIL_REPORT') && TESTRAIL_REPORT.toBoolean()) {
-                    stage('Upload test results to TestRail') {
-                        def date = sh(script: 'date +%Y-%m-%d', returnStdout: true).trim()
-                        def plan = TESTRAIL_PLAN ?: "[${TESTRAIL_MILESTONE}]System-Devcloud-${date}"
-                        def group = TESTRAIL_GROUP ?: STACK_TEMPLATE
-
-                        salt.cmdRun(venvPepper, TEST_TEMPEST_TARGET, "cd /root/rally_reports && cp \$(ls -t *xml | head -n1) report.xml")
-                        test.uploadResultsTestrail("/root/rally_reports/report.xml",
-                                TESTRAIL_REPORTER_IMAGE, group, TESTRAIL_QA_CREDENTIALS,
-                                plan, TESTRAIL_MILESTONE, TESTRAIL_SUITE)
-                    }
-                }
-            }
-
-
-            if (common.checkContains('STACK_TEST', 'ceph')) {
-                stage('Run infra tests') {
-                    sleep(120)
-                    def cmd = "apt-get install -y python-pip && pip install -r /usr/share/salt-formulas/env/ceph/files/testinfra/requirements.txt && python -m pytest --junitxml=/root/report.xml /usr/share/salt-formulas/env/ceph/files/testinfra/"
-                    salt.cmdRun(venvPepper, 'I@salt:master', cmd, false)
-                    writeFile(file: 'report.xml', text: salt.getFileContent(venvPepper, 'I@salt:master', '/root/report.xml'))
-                    junit(keepLongStdio: true, testResults: 'report.xml')
-                }
-            }
-
             if (common.checkContains('STACK_TEST', 'opencontrail')) {
                 stage('Run opencontrail tests') {
                     def opencontrail_tests_dir = "/opt/opencontrail_test/fuel-plugin-contrail/plugin_test/vapor/"
diff --git a/cvp-sanity.groovy b/cvp-sanity.groovy
deleted file mode 100644
index 7adca5a..0000000
--- a/cvp-sanity.groovy
+++ /dev/null
@@ -1,40 +0,0 @@
-/**
- *
- * Launch sanity verification of the cloud
- *
- * Expected parameters:
- *   SALT_MASTER_URL             URL of Salt master
- *   SALT_MASTER_CREDENTIALS     Credentials to the Salt API
- *
- *   SANITY_TESTS_SET            Leave empty for full run or choose a file (test), e.g. test_mtu.py
- *   SANITY_TESTS_REPO           CVP-sanity-checks repo to clone
- *   SANITY_TESTS_SETTINGS       Additional envrionment variables for cvp-sanity-checks
- *   PROXY                       Proxy to use for cloning repo or for pip
- *
- */
-
-validate = new com.mirantis.mcp.Validate()
-
-def artifacts_dir = 'validation_artifacts/'
-timeout(time: 12, unit: 'HOURS') {
-    node() {
-        try{
-            stage('Initialization') {
-                validate.prepareVenv(SANITY_TESTS_REPO, PROXY)
-            }
-
-            stage('Run Infra tests') {
-                sh "mkdir -p ${artifacts_dir}"
-                validate.runSanityTests(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS, SANITY_TESTS_SET, artifacts_dir, SANITY_TESTS_SETTINGS)
-            }
-            stage ('Publish results') {
-                archiveArtifacts artifacts: "${artifacts_dir}/*"
-                junit "${artifacts_dir}/*.xml"
-            }
-        } catch (Throwable e) {
-            // If there was an error or exception thrown, the build failed
-            currentBuild.result = "FAILURE"
-            throw e
-        }
-    }
-}
diff --git a/cvp-spt.groovy b/cvp-spt.groovy
deleted file mode 100644
index b9d53d5..0000000
--- a/cvp-spt.groovy
+++ /dev/null
@@ -1,63 +0,0 @@
-/**
- *
- * Launch pytest frameworks in Jenkins
- *
- * Expected parameters:
- *   SALT_MASTER_URL                 URL of Salt master
- *   SALT_MASTER_CREDENTIALS         Credentials to the Salt API
- *
- *   TESTS_SET                       Leave empty for full run or choose a file (test)
- *   TESTS_REPO                      Repo to clone
- *   TESTS_SETTINGS                  Additional environment varibales to apply
- *   PROXY                           Proxy to use for cloning repo or for pip
- *
- */
-
-validate = new com.mirantis.mcp.Validate()
-
-node() {
-    try{
-        stage('Initialization') {
-            validate.prepareVenv(TESTS_REPO, PROXY)
-        }
-
-        stage('Run Tests') {
-            validate.runTests(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS, TESTS_SET, '', TESTS_SETTINGS)
-        }
-        stage ('Publish results') {
-            archiveArtifacts artifacts: "*"
-            junit "*.xml"
-            plot csvFileName: 'plot-8634d2fe-dc48-4713-99f9-b69a381483aa.csv',
-                 group: 'SPT',
-                 style: 'line',
-                 title: 'SPT Glance results',
-                 xmlSeries: [[
-                 file: "report.xml",
-                 nodeType: 'NODESET',
-                 url: '',
-                 xpath: '/testsuite/testcase[@name="test_speed_glance"]/properties/property']]
-            plot csvFileName: 'plot-8634d2fe-dc48-4713-99f9-b69a381483bb.csv',
-                 group: 'SPT',
-                 style: 'line',
-                 title: 'SPT HW2HW results',
-                 xmlSeries: [[
-                 file: "report.xml",
-                 nodeType: 'NODESET',
-                 url: '',
-                 xpath: '/testsuite/testcase[@classname="cvp_spt.tests.test_hw2hw"]/properties/property']]
-            plot csvFileName: 'plot-8634d2fe-dc48-4713-99f9-b69a381483bc.csv',
-                 group: 'SPT',
-                 style: 'line',
-                 title: 'SPT VM2VM results',
-                 xmlSeries: [[
-                 file: "report.xml",
-                 nodeType: 'NODESET',
-                 url: '',
-                 xpath: '/testsuite/testcase[@classname="cvp_spt.tests.test_vm2vm"]/properties/property']]
-        }
-    } catch (Throwable e) {
-        // If there was an error or exception thrown, the build failed
-        currentBuild.result = "FAILURE"
-        throw e
-    }
-}
diff --git a/cvp-stacklight.groovy b/cvp-stacklight.groovy
deleted file mode 100644
index e7ce974..0000000
--- a/cvp-stacklight.groovy
+++ /dev/null
@@ -1,33 +0,0 @@
-/**
- *
- * Temporary pipeline for running cvp-stacklight job
- *
- * Expected parameters:
- *   SALT_MASTER_URL                 URL of Salt master
- *   SALT_MASTER_CREDENTIALS         Credentials to the Salt API
- *
- *   TESTS_SET                       Leave empty for full run or choose a file (test)
- *   TESTS_REPO                      Repo to clone
- *   TESTS_SETTINGS                  Additional environment varibales to apply
- *   PROXY                           Proxy to use for cloning repo or for pip
- *
- */
-
-validate = new com.mirantis.mcp.Validate()
-
-def artifacts_dir = 'validation_artifacts/'
-
-node() {
-    stage('Initialization') {
-        validate.prepareVenv(TESTS_REPO, PROXY)
-    }
-
-    stage('Run Tests') {
-        sh "mkdir -p ${artifacts_dir}"
-        validate.runTests(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS, TESTS_SET, artifacts_dir, TESTS_SETTINGS)
-    }
-    stage ('Publish results') {
-        archiveArtifacts artifacts: "${artifacts_dir}/*"
-        junit "${artifacts_dir}/*.xml"
-    }
-}
diff --git a/cvp-tempest.groovy b/cvp-tempest.groovy
new file mode 100644
index 0000000..c6fca0a
--- /dev/null
+++ b/cvp-tempest.groovy
@@ -0,0 +1,168 @@
+/**
+ *
+ * Launch CVP Tempest verification of the cloud
+ *
+ * Expected parameters:
+
+ *   SALT_MASTER_URL             URL of Salt master
+ *   SALT_MASTER_CREDENTIALS     Credentials that are used in this Jenkins for accessing Salt master (usually "salt")
+ *   SERVICE_NODE                Node, where runtest formula and some other states will be executed
+ *   VERBOSE                     Show salt output in Jenkins console
+ *   DEBUG_MODE                  Remove or keep container after the test
+ *   STOP_ON_ERROR               Stop pipeline if error during salt run occurs
+ *   GENERATE_CONFIG             Run runtest formula / generate Tempest config
+ *   SKIP_LIST_PATH              Path to skip list (not in use right now)
+ *   TEST_IMAGE                  Docker image link to use for running container with testing tools.
+ *   TARGET_NODE                 Node to run container with Tempest/Rally
+ *   PREPARE_RESOURCES           Prepare Openstack resources before test run
+ *   TEMPEST_TEST_PATTERN        Tests to run
+ *   TEMPEST_ENDPOINT_TYPE       Type of OS endpoint to use during test run (not in use right now)
+ *   concurrency                 Number of threads to use for Tempest test run
+ *   remote_artifacts_dir        Folder to use for artifacts on remote node
+ *   report_prefix               Some prefix to put to report name
+ *
+ */
+
+
+common = new com.mirantis.mk.Common()
+salt = new com.mirantis.mk.Salt()
+validate = new com.mirantis.mcp.Validate()
+
+def saltMaster
+extraYamlContext = env.getProperty('EXTRA_PARAMS')
+if (extraYamlContext) {
+    common.mergeEnv(env, extraYamlContext) }
+def SALT_MASTER_CREDENTIALS=(env.SALT_MASTER_CREDENTIALS) ?: 'salt'
+def VERBOSE = (env.VERBOSE) ?: true
+def DEBUG_MODE = (env.DEBUG_MODE) ?: false
+def STOP_ON_ERROR = (env.STOP_ON_ERROR) ?: false
+def GENERATE_CONFIG = (env.GENERATE_CONFIG) ?: true
+def remote_artifacts_dir = (env.remote_artifacts_dir) ?: '/root/test/'
+def report_prefix = (env.report_prefix) ?: ''
+def args = ''
+node() {
+    try{
+        stage('Initialization') {
+            deleteDir()
+            saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+            cluster_name=salt.getPillar(saltMaster, 'I@salt:master', '_param:cluster_name')['return'][0].values()[0]
+            os_version=salt.getPillar(saltMaster, 'I@salt:master', '_param:openstack_version')['return'][0].values()[0]
+            if (os_version == '') {
+                throw new Exception("Openstack is not found on this env. Exiting")
+            }
+            TEST_IMAGE = (env.TEST_IMAGE) ?: "docker-prod-virtual.docker.mirantis.net/mirantis/cicd/ci-tempest:${os_version}"
+            runtest_node = salt.runSaltProcessStep(saltMaster, 'I@runtest:*', 'test.ping')['return'][0]
+            if (runtest_node.values()[0]) {
+                // Let's use Service node that was defined in reclass. If several nodes are defined
+                // we will use the first from salt output
+                common.infoMsg("Service node ${runtest_node.keySet()[0]} is defined in reclass")
+                SERVICE_NODE = runtest_node.keySet()[0]
+            }
+            else {
+                common.infoMsg("Service node is not defined in reclass")
+                SERVICE_NODE = (env.SERVICE_NODE) ?: 'I@salt:master'
+                common.infoMsg("${SERVICE_NODE} will be used as Service node")
+                def classes_to_add = ["cluster.${cluster_name}.infra.runtest"]
+                fullnodename = salt.getMinions(saltMaster, SERVICE_NODE).get(0)
+                common.infoMsg("Full service node name ${fullnodename}")
+                result = salt.runSaltCommand(saltMaster, 'local', ['expression': SERVICE_NODE, 'type': 'compound'], 'reclass.node_update',
+                                             null, null, ['name': fullnodename, 'classes': classes_to_add])
+                salt.checkResult(result)
+            }
+            common.infoMsg('Refreshing pillars on service node')
+            salt.runSaltProcessStep(saltMaster, SERVICE_NODE, 'saltutil.refresh_pillar', [], null, VERBOSE)
+            tempest_node=salt.getPillar(saltMaster, SERVICE_NODE, '_param:tempest_test_target')['return'][0].values()[0] ?: 'I@gerrit:client'
+        }
+        stage('Preparing resources') {
+            if ( PREPARE_RESOURCES.toBoolean() ) {
+                common.infoMsg('Running salt.minion state on service node')
+                salt.enforceState(saltMaster, SERVICE_NODE, ['salt.minion'], VERBOSE, STOP_ON_ERROR, null, false, 300, 2, true, [], 60)
+                common.infoMsg('Running keystone.client on service node')
+                salt.enforceState(saltMaster, SERVICE_NODE, 'keystone.client', VERBOSE, STOP_ON_ERROR)
+                common.infoMsg('Running glance.client on service node')
+                salt.enforceState(saltMaster, SERVICE_NODE, 'glance.client', VERBOSE, STOP_ON_ERROR)
+                common.infoMsg('Running nova.client on service node')
+                salt.enforceState(saltMaster, SERVICE_NODE, 'nova.client', VERBOSE, STOP_ON_ERROR)
+            }
+            else {
+                common.infoMsg('Skipping resources preparation')
+            }
+        }
+        stage('Generate config') {
+            if ( GENERATE_CONFIG ) {
+                salt.runSaltProcessStep(saltMaster, SERVICE_NODE, 'file.remove', ["${remote_artifacts_dir}"])
+                salt.runSaltProcessStep(saltMaster, SERVICE_NODE, 'file.mkdir', ["${remote_artifacts_dir}"])
+                fullnodename = salt.getMinions(saltMaster, SERVICE_NODE).get(0)
+                TARGET_NODE = (env.TARGET_NODE) ?: tempest_node
+                if (TARGET_NODE != tempest_node) {
+                    common.infoMsg("TARGET_NODE is defined in Jenkins")
+                    def params_to_update = ['tempest_test_target': "${TARGET_NODE}"]
+                    common.infoMsg("Overriding default ${tempest_node} value of tempest_test_target parameter")
+                    result = salt.runSaltCommand(saltMaster, 'local', ['expression': SERVICE_NODE, 'type': 'compound'], 'reclass.node_update',
+                                                 null, null, ['name': fullnodename, 'parameters': ['tempest_test_target': "${TARGET_NODE}"]])
+                    salt.checkResult(result)
+                }
+                common.infoMsg("TARGET_NODE is ${TARGET_NODE}")
+                salt.runSaltProcessStep(saltMaster, TARGET_NODE, 'file.remove', ["${remote_artifacts_dir}"])
+                salt.runSaltProcessStep(saltMaster, TARGET_NODE, 'file.mkdir', ["${remote_artifacts_dir}"])
+                salt.enforceState(saltMaster, SERVICE_NODE, 'runtest', VERBOSE, STOP_ON_ERROR)
+                // we need to refresh pillars on target node after runtest state
+                salt.runSaltProcessStep(saltMaster, TARGET_NODE, 'saltutil.refresh_pillar', [], null, VERBOSE)
+                if (TARGET_NODE != tempest_node) {
+                    common.infoMsg("Reverting tempest_test_target parameter")
+                    result = salt.runSaltCommand(saltMaster, 'local', ['expression': SERVICE_NODE, 'type': 'compound'], 'reclass.node_update',
+                                                 null, null, ['name': fullnodename, 'parameters': ['tempest_test_target': "${tempest_node}"]])
+                }
+                SKIP_LIST_PATH = (env.SKIP_LIST_PATH) ?: salt.getPillar(saltMaster, SERVICE_NODE, '_param:tempest_skip_list_path')['return'][0].values()[0]
+                runtest_tempest_cfg_dir = salt.getPillar(saltMaster, SERVICE_NODE, '_param:runtest_tempest_cfg_dir')['return'][0].values()[0] ?: '/root/test/'
+                if (SKIP_LIST_PATH) {
+                    salt.cmdRun(saltMaster, SERVICE_NODE, "salt-cp ${TARGET_NODE} ${SKIP_LIST_PATH} ${runtest_tempest_cfg_dir}/skip.list")
+                    args += ' --blacklist-file /root/tempest/skip.list '
+                }
+            }
+            else {
+                common.infoMsg('Skipping Tempest config generation')
+                salt.cmdRun(saltMaster, TARGET_NODE, "rm -rf ${remote_artifacts_dir}/reports")
+            }
+        }
+
+        stage('Run Tempest tests') {
+            // parameters: master, target, dockerImageLink, name, env_var, entrypoint, tempestConfLocalPath
+            validate.runContainer(saltMaster, TARGET_NODE, TEST_IMAGE, 'cvp', [], true,
+                                  '/root/test/tempest_generated.conf')
+            report_prefix += 'tempest_'
+            if (env.concurrency) {
+                args += ' -w ' + env.concurrency
+            }
+            if (TEMPEST_TEST_PATTERN == 'set=smoke') {
+                args += ' -s '
+                report_prefix += 'smoke'
+            }
+            else {
+                if (TEMPEST_TEST_PATTERN != 'set=full') {
+                    args += " -r ${TEMPEST_TEST_PATTERN} "
+                    report_prefix += 'full'
+                }
+            }
+            salt.cmdRun(saltMaster, TARGET_NODE, "docker exec -e ARGS=\'${args}\' cvp /bin/bash -c 'run-tempest'")
+        }
+        stage('Collect results') {
+            report_prefix += "_report_${env.BUILD_NUMBER}"
+            // will be removed after changing runtest-formula logic
+            salt.cmdRun(saltMaster, TARGET_NODE, "mkdir -p ${remote_artifacts_dir}/reports; mv ${remote_artifacts_dir}/report_* ${remote_artifacts_dir}/reports")
+            validate.addFiles(saltMaster, TARGET_NODE, "${remote_artifacts_dir}/reports", '')
+            sh "mv report_*.xml ${report_prefix}.xml"
+            sh "mv report_*.log ${report_prefix}.log"
+            archiveArtifacts artifacts: "${report_prefix}.*"
+            junit "${report_prefix}.xml"
+        }
+    } catch (Throwable e) {
+        // If there was an error or exception thrown, the build failed
+        currentBuild.result = "FAILURE"
+        throw e
+    } finally {
+        if (DEBUG_MODE == 'false') {
+            validate.runCleanup(saltMaster, TARGET_NODE)
+        }
+    }
+}
diff --git a/gating-pipeline.groovy b/gating-pipeline.groovy
index b40bfec..e7887f9 100644
--- a/gating-pipeline.groovy
+++ b/gating-pipeline.groovy
@@ -70,10 +70,18 @@
                                 if (isJobExists(testJob)) {
                                     common.infoMsg("Test job ${testJob} found, running")
                                     def patchsetVerified = gerrit.patchsetHasApproval(gerritChange.currentPatchSet, "Verified", "+")
-                                    build job: testJob, parameters: [
-                                        [$class: 'StringParameterValue', name: 'DEFAULT_GIT_URL', value: "${GERRIT_SCHEME}://${GERRIT_NAME}@${GERRIT_HOST}:${GERRIT_PORT}/${GERRIT_PROJECT}"],
-                                        [$class: 'StringParameterValue', name: 'DEFAULT_GIT_REF', value: GERRIT_REFSPEC]
-                                    ]
+                                    if (JOBS_NAMESPACE.equals("salt-formulas")) {
+                                        build job: testJob, parameters: [
+                                            [$class: 'StringParameterValue', name: 'DEFAULT_GIT_URL', value: "${GERRIT_SCHEME}://${GERRIT_NAME}@${GERRIT_HOST}:${GERRIT_PORT}/${GERRIT_PROJECT}"],
+                                            [$class: 'StringParameterValue', name: 'DEFAULT_GIT_REF', value: GERRIT_REFSPEC],
+                                            [$class: 'StringParameterValue', name: 'GATING_GERRIT_BRANCH', value: GERRIT_BRANCH]
+                                        ]
+                                    } else {
+                                        build job: testJob, parameters: [
+                                            [$class: 'StringParameterValue', name: 'DEFAULT_GIT_URL', value: "${GERRIT_SCHEME}://${GERRIT_NAME}@${GERRIT_HOST}:${GERRIT_PORT}/${GERRIT_PROJECT}"],
+                                            [$class: 'StringParameterValue', name: 'DEFAULT_GIT_REF', value: GERRIT_REFSPEC]
+                                        ]
+                                    }
                                     giveVerify = true
                                 } else {
                                     common.infoMsg("Test job ${testJob} not found")
diff --git a/generate-cookiecutter-products.groovy b/generate-cookiecutter-products.groovy
index 72c5cba..99ee3ea 100644
--- a/generate-cookiecutter-products.groovy
+++ b/generate-cookiecutter-products.groovy
@@ -8,6 +8,7 @@
  **/
 import static groovy.json.JsonOutput.toJson
 import static groovy.json.JsonOutput.prettyPrint
+import org.apache.commons.net.util.SubnetUtils
 
 common = new com.mirantis.mk.Common()
 common2 = new com.mirantis.mcp.Common()
@@ -294,6 +295,16 @@
                     sh "sed -i 's,${i[0]}=.*,${i[0]}=${i[1]},' user_data"
                 }
 
+                // calculate netmask
+                def deployNetworkSubnet = ''
+                if (context.get('deploy_network_subnet')) {
+                    def subnet = new SubnetUtils(context['deploy_network_subnet'])
+                    deployNetworkSubnet = subnet.getInfo().getNetmask()
+                } else if (context.get('deploy_network_netmask')) { // case for 2018.4.0
+                    deployNetworkSubnet = context['deploy_network_netmask']
+                } else {
+                    error('Either context parameter deploy_network_subnet or deploy_network_netmask should be set!')
+                }
                 // create cfg config-drive
                 if (outdateGeneration) {
                     args += ["--hostname ${context['salt_master_hostname']}", "${context['salt_master_hostname']}.${context['cluster_domain']}-config.iso"]
@@ -301,10 +312,10 @@
                 } else {
                     args += [
                         "--name ${context['salt_master_hostname']}", "--hostname ${context['salt_master_hostname']}.${context['cluster_domain']}", "--clean-up",
-                        "--ip ${context['salt_master_management_address']}", "--netmask ${context['deploy_network_netmask']}", "--gateway ${context['deploy_network_gateway']}",
+                        "--ip ${context['salt_master_management_address']}", "--netmask ${deployNetworkSubnet}", "--gateway ${context['deploy_network_gateway']}",
                         "--dns-nameservers ${context['dns_server01']},${context['dns_server02']}"
                     ]
-                    sh "python ./create-config-drive.py ${args.join(' ')}"
+                    sh "chmod 0755 create-config-drive.py ; ./create-config-drive.py ${args.join(' ')}"
                 }
                 sh("mkdir output-${context['cluster_name']} && mv ${context['salt_master_hostname']}.${context['cluster_domain']}-config.iso output-${context['cluster_name']}/")
 
@@ -332,7 +343,7 @@
                         sh "./create-config-drive --user-data mirror_config --hostname ${aptlyServerHostname} ${aptlyServerHostname}.${context['cluster_domain']}-config.iso"
                     } else {
                         args = [
-                            "--ip ${context['aptly_server_deploy_address']}", "--netmask ${context['deploy_network_netmask']}", "--gateway ${context['deploy_network_gateway']}",
+                            "--ip ${context['aptly_server_deploy_address']}", "--netmask ${deployNetworkSubnet}", "--gateway ${context['deploy_network_gateway']}",
                             "--user-data mirror_config", "--hostname ${aptlyServerHostname}.${context['cluster_domain']}", "--name ${aptlyServerHostname}", "--clean-up",
                             "--dns-nameservers ${context['dns_server01']},${context['dns_server02']}"
                         ]
diff --git a/lab-pipeline.groovy b/lab-pipeline.groovy
deleted file mode 100644
index 61015f5..0000000
--- a/lab-pipeline.groovy
+++ /dev/null
@@ -1,390 +0,0 @@
-/**
-
-
-
-
-
-
-
-
-
-* This pipeline is deprecated, please use cloud-deploy-pipeline
-
-
-
-
-
-
-
-
-
-
-
-
- *
- * Launch heat stack with basic k8s
- * Flow parameters:
- *   STACK_NAME                  Heat stack name
- *   STACK_TYPE                  Orchestration engine: heat, ''
- *   STACK_INSTALL               What should be installed (k8s, openstack, ...)
- *   STACK_TEST                  What should be tested (k8s, openstack, ...)
- *
- *   STACK_TEMPLATE_URL          URL to git repo with stack templates
- *   STACK_TEMPLATE_BRANCH       Stack templates repo branch
- *   STACK_TEMPLATE_CREDENTIALS  Credentials to the stack templates repo
- *   STACK_TEMPLATE              Heat stack HOT template
- *   STACK_RECLASS_ADDRESS       Stack reclass address
- *   STACK_RECLASS_BRANCH        Stack reclass repo branch
- *   STACK_DELETE                Delete stack when finished (bool)
- *   STACK_REUSE                 Reuse stack (don't create one)
- *   STACK_CLEANUP_JOB           Name of job for deleting Heat stack
- *
- * Expected parameters:
- * required for STACK_TYPE=heat
- *   HEAT_STACK_ENVIRONMENT       Heat stack environmental parameters
- *   HEAT_STACK_ZONE              Heat stack availability zone
- *   HEAT_STACK_PUBLIC_NET        Heat stack floating IP pool
- *   OPENSTACK_API_URL            OpenStack API address
- *   OPENSTACK_API_CREDENTIALS    Credentials to the OpenStack API
- *   OPENSTACK_API_PROJECT        OpenStack project to connect to
- *   OPENSTACK_API_PROJECT_DOMAIN Domain for OpenStack project
- *   OPENSTACK_API_PROJECT_ID     ID for OpenStack project
- *   OPENSTACK_API_USER_DOMAIN    Domain for OpenStack user
- *   OPENSTACK_API_CLIENT         Versions of OpenStack python clients
- *   OPENSTACK_API_VERSION        Version of the OpenStack API (2/3)
- *
- *   SALT_MASTER_CREDENTIALS    Credentials to the Salt API
- *
- * required for STACK_TYPE=NONE or empty string
- *   SALT_MASTER_URL            URL of Salt-API
- *
- * Test settings:
- *   TEST_K8S_API_SERVER     Kubernetes API address
- *   TEST_K8S_CONFORMANCE_IMAGE   Path to docker image with conformance e2e tests
- *
- *   TEST_TEMPEST_IMAGE           Tempest image link
- *   TEST_DOCKER_INSTALL          Install docker on the target if tue
- *   TEST_TEMPEST_PATTERN         If not false, run tests matched to pattern only
- *   TEST_TEMPEST_TARGET          Salt target for tempest node
- *
- * optional parameters for overwriting soft params
- *   SALT_OVERRIDES              YAML with overrides for Salt deployment
- *
- */
-
-common = new com.mirantis.mk.Common()
-git = new com.mirantis.mk.Git()
-openstack = new com.mirantis.mk.Openstack()
-orchestrate = new com.mirantis.mk.Orchestrate()
-salt = new com.mirantis.mk.Salt()
-test = new com.mirantis.mk.Test()
-
-_MAX_PERMITTED_STACKS = 2
-
-node {
-    // try to get STACK_INSTALL or fallback to INSTALL if exists
-    try {
-      def temporary = STACK_INSTALL
-    } catch (MissingPropertyException e) {
-      try {
-        STACK_INSTALL = INSTALL
-        env['STACK_INSTALL'] = INSTALL
-      } catch (MissingPropertyException e2) {
-        common.errorMsg("Property STACK_INSTALL or INSTALL not found!")
-      }
-    }
-    try {
-        //
-        // Prepare machines
-        //
-        stage ('Create infrastructure') {
-
-            if (STACK_TYPE == 'heat') {
-                // value defaults
-                def openstackCloud
-                def openstackVersion = OPENSTACK_API_CLIENT ? OPENSTACK_API_CLIENT : 'liberty'
-                def openstackEnv = "${env.WORKSPACE}/venv"
-
-                if (STACK_REUSE.toBoolean() == true && STACK_NAME == '') {
-                    error("If you want to reuse existing stack you need to provide it's name")
-                }
-
-                if (STACK_REUSE.toBoolean() == false) {
-                    // Don't allow to set custom heat stack name
-                    wrap([$class: 'BuildUser']) {
-                        if (env.BUILD_USER_ID) {
-                            STACK_NAME = "${env.BUILD_USER_ID}-${JOB_NAME}-${BUILD_NUMBER}"
-                        } else {
-                            STACK_NAME = "jenkins-${JOB_NAME}-${BUILD_NUMBER}"
-                        }
-                        currentBuild.description = STACK_NAME
-                    }
-                }
-
-                // set description
-                currentBuild.description = "${STACK_NAME}"
-
-                // get templates
-                git.checkoutGitRepository('template', STACK_TEMPLATE_URL, STACK_TEMPLATE_BRANCH, STACK_TEMPLATE_CREDENTIALS)
-
-                // create openstack env
-                openstack.setupOpenstackVirtualenv(openstackEnv, openstackVersion)
-                openstackCloud = openstack.createOpenstackEnv(
-                    OPENSTACK_API_URL, OPENSTACK_API_CREDENTIALS,
-                    OPENSTACK_API_PROJECT, OPENSTACK_API_PROJECT_DOMAIN,
-                    OPENSTACK_API_PROJECT_ID, OPENSTACK_API_USER_DOMAIN,
-                    OPENSTACK_API_VERSION)
-                openstack.getKeystoneToken(openstackCloud, openstackEnv)
-                //
-                // Verify possibility of create stack for given user and stack type
-                //
-                wrap([$class: 'BuildUser']) {
-                    if (env.BUILD_USER_ID && !env.BUILD_USER_ID.equals("jenkins") && !env.BUILD_USER_ID.equals("mceloud") && !STACK_REUSE.toBoolean()) {
-                        def existingStacks = openstack.getStacksForNameContains(openstackCloud, "${env.BUILD_USER_ID}-${JOB_NAME}", openstackEnv)
-                        if(existingStacks.size() >= _MAX_PERMITTED_STACKS){
-                            STACK_DELETE = "false"
-                            throw new Exception("You cannot create new stack, you already have ${_MAX_PERMITTED_STACKS} stacks of this type (${JOB_NAME}). \nStack names: ${existingStacks}")
-                        }
-                    }
-                }
-                // launch stack
-                if (STACK_REUSE.toBoolean() == false) {
-                    stage('Launch new Heat stack') {
-                        // create stack
-                        envParams = [
-                            'instance_zone': HEAT_STACK_ZONE,
-                            'public_net': HEAT_STACK_PUBLIC_NET
-                        ]
-                        try {
-                            envParams.put('cfg_reclass_branch', STACK_RECLASS_BRANCH)
-                            envParams.put('cfg_reclass_address', STACK_RECLASS_ADDRESS)
-                        } catch (MissingPropertyException e) {
-                            common.infoMsg("Property STACK_RECLASS_BRANCH or STACK_RECLASS_ADDRESS not found! Using default values from template.")
-                        }
-                        openstack.createHeatStack(openstackCloud, STACK_NAME, STACK_TEMPLATE, envParams, HEAT_STACK_ENVIRONMENT, openstackEnv)
-                    }
-                }
-
-                // get SALT_MASTER_URL
-                saltMasterHost = openstack.getHeatStackOutputParam(openstackCloud, STACK_NAME, 'salt_master_ip', openstackEnv)
-                currentBuild.description = "${STACK_NAME}: ${saltMasterHost}"
-
-                SALT_MASTER_URL = "http://${saltMasterHost}:6969"
-            }
-        }
-
-        //
-        // Connect to Salt master
-        //
-
-        def saltMaster
-        stage('Connect to Salt API') {
-            saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
-        }
-
-        // Set up override params
-        if (env.getEnvironment().containsKey('SALT_OVERRIDES')) {
-            stage('Set Salt overrides') {
-                salt.setSaltOverrides(saltMaster,  SALT_OVERRIDES)
-            }
-        }
-
-        //
-        // Install
-        //
-
-        if (common.checkContains('STACK_INSTALL', 'core')) {
-            stage('Install core infrastructure') {
-                orchestrate.installFoundationInfra(saltMaster)
-
-                if (common.checkContains('STACK_INSTALL', 'kvm')) {
-                    orchestrate.installInfraKvm(saltMaster)
-                    orchestrate.installFoundationInfra(saltMaster)
-                }
-
-                orchestrate.validateFoundationInfra(saltMaster)
-            }
-        }
-
-        // install k8s
-        if (common.checkContains('STACK_INSTALL', 'k8s')) {
-
-            // install infra libs for k8s
-            stage('Install Kubernetes infra') {
-                orchestrate.installKubernetesInfra(saltMaster)
-            }
-
-            // If k8s install with contrail network manager then contrail need to be install first
-            if (common.checkContains('STACK_INSTALL', 'contrail')) {
-                stage('Install Contrail for Kubernetes') {
-                    orchestrate.installContrailNetwork(saltMaster)
-                    orchestrate.installContrailCompute(saltMaster)
-                    orchestrate.installKubernetesContrailCompute(saltMaster)
-                }
-            }
-
-            stage('Install Kubernetes control') {
-                orchestrate.installKubernetesControl(saltMaster)
-            }
-        }
-
-        // install openstack
-        if (common.checkContains('STACK_INSTALL', 'openstack')) {
-            // install Infra and control, tests, ...
-
-            stage('Install OpenStack infra') {
-                orchestrate.installOpenstackInfra(saltMaster)
-            }
-
-            stage('Install OpenStack control') {
-                orchestrate.installOpenstackControl(saltMaster)
-            }
-
-            stage('Install OpenStack network') {
-
-                if (common.checkContains('STACK_INSTALL', 'contrail')) {
-                    orchestrate.installContrailNetwork(saltMaster)
-                } else if (common.checkContains('STACK_INSTALL', 'ovs')) {
-                    orchestrate.installOpenstackNetwork(saltMaster)
-                }
-
-                salt.cmdRun(saltMaster, 'I@keystone:server', '. /root/keystonerc; neutron net-list')
-                salt.cmdRun(saltMaster, 'I@keystone:server', '. /root/keystonerc; nova net-list')
-            }
-
-            if (salt.testTarget(saltMaster, 'I@ironic:conductor')){
-                stage('Install OpenStack Ironic conductor') {
-                    orchestrate.installIronicConductor(saltMaster)
-                }
-            }
-
-
-            stage('Install OpenStack compute') {
-                orchestrate.installOpenstackCompute(saltMaster)
-
-                if (common.checkContains('STACK_INSTALL', 'contrail')) {
-                    orchestrate.installContrailCompute(saltMaster)
-                }
-            }
-
-        }
-
-
-        if (common.checkContains('STACK_INSTALL', 'sl-legacy')) {
-            stage('Install StackLight v1') {
-                orchestrate.installStacklightv1Control(saltMaster)
-                orchestrate.installStacklightv1Client(saltMaster)
-            }
-        }
-
-        if (common.checkContains('STACK_INSTALL', 'stacklight')) {
-            stage('Install StackLight') {
-                orchestrate.installDockerSwarm(saltMaster)
-                orchestrate.installStacklight(saltMaster)
-            }
-        }
-
-        //
-        // Test
-        //
-        def artifacts_dir = '_artifacts/'
-
-        if (common.checkContains('STACK_TEST', 'k8s')) {
-            stage('Run k8s bootstrap tests') {
-                def image = 'tomkukral/k8s-scripts'
-                def output_file = image.replaceAll('/', '-') + '.output'
-
-                // run image
-                test.runConformanceTests(saltMaster, 'ctl01*', TEST_K8S_API_SERVER, image)
-
-                // collect output
-                sh "mkdir -p ${artifacts_dir}"
-                file_content = salt.getFileContent(saltMaster, 'ctl01*', '/tmp/' + output_file)
-                writeFile file: "${artifacts_dir}${output_file}", text: file_content
-                sh "cat ${artifacts_dir}${output_file}"
-
-                // collect artifacts
-                archiveArtifacts artifacts: "${artifacts_dir}${output_file}"
-            }
-
-            stage('Run k8s conformance e2e tests') {
-                //test.runConformanceTests(saltMaster, 'ctl01*', TEST_K8S_API_SERVER, TEST_K8S_CONFORMANCE_IMAGE)
-
-                def image = TEST_K8S_CONFORMANCE_IMAGE
-                def output_file = image.replaceAll('/', '-') + '.output'
-
-                // run image
-                test.runConformanceTests(saltMaster, 'ctl01*', TEST_K8S_API_SERVER, image)
-
-                // collect output
-                sh "mkdir -p ${artifacts_dir}"
-                file_content = salt.getFileContent(saltMaster, 'ctl01*', '/tmp/' + output_file)
-                writeFile file: "${artifacts_dir}${output_file}", text: file_content
-                sh "cat ${artifacts_dir}${output_file}"
-
-                // collect artifacts
-                archiveArtifacts artifacts: "${artifacts_dir}${output_file}"
-            }
-        }
-
-        if (common.checkContains('STACK_TEST', 'openstack')) {
-            if (common.checkContains('TEST_DOCKER_INSTALL', 'true')) {
-                test.install_docker(saltMaster, TEST_TEMPEST_TARGET)
-            }
-            stage('Run OpenStack tests') {
-                test.runTempestTests(saltMaster, TEST_TEMPEST_IMAGE, TEST_TEMPEST_TARGET, TEST_TEMPEST_PATTERN)
-            }
-
-            stage('Copy Tempest results to config node') {
-                test.copyTempestResults(saltMaster, TEST_TEMPEST_TARGET)
-            }
-        }
-
-        if (common.checkContains('STACK_INSTALL', 'finalize')) {
-            stage('Finalize') {
-                salt.runSaltProcessStep(saltMaster, '*', 'state.apply', [], null, true)
-            }
-        }
-    } catch (Throwable e) {
-        currentBuild.result = 'FAILURE'
-        throw e
-    } finally {
-
-
-        //
-        // Clean
-        //
-
-        if (STACK_TYPE == 'heat') {
-            // send notification
-            common.sendNotification(currentBuild.result, STACK_NAME, ["slack"])
-
-            if (STACK_DELETE.toBoolean() == true) {
-                common.errorMsg('Heat job cleanup triggered')
-                stage('Trigger cleanup job') {
-                    build(job: STACK_CLEANUP_JOB, parameters: [
-                        [$class: 'StringParameterValue', name: 'STACK_NAME', value: STACK_NAME],
-                        [$class: 'StringParameterValue', name: 'STACK_TYPE', value: STACK_TYPE],
-                        [$class: 'StringParameterValue', name: 'OPENSTACK_API_URL', value: OPENSTACK_API_URL],
-                        [$class: 'StringParameterValue', name: 'OPENSTACK_API_CREDENTIALS', value: OPENSTACK_API_CREDENTIALS],
-                        [$class: 'StringParameterValue', name: 'OPENSTACK_API_PROJECT', value: OPENSTACK_API_PROJECT],
-                        [$class: 'StringParameterValue', name: 'OPENSTACK_API_PROJECT_DOMAIN', value: OPENSTACK_API_PROJECT_DOMAIN],
-                        [$class: 'StringParameterValue', name: 'OPENSTACK_API_PROJECT_ID', value: OPENSTACK_API_PROJECT_ID],
-                        [$class: 'StringParameterValue', name: 'OPENSTACK_API_USER_DOMAIN', value: OPENSTACK_API_USER_DOMAIN],
-                        [$class: 'StringParameterValue', name: 'OPENSTACK_API_CLIENT', value: OPENSTACK_API_CLIENT],
-                        [$class: 'StringParameterValue', name: 'OPENSTACK_API_VERSION', value: OPENSTACK_API_VERSION]
-                    ])
-                }
-            } else {
-                if (currentBuild.result == 'FAILURE') {
-                    common.errorMsg("Deploy job FAILED and was not deleted. Please fix the problem and delete stack on you own.")
-
-                    if (SALT_MASTER_URL) {
-                        common.errorMsg("Salt master URL: ${SALT_MASTER_URL}")
-                    }
-                }
-
-            }
-        }
-    }
-}
diff --git a/test-cookiecutter-reclass.groovy b/test-cookiecutter-reclass.groovy
index 67d5181..aa695f2 100644
--- a/test-cookiecutter-reclass.groovy
+++ b/test-cookiecutter-reclass.groovy
@@ -97,7 +97,7 @@
                                     'buildId'  : "${chunkJob.number}"])
 }
 
-def StepTestModel(basename, reclassArtifactName, artifactCopyPath, useExtraRepos = false) {
+def StepTestModel(_basename, _reclassArtifactName, _artifactCopyPath, _useExtraRepos = false) {
     // We need to wrap what we return in a Groovy closure, or else it's invoked
     // when this method is called, not when we pass it to parallel.
     // To do this, you need to wrap the code below in { }, and either return
@@ -105,7 +105,7 @@
     // return node object
     return {
         node(slaveNode) {
-            testModel(basename, reclassArtifactName, artifactCopyPath, useExtraRepos)
+            testModel(_basename, _reclassArtifactName, _artifactCopyPath, _useExtraRepos)
         }
     }
 }
@@ -130,6 +130,12 @@
 
 def StepGenerateModels(_contextFileList, _virtualenv, _templateEnvDir) {
     return {
+        if (fileExists(new File(_templateEnvDir, 'tox.ini').toString())) {
+            // Merge contexts for nice base.yml based diff
+            dir(_templateEnvDir) {
+                sh('tox -ve merge_contexts')
+            }
+        }
         for (contextFile in _contextFileList) {
             def basename = common.GetBaseName(contextFile, '.yml')
             def contextYaml = readYaml text: readFile(file: "${_templateEnvDir}/contexts/${contextFile}")
@@ -139,7 +145,8 @@
                 common.warningMsg('Disabling secrets_encryption_enabled for tests!')
                 contextYaml['default_context']['secrets_encryption_enabled'] = 'False'
             }
-            context = mcpCommon.dumpYAML(contextYaml)
+
+            def context = mcpCommon.dumpYAML(contextYaml)
             if (!fileExists(new File(_templateEnvDir, 'tox.ini').toString())) {
                 common.warningMsg('Forming NEW reclass-root structure...')
                 python.generateModel(context, basename, 'cfg01', _virtualenv, "${_templateEnvDir}/model", _templateEnvDir)
@@ -257,8 +264,8 @@
     // copy reclass system under envPath with -R and trailing / to support symlinks direct copy
     sh("cp -R ${archiveBaseName}/ ${envPath}/${classesSystemDir}")
     dir(envPath) {
-        for (String context : contextList) {
-            def basename = common.GetBaseName(context, '.yml')
+        for (String _context : contextList) {
+            def basename = common.GetBaseName(_context, '.yml')
             dir("${envPath}/model/${basename}/classes") {
                 sh(script: "ln -sfv ../../../${classesSystemDir} system ")
             }
diff --git a/test-model-generator.groovy b/test-model-generator.groovy
index 4134ca4..39723c6 100644
--- a/test-model-generator.groovy
+++ b/test-model-generator.groovy
@@ -168,6 +168,10 @@
             currentBuild.result = "FAILURE"
             throw e
         } finally {
+            sh (script: """map=\$(docker ps --format '{{.Names}}:{{.ID}}' --filter name=operations);\
+            for cont in \$map ; do NAME="\${cont%%:*}";ID="\${cont##*:}"; docker logs \$ID > \$NAME.log 2>&1  ;  done""")
+            archiveArtifacts "*.log"
+
             if (fileExists(testReportHTMLFile)) {
                 archiveArtifacts artifacts: testReportHTMLFile
             }
@@ -192,7 +196,7 @@
                 }
                 // Remove everything what is owned by root
                 testImage.inside(testImageOptions) {
-                    sh("rm -rf /var/lib/qa_reports/* ${env.WORKSPACE}/${apiProject} ${env.WORKSPACE}/${uiProject}")
+                    sh("rm -rf /var/lib/qa_reports/* ${env.WORKSPACE}/${apiProject} ${env.WORKSPACE}/${uiProject} ${env.WORKSPACE}/*.log")
                 }
             }
         }
diff --git a/test-run-rally.groovy b/test-run-rally.groovy
deleted file mode 100644
index 3f2339f..0000000
--- a/test-run-rally.groovy
+++ /dev/null
@@ -1,61 +0,0 @@
-/**
- *
- * Service test pipeline
- *
- * Expected parameters:
- *   SALT_MASTER_URL                 URL of Salt master
- *   SALT_MASTER_CREDENTIALS         Credentials to the Salt API
- * Test settings:
- *   IMAGE_LINK                      Link to docker image with Rally
- *   RALLY_SCENARIO                  Rally test scenario
- *   TEST_TARGET                     Salt target for Rally node
- *   CONTAINER_NAME                  Name of the Docker container which runs Rally
- *   CLEANUP_REPORTS_AND_CONTAINER   Cleanup reports from rally,tempest container, remove all containers started the IMAGE_LINK
- *   DO_CLEANUP_RESOURCES            If "true": runs clean-up script for removing Rally and Tempest resources
- */
-
-
-common = new com.mirantis.mk.Common()
-salt = new com.mirantis.mk.Salt()
-test = new com.mirantis.mk.Test()
-def python = new com.mirantis.mk.Python()
-
-def pepperEnv = "pepperEnv"
-timeout(time: 12, unit: 'HOURS') {
-    node("python") {
-        try {
-
-            //
-            // Prepare connection
-            //
-            stage('Setup virtualenv for Pepper') {
-                python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
-            }
-
-            //
-            // Test
-            //
-
-            stage('Run OpenStack Rally scenario') {
-                test.runRallyScenarios(pepperEnv, IMAGE_LINK, TEST_TARGET, RALLY_SCENARIO, "/home/rally/rally_reports/",
-                        DO_CLEANUP_RESOURCES)
-            }
-            stage('Copy test reports') {
-                test.copyTempestResults(pepperEnv, TEST_TARGET)
-            }
-            stage('Archiving test artifacts') {
-                test.archiveRallyArtifacts(pepperEnv, TEST_TARGET)
-            }
-        } catch (Throwable e) {
-            currentBuild.result = 'FAILURE'
-            throw e
-        } finally {
-            if (CLEANUP_REPORTS_AND_CONTAINER.toBoolean()) {
-                stage('Cleanup reports and container') {
-                    test.removeReports(pepperEnv, TEST_TARGET, "rally_reports", 'rally_reports.tar')
-                    test.removeDockerContainer(pepperEnv, TEST_TARGET, CONTAINER_NAME)
-                }
-            }
-        }
-    }
-}
diff --git a/test-run-tempest.groovy b/test-run-tempest.groovy
deleted file mode 100644
index 6edb276..0000000
--- a/test-run-tempest.groovy
+++ /dev/null
@@ -1,59 +0,0 @@
-/**
- *
- * Service test pipeline
- *
- * Expected parameters:
- *   SALT_MASTER_URL                 URL of Salt master
- *   SALT_MASTER_CREDENTIALS         Credentials to the Salt API
- * Test settings:
- *   IMAGE_LINK                      Link to docker image with Rally and Tempest
- *   TEST_TEMPEST_PATTERN            If not false, run tests matched to pattern only
- *   TEST_TARGET                     Salt target for tempest node
- *   CLEANUP_REPORTS                 Cleanup reports from rally,tempest container, remove all containers started the IMAGE_LINK
- *   SET                             Predefined set for tempest tests
- *   CONCURRENCY                     How many processes to use to run Tempest tests
- *   DO_CLEANUP_RESOURCES            If "true": runs clean-up script for removing Rally and Tempest resources
- */
-
-
-common = new com.mirantis.mk.Common()
-salt = new com.mirantis.mk.Salt()
-test = new com.mirantis.mk.Test()
-def python = new com.mirantis.mk.Python()
-
-def pepperEnv = "pepperEnv"
-timeout(time: 12, unit: 'HOURS') {
-    node("python") {
-        try {
-
-            stage('Setup virtualenv for Pepper') {
-                python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
-            }
-
-            //
-            // Test
-            //
-
-            stage('Run OpenStack Tempest tests') {
-                test.runTempestTests(pepperEnv, IMAGE_LINK, TEST_TARGET, TEST_TEMPEST_PATTERN, "/home/rally/rally_reports/",
-                        "/home/rally/keystonercv3", SET, CONCURRENCY, "mcp.conf", "mcp_skip.list", "/root/keystonercv3",
-                        "/root/rally_reports", DO_CLEANUP_RESOURCES)
-            }
-            stage('Copy test reports') {
-                test.copyTempestResults(pepperEnv, TEST_TARGET)
-            }
-            stage('Archiving test artifacts') {
-                test.archiveRallyArtifacts(pepperEnv, TEST_TARGET)
-            }
-        } catch (Throwable e) {
-            currentBuild.result = 'FAILURE'
-            throw e
-        } finally {
-            if (CLEANUP_REPORTS.toBoolean()) {
-                stage('Cleanup reports') {
-                    test.removeReports(pepperEnv, TEST_TARGET, "rally_reports", 'rally_reports.tar')
-                }
-            }
-        }
-    }
-}
diff --git a/test-salt-formulas-env.groovy b/test-salt-formulas-env.groovy
index e007fe9..de631bf 100644
--- a/test-salt-formulas-env.groovy
+++ b/test-salt-formulas-env.groovy
@@ -22,6 +22,11 @@
   openstack_credentials_id = OPENSTACK_API_CREDENTIALS
 }
 
+env.GERRIT_BRANCH = 'master'
+if (common.validInputParam('GERRIT_PARENT_BRANCH')) {
+  env.GERRIT_BRANCH = GERRIT_PARENT_BRANCH
+}
+
 def checkouted = false
 def openstackTest = false
 def travisLess = false      /** TODO: Remove once formulas are witched to new config */
diff --git a/test-salt-formulas-pipeline.groovy b/test-salt-formulas-pipeline.groovy
index 4326433..640cfed 100644
--- a/test-salt-formulas-pipeline.groovy
+++ b/test-salt-formulas-pipeline.groovy
@@ -9,13 +9,21 @@
 common = new com.mirantis.mk.Common()
 def gerrit = new com.mirantis.mk.Gerrit()
 def ruby = new com.mirantis.mk.Ruby()
+def dockerLib = new com.mirantis.mk.Docker()
 
 def gerritRef = env.GERRIT_REFSPEC ?: null
 def defaultGitRef = env.DEFAULT_GIT_REF ?: null
 def defaultGitUrl = env.DEFAULT_GIT_URL ?: null
 def slaveNode = env.SLAVE_NODE ?: 'virtual'
 def saltVersion = env.SALT_VERSION ?: ""
-def dockerLib = new com.mirantis.mk.Docker()
+
+gerritBranch = 'master'
+if (common.validInputParam('GERRIT_BRANCH')) {
+  gerritBranch = env.GERRIT_BRANCH
+} else if (common.validInputParam('GATING_GERRIT_BRANCH')) {
+    gerritBranch = env.GATING_GERRIT_BRANCH
+  }
+
 
 def checkouted = false
 
@@ -59,7 +67,8 @@
     [$class: 'StringParameterValue', name: 'DEFAULT_GIT_REF', value: defaultGitRef],
     [$class: 'StringParameterValue', name: 'DEFAULT_GIT_URL', value: defaultGitUrl],
     [$class: 'StringParameterValue', name: 'SALT_OPTS', value: SALT_OPTS],
-    [$class: 'StringParameterValue', name: 'SALT_VERSION', value: SALT_VERSION]
+    [$class: 'StringParameterValue', name: 'SALT_VERSION', value: SALT_VERSION],
+    [$class: 'StringParameterValue', name: 'GERRIT_PARENT_BRANCH', value: gerritBranch]
   ]
 }
 
diff --git a/test-salt-model-wrapper.groovy b/test-salt-model-wrapper.groovy
deleted file mode 100644
index 118431a..0000000
--- a/test-salt-model-wrapper.groovy
+++ /dev/null
@@ -1,235 +0,0 @@
-/*
- Global CI wrapper for testing next projects:
-   - salt-models/reclass-system
-   - mk/cookiecutter-templates
-
- Wrapper allows to test cross-project patches, based on
- 'Depends-On: http://<gerrit_address>/<change_number>' key phrase
- */
-
-import groovy.json.JsonOutput
-
-gerrit = new com.mirantis.mk.Gerrit()
-
-cookiecutterTemplatesRepo = 'mk/cookiecutter-templates'
-reclassSystemRepo = 'salt-models/reclass-system'
-slaveNode = env.getProperty('SLAVE_NODE') ?: 'virtual'
-
-voteMatrix = [
-    'test-mk-cookiecutter-templates' : true,
-    'test-drivetrain'                : true,
-    'oscore-test-cookiecutter-models': false,
-    'test-salt-model-infra'          : true,
-    'test-salt-model-mcp-virtual-lab': false,
-]
-
-baseGerritConfig = [:]
-buildTestParams = [:]
-jobResultComments = [:]
-commentLock = false
-
-// post Gerrit review comment to patch
-def setGerritReviewComment() {
-    if (baseGerritConfig) {
-        while (commentLock) {
-            sleep 5
-        }
-        commentLock = true
-        LinkedHashMap config = baseGerritConfig.clone()
-        String jobResultComment = ''
-        jobResultComments.each { threadName, info ->
-            String skipped = voteMatrix.get(info.job, 'true') ? '' : '(non-voting)'
-            jobResultComment += "- ${threadName} ${info.url}console : ${info.status} ${skipped}".trim() + '\n'
-        }
-        config['message'] = sh(script: "echo '${jobResultComment}'", returnStdout: true).trim()
-        gerrit.postGerritComment(config)
-        commentLock = false
-    }
-}
-
-// get job parameters for YAML-based job parametrization
-def yamlJobParameters(LinkedHashMap jobParams) {
-    return [
-        [$class: 'TextParameterValue', name: 'EXTRA_VARIABLES_YAML', value: JsonOutput.toJson(jobParams)]
-    ]
-}
-
-// run needed job with params
-def runTests(String jobName, ArrayList jobParams, String threadName = '', Boolean voteOverride = null) {
-    threadName = threadName ? threadName : jobName
-    def propagateStatus = voteOverride != null ? voteOverride : voteMatrix.get(jobName, true)
-    return {
-        def jobBuild = build job: jobName, propagate: false, parameters: jobParams
-        jobResultComments[threadName] = ['url': jobBuild.absoluteUrl, 'status': jobBuild.result, 'job': jobName]
-        setGerritReviewComment()
-        if (propagateStatus && jobBuild.result == 'FAILURE') {
-            throw new Exception("Build ${threadName} is failed!")
-        }
-    }
-}
-
-// set params based on depending patches
-def setupDependingVars(LinkedHashMap dependingProjects) {
-    if (dependingProjects) {
-        if (dependingProjects.containsKey(reclassSystemRepo)) {
-            buildTestParams['RECLASS_SYSTEM_GIT_REF'] = dependingProjects[reclassSystemRepo].ref
-            buildTestParams['RECLASS_SYSTEM_BRANCH'] = dependingProjects[reclassSystemRepo].branch
-        }
-        if (dependingProjects.containsKey(cookiecutterTemplatesRepo)) {
-            buildTestParams['COOKIECUTTER_TEMPLATE_REF'] = dependingProjects[cookiecutterTemplatesRepo].ref
-            buildTestParams['COOKIECUTTER_TEMPLATE_BRANCH'] = dependingProjects[cookiecutterTemplatesRepo].branch
-        }
-    }
-}
-
-timeout(time: 12, unit: 'HOURS') {
-    node(slaveNode) {
-        def common = new com.mirantis.mk.Common()
-
-        // Var EXTRA_VARIABLES_YAML contains any additional parameters for tests,
-        // like manually specified Gerrit Refs/URLs, additional parameters and so on
-        def buildTestParamsYaml = env.getProperty('EXTRA_VARIABLES_YAML')
-        if (buildTestParamsYaml) {
-            common.mergeEnv(env, buildTestParamsYaml)
-            buildTestParams = readYaml text: buildTestParamsYaml
-        }
-
-        // init required job variables
-        LinkedHashMap job_env = env.getEnvironment().findAll { k, v -> v }
-
-        // Gerrit parameters
-        String gerritCredentials = job_env.get('CREDENTIALS_ID', 'gerrit')
-        String gerritRef = job_env.get('GERRIT_REFSPEC')
-        String gerritProject = job_env.get('GERRIT_PROJECT')
-        String gerritName = job_env.get('GERRIT_NAME')
-        String gerritScheme = job_env.get('GERRIT_SCHEME')
-        String gerritHost = job_env.get('GERRIT_HOST')
-        String gerritPort = job_env.get('GERRIT_PORT')
-        String gerritChangeNumber = job_env.get('GERRIT_CHANGE_NUMBER')
-        String gerritPatchSetNumber = job_env.get('GERRIT_PATCHSET_NUMBER')
-        String gerritBranch = job_env.get('GERRIT_BRANCH')
-        Boolean gateMode = job_env.get('GERRIT_CI_MERGE_TRIGGER', false).toBoolean()
-
-        // Common and manual build parameters
-        LinkedHashMap projectsMap = [:]
-        String distribRevision = 'nightly'
-        //checking if the branch is from release
-        if (gerritBranch.startsWith('release')) {
-            distribRevision = gerritBranch.tokenize('/')[-1]
-            // Check if we are going to test bleeding-edge release, which doesn't have binary release yet
-            // After 2018q4 releases, need to also check 'static' repo, for example ubuntu.
-            binTest = common.checkRemoteBinary(['mcp_version': distribRevision])
-            if (!binTest.linux_system_repo_url || !binTest.linux_system_repo_ubuntu_url) {
-                common.errorMsg("Binary release: ${distribRevision} not exist or not full. Fallback to 'proposed'! ")
-                distribRevision = 'proposed'
-            }
-        }
-        ArrayList testModels = job_env.get('TEST_MODELS', 'mcp-virtual-lab,infra').split(',')
-
-        stage('Gerrit prepare') {
-            // check if change aren't already merged
-            def gerritChange = gerrit.getGerritChange(gerritName, gerritHost, gerritChangeNumber, gerritCredentials)
-            if (gerritChange.status == "MERGED") {
-                common.successMsg('Patch set is alredy merged, no need to test it')
-                currentBuild.result = 'SUCCESS'
-                return
-            }
-            buildTestParams << job_env.findAll { k, v -> k ==~ /GERRIT_.+/ }
-            baseGerritConfig = [
-                'gerritName'          : gerritName,
-                'gerritHost'          : gerritHost,
-                'gerritPort'          : gerritPort,
-                'gerritChangeNumber'  : gerritChangeNumber,
-                'credentialsId'       : gerritCredentials,
-                'gerritPatchSetNumber': gerritPatchSetNumber,
-            ]
-            LinkedHashMap gerritDependingProjects = gerrit.getDependentPatches(baseGerritConfig)
-            setupDependingVars(gerritDependingProjects)
-            ArrayList descriptionMsgs = [
-                "Running with next parameters:",
-                "Ref for ${gerritProject} => ${gerritRef}",
-                "Branch for ${gerritProject} => ${gerritBranch}"
-            ]
-            descriptionMsgs.add("Distrib revision => ${distribRevision}")
-            for (String project in gerritDependingProjects.keySet()) {
-                descriptionMsgs.add("---")
-                descriptionMsgs.add("Depending patch to ${project} found:")
-                descriptionMsgs.add("Ref for ${project} => ${gerritDependingProjects[project]['ref']}")
-                descriptionMsgs.add("Branch for ${project} => ${gerritDependingProjects[project]['branch']}")
-            }
-            currentBuild.description = descriptionMsgs.join('<br/>')
-            gerrit.gerritPatchsetCheckout([
-                credentialsId: gerritCredentials
-            ])
-        }
-
-        stage("Run tests") {
-            def documentationOnly = sh(script: "git diff-tree --no-commit-id --name-only -r HEAD | grep -v .releasenotes", returnStatus: true) == 1
-            if (documentationOnly) {
-                common.infoMsg("Tests skipped, documenation only changed!")
-                currentBuild.result = 'SUCCESS'
-                return
-            }
-
-            def branches = [:]
-            branches.failFast = false
-            String branchJobName = ''
-
-            if (gerritProject == reclassSystemRepo && gerritBranch == 'master') {
-                sh("git diff-tree --no-commit-id --diff-filter=d --name-only -r HEAD  | grep .yml | xargs -I {}  python -c \"import yaml; yaml.load(open('{}', 'r'))\" \\;")
-                def defaultSystemURL = "${gerritScheme}://${gerritName}@${gerritHost}:${gerritPort}/${gerritProject}"
-                for (int i = 0; i < testModels.size(); i++) {
-                    def cluster = testModels[i]
-                    def clusterGitUrl = defaultSystemURL.substring(0, defaultSystemURL.lastIndexOf("/") + 1) + cluster
-                    branchJobName = "test-salt-model-${cluster}"
-                    def jobParams = [
-                        [$class: 'StringParameterValue', name: 'DEFAULT_GIT_URL', value: clusterGitUrl],
-                        [$class: 'StringParameterValue', name: 'DEFAULT_GIT_REF', value: "HEAD"],
-                        [$class: 'StringParameterValue', name: 'SYSTEM_GIT_URL', value: defaultSystemURL],
-                        [$class: 'StringParameterValue', name: 'SYSTEM_GIT_REF', value: gerritRef],
-                    ]
-                    branches[branchJobName] = runTests(branchJobName, jobParams)
-                }
-            }
-            if (gerritProject == reclassSystemRepo || gerritProject == cookiecutterTemplatesRepo) {
-                branchJobName = 'test-mk-cookiecutter-templates'
-                branches[branchJobName] = runTests(branchJobName, yamlJobParameters(buildTestParams))
-            }
-
-            if (!gateMode) {
-                // testing backward compatibility
-                if (gerritBranch == 'master' && gerritProject == reclassSystemRepo) {
-                    def backwardCompatibilityRefsToTest = ['proposed', 'release/2018.11.0', 'release/2019.2.0']
-                    for (String oldRef in backwardCompatibilityRefsToTest) {
-                        LinkedHashMap buildTestParamsOld = buildTestParams.clone()
-                        buildTestParamsOld['COOKIECUTTER_TEMPLATE_REF'] = ''
-                        buildTestParamsOld['COOKIECUTTER_TEMPLATE_BRANCH'] = oldRef
-                        String threadName = "${branchJobName}-${oldRef}"
-                        // disable votes for release/2018.11.0 branch
-                        overrideVote = oldRef == 'release/2018.11.0' ? false : null
-                        branches[threadName] = runTests(branchJobName, yamlJobParameters(buildTestParamsOld), threadName, overrideVote)
-                    }
-                }
-                if (gerritProject == cookiecutterTemplatesRepo) {
-                    branchJobName = 'test-drivetrain'
-                    branches[branchJobName] = runTests(branchJobName, yamlJobParameters(buildTestParams))
-                    branchJobName = 'oscore-test-cookiecutter-models'
-                    branches[branchJobName] = runTests(branchJobName, yamlJobParameters(buildTestParams))
-                }
-                if (env['GERRIT_EVENT_COMMENT_TEXT'] && new String(env['GERRIT_EVENT_COMMENT_TEXT'].decodeBase64()) =~ /\ntest_schemas.*/) {
-                    if (gerritProject == reclassSystemRepo) {
-                       branchJobName = 'oscore-test-cookiecutter-models'
-                       branches[branchJobName] = runTests(branchJobName, yamlJobParameters(buildTestParams))
-                    }
-                }
-            }
-            branches.keySet().each { key ->
-                if (branches[key] instanceof Closure) {
-                    jobResultComments[key] = ['url': job_env.get('BUILD_URL'), 'status': 'WAITING']
-                }
-            }
-            setGerritReviewComment()
-            parallel branches
-        }
-    }
-}
diff --git a/validate-cloud.groovy b/validate-cloud.groovy
index 000c34c..930a27d 100644
--- a/validate-cloud.groovy
+++ b/validate-cloud.groovy
@@ -1,22 +1,15 @@
 /**
  *
- * Launch validation of the cloud
+ * Launch validation of the cloud with Rally
  *
  * Expected parameters:
  *
- *   ACCUMULATE_RESULTS          If true, results from the previous build will be used
  *   JOB_TIMEOUT                 Job timeout in hours
- *   RUN_RALLY_TESTS             If not false, run Rally tests
- *   RUN_SPT_TESTS               If not false, run SPT tests
- *   RUN_TEMPEST_TESTS           If not false, run Tempest tests
- *   TEST_IMAGE                  Docker image link
- *   TARGET_NODE                 Salt target for tempest node
  *   SALT_MASTER_URL             URL of Salt master
  *   SALT_MASTER_CREDENTIALS     Credentials to the Salt API
+ *   VALIDATE_PARAMS             Validate job YAML params (see below)
  *
- *   Additional validate job YAML params:
- *
- *   Rally
+ *   Rally - map with parameters for starting Rally tests
  *
  *   AVAILABILITY_ZONE           The name of availability zone
  *   FLOATING_NETWORK            The name of the external(floating) network
@@ -32,149 +25,362 @@
  *   RALLY_SL_SCENARIOS          Path to file or directory with stacklight rally scenarios
  *   RALLY_TASK_ARGS_FILE        Path to file with rally tests arguments
  *   RALLY_DB_CONN_STRING        Rally-compliant DB connection string for long-term storing
-                                 results to external DB
+ *                               results to external DB
  *   RALLY_TAGS                  List of tags for marking Rally tasks. Can be used when
-                                 generating Rally trends based on particular group of tasks
+ *                               generating Rally trends based on particular group of tasks
  *   RALLY_TRENDS                If enabled, generate Rally trends report. Requires external DB
-                                 connection string to be set. If RALLY_TAGS was set, trends will
-                                 be generated based on finished tasks with these tags, otherwise
-                                 on all the finished tasks available in DB
+ *                               connection string to be set. If RALLY_TAGS was set, trends will
+ *                               be generated based on finished tasks with these tags, otherwise
+ *                               on all the finished tasks available in DB
  *   SKIP_LIST                   List of the Rally scenarios which should be skipped
- *   REPORT_DIR                  Path for reports outside docker image
  *
- *   Tempest
- *
- *   TEMPEST_TEST_SET            If not false, run tests matched to pattern only
- *   TEMPEST_CONFIG_REPO         Git repository with configuration files for Tempest
- *   TEMPEST_CONFIG_BRANCH       Git branch which will be used during the checkout
- *   TEMPEST_REPO                Git repository with Tempest
- *   TEMPEST_VERSION             Version of Tempest (tag, branch or commit)
- *   GENERATE_REPORT             If not false, run report generation command
- *
- *   SPT
- *
- *   AVAILABILITY_ZONE           The name of availability zone
- *   FLOATING_NETWORK            The name of the external(floating) network
- *   SPT_SSH_USER                The name of the user which should be used for ssh to nodes
- *   SPT_IMAGE                   The name of the image for SPT tests
- *   SPT_IMAGE_USER              The name of the user for SPT image
- *   SPT_FLAVOR                  The name of the flavor for SPT image
- *   GENERATE_REPORT             If not false, run report generation command
- *
+ *   PARALLEL_PERFORMANCE        If enabled, run Rally tests separately in parallel for each sub directory found
+ *                               inside RALLY_SCENARIOS and RALLY_SL_SCENARIOS (if STACKLIGHT_RALLY is enabled)
  */
 
 common = new com.mirantis.mk.Common()
-test = new com.mirantis.mk.Test()
 validate = new com.mirantis.mcp.Validate()
-def python = new com.mirantis.mk.Python()
+salt = new com.mirantis.mk.Salt()
+salt_testing = new com.mirantis.mk.SaltModelTesting()
 
-def pepperEnv = "pepperEnv"
-def artifacts_dir = 'validation_artifacts/'
 def VALIDATE_PARAMS = readYaml(text: env.getProperty('VALIDATE_PARAMS')) ?: [:]
 if (! VALIDATE_PARAMS) {
     throw new Exception("VALIDATE_PARAMS yaml is empty.")
 }
+def TEST_IMAGE = env.getProperty('TEST_IMAGE') ?: 'xrally-openstack:1.4.0'
+def JOB_TIMEOUT = env.getProperty('JOB_TIMEOUT').toInteger() ?: 12
+def SLAVE_NODE = env.getProperty('SLAVE_NODE') ?: 'docker'
+def rally = VALIDATE_PARAMS.get('rally') ?: [:]
+def scenariosRepo = rally.get('RALLY_CONFIG_REPO') ?: 'https://review.gerrithub.io/Mirantis/scale-scenarios'
+def scenariosBranch = rally.get('RALLY_CONFIG_BRANCH') ?: 'master'
+def pluginsRepo = rally.get('RALLY_PLUGINS_REPO') ?: 'https://github.com/Mirantis/rally-plugins'
+def pluginsBranch = rally.get('RALLY_PLUGINS_BRANCH') ?: 'master'
+def tags = rally.get('RALLY_TAGS') ?: []
 
-if (env.JOB_TIMEOUT == ''){
-    job_timeout = 12
-} else {
-    job_timeout = env.JOB_TIMEOUT.toInteger()
-}
-timeout(time: job_timeout, unit: 'HOURS') {
-    node() {
-        try{
-            stage('Setup virtualenv for Pepper') {
-                python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
-            }
+// container working dir vars
+def rallyWorkdir = '/home/rally'
+def rallyPluginsDir = "${rallyWorkdir}/rally-plugins"
+def rallyScenariosDir = "${rallyWorkdir}/rally-scenarios"
+def rallyResultsDir = "${rallyWorkdir}/test_results"
+def rallySecrets = "${rallyWorkdir}/secrets"
 
-            stage('Configure') {
-                validate.installDocker(pepperEnv, TARGET_NODE)
-                if (ACCUMULATE_RESULTS.toBoolean() == false) {
-                    sh "rm -r ${artifacts_dir}"
+// env vars
+def env_vars = []
+def platform = [
+    type: 'unknown',
+    stacklight: [enabled: false, grafanaPass: ''],
+]
+def cmp_count
+
+// test results vars
+def testResult
+def tasksParallel = [:]
+def parallelResults = [:]
+def configRun = [:]
+
+timeout(time: JOB_TIMEOUT, unit: 'HOURS') {
+    node (SLAVE_NODE) {
+
+        // local dir vars
+        def workDir = "${env.WORKSPACE}/rally"
+        def pluginsDir = "${workDir}/rally-plugins"
+        def scenariosDir = "${workDir}/rally-scenarios"
+        def secrets = "${workDir}/secrets"
+        def artifacts = "${workDir}/validation_artifacts"
+
+        stage('Configure env') {
+
+            def master = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+
+            // create local directories
+            sh "rm -rf ${workDir} || true"
+            sh "mkdir -p ${artifacts} ${secrets}"
+            writeFile file: "${workDir}/entrypoint.sh", text: '''#!/bin/bash
+set -xe
+exec "$@"
+'''
+            sh "chmod 755 ${workDir}/entrypoint.sh"
+
+            // clone repo with Rally plugins and checkout refs/branch
+            checkout([
+                $class           : 'GitSCM',
+                branches         : [[name: 'FETCH_HEAD']],
+                extensions       : [[$class: 'RelativeTargetDirectory', relativeTargetDir: pluginsDir]],
+                userRemoteConfigs: [[url: pluginsRepo, refspec: pluginsBranch]],
+            ])
+
+            // clone scenarios repo and switch branch / fetch refspecs
+            checkout([
+                $class           : 'GitSCM',
+                branches         : [[name: 'FETCH_HEAD']],
+                extensions       : [[$class: 'RelativeTargetDirectory', relativeTargetDir: scenariosDir]],
+                userRemoteConfigs: [[url: scenariosRepo, refspec: scenariosBranch]],
+            ])
+
+            // get number of computes in the cluster
+            platform['cluster_name'] = salt.getPillar(
+                master, 'I@salt:master', '_param:cluster_name'
+            )['return'][0].values()[0]
+            def rcs_str_node = salt.getPillar(
+                master, 'I@salt:master', 'reclass:storage:node'
+            )['return'][0].values()[0]
+
+            // set up Openstack env variables
+            if (rally.get('K8S_RALLY').toBoolean() == false) {
+
+                platform['type'] = 'openstack'
+                platform['cmp_count'] = rcs_str_node.openstack_compute_rack01['repeat']['count']
+                def rally_variables = [
+                    "floating_network=${rally.FLOATING_NETWORK}",
+                    "rally_image=${rally.RALLY_IMAGE}",
+                    "rally_flavor=${rally.RALLY_FLAVOR}",
+                    "availability_zone=${rally.AVAILABILITY_ZONE}",
+                ]
+
+                env_vars = validate._get_keystone_creds_v3(master)
+                if (!env_vars) {
+                    env_vars = validate._get_keystone_creds_v2(master)
                 }
-                sh "mkdir -p ${artifacts_dir}"
+                env_vars = env_vars + rally_variables
+
+            } else {
+            // set up Kubernetes env variables and get required secrets
+                platform['type'] = 'k8s'
+                platform['cmp_count'] = rcs_str_node.kubernetes_compute_rack01['repeat']['count']
+
+                def kubernetes = salt.getPillar(
+                    master, 'I@kubernetes:master and *01*', 'kubernetes:master'
+                )['return'][0].values()[0]
+
+                env_vars = [
+                    "KUBERNETES_HOST=http://${kubernetes.apiserver.vip_address}" +
+                    ":${kubernetes.apiserver.insecure_port}",
+                    "KUBERNETES_CERT_AUTH=${rallySecrets}/k8s-ca.crt",
+                    "KUBERNETES_CLIENT_KEY=${rallySecrets}/k8s-client.key",
+                    "KUBERNETES_CLIENT_CERT=${rallySecrets}/k8s-client.crt",
+                ]
+
+                // get K8S certificates to manage cluster
+                def k8s_ca = salt.getFileContent(
+                    master, 'I@kubernetes:master and *01*', '/etc/kubernetes/ssl/ca-kubernetes.crt'
+                )
+                def k8s_client_key = salt.getFileContent(
+                    master, 'I@kubernetes:master and *01*', '/etc/kubernetes/ssl/kubelet-client.key'
+                )
+                def k8s_client_crt = salt.getFileContent(
+                    master, 'I@kubernetes:master and *01*', '/etc/kubernetes/ssl/kubelet-client.crt'
+                )
+                writeFile file: "${secrets}/k8s-ca.crt", text: k8s_ca
+                writeFile file: "${secrets}/k8s-client.key", text: k8s_client_key
+                writeFile file: "${secrets}/k8s-client.crt", text: k8s_client_crt
+
             }
 
-            stage('Run Tempest tests') {
-                if (RUN_TEMPEST_TESTS.toBoolean() == true) {
-                    def tempest = VALIDATE_PARAMS.get('tempest') ?: []
-                    validate.runTempestTests(
-                        pepperEnv, TARGET_NODE, TEST_IMAGE,
-                        artifacts_dir, tempest.TEMPEST_CONFIG_REPO,
-                        tempest.TEMPEST_CONFIG_BRANCH, tempest.TEMPEST_REPO,
-                        tempest.TEMPEST_VERSION, tempest.TEMPEST_TEST_SET
+            // get Stacklight data
+            if (rally.STACKLIGHT_RALLY.toBoolean() == true) {
+                platform['stacklight']['enabled'] = true
+
+                def grafana = salt.getPillar(
+                    master, 'I@grafana:client', 'grafana:client:server'
+                )['return'][0].values()[0]
+
+                platform['stacklight']['grafanaPass'] = grafana['password']
+            }
+
+            if (! rally.PARALLEL_PERFORMANCE.toBoolean()) {
+
+                // Define map with docker commands
+                def commands = validate.runRallyTests(
+                    platform, rally.RALLY_SCENARIOS,
+                    rally.RALLY_SL_SCENARIOS, rally.RALLY_TASK_ARGS_FILE,
+                    rally.RALLY_DB_CONN_STRING, tags,
+                    rally.RALLY_TRENDS.toBoolean(), rally.SKIP_LIST
+                )
+                def commands_list = commands.collectEntries{ [ (it.key) : { sh("${it.value}") } ] }
+
+                configRun = [
+                    'image': TEST_IMAGE,
+                    'baseRepoPreConfig': false,
+                    'dockerMaxCpus': 2,
+                    'dockerHostname': 'localhost',
+                    'dockerExtraOpts': [
+                        "--network=host",
+                        "--entrypoint=/entrypoint.sh",
+                        "-w ${rallyWorkdir}",
+                        "-v ${workDir}/entrypoint.sh:/entrypoint.sh",
+                        "-v ${pluginsDir}/:${rallyPluginsDir}",
+                        "-v ${scenariosDir}/:${rallyScenariosDir}",
+                        "-v ${artifacts}/:${rallyResultsDir}",
+                        "-v ${secrets}/:${rallySecrets}",
+                    ],
+                    'envOpts'         : env_vars,
+                    'runCommands'     : commands_list,
+                ]
+                common.infoMsg('Docker config:')
+                println configRun
+                common.infoMsg('Docker commands list:')
+                println commands
+
+            } else {
+
+                // Perform parallel testing of the components with Rally
+                def components = [
+                    Common: [],
+                    Stacklight: [],
+                ]
+
+                // get list of directories inside scenarios path
+                def scenPath = "${scenariosDir}/${rally.RALLY_SCENARIOS}"
+                def mainComponents = sh(
+                    script: "find ${scenPath} -maxdepth 1 -mindepth 1 -type d -exec basename {} \\;",
+                    returnStdout: true,
+                ).trim()
+                if (! mainComponents) {
+                    error(
+                        "No directories found inside RALLY_SCENARIOS ${rally.RALLY_SCENARIOS}\n" +
+                        "Either set PARALLEL_PERFORMANCE=false or populate ${rally.RALLY_SCENARIOS} " +
+                        "with component directories which include corresponding scenarios"
                     )
-                    if (tempest.GENERATE_REPORT.toBoolean() == true) {
-                        common.infoMsg("Generating html test report ...")
-                        validate.generateTestReport(pepperEnv, TARGET_NODE, TEST_IMAGE, artifacts_dir)
-                    }
-                } else {
-                    common.infoMsg("Skipping Tempest tests")
                 }
-            }
+                components['Common'].addAll(mainComponents.split('\n'))
+                common.infoMsg( "Adding for parallel execution sub dirs found in " +
+                    "RALLY_SCENARIOS (${rally.RALLY_SCENARIOS}):"
+                )
+                print mainComponents
 
-            stage('Run Rally tests') {
-                if (RUN_RALLY_TESTS.toBoolean() == true) {
-                    def rally = VALIDATE_PARAMS.get('rally') ?: []
-                    def tags = rally.get('RALLY_TAGS') ?: []
-                    def report_dir = rally.REPORT_DIR ?: '/root/qa_results'
-                    def platform = ["type":"unknown", "stacklight_enabled":false]
-                    def rally_variables = []
-                    if (rally.K8S_RALLY.toBoolean() == false) {
-                      platform['type'] = 'openstack'
-                      rally_variables = ["floating_network=${rally.FLOATING_NETWORK}",
-                                         "rally_image=${rally.RALLY_IMAGE}",
-                                         "rally_flavor=${rally.RALLY_FLAVOR}",
-                                         "availability_zone=${rally.AVAILABILITY_ZONE}"]
-                    } else {
-                      platform['type'] = 'k8s'
+                if (rally.STACKLIGHT_RALLY.toBoolean() == true) {
+                    def slScenPath = "${scenariosDir}/${rally.RALLY_SL_SCENARIOS}"
+                    def slComponents = sh(
+                        script: "find ${slScenPath} -maxdepth 1 -mindepth 1 -type d -exec basename {} \\;",
+                        returnStdout: true,
+                    ).trim()
+                    if (! slComponents) {
+                        error(
+                            "No directories found inside RALLY_SCENARIOS ${rally.RALLY_SL_SCENARIOS}\n" +
+                            "Either set PARALLEL_PERFORMANCE=false or populate ${rally.RALLY_SL_SCENARIOS} " +
+                            "with component directories which include corresponding scenarios"
+                        )
                     }
-                    if (rally.STACKLIGHT_RALLY.toBoolean() == true) {
-                      platform['stacklight_enabled'] = true
+                    components['Stacklight'].addAll(slComponents.split('\n'))
+                    common.infoMsg( "Adding for parallel execution sub dirs found in " +
+                        "RALLY_SL_SCENARIOS (${rally.RALLY_SL_SCENARIOS}):"
+                    )
+                    print slComponents
+                }
+
+                // build up a map with tasks for parallel execution
+                def allComponents = components.values().flatten()
+                for (int i=0; i < allComponents.size(); i++) {
+                    // stagger start times so the threads don't collide at startup;
+                    // the first thread must create the rally deployment
+                    // so that all the remaining rally threads can reuse it
+                    def sleepSeconds = 15 * i
+
+                    def task = allComponents[i]
+                    def task_name = 'rally_' + task
+                    def curComponent = components.find { task in it.value }.key
+                    // inherit platform common data
+                    def curPlatform = platform
+
+                    // setup scenarios and stacklight switch per component
+                    def commonScens = "${rally.RALLY_SCENARIOS}/${task}"
+                    def stacklightScens = "${rally.RALLY_SL_SCENARIOS}/${task}"
+
+                    switch (curComponent) {
+                        case 'Common':
+                            stacklightScens = ''
+                            curPlatform['stacklight']['enabled'] = false
+                        break
+                        case 'Stacklight':
+                            commonScens = ''
+                            curPlatform['stacklight']['enabled'] = true
+                        break
                     }
-                    validate.runRallyTests(
-                        pepperEnv, TARGET_NODE, TEST_IMAGE,
-                        platform, artifacts_dir, rally.RALLY_CONFIG_REPO,
-                        rally.RALLY_CONFIG_BRANCH, rally.RALLY_PLUGINS_REPO,
-                        rally.RALLY_PLUGINS_BRANCH, rally.RALLY_SCENARIOS,
-                        rally.RALLY_SL_SCENARIOS, rally.RALLY_TASK_ARGS_FILE,
+
+                    def curCommands = validate.runRallyTests(
+                        curPlatform, commonScens,
+                        stacklightScens, rally.RALLY_TASK_ARGS_FILE,
                         rally.RALLY_DB_CONN_STRING, tags,
-                        rally.RALLY_TRENDS, rally_variables,
-                        report_dir, rally.SKIP_LIST
+                        rally.RALLY_TRENDS.toBoolean(), rally.SKIP_LIST
                     )
-                } else {
-                    common.infoMsg("Skipping Rally tests")
-                }
-            }
 
-            stage('Run SPT tests') {
-                if (RUN_SPT_TESTS.toBoolean() == true) {
-                    def spt = VALIDATE_PARAMS.get('spt') ?: []
-                    def spt_variables = ["spt_ssh_user=${spt.SPT_SSH_USER}",
-                                         "spt_floating_network=${spt.FLOATING_NETWORK}",
-                                         "spt_image=${spt.SPT_IMAGE}",
-                                         "spt_user=${spt.SPT_IMAGE_USER}",
-                                         "spt_flavor=${spt.SPT_FLAVOR}",
-                                         "spt_availability_zone=${spt.AVAILABILITY_ZONE}"]
-                    validate.runSptTests(pepperEnv, TARGET_NODE, TEST_IMAGE, artifacts_dir, spt_variables)
+                    // copy required files for the current task
+                    def taskWorkDir = "${env.WORKSPACE}/rally_" + task
+                    def taskPluginsDir = "${taskWorkDir}/rally-plugins"
+                    def taskScenariosDir = "${taskWorkDir}/rally-scenarios"
+                    def taskArtifacts = "${taskWorkDir}/validation_artifacts"
+                    def taskSecrets = "${taskWorkDir}/secrets"
+                    sh "rm -rf ${taskWorkDir} || true"
+                    sh "cp -ra ${workDir} ${taskWorkDir}"
 
-                    if (spt.GENERATE_REPORT.toBoolean() == true) {
-                        common.infoMsg("Generating html test report ...")
-                        validate.generateTestReport(pepperEnv, TARGET_NODE, TEST_IMAGE, artifacts_dir)
+                    def curCommandsList = curCommands.collectEntries{ [ (it.key) : { sh("${it.value}") } ] }
+                    def curConfigRun = [
+                        'image': TEST_IMAGE,
+                        'baseRepoPreConfig': false,
+                        'dockerMaxCpus': 2,
+                        'dockerHostname': 'localhost',
+                        'dockerExtraOpts': [
+                            "--network=host",
+                            "--entrypoint=/entrypoint.sh",
+                            "-w ${rallyWorkdir}",
+                            "-v ${taskWorkDir}/entrypoint.sh:/entrypoint.sh",
+                            "-v ${taskPluginsDir}/:${rallyPluginsDir}",
+                            "-v ${taskScenariosDir}/:${rallyScenariosDir}",
+                            "-v ${taskArtifacts}/:${rallyResultsDir}",
+                            "-v ${taskSecrets}/:${rallySecrets}",
+                        ],
+                        'envOpts'         : env_vars,
+                        'runCommands'     : curCommandsList,
+                    ]
+
+                    tasksParallel['rally_' + task] = {
+                        sleep sleepSeconds
+                        common.infoMsg("Docker config for task $task")
+                        println curConfigRun
+                        common.infoMsg("Docker commands list for task $task")
+                        println curCommands
+                        parallelResults[task_name] = salt_testing.setupDockerAndTest(curConfigRun)
                     }
-                } else {
-                    common.infoMsg("Skipping SPT tests")
                 }
             }
+        }
 
-            stage('Collect results') {
-                archiveArtifacts artifacts: "${artifacts_dir}/*"
+        stage('Run Rally tests') {
+
+            def dockerStatuses = [:]
+
+            // start tests in Docker
+            if (! rally.PARALLEL_PERFORMANCE.toBoolean()) {
+                testResult = salt_testing.setupDockerAndTest(configRun)
+                dockerStatuses['rally'] = (testResult) ? 'OK' : 'FAILED'
+            } else {
+                common.infoMsg('Jobs to run in threads: ' + tasksParallel.keySet().join(' '))
+                parallel tasksParallel
+                parallelResults.each { task ->
+                    dockerStatuses[task.key] = (task.value) ? 'OK' : 'FAILED'
+                }
             }
-        } catch (Throwable e) {
-            // If there was an error or exception thrown, the build failed
-            currentBuild.result = "FAILURE"
-            currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
-            throw e
+            // safely archiving all possible results
+            dockerStatuses.each { task ->
+                print "Collecting results for ${task.key} (docker status = '${task.value}')"
+                try {
+                    archiveArtifacts artifacts: "${task.key}/validation_artifacts/*"
+                } catch (Throwable e) {
+                    print 'failed to get artifacts'
+                }
+            }
+            // setting final job status
+            def failed = dockerStatuses.findAll { it.value == 'FAILED' }
+            if (failed.size() == dockerStatuses.size()) {
+                currentBuild.result = 'FAILURE'
+            } else if (dockerStatuses.find { it.value != 'OK' }) {
+                currentBuild.result = 'UNSTABLE'
+            }
+        }
+
+        stage('Clean env') {
+            // remove secrets
+            sh 'find ./ -type d -name secrets -exec rm -rf \\\"{}\\\" \\; || true'
         }
     }
 }