Merge "Add RECLASS_SYSTEM_BRANCH field"
diff --git a/cvp-sanity.groovy b/cvp-sanity.groovy
deleted file mode 100644
index 7adca5a..0000000
--- a/cvp-sanity.groovy
+++ /dev/null
@@ -1,40 +0,0 @@
-/**
- *
- * Launch sanity verification of the cloud
- *
- * Expected parameters:
- *   SALT_MASTER_URL             URL of Salt master
- *   SALT_MASTER_CREDENTIALS     Credentials to the Salt API
- *
- *   SANITY_TESTS_SET            Leave empty for full run or choose a file (test), e.g. test_mtu.py
- *   SANITY_TESTS_REPO           CVP-sanity-checks repo to clone
- *   SANITY_TESTS_SETTINGS       Additional envrionment variables for cvp-sanity-checks
- *   PROXY                       Proxy to use for cloning repo or for pip
- *
- */
-
-validate = new com.mirantis.mcp.Validate()
-
-def artifacts_dir = 'validation_artifacts/'
-timeout(time: 12, unit: 'HOURS') {
-    node() {
-        try{
-            stage('Initialization') {
-                validate.prepareVenv(SANITY_TESTS_REPO, PROXY)
-            }
-
-            stage('Run Infra tests') {
-                sh "mkdir -p ${artifacts_dir}"
-                validate.runSanityTests(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS, SANITY_TESTS_SET, artifacts_dir, SANITY_TESTS_SETTINGS)
-            }
-            stage ('Publish results') {
-                archiveArtifacts artifacts: "${artifacts_dir}/*"
-                junit "${artifacts_dir}/*.xml"
-            }
-        } catch (Throwable e) {
-            // If there was an error or exception thrown, the build failed
-            currentBuild.result = "FAILURE"
-            throw e
-        }
-    }
-}
diff --git a/cvp-spt.groovy b/cvp-spt.groovy
deleted file mode 100644
index b9d53d5..0000000
--- a/cvp-spt.groovy
+++ /dev/null
@@ -1,63 +0,0 @@
-/**
- *
- * Launch pytest frameworks in Jenkins
- *
- * Expected parameters:
- *   SALT_MASTER_URL                 URL of Salt master
- *   SALT_MASTER_CREDENTIALS         Credentials to the Salt API
- *
- *   TESTS_SET                       Leave empty for full run or choose a file (test)
- *   TESTS_REPO                      Repo to clone
- *   TESTS_SETTINGS                  Additional environment varibales to apply
- *   PROXY                           Proxy to use for cloning repo or for pip
- *
- */
-
-validate = new com.mirantis.mcp.Validate()
-
-node() {
-    try{
-        stage('Initialization') {
-            validate.prepareVenv(TESTS_REPO, PROXY)
-        }
-
-        stage('Run Tests') {
-            validate.runTests(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS, TESTS_SET, '', TESTS_SETTINGS)
-        }
-        stage ('Publish results') {
-            archiveArtifacts artifacts: "*"
-            junit "*.xml"
-            plot csvFileName: 'plot-8634d2fe-dc48-4713-99f9-b69a381483aa.csv',
-                 group: 'SPT',
-                 style: 'line',
-                 title: 'SPT Glance results',
-                 xmlSeries: [[
-                 file: "report.xml",
-                 nodeType: 'NODESET',
-                 url: '',
-                 xpath: '/testsuite/testcase[@name="test_speed_glance"]/properties/property']]
-            plot csvFileName: 'plot-8634d2fe-dc48-4713-99f9-b69a381483bb.csv',
-                 group: 'SPT',
-                 style: 'line',
-                 title: 'SPT HW2HW results',
-                 xmlSeries: [[
-                 file: "report.xml",
-                 nodeType: 'NODESET',
-                 url: '',
-                 xpath: '/testsuite/testcase[@classname="cvp_spt.tests.test_hw2hw"]/properties/property']]
-            plot csvFileName: 'plot-8634d2fe-dc48-4713-99f9-b69a381483bc.csv',
-                 group: 'SPT',
-                 style: 'line',
-                 title: 'SPT VM2VM results',
-                 xmlSeries: [[
-                 file: "report.xml",
-                 nodeType: 'NODESET',
-                 url: '',
-                 xpath: '/testsuite/testcase[@classname="cvp_spt.tests.test_vm2vm"]/properties/property']]
-        }
-    } catch (Throwable e) {
-        // If there was an error or exception thrown, the build failed
-        currentBuild.result = "FAILURE"
-        throw e
-    }
-}
diff --git a/cvp-stacklight.groovy b/cvp-stacklight.groovy
deleted file mode 100644
index e7ce974..0000000
--- a/cvp-stacklight.groovy
+++ /dev/null
@@ -1,33 +0,0 @@
-/**
- *
- * Temporary pipeline for running cvp-stacklight job
- *
- * Expected parameters:
- *   SALT_MASTER_URL                 URL of Salt master
- *   SALT_MASTER_CREDENTIALS         Credentials to the Salt API
- *
- *   TESTS_SET                       Leave empty for full run or choose a file (test)
- *   TESTS_REPO                      Repo to clone
- *   TESTS_SETTINGS                  Additional environment varibales to apply
- *   PROXY                           Proxy to use for cloning repo or for pip
- *
- */
-
-validate = new com.mirantis.mcp.Validate()
-
-def artifacts_dir = 'validation_artifacts/'
-
-node() {
-    stage('Initialization') {
-        validate.prepareVenv(TESTS_REPO, PROXY)
-    }
-
-    stage('Run Tests') {
-        sh "mkdir -p ${artifacts_dir}"
-        validate.runTests(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS, TESTS_SET, artifacts_dir, TESTS_SETTINGS)
-    }
-    stage ('Publish results') {
-        archiveArtifacts artifacts: "${artifacts_dir}/*"
-        junit "${artifacts_dir}/*.xml"
-    }
-}
diff --git a/generate-cookiecutter-products.groovy b/generate-cookiecutter-products.groovy
index 72c5cba..99ee3ea 100644
--- a/generate-cookiecutter-products.groovy
+++ b/generate-cookiecutter-products.groovy
@@ -8,6 +8,7 @@
  **/
 import static groovy.json.JsonOutput.toJson
 import static groovy.json.JsonOutput.prettyPrint
+import org.apache.commons.net.util.SubnetUtils
 
 common = new com.mirantis.mk.Common()
 common2 = new com.mirantis.mcp.Common()
@@ -294,6 +295,16 @@
                     sh "sed -i 's,${i[0]}=.*,${i[0]}=${i[1]},' user_data"
                 }
 
+                // calculate netmask
+                def deployNetworkSubnet = ''
+                if (context.get('deploy_network_subnet')) {
+                    def subnet = new SubnetUtils(context['deploy_network_subnet'])
+                    deployNetworkSubnet = subnet.getInfo().getNetmask()
+                } else if (context.get('deploy_network_netmask')) { // case for 2018.4.0
+                    deployNetworkSubnet = context['deploy_network_netmask']
+                } else {
+                    error('Either deploy_network_subnet or deploy_network_netmask must be set in the context!')
+                }
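+                // NOTE: illustrative example of the conversion above (value is not taken from a real context):
+                // new SubnetUtils('10.11.0.0/24').getInfo().getNetmask() returns '255.255.255.0',
+                // which is the dotted netmask later passed via --netmask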
                 // create cfg config-drive
                 if (outdateGeneration) {
                     args += ["--hostname ${context['salt_master_hostname']}", "${context['salt_master_hostname']}.${context['cluster_domain']}-config.iso"]
@@ -301,10 +312,10 @@
                 } else {
                     args += [
                         "--name ${context['salt_master_hostname']}", "--hostname ${context['salt_master_hostname']}.${context['cluster_domain']}", "--clean-up",
-                        "--ip ${context['salt_master_management_address']}", "--netmask ${context['deploy_network_netmask']}", "--gateway ${context['deploy_network_gateway']}",
+                        "--ip ${context['salt_master_management_address']}", "--netmask ${deployNetworkSubnet}", "--gateway ${context['deploy_network_gateway']}",
                         "--dns-nameservers ${context['dns_server01']},${context['dns_server02']}"
                     ]
-                    sh "python ./create-config-drive.py ${args.join(' ')}"
+                    sh "chmod 0755 create-config-drive.py ; ./create-config-drive.py ${args.join(' ')}"
                 }
                 sh("mkdir output-${context['cluster_name']} && mv ${context['salt_master_hostname']}.${context['cluster_domain']}-config.iso output-${context['cluster_name']}/")
 
@@ -332,7 +343,7 @@
                         sh "./create-config-drive --user-data mirror_config --hostname ${aptlyServerHostname} ${aptlyServerHostname}.${context['cluster_domain']}-config.iso"
                     } else {
                         args = [
-                            "--ip ${context['aptly_server_deploy_address']}", "--netmask ${context['deploy_network_netmask']}", "--gateway ${context['deploy_network_gateway']}",
+                            "--ip ${context['aptly_server_deploy_address']}", "--netmask ${deployNetworkSubnet}", "--gateway ${context['deploy_network_gateway']}",
                             "--user-data mirror_config", "--hostname ${aptlyServerHostname}.${context['cluster_domain']}", "--name ${aptlyServerHostname}", "--clean-up",
                             "--dns-nameservers ${context['dns_server01']},${context['dns_server02']}"
                         ]
diff --git a/validate-cloud.groovy b/validate-cloud.groovy
index 000c34c..930a27d 100644
--- a/validate-cloud.groovy
+++ b/validate-cloud.groovy
@@ -1,22 +1,15 @@
 /**
  *
- * Launch validation of the cloud
+ * Launch validation of the cloud with Rally
  *
  * Expected parameters:
  *
- *   ACCUMULATE_RESULTS          If true, results from the previous build will be used
  *   JOB_TIMEOUT                 Job timeout in hours
- *   RUN_RALLY_TESTS             If not false, run Rally tests
- *   RUN_SPT_TESTS               If not false, run SPT tests
- *   RUN_TEMPEST_TESTS           If not false, run Tempest tests
- *   TEST_IMAGE                  Docker image link
- *   TARGET_NODE                 Salt target for tempest node
  *   SALT_MASTER_URL             URL of Salt master
  *   SALT_MASTER_CREDENTIALS     Credentials to the Salt API
+ *   VALIDATE_PARAMS             Validate job YAML params (see below)
  *
- *   Additional validate job YAML params:
- *
- *   Rally
+ *   Rally - map with parameters for starting Rally tests
  *
  *   AVAILABILITY_ZONE           The name of availability zone
  *   FLOATING_NETWORK            The name of the external(floating) network
@@ -32,149 +25,362 @@
  *   RALLY_SL_SCENARIOS          Path to file or directory with stacklight rally scenarios
  *   RALLY_TASK_ARGS_FILE        Path to file with rally tests arguments
  *   RALLY_DB_CONN_STRING        Rally-compliant DB connection string for long-term storing
-                                 results to external DB
+ *                               results to external DB
  *   RALLY_TAGS                  List of tags for marking Rally tasks. Can be used when
-                                 generating Rally trends based on particular group of tasks
+ *                               generating Rally trends based on particular group of tasks
  *   RALLY_TRENDS                If enabled, generate Rally trends report. Requires external DB
-                                 connection string to be set. If RALLY_TAGS was set, trends will
-                                 be generated based on finished tasks with these tags, otherwise
-                                 on all the finished tasks available in DB
+ *                               connection string to be set. If RALLY_TAGS was set, trends will
+ *                               be generated based on finished tasks with these tags, otherwise
+ *                               on all the finished tasks available in DB
  *   SKIP_LIST                   List of the Rally scenarios which should be skipped
- *   REPORT_DIR                  Path for reports outside docker image
  *
- *   Tempest
- *
- *   TEMPEST_TEST_SET            If not false, run tests matched to pattern only
- *   TEMPEST_CONFIG_REPO         Git repository with configuration files for Tempest
- *   TEMPEST_CONFIG_BRANCH       Git branch which will be used during the checkout
- *   TEMPEST_REPO                Git repository with Tempest
- *   TEMPEST_VERSION             Version of Tempest (tag, branch or commit)
- *   GENERATE_REPORT             If not false, run report generation command
- *
- *   SPT
- *
- *   AVAILABILITY_ZONE           The name of availability zone
- *   FLOATING_NETWORK            The name of the external(floating) network
- *   SPT_SSH_USER                The name of the user which should be used for ssh to nodes
- *   SPT_IMAGE                   The name of the image for SPT tests
- *   SPT_IMAGE_USER              The name of the user for SPT image
- *   SPT_FLAVOR                  The name of the flavor for SPT image
- *   GENERATE_REPORT             If not false, run report generation command
- *
+ *   PARALLEL_PERFORMANCE        If enabled, run Rally tests separately in parallel for each sub directory found
+ *                               inside RALLY_SCENARIOS and RALLY_SL_SCENARIOS (if STACKLIGHT_RALLY is enabled)
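+ *
+ *   An illustrative VALIDATE_PARAMS layout (values are examples only; the keys follow
+ *   the parameters documented above and those read by this pipeline):
+ *
+ *   rally:
+ *     K8S_RALLY: false
+ *     STACKLIGHT_RALLY: true
+ *     PARALLEL_PERFORMANCE: false
+ *     AVAILABILITY_ZONE: nova
+ *     FLOATING_NETWORK: public
+ *     RALLY_IMAGE: cirros
+ *     RALLY_FLAVOR: m1.tiny
+ *     RALLY_SCENARIOS: scenarios/openstack
+ *     RALLY_SL_SCENARIOS: scenarios/stacklight
+ *     RALLY_TASK_ARGS_FILE: job-params.yaml
+ *     RALLY_TRENDS: false
+ *     SKIP_LIST: ''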
  */
 
 common = new com.mirantis.mk.Common()
-test = new com.mirantis.mk.Test()
 validate = new com.mirantis.mcp.Validate()
-def python = new com.mirantis.mk.Python()
+salt = new com.mirantis.mk.Salt()
+salt_testing = new com.mirantis.mk.SaltModelTesting()
 
-def pepperEnv = "pepperEnv"
-def artifacts_dir = 'validation_artifacts/'
 def VALIDATE_PARAMS = readYaml(text: env.getProperty('VALIDATE_PARAMS')) ?: [:]
 if (! VALIDATE_PARAMS) {
     throw new Exception("VALIDATE_PARAMS yaml is empty.")
 }
+def TEST_IMAGE = env.getProperty('TEST_IMAGE') ?: 'xrally-openstack:1.4.0'
+def JOB_TIMEOUT = (env.getProperty('JOB_TIMEOUT') ?: '12').toInteger()
+def SLAVE_NODE = env.getProperty('SLAVE_NODE') ?: 'docker'
+def rally = VALIDATE_PARAMS.get('rally') ?: [:]
+def scenariosRepo = rally.get('RALLY_CONFIG_REPO') ?: 'https://review.gerrithub.io/Mirantis/scale-scenarios'
+def scenariosBranch = rally.get('RALLY_CONFIG_BRANCH') ?: 'master'
+def pluginsRepo = rally.get('RALLY_PLUGINS_REPO') ?: 'https://github.com/Mirantis/rally-plugins'
+def pluginsBranch = rally.get('RALLY_PLUGINS_BRANCH') ?: 'master'
+def tags = rally.get('RALLY_TAGS') ?: []
 
-if (env.JOB_TIMEOUT == ''){
-    job_timeout = 12
-} else {
-    job_timeout = env.JOB_TIMEOUT.toInteger()
-}
-timeout(time: job_timeout, unit: 'HOURS') {
-    node() {
-        try{
-            stage('Setup virtualenv for Pepper') {
-                python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
-            }
+// container working dir vars
+def rallyWorkdir = '/home/rally'
+def rallyPluginsDir = "${rallyWorkdir}/rally-plugins"
+def rallyScenariosDir = "${rallyWorkdir}/rally-scenarios"
+def rallyResultsDir = "${rallyWorkdir}/test_results"
+def rallySecrets = "${rallyWorkdir}/secrets"
 
-            stage('Configure') {
-                validate.installDocker(pepperEnv, TARGET_NODE)
-                if (ACCUMULATE_RESULTS.toBoolean() == false) {
-                    sh "rm -r ${artifacts_dir}"
+// env vars
+def env_vars = []
+def platform = [
+    type: 'unknown',
+    stacklight: [enabled: false, grafanaPass: ''],
+]
+def cmp_count
+
+// test results vars
+def testResult
+def tasksParallel = [:]
+def parallelResults = [:]
+def configRun = [:]
+
+timeout(time: JOB_TIMEOUT, unit: 'HOURS') {
+    node (SLAVE_NODE) {
+
+        // local dir vars
+        def workDir = "${env.WORKSPACE}/rally"
+        def pluginsDir = "${workDir}/rally-plugins"
+        def scenariosDir = "${workDir}/rally-scenarios"
+        def secrets = "${workDir}/secrets"
+        def artifacts = "${workDir}/validation_artifacts"
+
+        stage('Configure env') {
+
+            def master = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+
+            // create local directories
+            sh "rm -rf ${workDir} || true"
+            sh "mkdir -p ${artifacts} ${secrets}"
+            writeFile file: "${workDir}/entrypoint.sh", text: '''#!/bin/bash
+set -xe
+exec "$@"
+'''
+            sh "chmod 755 ${workDir}/entrypoint.sh"
+
+            // clone repo with Rally plugins and checkout refs/branch
+            checkout([
+                $class           : 'GitSCM',
+                branches         : [[name: 'FETCH_HEAD']],
+                extensions       : [[$class: 'RelativeTargetDirectory', relativeTargetDir: pluginsDir]],
+                userRemoteConfigs: [[url: pluginsRepo, refspec: pluginsBranch]],
+            ])
+
+            // clone scenarios repo and switch branch / fetch refspecs
+            checkout([
+                $class           : 'GitSCM',
+                branches         : [[name: 'FETCH_HEAD']],
+                extensions       : [[$class: 'RelativeTargetDirectory', relativeTargetDir: scenariosDir]],
+                userRemoteConfigs: [[url: scenariosRepo, refspec: scenariosBranch]],
+            ])
+
+            // get number of computes in the cluster
+            platform['cluster_name'] = salt.getPillar(
+                master, 'I@salt:master', '_param:cluster_name'
+            )['return'][0].values()[0]
+            def rcs_str_node = salt.getPillar(
+                master, 'I@salt:master', 'reclass:storage:node'
+            )['return'][0].values()[0]
+
+            // set up Openstack env variables
+            if (rally.get('K8S_RALLY').toBoolean() == false) {
+
+                platform['type'] = 'openstack'
+                platform['cmp_count'] = rcs_str_node.openstack_compute_rack01['repeat']['count']
+                def rally_variables = [
+                    "floating_network=${rally.FLOATING_NETWORK}",
+                    "rally_image=${rally.RALLY_IMAGE}",
+                    "rally_flavor=${rally.RALLY_FLAVOR}",
+                    "availability_zone=${rally.AVAILABILITY_ZONE}",
+                ]
+
+                env_vars = validate._get_keystone_creds_v3(master)
+                if (!env_vars) {
+                    env_vars = validate._get_keystone_creds_v2(master)
                 }
-                sh "mkdir -p ${artifacts_dir}"
+                env_vars = env_vars + rally_variables
+
+            } else {
+            // set up Kubernetes env variables and get required secrets
+                platform['type'] = 'k8s'
+                platform['cmp_count'] = rcs_str_node.kubernetes_compute_rack01['repeat']['count']
+
+                def kubernetes = salt.getPillar(
+                    master, 'I@kubernetes:master and *01*', 'kubernetes:master'
+                )['return'][0].values()[0]
+
+                env_vars = [
+                    "KUBERNETES_HOST=http://${kubernetes.apiserver.vip_address}" +
+                    ":${kubernetes.apiserver.insecure_port}",
+                    "KUBERNETES_CERT_AUTH=${rallySecrets}/k8s-ca.crt",
+                    "KUBERNETES_CLIENT_KEY=${rallySecrets}/k8s-client.key",
+                    "KUBERNETES_CLIENT_CERT=${rallySecrets}/k8s-client.crt",
+                ]
+
+                // get K8S certificates to manage cluster
+                def k8s_ca = salt.getFileContent(
+                    master, 'I@kubernetes:master and *01*', '/etc/kubernetes/ssl/ca-kubernetes.crt'
+                )
+                def k8s_client_key = salt.getFileContent(
+                    master, 'I@kubernetes:master and *01*', '/etc/kubernetes/ssl/kubelet-client.key'
+                )
+                def k8s_client_crt = salt.getFileContent(
+                    master, 'I@kubernetes:master and *01*', '/etc/kubernetes/ssl/kubelet-client.crt'
+                )
+                writeFile file: "${secrets}/k8s-ca.crt", text: k8s_ca
+                writeFile file: "${secrets}/k8s-client.key", text: k8s_client_key
+                writeFile file: "${secrets}/k8s-client.crt", text: k8s_client_crt
+
             }
 
-            stage('Run Tempest tests') {
-                if (RUN_TEMPEST_TESTS.toBoolean() == true) {
-                    def tempest = VALIDATE_PARAMS.get('tempest') ?: []
-                    validate.runTempestTests(
-                        pepperEnv, TARGET_NODE, TEST_IMAGE,
-                        artifacts_dir, tempest.TEMPEST_CONFIG_REPO,
-                        tempest.TEMPEST_CONFIG_BRANCH, tempest.TEMPEST_REPO,
-                        tempest.TEMPEST_VERSION, tempest.TEMPEST_TEST_SET
+            // get Stacklight data
+            if (rally.STACKLIGHT_RALLY.toBoolean() == true) {
+                platform['stacklight']['enabled'] = true
+
+                def grafana = salt.getPillar(
+                    master, 'I@grafana:client', 'grafana:client:server'
+                )['return'][0].values()[0]
+
+                platform['stacklight']['grafanaPass'] = grafana['password']
+            }
+
+            if (! rally.PARALLEL_PERFORMANCE.toBoolean()) {
+
+                // Define map with docker commands
+                def commands = validate.runRallyTests(
+                    platform, rally.RALLY_SCENARIOS,
+                    rally.RALLY_SL_SCENARIOS, rally.RALLY_TASK_ARGS_FILE,
+                    rally.RALLY_DB_CONN_STRING, tags,
+                    rally.RALLY_TRENDS.toBoolean(), rally.SKIP_LIST
+                )
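+                // wrap each generated shell command string into a closure; these are
+                // expected to be executed inside the Rally container by
+                // salt_testing.setupDockerAndTest via 'runCommands' below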
+                def commands_list = commands.collectEntries{ [ (it.key) : { sh("${it.value}") } ] }
+
+                configRun = [
+                    'image': TEST_IMAGE,
+                    'baseRepoPreConfig': false,
+                    'dockerMaxCpus': 2,
+                    'dockerHostname': 'localhost',
+                    'dockerExtraOpts': [
+                        "--network=host",
+                        "--entrypoint=/entrypoint.sh",
+                        "-w ${rallyWorkdir}",
+                        "-v ${workDir}/entrypoint.sh:/entrypoint.sh",
+                        "-v ${pluginsDir}/:${rallyPluginsDir}",
+                        "-v ${scenariosDir}/:${rallyScenariosDir}",
+                        "-v ${artifacts}/:${rallyResultsDir}",
+                        "-v ${secrets}/:${rallySecrets}",
+                    ],
+                    'envOpts'         : env_vars,
+                    'runCommands'     : commands_list,
+                ]
+                common.infoMsg('Docker config:')
+                println configRun
+                common.infoMsg('Docker commands list:')
+                println commands
+
+            } else {
+
+                // Perform parallel testing of the components with Rally
+                def components = [
+                    Common: [],
+                    Stacklight: [],
+                ]
+
+                // get list of directories inside scenarios path
+                def scenPath = "${scenariosDir}/${rally.RALLY_SCENARIOS}"
+                def mainComponents = sh(
+                    script: "find ${scenPath} -maxdepth 1 -mindepth 1 -type d -exec basename {} \\;",
+                    returnStdout: true,
+                ).trim()
+                if (! mainComponents) {
+                    error(
+                        "No directories found inside RALLY_SCENARIOS ${rally.RALLY_SCENARIOS}\n" +
+                        "Either set PARALLEL_PERFORMANCE=false or populate ${rally.RALLY_SCENARIOS} " +
+                        "with component directories which include corresponding scenarios"
                     )
-                    if (tempest.GENERATE_REPORT.toBoolean() == true) {
-                        common.infoMsg("Generating html test report ...")
-                        validate.generateTestReport(pepperEnv, TARGET_NODE, TEST_IMAGE, artifacts_dir)
-                    }
-                } else {
-                    common.infoMsg("Skipping Tempest tests")
                 }
-            }
+                components['Common'].addAll(mainComponents.split('\n'))
+                common.infoMsg( "Adding sub directories found in " +
+                    "RALLY_SCENARIOS (${rally.RALLY_SCENARIOS}) for parallel execution:"
+                )
+                print mainComponents
 
-            stage('Run Rally tests') {
-                if (RUN_RALLY_TESTS.toBoolean() == true) {
-                    def rally = VALIDATE_PARAMS.get('rally') ?: []
-                    def tags = rally.get('RALLY_TAGS') ?: []
-                    def report_dir = rally.REPORT_DIR ?: '/root/qa_results'
-                    def platform = ["type":"unknown", "stacklight_enabled":false]
-                    def rally_variables = []
-                    if (rally.K8S_RALLY.toBoolean() == false) {
-                      platform['type'] = 'openstack'
-                      rally_variables = ["floating_network=${rally.FLOATING_NETWORK}",
-                                         "rally_image=${rally.RALLY_IMAGE}",
-                                         "rally_flavor=${rally.RALLY_FLAVOR}",
-                                         "availability_zone=${rally.AVAILABILITY_ZONE}"]
-                    } else {
-                      platform['type'] = 'k8s'
+                if (rally.STACKLIGHT_RALLY.toBoolean() == true) {
+                    def slScenPath = "${scenariosDir}/${rally.RALLY_SL_SCENARIOS}"
+                    def slComponents = sh(
+                        script: "find ${slScenPath} -maxdepth 1 -mindepth 1 -type d -exec basename {} \\;",
+                        returnStdout: true,
+                    ).trim()
+                    if (! slComponents) {
+                        error(
+                            "No directories found inside RALLY_SCENARIOS ${rally.RALLY_SL_SCENARIOS}\n" +
+                            "Either set PARALLEL_PERFORMANCE=false or populate ${rally.RALLY_SL_SCENARIOS} " +
+                            "with component directories which include corresponding scenarios"
+                        )
                     }
-                    if (rally.STACKLIGHT_RALLY.toBoolean() == true) {
-                      platform['stacklight_enabled'] = true
+                    components['Stacklight'].addAll(slComponents.split('\n'))
+                    common.infoMsg( "Adding sub directories found in " +
+                        "RALLY_SL_SCENARIOS (${rally.RALLY_SL_SCENARIOS}) for parallel execution:"
+                    )
+                    print slComponents
+                }
+
+                // build up a map with tasks for parallel execution
+                def allComponents = components.values().flatten()
+                for (int i=0; i < allComponents.size(); i++) {
+                    // stagger the start so the threads do not collide at startup
+                    // and the first thread has time to create the rally deployment,
+                    // which the remaining threads can then reuse
+                    def sleepSeconds = 15 * i
+
+                    def task = allComponents[i]
+                    def task_name = 'rally_' + task
+                    def curComponent = components.find { task in it.value }.key
+                    // inherit platform common data
+                    def curPlatform = platform
+
+                    // setup scenarios and stacklight switch per component
+                    def commonScens = "${rally.RALLY_SCENARIOS}/${task}"
+                    def stacklightScens = "${rally.RALLY_SL_SCENARIOS}/${task}"
+
+                    switch (curComponent) {
+                        case 'Common':
+                            stacklightScens = ''
+                            curPlatform['stacklight']['enabled'] = false
+                        break
+                        case 'Stacklight':
+                            commonScens = ''
+                            curPlatform['stacklight']['enabled'] = true
+                        break
                     }
-                    validate.runRallyTests(
-                        pepperEnv, TARGET_NODE, TEST_IMAGE,
-                        platform, artifacts_dir, rally.RALLY_CONFIG_REPO,
-                        rally.RALLY_CONFIG_BRANCH, rally.RALLY_PLUGINS_REPO,
-                        rally.RALLY_PLUGINS_BRANCH, rally.RALLY_SCENARIOS,
-                        rally.RALLY_SL_SCENARIOS, rally.RALLY_TASK_ARGS_FILE,
+
+                    def curCommands = validate.runRallyTests(
+                        curPlatform, commonScens,
+                        stacklightScens, rally.RALLY_TASK_ARGS_FILE,
                         rally.RALLY_DB_CONN_STRING, tags,
-                        rally.RALLY_TRENDS, rally_variables,
-                        report_dir, rally.SKIP_LIST
+                        rally.RALLY_TRENDS.toBoolean(), rally.SKIP_LIST
                     )
-                } else {
-                    common.infoMsg("Skipping Rally tests")
-                }
-            }
 
-            stage('Run SPT tests') {
-                if (RUN_SPT_TESTS.toBoolean() == true) {
-                    def spt = VALIDATE_PARAMS.get('spt') ?: []
-                    def spt_variables = ["spt_ssh_user=${spt.SPT_SSH_USER}",
-                                         "spt_floating_network=${spt.FLOATING_NETWORK}",
-                                         "spt_image=${spt.SPT_IMAGE}",
-                                         "spt_user=${spt.SPT_IMAGE_USER}",
-                                         "spt_flavor=${spt.SPT_FLAVOR}",
-                                         "spt_availability_zone=${spt.AVAILABILITY_ZONE}"]
-                    validate.runSptTests(pepperEnv, TARGET_NODE, TEST_IMAGE, artifacts_dir, spt_variables)
+                    // copy required files for the current task
+                    def taskWorkDir = "${env.WORKSPACE}/rally_" + task
+                    def taskPluginsDir = "${taskWorkDir}/rally-plugins"
+                    def taskScenariosDir = "${taskWorkDir}/rally-scenarios"
+                    def taskArtifacts = "${taskWorkDir}/validation_artifacts"
+                    def taskSecrets = "${taskWorkDir}/secrets"
+                    sh "rm -rf ${taskWorkDir} || true"
+                    sh "cp -ra ${workDir} ${taskWorkDir}"
 
-                    if (spt.GENERATE_REPORT.toBoolean() == true) {
-                        common.infoMsg("Generating html test report ...")
-                        validate.generateTestReport(pepperEnv, TARGET_NODE, TEST_IMAGE, artifacts_dir)
+                    def curCommandsList = curCommands.collectEntries{ [ (it.key) : { sh("${it.value}") } ] }
+                    def curConfigRun = [
+                        'image': TEST_IMAGE,
+                        'baseRepoPreConfig': false,
+                        'dockerMaxCpus': 2,
+                        'dockerHostname': 'localhost',
+                        'dockerExtraOpts': [
+                            "--network=host",
+                            "--entrypoint=/entrypoint.sh",
+                            "-w ${rallyWorkdir}",
+                            "-v ${taskWorkDir}/entrypoint.sh:/entrypoint.sh",
+                            "-v ${taskPluginsDir}/:${rallyPluginsDir}",
+                            "-v ${taskScenariosDir}/:${rallyScenariosDir}",
+                            "-v ${taskArtifacts}/:${rallyResultsDir}",
+                            "-v ${taskSecrets}/:${rallySecrets}",
+                        ],
+                        'envOpts'         : env_vars,
+                        'runCommands'     : curCommandsList,
+                    ]
+
+                    tasksParallel['rally_' + task] = {
+                        sleep sleepSeconds
+                        common.infoMsg("Docker config for task $task")
+                        println curConfigRun
+                        common.infoMsg("Docker commands list for task $task")
+                        println curCommands
+                        parallelResults[task_name] = salt_testing.setupDockerAndTest(curConfigRun)
                     }
-                } else {
-                    common.infoMsg("Skipping SPT tests")
                 }
             }
+        }
 
-            stage('Collect results') {
-                archiveArtifacts artifacts: "${artifacts_dir}/*"
+        stage('Run Rally tests') {
+
+            def dockerStatuses = [:]
+
+            // start tests in Docker
+            if (! rally.PARALLEL_PERFORMANCE.toBoolean()) {
+                testResult = salt_testing.setupDockerAndTest(configRun)
+                dockerStatuses['rally'] = (testResult) ? 'OK' : 'FAILED'
+            } else {
+                common.infoMsg('Jobs to run in threads: ' + tasksParallel.keySet().join(' '))
+                parallel tasksParallel
+                parallelResults.each { task ->
+                    dockerStatuses[task.key] = (task.value) ? 'OK' : 'FAILED'
+                }
             }
-        } catch (Throwable e) {
-            // If there was an error or exception thrown, the build failed
-            currentBuild.result = "FAILURE"
-            currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
-            throw e
+            // safely archiving all possible results
+            dockerStatuses.each { task ->
+                print "Collecting results for ${task.key} (docker status = '${task.value}')"
+                try {
+                    archiveArtifacts artifacts: "${task.key}/validation_artifacts/*"
+                } catch (Throwable e) {
+                    print "Failed to archive artifacts for ${task.key}: ${e.message}"
+                }
+            }
+            // setting final job status
+            def failed = dockerStatuses.findAll { it.value == 'FAILED' }
+            if (failed.size() == dockerStatuses.size()) {
+                currentBuild.result = 'FAILURE'
+            } else if (dockerStatuses.find { it.value != 'OK' }) {
+                currentBuild.result = 'UNSTABLE'
+            }
+        }
+
+        stage('Clean env') {
+            // remove secrets
+            sh 'find ./ -type d -name secrets -exec rm -rf "{}" \\; || true'
         }
     }
 }