Merge "Fix report file path"
diff --git a/.gitignore b/.gitignore
index f8b92c3..3060674 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,2 +1,3 @@
 .gradle
 build
+.idea
\ No newline at end of file
diff --git a/aptly-promote-pipeline.groovy b/aptly-promote-pipeline.groovy
index 00d41b8..eb10f40 100644
--- a/aptly-promote-pipeline.groovy
+++ b/aptly-promote-pipeline.groovy
@@ -26,8 +26,9 @@
   } catch (Throwable e) {
      // If there was an error or exception thrown, the build failed
      currentBuild.result = "FAILURE"
+     currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
      throw e
   } finally {
      common.sendNotification(currentBuild.result,"",["slack"])
   }
-}
\ No newline at end of file
+}
diff --git a/build-debian-packages-influxdb-relay.groovy b/build-debian-packages-influxdb-relay.groovy
new file mode 100644
index 0000000..6040849
--- /dev/null
+++ b/build-debian-packages-influxdb-relay.groovy
@@ -0,0 +1,106 @@
+def common = new com.mirantis.mk.Common()
+def git = new com.mirantis.mk.Git()
+def artifactory = new com.mirantis.mk.Artifactory()
+def aptly = new com.mirantis.mk.Aptly()
+
+def timestamp = common.getDatetime()
+def version = "1.0~${timestamp}"
+
+node('docker') {
+    try {
+
+        stage("cleanup") {
+            sh("rm -rf * || true")
+        }
+
+        def workingDir = "src/github.com/influxdata"
+        stage("checkout") {
+            git.checkoutGitRepository(
+                "${workingDir}/influxdb-relay",
+                "${SOURCE_URL}",
+                SOURCE_BRANCH,
+                SOURCE_CREDENTIALS,
+                true,
+                30,
+                1
+            )
+        }
+
+        try {
+
+            def jenkinsUID = sh (
+                script: 'id -u',
+                returnStdout: true
+            ).trim()
+            def imgName = "${OS}-${DIST}-${ARCH}"
+            def img
+
+            stage("build image") {
+                img = docker.build(
+                    "${imgName}:${timestamp}",
+                    [
+                        "--build-arg uid=${jenkinsUID}",
+                        "--build-arg timestamp=${timestamp}",
+                        "-f ${workingDir}/influxdb-relay/docker/${OS}-${DIST}-${ARCH}.Dockerfile",
+                        "."
+                    ].join(' ')
+                )
+            }
+            stage("build package") {
+                img.inside{
+                    sh("""wget https://storage.googleapis.com/golang/go1.9.linux-amd64.tar.gz &&
+                        tar xf go1.9.linux-amd64.tar.gz &&
+                        export GOROOT=\$PWD/go &&
+                        export PATH=\$PATH:\$GOROOT/bin &&
+                        export GOPATH=\$PWD &&
+                        cd src/github.com/influxdata/influxdb-relay &&
+                        ./build.py --package --version=\"${version}\" --platform=linux --arch=amd64""")
+                }
+                archiveArtifacts artifacts: "${workingDir}/influxdb-relay/build/*.deb"
+            }
+            if (UPLOAD_APTLY.toBoolean()) {
+                lock("aptly-api") {
+                    stage("upload") {
+                        def buildSteps = [:]
+                        def debFiles = sh script: "ls ${workingDir}/influxdb-relay/build/*.deb", returnStdout: true
+                        def debFilesArray = debFiles.trim().tokenize()
+                        def workspace = common.getWorkspace()
+                        for (int i = 0; i < debFilesArray.size(); i++) {
+
+                            def debFile = debFilesArray[i];
+                            buildSteps[debFile] = aptly.uploadPackageStep(
+                                "${workspace}/"+debFile,
+                                APTLY_URL,
+                                APTLY_REPO,
+                                true
+                            )
+                        }
+                        parallel buildSteps
+                    }
+                    stage("publish") {
+                        aptly.snapshotRepo(APTLY_URL, APTLY_REPO, timestamp)
+                        aptly.publish(APTLY_URL)
+                    }
+                }
+            }
+
+        } catch (Exception e) {
+            currentBuild.result = 'FAILURE'
+            println "Cleaning up docker images"
+            sh("docker images | grep -E '[-:\\ ]+${timestamp}[\\.\\ /\$]+' | awk '{print \$3}' | xargs docker rmi -f || true")
+            throw e
+        }
+
+    } catch (Throwable e) {
+       // If there was an exception thrown, the build failed
+       currentBuild.result = "FAILURE"
+       currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+       throw e
+    } finally {
+       common.sendNotification(currentBuild.result,"",["slack"])
+
+       if (currentBuild.result != 'FAILURE') {
+          sh("rm -rf *")
+       }
+    }
+}
diff --git a/build-debian-packages-jmx-exporter.groovy b/build-debian-packages-jmx-exporter.groovy
index d356f69..71f626e 100644
--- a/build-debian-packages-jmx-exporter.groovy
+++ b/build-debian-packages-jmx-exporter.groovy
@@ -69,6 +69,7 @@
     } catch (Throwable e) {
        // If there was an exception thrown, the build failed
        currentBuild.result = "FAILURE"
+       currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
        throw e
     } finally {
        common.sendNotification(currentBuild.result,"",["slack"])
diff --git a/build-debian-packages-libvirt-exporter.groovy b/build-debian-packages-libvirt-exporter.groovy
index eb109cb..d373961 100644
--- a/build-debian-packages-libvirt-exporter.groovy
+++ b/build-debian-packages-libvirt-exporter.groovy
@@ -31,7 +31,7 @@
             }
         }
 
-        def img = dockerLib.getImage("tcpcloud/debian-build-ubuntu-xenial")
+        def img = dockerLib.getImage("tcpcloud/debian-build-ubuntu-${DIST}")
         stage("build package") {
             img.inside("-u root:root") {
                 sh("apt-get update && apt-get install ruby ruby-dev && gem install fpm")
@@ -72,6 +72,7 @@
     } catch (Throwable e) {
        // If there was an exception thrown, the build failed
        currentBuild.result = "FAILURE"
+       currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
        throw e
     } finally {
        common.sendNotification(currentBuild.result,"",["slack"])
diff --git a/build-debian-packages-pipeline.groovy b/build-debian-packages-pipeline.groovy
index bc4ed38..0d9839f 100644
--- a/build-debian-packages-pipeline.groovy
+++ b/build-debian-packages-pipeline.groovy
@@ -28,6 +28,13 @@
   uploadPpa = null
 }
 
+def lintianCheck
+try {
+  lintianCheck = LINTIAN_CHECK.toBoolean()
+} catch (MissingPropertyException e) {
+  lintianCheck = true
+}
+
 def uploadAptly
 try {
   uploadAptly = UPLOAD_APTLY.toBoolean()
@@ -73,13 +80,16 @@
       )
       archiveArtifacts artifacts: "build-area/*.deb"
     }
-    stage("lintian") {
-      changes = sh script: "ls build-area/*_"+ARCH+".changes", returnStdout: true
-      try {
-        debian.runLintian(changes.trim(), OS, OS+":"+DIST)
-      } catch (Exception e) {
-        println "[WARN] Lintian returned non-zero exit status"
-        currentBuild.result = 'UNSTABLE'
+
+    if (lintianCheck) {
+      stage("lintian") {
+        changes = sh script: "ls build-area/*_"+ARCH+".changes", returnStdout: true
+        try {
+          debian.runLintian(changes.trim(), OS, OS+":"+DIST)
+        } catch (Exception e) {
+          println "[WARN] Lintian returned non-zero exit status"
+          currentBuild.result = 'UNSTABLE'
+        }
       }
     }
 
@@ -116,8 +126,9 @@
   } catch (Throwable e) {
      // If there was an error or exception thrown, the build failed
      currentBuild.result = "FAILURE"
+     currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
      throw e
   } finally {
      common.sendNotification(currentBuild.result,"",["slack"])
   }
-}
\ No newline at end of file
+}
diff --git a/build-debian-packages-prometheus-relay.groovy b/build-debian-packages-prometheus-relay.groovy
new file mode 100644
index 0000000..1044371
--- /dev/null
+++ b/build-debian-packages-prometheus-relay.groovy
@@ -0,0 +1,108 @@
+def common = new com.mirantis.mk.Common()
+def git = new com.mirantis.mk.Git()
+def artifactory = new com.mirantis.mk.Artifactory()
+def aptly = new com.mirantis.mk.Aptly()
+
+def timestamp = common.getDatetime()
+def version = "0.1~${timestamp}"
+
+node('docker') {
+    try {
+
+        stage("cleanup") {
+            sh("rm -rf * || true")
+        }
+
+        def workingDir = "src/gerrit.mcp.mirantis.net/debian"
+        stage("checkout") {
+            git.checkoutGitRepository(
+                "${workingDir}/prometheus-relay",
+                "${SOURCE_URL}",
+                SOURCE_BRANCH,
+                SOURCE_CREDENTIALS,
+                true,
+                30,
+                1
+            )
+        }
+
+        try {
+
+            def jenkinsUID = sh (
+                script: 'id -u',
+                returnStdout: true
+            ).trim()
+            def imgName = "${OS}-${DIST}-${ARCH}"
+            def img
+
+            stage("build image") {
+                img = docker.build(
+                    "${imgName}:${timestamp}",
+                    [
+                        "--build-arg uid=${jenkinsUID}",
+                        "--build-arg timestamp=${timestamp}",
+                        "-f ${workingDir}/prometheus-relay/docker/${OS}-${DIST}-${ARCH}.Dockerfile",
+                        "."
+                    ].join(' ')
+                )
+            }
+            stage("build package") {
+                img.inside{
+                    sh("""wget https://storage.googleapis.com/golang/go1.8.1.linux-amd64.tar.gz &&
+                        tar xf go1.8.1.linux-amd64.tar.gz &&
+                        export GOROOT=\$PWD/go &&
+                        export GOPATH=\$PWD &&
+                        export PATH=\$PATH:\$GOPATH/bin:\$GOROOT/bin &&
+                        cd src/gerrit.mcp.mirantis.net/debian/prometheus-relay &&
+                        make""")
+                }
+                archiveArtifacts artifacts: "${workingDir}/prometheus-relay/build/*.deb"
+            }
+            if (UPLOAD_APTLY.toBoolean()) {
+                lock("aptly-api") {
+                    stage("upload") {
+                        def buildSteps = [:]
+                        def debFiles = sh script: "ls ${workingDir}/prometheus-relay/build/*.deb", returnStdout: true
+                        def debFilesArray = debFiles.trim().tokenize()
+                        def workspace = common.getWorkspace()
+                        for (int i = 0; i < debFilesArray.size(); i++) {
+
+                            def debFile = debFilesArray[i];
+                            buildSteps[debFile] = aptly.uploadPackageStep(
+                                "${workspace}/"+debFile,
+                                APTLY_URL,
+                                APTLY_REPO,
+                                true
+                            )
+                        }
+                        parallel buildSteps
+                    }
+                    stage("publish") {
+                        aptly.snapshotRepo(APTLY_URL, APTLY_REPO, timestamp)
+                        aptly.publish(APTLY_URL)
+                    }
+
+                    stage("rebuild docker images") {
+                        build job: "docker-build-images-prometheus", parameters: []
+                    }
+                }
+            }
+
+        } catch (Exception e) {
+            currentBuild.result = 'FAILURE'
+            throw e
+        }
+
+    } catch (Throwable e) {
+       // If there was an exception thrown, the build failed
+       currentBuild.result = "FAILURE"
+       currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+       throw e
+    } finally {
+       common.sendNotification(currentBuild.result,"",["slack"])
+
+       if (currentBuild.result != 'FAILURE') {
+          sh("rm -rf *")
+       }
+    }
+}
diff --git a/build-debian-packages-telegraf.groovy b/build-debian-packages-telegraf.groovy
index a2b71a8..dde098e 100644
--- a/build-debian-packages-telegraf.groovy
+++ b/build-debian-packages-telegraf.groovy
@@ -98,6 +98,7 @@
     } catch (Throwable e) {
        // If there was an exception thrown, the build failed
        currentBuild.result = "FAILURE"
+       currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
        throw e
     } finally {
        common.sendNotification(currentBuild.result,"",["slack"])
diff --git a/build-extra-dpdk-pipeline.groovy b/build-extra-dpdk-pipeline.groovy
index 39928dc..357a9ad 100644
--- a/build-extra-dpdk-pipeline.groovy
+++ b/build-extra-dpdk-pipeline.groovy
@@ -59,8 +59,9 @@
      } catch (Throwable e) {
        // If there was an error or exception thrown, the build failed
        currentBuild.result = "FAILURE"
+       currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
        throw e
     } finally {
        common.sendNotification(currentBuild.result,"",["slack"])
     }
-}
\ No newline at end of file
+}
diff --git a/change-config.groovy b/change-config.groovy
index 44832ed..16cd629 100644
--- a/change-config.groovy
+++ b/change-config.groovy
@@ -9,6 +9,7 @@
  *   TARGET_SUBSET_TEST         Number of nodes to test config changes, empty string means all targetted nodes.
  *   TARGET_SUBSET_LIVE         Number of selected noded to live apply selected config changes.
  *   TARGET_BATCH_LIVE          Batch size for the complete live config changes on all nodes, empty string means apply to all targetted nodes.
+ *   PULL_MODEL                 Pull the latest cluster model using reclass.storage.data state
  *
 **/
 
@@ -37,6 +38,14 @@
             saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
         }
 
+        if (common.validInputParam("PULL_MODEL") && PULL_MODEL.toBoolean() == true) {
+            stage('Update the reclass cluster model') {
+                def saltMasterTarget = ['expression': 'I@salt:master', 'type': 'compound']
+                result = salt.runSaltCommand(saltMaster, 'local', saltMasterTarget, 'state.apply', null, "reclass.storage.data")
+                salt.checkResult(result)
+            }
+        }
+
         stage('List target servers') {
             minions = salt.getMinions(saltMaster, TARGET_SERVERS)
             if (minions.isEmpty()) {
diff --git a/cicd-lab-pipeline.groovy b/cicd-lab-pipeline.groovy
index 15252c9..782a051 100644
--- a/cicd-lab-pipeline.groovy
+++ b/cicd-lab-pipeline.groovy
@@ -225,7 +225,7 @@
             // Postgres client - initialize OSS services databases
             timeout(300){
                 println "Waiting for postgresql database to come up.."
-                salt.cmdRun(saltMaster, 'I@postgresql:client', 'while true; do if docker service logs postgresql_db | grep "ready to accept"; then break; else sleep 5; fi; done')
+                salt.cmdRun(saltMaster, 'I@postgresql:client', 'while true; do if docker service logs postgresql_postgresql-db | grep "ready to accept"; then break; else sleep 5; fi; done')
             }
             // XXX: first run usually fails on some inserts, but we need to create databases at first 
             salt.enforceState(saltMaster, 'I@postgresql:client', 'postgresql.client', true, false)
@@ -337,6 +337,7 @@
     } catch (Throwable e) {
         // If there was an error or exception thrown, the build failed
         currentBuild.result = "FAILURE"
+        currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
         throw e
     } finally {
         // Cleanup
diff --git a/cloud-deploy-pipeline.groovy b/cloud-deploy-pipeline.groovy
index 22867ce..f432c16 100644
--- a/cloud-deploy-pipeline.groovy
+++ b/cloud-deploy-pipeline.groovy
@@ -64,9 +64,15 @@
 // Define global variables
 def saltMaster
 def venv
+def outputs = [:]
+
+def ipRegex = "\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}"
 
 if (STACK_TYPE == 'aws') {
     def aws_env_vars
+} else if (STACK_TYPE == 'heat') {
+    def envParams
+    def openstackCloud
 }
 
 node("python") {
@@ -79,9 +85,14 @@
         //
         stage ('Create infrastructure') {
 
+            outputs.put('stack_type', STACK_TYPE)
+
             if (STACK_TYPE == 'heat') {
                 // value defaults
-                def openstackCloud
+                envParams = [
+                    'cluster_zone': HEAT_STACK_ZONE,
+                    'cluster_public_net': HEAT_STACK_PUBLIC_NET
+                ]
 
                 if (STACK_REUSE.toBoolean() == true && STACK_NAME == '') {
                     error("If you want to reuse existing stack you need to provide it's name")
@@ -99,6 +110,10 @@
                     }
                 }
 
+                // no underscore in STACK_NAME
+                STACK_NAME = STACK_NAME.replaceAll('_', '-')
+                outputs.put('stack_name', STACK_NAME)
+
                 // set description
                 currentBuild.description = "${STACK_NAME}"
 
@@ -113,13 +128,14 @@
                     OPENSTACK_API_PROJECT_ID, OPENSTACK_API_USER_DOMAIN,
                     OPENSTACK_API_VERSION)
                 openstack.getKeystoneToken(openstackCloud, venv)
+
                 //
                 // Verify possibility of create stack for given user and stack type
                 //
                 wrap([$class: 'BuildUser']) {
                     if (env.BUILD_USER_ID && !env.BUILD_USER_ID.equals("jenkins") && !STACK_REUSE.toBoolean()) {
-                        def existingStacks = openstack.getStacksForNameContains(openstackCloud, "${env.BUILD_USER_ID}-${JOB_NAME}", venv)
-                        if(existingStacks.size() >= _MAX_PERMITTED_STACKS){
+                        def existingStacks = openstack.getStacksForNameContains(openstackCloud, "${env.BUILD_USER_ID}-${JOB_NAME}".replaceAll('_', '-'), venv)
+                        if (existingStacks.size() >= _MAX_PERMITTED_STACKS) {
                             STACK_DELETE = "false"
                             throw new Exception("You cannot create new stack, you already have ${_MAX_PERMITTED_STACKS} stacks of this type (${JOB_NAME}). \nStack names: ${existingStacks}")
                         }
@@ -127,32 +143,26 @@
                 }
                 // launch stack
                 if (STACK_REUSE.toBoolean() == false) {
-                    stage('Launch new Heat stack') {
-                        envParams = [
-                            'cluster_zone': HEAT_STACK_ZONE,
-                            'cluster_public_net': HEAT_STACK_PUBLIC_NET
-                        ]
 
-                        // set reclass repo in heat env
-                        try {
-                            envParams.put('cfg_reclass_branch', STACK_RECLASS_BRANCH)
-                            envParams.put('cfg_reclass_address', STACK_RECLASS_ADDRESS)
-                        } catch (MissingPropertyException e) {
-                            common.infoMsg("Property STACK_RECLASS_BRANCH or STACK_RECLASS_ADDRESS not found! Using default values from template.")
-                        }
-
-                        def legacy_env = false;
-                        //FIXME:
-                        if (false && STACK_TEMPLATE.startsWith('virtual_') && !STACK_TEMPLATE.contains('aio')) {
-                            legacy_env = true;
-                        }
-
-                        openstack.createHeatStack(openstackCloud, STACK_NAME, STACK_TEMPLATE, envParams, HEAT_STACK_ENVIRONMENT, venv, legacy_env)
+                    // set reclass repo in heat env
+                    try {
+                        envParams.put('cfg_reclass_branch', STACK_RECLASS_BRANCH)
+                        envParams.put('cfg_reclass_address', STACK_RECLASS_ADDRESS)
+                    } catch (MissingPropertyException e) {
+                        common.infoMsg("Property STACK_RECLASS_BRANCH or STACK_RECLASS_ADDRESS not found! Using default values from template.")
                     }
+
+                    openstack.createHeatStack(openstackCloud, STACK_NAME, STACK_TEMPLATE, envParams, HEAT_STACK_ENVIRONMENT, venv)
                 }
 
                 // get SALT_MASTER_URL
                 saltMasterHost = openstack.getHeatStackOutputParam(openstackCloud, STACK_NAME, 'salt_master_ip', venv)
+                // check that saltMasterHost is valid
+                if (!saltMasterHost || !saltMasterHost.matches(ipRegex)) {
+                    common.errorMsg("saltMasterHost is not a valid ip, value is: ${saltMasterHost}")
+                    throw new Exception("saltMasterHost is not a valid ip")
+                }
+
                 currentBuild.description = "${STACK_NAME} ${saltMasterHost}"
 
                 SALT_MASTER_URL = "http://${saltMasterHost}:6969"
@@ -185,6 +195,7 @@
 
                 // set description
                 currentBuild.description = STACK_NAME
+                outputs.put('stack_name', STACK_NAME)
 
                 if (STACK_REUSE.toBoolean() == false) {
                     // get templates
@@ -204,6 +215,13 @@
 
                 // get outputs
                 saltMasterHost = aws.getOutputs(venv, aws_env_vars, STACK_NAME, 'SaltMasterIP')
+
+                // check that saltMasterHost is valid
+                if (!saltMasterHost || !saltMasterHost.matches(ipRegex)) {
+                    common.errorMsg("saltMasterHost is not a valid ip, value is: ${saltMasterHost}")
+                    throw new Exception("saltMasterHost is not a valid ip")
+                }
+
                 currentBuild.description = "${STACK_NAME} ${saltMasterHost}"
                 SALT_MASTER_URL = "http://${saltMasterHost}:6969"
 
@@ -211,6 +229,8 @@
                 throw new Exception("STACK_TYPE ${STACK_TYPE} is not supported")
             }
 
+            outputs.put('salt_api', SALT_MASTER_URL)
+
             // Connect to Salt master
             saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
         }
@@ -249,6 +269,11 @@
             stage('Install Ceph OSDs') {
                 orchestrate.installCephOsd(saltMaster)
             }
+
+
+            stage('Install Ceph clients') {
+                orchestrate.installCephClient(saltMaster)
+            }
         }
 
         // install k8s
@@ -257,9 +282,12 @@
             stage('Install Kubernetes infra') {
                 if (STACK_TYPE == 'aws') {
                     // configure kubernetes_control_address - save loadbalancer
-                    def kubernetes_control_address = aws.getOutputs(venv, aws_env_vars, STACK_NAME, 'ControlLoadBalancer')
-                    print(kubernetes_control_address)
-                    salt.runSaltProcessStep(saltMaster, 'I@salt:master', 'reclass.cluster_meta_set', ['kubernetes_control_address', kubernetes_control_address], null, true)
+                    def awsOutputs = aws.getOutputs(venv, aws_env_vars, STACK_NAME)
+                    common.prettyPrint(awsOutputs)
+                    if (awsOutputs.containsKey('ControlLoadBalancer')) {
+                        salt.runSaltProcessStep(saltMaster, 'I@salt:master', 'reclass.cluster_meta_set', ['kubernetes_control_address', awsOutputs['ControlLoadBalancer']], null, true)
+                        outputs.put('kubernetes_apiserver', 'https://' + awsOutputs['ControlLoadBalancer'])
+                    }
                 }
 
                 // ensure certificates are generated properly
@@ -273,13 +301,15 @@
                 stage('Install Contrail for Kubernetes') {
                     orchestrate.installContrailNetwork(saltMaster)
                     orchestrate.installContrailCompute(saltMaster)
-                    orchestrate.installKubernetesContrailCompute(saltMaster)
                 }
             }
 
             stage('Install Kubernetes control') {
-
                 orchestrate.installKubernetesControl(saltMaster)
+
+                // collect artifacts (kubeconfig)
+                writeFile(file: 'kubeconfig', text: salt.getFileContent(saltMaster, 'I@kubernetes:master and *01*', '/etc/kubernetes/admin-kube-config'))
+                archiveArtifacts(artifacts: 'kubeconfig')
             }
 
             stage('Scale Kubernetes computes') {
@@ -294,6 +324,12 @@
                         // wait for computes to boot up
                         aws.waitForAutoscalingInstances(venv, aws_env_vars, scaling_group)
                         sleep(60)
+
+                    } else if (STACK_TYPE == 'heat') {
+                        envParams.put('cluster_node_count', STACK_COMPUTE_COUNT)
+
+                        openstack.createHeatStack(openstackCloud, STACK_NAME, STACK_TEMPLATE, envParams, HEAT_STACK_ENVIRONMENT, venv, "update")
+                        sleep(60)
                     }
 
                     orchestrate.installKubernetesCompute(saltMaster)
@@ -342,6 +378,12 @@
 
         }
 
+        if (common.checkContains('STACK_INSTALL', 'cicd')) {
+            stage('Install Cicd') {
+                orchestrate.installDockerSwarm(saltMaster)
+                orchestrate.installCicd(saltMaster)
+            }
+        }
 
         if (common.checkContains('STACK_INSTALL', 'sl-legacy')) {
             stage('Install StackLight v1') {
@@ -363,23 +405,6 @@
         def artifacts_dir = '_artifacts/'
 
         if (common.checkContains('STACK_TEST', 'k8s')) {
-            stage('Run k8s bootstrap tests') {
-                def image = 'tomkukral/k8s-scripts'
-                def output_file = image.replaceAll('/', '-') + '.output'
-
-                // run image
-                test.runConformanceTests(saltMaster, TEST_K8S_API_SERVER, image)
-
-                // collect output
-                sh "mkdir -p ${artifacts_dir}"
-                file_content = salt.getFileContent(saltMaster, 'ctl01*', '/tmp/' + output_file)
-                writeFile file: "${artifacts_dir}${output_file}", text: file_content
-                sh "cat ${artifacts_dir}${output_file}"
-
-                // collect artifacts
-                archiveArtifacts artifacts: "${artifacts_dir}${output_file}"
-            }
-
             stage('Run k8s conformance e2e tests') {
                 def image = TEST_K8S_CONFORMANCE_IMAGE
                 def output_file = image.replaceAll('/', '-') + '.output'
@@ -418,18 +443,24 @@
 
         if (common.checkContains('STACK_TEST', 'ceph')) {
             stage('Run infra tests') {
+                sleep(120)
                 def cmd = "apt-get install -y python-pip && pip install -r /usr/share/salt-formulas/env/ceph/files/testinfra/requirements.txt && python -m pytest --junitxml=/root/report.xml /usr/share/salt-formulas/env/ceph/files/testinfra/"
-                salt.cmdRun(saltMaster, 'I@salt:master', cmd)
+                salt.cmdRun(saltMaster, 'I@salt:master', cmd, false)
                 writeFile(file: 'report.xml', text: salt.getFileContent(saltMaster, 'I@salt:master', '/root/report.xml'))
                 junit(keepLongStdio: true, testResults: 'report.xml')
             }
         }
 
 
-        if (common.checkContains('STACK_INSTALL', 'finalize')) {
-            stage('Finalize') {
+        stage('Finalize') {
+            if (common.checkContains('STACK_INSTALL', 'finalize')) {
                 salt.runSaltProcessStep(saltMaster, '*', 'state.apply', [], null, true)
             }
+
+            outputsPretty = common.prettify(outputs)
+            print(outputsPretty)
+            writeFile(file: 'outputs.json', text: outputsPretty)
+            archiveArtifacts(artifacts: 'outputs.json')
         }
 
     } catch (Throwable e) {
diff --git a/docker-build-image-pipeline.groovy b/docker-build-image-pipeline.groovy
index a22439a..c23f1c3 100644
--- a/docker-build-image-pipeline.groovy
+++ b/docker-build-image-pipeline.groovy
@@ -48,6 +48,7 @@
   } catch (Throwable e) {
      // If there was an error or exception thrown, the build failed
      currentBuild.result = "FAILURE"
+     currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
      throw e
   } finally {
      common.sendNotification(currentBuild.result,"",["slack"])
diff --git a/gating-pipeline.groovy b/gating-pipeline.groovy
index c113d28..1b62b53 100644
--- a/gating-pipeline.groovy
+++ b/gating-pipeline.groovy
@@ -68,6 +68,7 @@
   } catch (Throwable e) {
      // If there was an error or exception thrown, the build failed
      currentBuild.result = "FAILURE"
+     currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
      throw e
   }
 }
diff --git a/generate-cookiecutter-products.groovy b/generate-cookiecutter-products.groovy
index f6090c0..4326e7e 100644
--- a/generate-cookiecutter-products.groovy
+++ b/generate-cookiecutter-products.groovy
@@ -22,11 +22,13 @@
     def templateEnv = "${env.WORKSPACE}/template"
     def modelEnv = "${env.WORKSPACE}/model"
     def testEnv = "${env.WORKSPACE}/test"
+    def pipelineEnv = "${env.WORKSPACE}/pipelines"
 
     try {
         def templateContext = readYaml text: COOKIECUTTER_TEMPLATE_CONTEXT
         def clusterDomain = templateContext.default_context.cluster_domain
         def clusterName = templateContext.default_context.cluster_name
+        def saltMaster = templateContext.default_context.salt_master_hostname
         def cutterEnv = "${env.WORKSPACE}/cutter"
         def jinjaEnv = "${env.WORKSPACE}/jinja"
         def outputDestination = "${modelEnv}/classes/cluster/${clusterName}"
@@ -67,7 +69,7 @@
             }
         }
 
-        def productList = ["infra", "cicd", "opencontrail", "kubernetes", "openstack", "stacklight"]
+        def productList = ["infra", "cicd", "opencontrail", "kubernetes", "openstack", "stacklight", "ceph"]
         for (product in productList) {
 
             // get templateOutputDir and productDir
@@ -111,7 +113,7 @@
         }
 
         stage('Generate new SaltMaster node') {
-            def nodeFile = "${modelEnv}/nodes/cfg01.${clusterDomain}.yml"
+            def nodeFile = "${modelEnv}/nodes/${saltMaster}.${clusterDomain}.yml"
             def nodeString = """classes:
 - cluster.${clusterName}.infra.config
 parameters:
@@ -120,7 +122,7 @@
     reclass_data_revision: master
   linux:
     system:
-      name: cfg01
+      name: ${saltMaster}
       domain: ${clusterDomain}
 """
             sh "mkdir -p ${modelEnv}/nodes/"
@@ -132,7 +134,7 @@
         stage("Test") {
             if (SHARED_RECLASS_URL != "" && TEST_MODEL && TEST_MODEL.toBoolean()) {
                 sh("cp -r ${modelEnv} ${testEnv}")
-                saltModelTesting.setupAndTestNode("cfg01.${clusterDomain}", "", testEnv)
+                saltModelTesting.setupAndTestNode("${saltMaster}.${clusterDomain}", "", testEnv)
             }
         }
 
@@ -140,56 +142,62 @@
             // apt package genisoimage is required for this stage
 
             // download create-config-drive
-            def config_drive_script_url = "https://raw.githubusercontent.com/pupapaik/virt-utils/master/create-config-drive"
-            def user_data_script_url = "https://raw.githubusercontent.com/mceloud/scripts/master/master_config.sh"
-
+            def config_drive_script_url = "https://raw.githubusercontent.com/jiribroulik/scripts/master/create_config_drive.sh"
             sh "wget -O create-config-drive ${config_drive_script_url} && chmod +x create-config-drive"
+            def user_data_script_url = "https://raw.githubusercontent.com/mceloud/scripts/master/master_config.sh"
             sh "wget -O user_data.sh ${user_data_script_url}"
 
+            sh "git clone https://github.com/Mirantis/mk-pipelines.git ${pipelineEnv}/mk-pipelines"
+            sh "git clone https://github.com/Mirantis/pipeline-library.git ${pipelineEnv}/pipeline-library"
+            args = "--user-data user_data.sh --hostname ${saltMaster} --model ${modelEnv} --mk-pipelines ${pipelineEnv}/mk-pipelines/ --pipeline-library ${pipelineEnv}/pipeline-library/ ${saltMaster}.${clusterDomain}-config.iso"
+
             // load data from model
             def smc = [:]
-            smc['SALT_MASTER_MINION_ID'] = "cfg.${clusterDomain}"
+            smc['SALT_MASTER_MINION_ID'] = "${saltMaster}.${clusterDomain}"
             smc['SALT_MASTER_DEPLOY_IP'] = templateContext['default_context']['salt_master_management_address']
             smc['DEPLOY_NETWORK_GW'] = templateContext['default_context']['deploy_network_gateway']
             smc['DEPLOY_NETWORK_NETMASK'] = templateContext['default_context']['deploy_network_netmask']
             smc['DNS_SERVERS'] = templateContext['default_context']['dns_server01']
-            smc['CICD_CONTROL_ADDRESS'] = templateContext['default_context']['cicd_control_address']
-            smc['INFRA_CONFIG_ADDRESS'] = templateContext['default_context']['infra_config_address']
 
             for (i in common.entries(smc)) {
                 sh "sed -i \"s,export ${i[0]}=.*,export ${i[0]}=${i[1]},\" user_data.sh"
             }
 
             // create config-drive
-            sh "./create-config-drive --user-data user_data.sh --hostname cfg --model ${modelEnv} cfg.${clusterDomain}-config.iso"
-
+            sh "./create-config-drive ${args}"
+            sh("mkdir output-${clusterName} && mv ${saltMaster}.${clusterDomain}-config.iso output-${clusterName}/")
             // save iso to artifacts
-            archiveArtifacts artifacts: "cfg.${clusterDomain}-config.iso"
+            archiveArtifacts artifacts: "output-${clusterName}/${saltMaster}.${clusterDomain}-config.iso"
         }
 
         stage ('Save changes reclass model') {
 
-            sh(returnStatus: true, script: "tar -zcf ${clusterName}.tar.gz -C ${modelEnv} .")
-            archiveArtifacts artifacts: "${clusterName}.tar.gz"
+            sh(returnStatus: true, script: "tar -zcf output-${clusterName}/${clusterName}.tar.gz -C ${modelEnv} .")
+            archiveArtifacts artifacts: "output-${clusterName}/${clusterName}.tar.gz"
 
 
             if (EMAIL_ADDRESS != null && EMAIL_ADDRESS != "") {
                  emailext(to: EMAIL_ADDRESS,
-                          attachmentsPattern: "${clusterName}.tar.gz",
+                          attachmentsPattern: "output-${clusterName}/*",
                           body: "Mirantis Jenkins\n\nRequested reclass model ${clusterName} has been created and attached to this email.\nEnjoy!\n\nMirantis",
                           subject: "Your Salt model ${clusterName}")
             }
+            dir("output-${clusterName}"){
+                deleteDir()
+            }
         }
 
     } catch (Throwable e) {
          // If there was an error or exception thrown, the build failed
          currentBuild.result = "FAILURE"
+         currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
          throw e
     } finally {
         stage ('Clean workspace directories') {
             sh(returnStatus: true, script: "rm -rf ${templateEnv}")
             sh(returnStatus: true, script: "rm -rf ${modelEnv}")
+            sh(returnStatus: true, script: "rm -rf ${pipelineEnv}")
         }
          // common.sendNotification(currentBuild.result,"",["slack"])
     }
-}
+}
\ No newline at end of file
diff --git a/git-merge-branches-pipeline.groovy b/git-merge-branches-pipeline.groovy
index cff1db5..8293f87 100644
--- a/git-merge-branches-pipeline.groovy
+++ b/git-merge-branches-pipeline.groovy
@@ -22,6 +22,7 @@
   } catch (Throwable e) {
      // If there was an error or exception thrown, the build failed
      currentBuild.result = "FAILURE"
+     currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
      throw e
   }
 }
diff --git a/git-mirror-2way-pipeline.groovy b/git-mirror-2way-pipeline.groovy
index c1c808c..c20af8f 100644
--- a/git-mirror-2way-pipeline.groovy
+++ b/git-mirror-2way-pipeline.groovy
@@ -38,6 +38,7 @@
     } catch (Throwable e) {
        // If there was an error or exception thrown, the build failed
        currentBuild.result = "FAILURE"
+       currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
        throw e
     } finally {
        common.sendNotification(currentBuild.result,"",["slack"])
diff --git a/git-mirror-pipeline.groovy b/git-mirror-pipeline.groovy
index 10fa9a0..5035fe6 100644
--- a/git-mirror-pipeline.groovy
+++ b/git-mirror-pipeline.groovy
@@ -17,6 +17,7 @@
     } catch (Throwable e) {
        // If there was an error or exception thrown, the build failed
        currentBuild.result = "FAILURE"
+       currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
        throw e
     } finally {
        common.sendNotification(currentBuild.result,"",["slack"])
diff --git a/ironic-node-provision-pipeline.groovy b/ironic-node-provision-pipeline.groovy
new file mode 100644
index 0000000..1c96eaa
--- /dev/null
+++ b/ironic-node-provision-pipeline.groovy
@@ -0,0 +1,206 @@
+/**
+ *
+ * Provision ironic nodes
+ *
+ * Expected parameters:
+ *   STACK_NAME                 Infrastructure stack name
+ *   STACK_TYPE                 Deploy OpenStack/AWS [heat/aws], use 'physical' if no stack should be started
+ *
+ *   AWS_STACK_REGION           CloudFormation AWS region
+ *   AWS_API_CREDENTIALS        AWS access key ID and AWS secret access key
+ *   AWS_SSH_KEY                AWS key pair name (used for SSH access)
+ *
+ *   HEAT_STACK_ZONE            Heat stack availability zone
+ *   OPENSTACK_API_URL          OpenStack API address
+ *   OPENSTACK_API_CREDENTIALS  Credentials to the OpenStack API
+ *   OPENSTACK_API_PROJECT      OpenStack project to connect to
+ *   OPENSTACK_API_CLIENT       Versions of OpenStack python clients
+ *   OPENSTACK_API_VERSION      Version of the OpenStack API (2/3)
+ *
+ *   SALT_MASTER_CREDENTIALS    Credentials to the Salt API
+ *                              required for STACK_TYPE=physical
+ *   SALT_MASTER_URL            URL of Salt master
+ *
+ * Ironic settings:
+ *   IRONIC_AUTHORIZATION_PROFILE:    Name of profile with authorization info
+ *   IRONIC_DEPLOY_NODES:             Space-separated list of ironic node names to deploy;
+ *                                    'all' - trigger deployment of all nodes
+ *   IRONIC_DEPLOY_PROFILE:           Name of profile to apply to nodes during deployment
+ *   IRONIC_DEPLOY_PARTITION_PROFILE: Name of partition profile to apply
+ *   IRONIC_DEPLOY_TIMEOUT:           Timeout in minutes to wait for deploy
+ *
+ **/
+
+common = new com.mirantis.mk.Common()
+git = new com.mirantis.mk.Git()
+openstack = new com.mirantis.mk.Openstack()
+aws = new com.mirantis.mk.Aws()
+orchestrate = new com.mirantis.mk.Orchestrate()
+salt = new com.mirantis.mk.Salt()
+test = new com.mirantis.mk.Test()
+
+// Define global variables
+def saltMaster
+def venv
+def outputs = [:]
+
+def ipRegex = "\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}"
+
+def waitIronicDeployment(master, node_names, target, auth_profile, deploy_timeout=60) {
+    def failed_nodes = []
+    timeout(time: deploy_timeout.toInteger(), unit: 'MINUTES') {
+        while (node_names.size() != 0) {
+            common.infoMsg("Waiting for nodes: " + node_names.join(", ") + " to be deployed.")
+            res = salt.runSaltProcessStep(master, target, 'ironicng.list_nodes', ["profile=${auth_profile}"], null, false)
+            for (n in res['return'][0].values()[0]['nodes']){
+                if (n['name'] in node_names) {
+                    if (n['provision_state'] == 'active'){
+                        common.successMsg("Node " + n['name'] + " deployment succeeded.")
+                        node_names.remove(n['name'])
+                        continue
+                    } else if (n['provision_state'] == 'deploy failed'){
+                        common.warningMsg("Node " + n['name'] + " deployment failed.")
+                        node_names.remove(n['name'])
+                        failed_nodes.add(n['name'])
+                        continue
+                    }
+                }
+            }
+            sleep(5)
+        }
+    }
+    return failed_nodes
+}
+
+
+node("python") {
+    try {
+        // Set build-specific variables
+        venv = "${env.WORKSPACE}/venv"
+
+        def required_params = ['IRONIC_AUTHORIZATION_PROFILE', 'IRONIC_DEPLOY_NODES']
+        def missed_params = []
+        for (param in required_params) {
+            if (env[param] == '') {
+                missed_params.add(param)
+            }
+        }
+        if (missed_params){
+            common.errorMsg(missed_params.join(', ') + " should be set.")
+        }
+
+        if (IRONIC_DEPLOY_PROFILE == '' && IRONIC_DEPLOY_NODES != 'all'){
+            common.errorMsg("IRONIC_DEPLOY_PROFILE should be set when deploying specific nodes.")
+        }
+
+        if (SALT_MASTER_URL == '' && STACK_NAME == ''){
+            common.errorMsg("Either SALT_MASTER_URL or STACK_NAME must be defined.")
+        }
+
+        if (SALT_MASTER_URL == '' && STACK_NAME != '') {
+            // Get SALT_MASTER_URL machines
+            stage ('Getting SALT_MASTER_URL') {
+
+                outputs.put('stack_type', STACK_TYPE)
+
+                if (STACK_TYPE == 'heat') {
+                    // value defaults
+                    envParams = [
+                        'cluster_zone': HEAT_STACK_ZONE,
+                        'cluster_public_net': HEAT_STACK_PUBLIC_NET
+                    ]
+
+                    // create openstack env
+                    openstack.setupOpenstackVirtualenv(venv, OPENSTACK_API_CLIENT)
+                    openstackCloud = openstack.createOpenstackEnv(
+                        OPENSTACK_API_URL, OPENSTACK_API_CREDENTIALS,
+                        OPENSTACK_API_PROJECT, OPENSTACK_API_PROJECT_DOMAIN,
+                        OPENSTACK_API_PROJECT_ID, OPENSTACK_API_USER_DOMAIN,
+                        OPENSTACK_API_VERSION)
+                    openstack.getKeystoneToken(openstackCloud, venv)
+
+
+                    // get SALT_MASTER_URL
+                    saltMasterHost = openstack.getHeatStackOutputParam(openstackCloud, STACK_NAME, 'salt_master_ip', venv)
+
+                } else if (STACK_TYPE == 'aws') {
+
+                    // setup environment
+                    aws.setupVirtualEnv(venv)
+
+                    // set aws_env_vars
+                    aws_env_vars = aws.getEnvVars(AWS_API_CREDENTIALS, AWS_STACK_REGION)
+
+                    // get outputs
+                    saltMasterHost = aws.getOutputs(venv, aws_env_vars, STACK_NAME, 'SaltMasterIP')
+                }
+
+                if (SALT_MASTER_URL == ''){
+                    // check that saltMasterHost is valid
+                    if (!saltMasterHost || !saltMasterHost.matches(ipRegex)) {
+                        common.errorMsg("saltMasterHost is not a valid ip, value is: ${saltMasterHost}")
+                        throw new Exception("saltMasterHost is not a valid ip")
+                    }
+                    currentBuild.description = "${STACK_NAME} ${saltMasterHost}"
+                    SALT_MASTER_URL = "http://${saltMasterHost}:6969"
+                } else {
+                    currentBuild.description = "${STACK_NAME}"
+                }
+            }
+        }
+
+        outputs.put('salt_api', SALT_MASTER_URL)
+
+        // Connect to Salt master
+        saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+
+
+        def nodes_to_deploy=[]
+
+        stage('Trigger deployment on nodes') {
+            if (IRONIC_DEPLOY_PARTITION_PROFILE == '' && IRONIC_DEPLOY_PROFILE == '' && IRONIC_DEPLOY_NODES == 'all'){
+                common.infoMsg("Trigger ironic.deploy")
+                salt.enforceState(saltMaster, RUN_TARGET, ['ironic.deploy'], true)
+            } else {
+                if (IRONIC_DEPLOY_NODES == 'all'){
+                     res = salt.runSaltProcessStep(saltMaster, RUN_TARGET, 'ironicng.list_nodes', ["profile=${IRONIC_AUTHORIZATION_PROFILE}"], null, true)
+                     // We trigger deployment on a single salt minion
+                     for (n in res['return'][0].values()[0]['nodes']){
+                        nodes_to_deploy.add(n['name'])
+                     }
+                } else {
+                    nodes_to_deploy = IRONIC_DEPLOY_NODES.tokenize(',')
+                }
+
+                def cmd_params = ["profile=${IRONIC_AUTHORIZATION_PROFILE}", "deployment_profile=${IRONIC_DEPLOY_PROFILE}"]
+
+                if (IRONIC_DEPLOY_PARTITION_PROFILE){
+                    cmd_params.add("partition_profile=${IRONIC_DEPLOY_PARTITION_PROFILE}")
+                }
+
+                for (n in nodes_to_deploy){
+                    common.infoMsg("Trigger deployment of ${n}")
+                    salt.runSaltProcessStep(saltMaster, RUN_TARGET, 'ironicng.deploy_node', ["${n}"] + cmd_params, null, true)
+                }
+            }
+        }
+
+        stage('Wait for deployment to finish') {
+            def failed_nodes = waitIronicDeployment(saltMaster, nodes_to_deploy, RUN_TARGET, IRONIC_AUTHORIZATION_PROFILE, IRONIC_DEPLOY_TIMEOUT)
+            if (failed_nodes){
+                common.errorMsg("Nodes: " + failed_nodes.join(", ") + " failed to deploy")
+                currentBuild.result = 'FAILURE'
+            } else {
+                common.successMsg("All nodes were deployed successfully.")
+            }
+        }
+
+        outputsPretty = common.prettify(outputs)
+        print(outputsPretty)
+        writeFile(file: 'outputs.json', text: outputsPretty)
+        archiveArtifacts(artifacts: 'outputs.json')
+    } catch (Throwable e) {
+        currentBuild.result = 'FAILURE'
+        throw e
+    }
+}
diff --git a/opencontrail-upgrade.groovy b/opencontrail-upgrade.groovy
index 6bcb788..00b0e7f 100644
--- a/opencontrail-upgrade.groovy
+++ b/opencontrail-upgrade.groovy
@@ -273,6 +273,7 @@
         } catch (Throwable e) {
             // If there was an error or exception thrown, the build failed
             currentBuild.result = "FAILURE"
+            currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
             throw e
         }
     }
@@ -484,6 +485,7 @@
         } catch (Throwable e) {
             // If there was an error or exception thrown, the build failed
             currentBuild.result = "FAILURE"
+            currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
             throw e
         }
     }
diff --git a/openstack-compute-install.groovy b/openstack-compute-install.groovy
index 8e53396..535cde0 100644
--- a/openstack-compute-install.groovy
+++ b/openstack-compute-install.groovy
@@ -86,6 +86,7 @@
     } catch (Throwable e) {
         // If there was an error or exception thrown, the build failed
         currentBuild.result = "FAILURE"
+        currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
         throw e
     }
 }
diff --git a/openstack-compute-upgrade.groovy b/openstack-compute-upgrade.groovy
index 4a04531..095697d 100644
--- a/openstack-compute-upgrade.groovy
+++ b/openstack-compute-upgrade.groovy
@@ -199,6 +199,7 @@
     } catch (Throwable e) {
         // If there was an error or exception thrown, the build failed
         currentBuild.result = "FAILURE"
+        currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
         throw e
     }
 }
diff --git a/ovs-gateway-upgrade.groovy b/ovs-gateway-upgrade.groovy
index 70037a4..9cfa215 100644
--- a/ovs-gateway-upgrade.groovy
+++ b/ovs-gateway-upgrade.groovy
@@ -148,6 +148,7 @@
     } catch (Throwable e) {
         // If there was an error or exception thrown, the build failed
         currentBuild.result = "FAILURE"
+        currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
         throw e
     }
 }
diff --git a/release-salt-formulas-pipeline.groovy b/release-salt-formulas-pipeline.groovy
index 7660636..4aaaec9 100644
--- a/release-salt-formulas-pipeline.groovy
+++ b/release-salt-formulas-pipeline.groovy
@@ -24,6 +24,7 @@
   } catch (Throwable e) {
      // If there was an error or exception thrown, the build failed
      currentBuild.result = "FAILURE"
+     currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
      throw e
   } finally {
      common.sendNotification(currentBuild.result,"",["slack"])
diff --git a/test-cookiecutter-reclass.groovy b/test-cookiecutter-reclass.groovy
index 92fbb35..e59f0ce 100644
--- a/test-cookiecutter-reclass.groovy
+++ b/test-cookiecutter-reclass.groovy
@@ -123,6 +123,10 @@
             python.setupCookiecutterVirtualenv(cutterEnv)
         }
 
+        stage("Check workflow_definition") {
+            sh "python ${env.WORKSPACE}/workflow_definition_test.py"
+        }
+
         def contextFiles
         dir("${templateEnv}/contexts") {
             contextFiles = findFiles(glob: "*.yml")
@@ -165,6 +169,7 @@
 
     } catch (Throwable e) {
          currentBuild.result = "FAILURE"
+         currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
          throw e
     } finally {
          def dummy = "dummy"
diff --git a/test-run-rally.groovy b/test-run-rally.groovy
new file mode 100644
index 0000000..4cf3bd3
--- /dev/null
+++ b/test-run-rally.groovy
@@ -0,0 +1,60 @@
+/**
+ *
+ * Service test pipeline
+ *
+ * Expected parameters:
+ *   SALT_MASTER_URL                 URL of Salt master
+ *   SALT_MASTER_CREDENTIALS         Credentials to the Salt API
+ * Test settings:
+ *   IMAGE_LINK                      Link to docker image with Rally
+ *   RALLY_SCENARIO                  Rally test scenario
+ *   TEST_TARGET                     Salt target for Rally node
+ *   CLEANUP_REPORTS_AND_CONTAINER   Cleanup reports from the rally/tempest container, remove all containers started from IMAGE_LINK
+ *   DO_CLEANUP_RESOURCES            If "true": runs clean-up script for removing Rally and Tempest resources
+ */
+
+
+common = new com.mirantis.mk.Common()
+salt = new com.mirantis.mk.Salt()
+test = new com.mirantis.mk.Test()
+
+// Define global variables
+def saltMaster
+
+node("python") {
+    try {
+
+        //
+        // Prepare connection
+        //
+        stage ('Connect to salt master') {
+            // Connect to Salt master
+            saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+        }
+
+        //
+        // Test
+        //
+
+        stage('Run OpenStack Rally scenario') {
+            test.runRallyScenarios(saltMaster, IMAGE_LINK, TEST_TARGET, RALLY_SCENARIO, "/home/rally/rally_reports/",
+                    DO_CLEANUP_RESOURCES)
+        }
+        stage('Copy test reports') {
+            test.copyTempestResults(saltMaster, TEST_TARGET)
+        }
+        stage('Archiving test artifacts') {
+            test.archiveRallyArtifacts(saltMaster, TEST_TARGET)
+        }
+    } catch (Throwable e) {
+        currentBuild.result = 'FAILURE'
+        throw e
+    } finally {
+        if (CLEANUP_REPORTS_AND_CONTAINER.toBoolean()) {
+            stage('Cleanup reports and container') {
+                test.removeReports(saltMaster, TEST_TARGET, "rally_reports", 'rally_reports.tar')
+                test.removeDockerContainer(saltMaster, TEST_TARGET, IMAGE_LINK)
+            }
+        }
+    }
+}
diff --git a/test-run-tempest.groovy b/test-run-tempest.groovy
new file mode 100644
index 0000000..4785992
--- /dev/null
+++ b/test-run-tempest.groovy
@@ -0,0 +1,60 @@
+/**
+ *
+ * Service test pipeline
+ *
+ * Expected parameters:
+ *   SALT_MASTER_URL                 URL of Salt master
+ *   SALT_MASTER_CREDENTIALS         Credentials to the Salt API
+ * Test settings:
+ *   IMAGE_LINK                      Link to docker image with Rally and Tempest
+ *   TEST_TEMPEST_PATTERN            If not false, run only tests matching the pattern
+ *   TEST_TARGET                     Salt target for tempest node
+ *   CLEANUP_REPORTS_AND_CONTAINER   Cleanup reports from the rally/tempest container, remove all containers started from IMAGE_LINK
+ *   DO_CLEANUP_RESOURCES            If "true": runs clean-up script for removing Rally and Tempest resources
+ */
+
+
+common = new com.mirantis.mk.Common()
+salt = new com.mirantis.mk.Salt()
+test = new com.mirantis.mk.Test()
+
+// Define global variables
+def saltMaster
+
+node("python") {
+    try {
+
+        //
+        // Prepare connection
+        //
+        stage ('Connect to salt master') {
+            // Connect to Salt master
+            saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+        }
+
+        //
+        // Test
+        //
+
+        stage('Run OpenStack Tempest tests') {
+            test.runTempestTests(saltMaster, IMAGE_LINK, TEST_TARGET, TEST_TEMPEST_PATTERN, "/home/rally/rally_reports/",
+                    DO_CLEANUP_RESOURCES)
+        }
+        stage('Copy test reports') {
+            test.copyTempestResults(saltMaster, TEST_TARGET)
+        }
+        stage('Archiving test artifacts') {
+            test.archiveRallyArtifacts(saltMaster, TEST_TARGET)
+        }
+    } catch (Throwable e) {
+        currentBuild.result = 'FAILURE'
+        throw e
+    } finally {
+        if (CLEANUP_REPORTS_AND_CONTAINER.toBoolean()) {
+            stage('Cleanup reports and container') {
+                test.removeReports(saltMaster, TEST_TARGET, "rally_reports", 'rally_reports.tar')
+                test.removeDockerContainer(saltMaster, TEST_TARGET, IMAGE_LINK)
+            }
+        }
+    }
+}
diff --git a/test-salt-formulas-env.groovy b/test-salt-formulas-env.groovy
new file mode 100644
index 0000000..4edcdb7
--- /dev/null
+++ b/test-salt-formulas-env.groovy
@@ -0,0 +1,94 @@
+/**
+ * Test salt formulas pipeline
+ *  DEFAULT_GIT_REF
+ *  DEFAULT_GIT_URL
+ *  CREDENTIALS_ID
+ */
+def common = new com.mirantis.mk.Common()
+def ruby = new com.mirantis.mk.Ruby()
+def gerrit = new com.mirantis.mk.Gerrit()
+
+def defaultGitRef, defaultGitUrl
+try {
+  defaultGitRef = DEFAULT_GIT_REF
+  defaultGitUrl = DEFAULT_GIT_URL
+} catch (MissingPropertyException e) {
+  defaultGitRef = null
+  defaultGitUrl = null
+}
+
+def checkouted = false
+
+throttle(['test-formula']) {
+  node("python") {
+    try {
+      stage("checkout") {
+        if (defaultGitRef && defaultGitUrl) {
+          checkouted = gerrit.gerritPatchsetCheckout(defaultGitUrl, defaultGitRef, "HEAD", CREDENTIALS_ID)
+        } else {
+          throw new Exception("Cannot checkout gerrit patchset, DEFAULT_GIT_REF is null")
+        }
+      }
+      stage("cleanup") {
+        if (checkouted) {
+          sh("make clean")
+        }
+      }
+      stage("kitchen") {
+        if (checkouted) {
+          if (fileExists(".kitchen.yml")) {
+            common.infoMsg(".kitchen.yml found, running kitchen tests")
+            ruby.ensureRubyEnv()
+            if (fileExists(".travis.yml")) {
+              common.infoMsg(".travis.yml found, running custom kitchen init")
+              def kitchenConfigYML = readYaml(file: ".travis.yml")
+              def kitchenInit = kitchenConfigYML["install"]
+              def kitchenInstalled = false
+              if (kitchenInit && !kitchenInit.isEmpty()) {
+                for (int i = 0; i < kitchenInit.size(); i++) {
+                  if (kitchenInit[i].trim().startsWith("test -e Gemfile")) { //found Gemfile config
+                    common.infoMsg("Custom Gemfile configuration found, using it")
+                    ruby.installKitchen(kitchenInit[i].trim())
+                    kitchenInstalled = true
+                  }
+                }
+              }
+              if (!kitchenInstalled) {
+                ruby.installKitchen()
+              }
+            } else {
+              common.infoMsg(".travis.yml not found, running default kitchen init")
+              ruby.installKitchen()
+            }
+            common.infoMsg("Running part of kitchen test")
+            if (KITCHEN_ENV != null && !KITCHEN_ENV.isEmpty()) {
+              def cleanEnv = KITCHEN_ENV.replaceAll("\\s?SUITE=[^\\s]*", "")
+              def suite = ruby.getSuiteName(KITCHEN_ENV)
+              if (suite && suite != "") {
+                common.infoMsg("Running kitchen test with environment: " + KITCHEN_ENV.trim())
+                ruby.runKitchenTests(cleanEnv, suite)
+              } else {
+                common.warningMsg("No SUITE was found. Running with all suites.")
+                ruby.runKitchenTests(cleanEnv, "")
+              }
+            } else {
+              throw new Exception("KITCHEN_ENV parameter is empty or invalid. This may indicate wrong env settings of initial test job or .travis.yml file.")
+            }
+          } else {
+            throw new Exception(".kitchen.yml file not found, no kitchen tests triggered.")
+          }
+        }
+      }
+    } catch (Throwable e) {
+      // If there was an error or exception thrown, the build failed
+      currentBuild.result = "FAILURE"
+      ruby.runKitchenCommand("destroy")
+      throw e
+    } finally {
+      if (currentBuild.result == "FAILURE" && fileExists(".kitchen/logs/kitchen.log")) {
+        common.errorMsg("----------------KITCHEN LOG:---------------")
+        println readFile(".kitchen/logs/kitchen.log")
+      }
+    }
+  }
+}
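For context on the KITCHEN_ENV handling above: ruby.getSuiteName() is expected to pull the SUITE token out of the environment string, while the SUITE-stripped remainder is passed on as the clean environment. A minimal Groovy sketch of that split, using a purely hypothetical KITCHEN_ENV value:

    // Hypothetical KITCHEN_ENV value; only the SUITE token handling is sketched here.
    def kitchenEnv = "PLATFORM=docker SUITE=default-ubuntu-1604"
    def matcher = (kitchenEnv =~ /SUITE=(\S+)/)
    def suite = matcher ? matcher[0][1] : ""
    def cleanEnv = kitchenEnv.replaceAll(/\s?SUITE=[^\s]*/, "")
    assert suite == "default-ubuntu-1604"
    assert cleanEnv == "PLATFORM=docker"
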
diff --git a/test-salt-formulas-pipeline.groovy b/test-salt-formulas-pipeline.groovy
index 8ad5f1c..4422ca6 100644
--- a/test-salt-formulas-pipeline.groovy
+++ b/test-salt-formulas-pipeline.groovy
@@ -16,115 +16,123 @@
   gerritRef = null
 }
 
+def parallelGroupSize
+try {
+  parallelGroupSize = Integer.valueOf(PARALLEL_GROUP_SIZE)
+} catch (MissingPropertyException e) {
+  parallelGroupSize = 8
+}
+
 def defaultGitRef, defaultGitUrl
 try {
-    defaultGitRef = DEFAULT_GIT_REF
-    defaultGitUrl = DEFAULT_GIT_URL
+  defaultGitRef = DEFAULT_GIT_REF
+  defaultGitUrl = DEFAULT_GIT_URL
 } catch (MissingPropertyException e) {
-    defaultGitRef = null
-    defaultGitUrl = null
+  defaultGitRef = null
+  defaultGitUrl = null
 }
 
 def checkouted = false
 
 node("python") {
-  try{
+  try {
     stage("checkout") {
       if (gerritRef) {
         // job is triggered by Gerrit
         def gerritChange = gerrit.getGerritChange(GERRIT_NAME, GERRIT_HOST, GERRIT_CHANGE_NUMBER, CREDENTIALS_ID, true)
         // test if gerrit change is already Verified
-        if(gerrit.patchsetHasApproval(gerritChange.currentPatchSet,"Verified","+")){
+        if (gerrit.patchsetHasApproval(gerritChange.currentPatchSet, "Verified", "+")) {
           common.successMsg("Gerrit change ${GERRIT_CHANGE_NUMBER} patchset ${GERRIT_PATCHSET_NUMBER} already has Verified, skipping tests") // do nothing
-        // test WIP contains in commit message
-        }else if(gerritChange.commitMessage.contains("WIP")){
+          // test if the commit message contains WIP
+        } else if (gerritChange.commitMessage.contains("WIP")) {
           common.successMsg("Commit message contains WIP, skipping tests") // do nothing
-        }else{
+        } else {
           // test if change aren't already merged
           def merged = gerritChange.status == "MERGED"
-          if(!merged){
-            checkouted = gerrit.gerritPatchsetCheckout ([
-              credentialsId : CREDENTIALS_ID
+          if (!merged) {
+            checkouted = gerrit.gerritPatchsetCheckout([
+              credentialsId: CREDENTIALS_ID
             ])
-          } else{
+          } else {
             common.successMsg("Change ${GERRIT_CHANGE_NUMBER} is already merged, no need to test them")
           }
         }
-      } else if(defaultGitRef && defaultGitUrl) {
-          checkouted = gerrit.gerritPatchsetCheckout(defaultGitUrl, defaultGitRef, "HEAD", CREDENTIALS_ID)
+        defaultGitUrl = "${GERRIT_SCHEME}://${GERRIT_NAME}@${GERRIT_HOST}:${GERRIT_PORT}/${GERRIT_PROJECT}"
+        defaultGitRef = GERRIT_REFSPEC
+      } else if (defaultGitRef && defaultGitUrl) {
+        checkouted = gerrit.gerritPatchsetCheckout(defaultGitUrl, defaultGitRef, "HEAD", CREDENTIALS_ID)
       } else {
         throw new Exception("Cannot checkout gerrit patchset, GERRIT_REFSPEC and DEFAULT_GIT_REF is null")
       }
     }
     stage("test") {
-      if(checkouted){
+      if (checkouted) {
         sh("make clean")
         sh("[ $SALT_VERSION != 'latest' ] || export SALT_VERSION=''; make test")
       }
     }
     stage("kitchen") {
-      if(checkouted){
+      if (checkouted) {
         if (fileExists(".kitchen.yml")) {
           common.infoMsg(".kitchen.yml found, running kitchen tests")
-          ruby.ensureRubyEnv()
           def kitchenEnvs = []
           def filteredEnvs = []
-          if(fileExists(".travis.yml")){
-            common.infoMsg(".travis.yml found, running custom kitchen init")
+          if (fileExists(".travis.yml")) {
+            common.infoMsg(".travis.yml file found.")
             def kitchenConfigYML = readYaml(file: ".travis.yml")
-            if(kitchenConfigYML.containsKey("env")){
-              kitchenEnvs=kitchenConfigYML["env"]
-            }
-            def kitchenInit = kitchenConfigYML["install"]
-            def kitchenInstalled = false
-            if(kitchenInit && !kitchenInit.isEmpty()){
-              for(int i=0; i<kitchenInit.size(); i++){
-                if(kitchenInit[i].trim().startsWith("test -e Gemfile")){ //found Gemfile config
-                  common.infoMsg("Custom Gemfile configuration found, using them")
-                  ruby.installKitchen(kitchenInit[i].trim())
-                  kitchenInstalled = true
-                }
-              }
-            }
-            if(!kitchenInstalled){
-              ruby.installKitchen()
-            }
-          }else{
-            common.infoMsg(".travis.yml not found, running default kitchen init")
-            ruby.installKitchen()
-          }
-          common.infoMsg("Running kitchen testing, parallel mode: " + KITCHEN_TESTS_PARALLEL.toBoolean())
-
-          if(CUSTOM_KITCHEN_ENVS != null && CUSTOM_KITCHEN_ENVS != ''){
-              filteredEnvs = CUSTOM_KITCHEN_ENVS.tokenize('\n')
-            } else {
-              filteredEnvs = ruby.filterKitchenEnvs(kitchenEnvs).unique()
-            }
-            // Allow custom filteredEnvs in case of empty kitchenEnvs
-          if((kitchenEnvs && !kitchenEnvs.isEmpty() && !filteredEnvs.isEmpty()) || ((kitchenEnvs==null || kitchenEnvs=='') && !filteredEnvs.isEmpty())){
-            for(int i=0; i<filteredEnvs.size(); i++){
-              common.infoMsg("Found " + filteredEnvs.size() + " environment, kitchen running with env number " + (i+1) + ": " + filteredEnvs[i].trim())
-              ruby.runKitchenTests(filteredEnvs[i].trim(), KITCHEN_TESTS_PARALLEL.toBoolean())
+            if (kitchenConfigYML.containsKey("env")) {
+              kitchenEnvs = kitchenConfigYML["env"]
             }
           } else {
-            ruby.runKitchenTests("", KITCHEN_TESTS_PARALLEL.toBoolean())
+            common.warningMsg(".travis.yml file not found, suites must be passed via CUSTOM_KITCHEN_ENVS parameter.")
           }
-        } else {
-          common.infoMsg(".kitchen.yml not found")
+          common.infoMsg("Running kitchen testing in parallel mode")
+          if (CUSTOM_KITCHEN_ENVS != null && CUSTOM_KITCHEN_ENVS != '') {
+            kitchenEnvs = CUSTOM_KITCHEN_ENVS.tokenize('\n')
+            common.infoMsg("CUSTOM_KITCHEN_ENVS not empty. Running with custom enviroments: ${kitchenEnvs}")
+          }
+          if (kitchenEnvs != null && kitchenEnvs != '') {
+            def acc = 0
+            def kitchenTestRuns = [:]
+            common.infoMsg("Found " + kitchenEnvs.size() + " environment(s)")
+            for (int i = 0; i < kitchenEnvs.size(); i++) {
+              if (acc >= parallelGroupSize) {
+                parallel kitchenTestRuns
+                kitchenTestRuns = [:]
+                acc = 0
+              }
+              def testEnv = kitchenEnvs[i]
+              kitchenTestRuns[testEnv] = {
+                build job: "test-salt-formulas-env", parameters: [
+                  [$class: 'StringParameterValue', name: 'CREDENTIALS_ID', value: CREDENTIALS_ID],
+                  [$class: 'StringParameterValue', name: 'KITCHEN_ENV', value: testEnv],
+                  [$class: 'StringParameterValue', name: 'DEFAULT_GIT_REF', value: defaultGitRef],
+                  [$class: 'StringParameterValue', name: 'DEFAULT_GIT_URL', value: defaultGitUrl],
+                  [$class: 'StringParameterValue', name: 'SALT_OPTS', value: SALT_OPTS],
+                  [$class: 'StringParameterValue', name: 'SALT_VERSION', value: SALT_VERSION]
+                ]
+              }
+              acc++;
+            }
+            if (acc != 0) {
+              parallel kitchenTestRuns
+            }
+          } else {
+            common.warningMsg(".kitchen.yml file not found, no kitchen tests triggered.")
+          }
         }
       }
     }
   } catch (Throwable e) {
-     // If there was an error or exception thrown, the build failed
-     currentBuild.result = "FAILURE"
-     ruby.runKitchenCommand("destroy")
-     throw e
+    // If there was an error or exception thrown, the build failed
+    currentBuild.result = "FAILURE"
+    currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+    throw e
   } finally {
-     if(currentBuild.result == "FAILURE" && fileExists(".kitchen/logs/kitchen.log")){
-        common.errorMsg("----------------KITCHEN LOG:---------------")
-        println readFile(".kitchen/logs/kitchen.log")
-     }
-     common.sendNotification(currentBuild.result,"",["slack"])
+    if (currentBuild.result == "FAILURE" && fileExists(".kitchen/logs/kitchen.log")) {
+      common.errorMsg("----------------KITCHEN LOG:---------------")
+      println readFile(".kitchen/logs/kitchen.log")
+    }
+    common.sendNotification(currentBuild.result, "", ["slack"])
   }
-}
-
+}
\ No newline at end of file
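The loop above batches the spawned test-salt-formulas-env builds into groups of at most PARALLEL_GROUP_SIZE parallel branches. A minimal, hypothetical helper distilled from that pattern (parallel is the standard Jenkins Pipeline step; the helper itself is only an illustration, not part of the shared library):

    // Run one branch per environment, at most groupSize branches at a time.
    def runInGroups(List envs, int groupSize, Closure body) {
        def batch = [:]
        for (env in envs) {
            def e = env                    // capture the loop value for the closure
            batch[e] = { body(e) }
            if (batch.size() >= groupSize) {
                parallel batch
                batch = [:]
            }
        }
        if (batch) {
            parallel batch                 // run the final, smaller group
        }
    }
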
diff --git a/test-salt-model-node.groovy b/test-salt-model-node.groovy
index 913bae9..e22bcbe 100644
--- a/test-salt-model-node.groovy
+++ b/test-salt-model-node.groovy
@@ -23,43 +23,48 @@
 def defaultGitUrl = DEFAULT_GIT_URL
 
 def checkouted = false
-node("python") {
-  try{
-    stage("checkout") {
-      if(defaultGitRef != "" && defaultGitUrl != "") {
-          checkouted = gerrit.gerritPatchsetCheckout(defaultGitUrl, defaultGitRef, "HEAD", CREDENTIALS_ID)
-      } else {
-        throw new Exception("Cannot checkout gerrit patchset, DEFAULT_GIT_URL or DEFAULT_GIT_REF is null")
-      }
-      if(checkouted) {
-        if (fileExists('classes/system')) {
-          if (SYSTEM_GIT_URL == "") {
-            ssh.prepareSshAgentKey(CREDENTIALS_ID)
-            dir('classes/system') {
-              remoteUrl = git.getGitRemote()
-              ssh.ensureKnownHosts(remoteUrl)
-            }
-            ssh.agentSh("git submodule init; git submodule sync; git submodule update --recursive")
-          } else {
-            dir('classes/system') {
-              if (!gerrit.gerritPatchsetCheckout(SYSTEM_GIT_URL, SYSTEM_GIT_REF, "HEAD", CREDENTIALS_ID)) {
-                common.errorMsg("Failed to obtain system reclass with url: ${SYSTEM_GIT_URL} and ${SYSTEM_GIT_REF}")
+
+throttle(['test-model']) {
+  node("python") {
+    try{
+      stage("checkout") {
+        if(defaultGitRef != "" && defaultGitUrl != "") {
+            checkouted = gerrit.gerritPatchsetCheckout(defaultGitUrl, defaultGitRef, "HEAD", CREDENTIALS_ID)
+        } else {
+          throw new Exception("Cannot checkout gerrit patchset, DEFAULT_GIT_URL or DEFAULT_GIT_REF is null")
+        }
+        if(checkouted) {
+          if (fileExists('classes/system')) {
+            if (SYSTEM_GIT_URL == "") {
+              ssh.prepareSshAgentKey(CREDENTIALS_ID)
+              dir('classes/system') {
+                remoteUrl = git.getGitRemote()
+                ssh.ensureKnownHosts(remoteUrl)
+              }
+              ssh.agentSh("git submodule init; git submodule sync; git submodule update --recursive")
+            } else {
+              dir('classes/system') {
+                if (!gerrit.gerritPatchsetCheckout(SYSTEM_GIT_URL, SYSTEM_GIT_REF, "HEAD", CREDENTIALS_ID)) {
+                  common.errorMsg("Failed to obtain system reclass with url: ${SYSTEM_GIT_URL} and ${SYSTEM_GIT_REF}")
+                }
               }
             }
           }
         }
       }
-    }
 
-    stage("test node") {
-      if (checkouted) {
-        def workspace = common.getWorkspace()
-        saltModelTesting.setupAndTestNode(NODE_TARGET, CLUSTER_NAME, EXTRA_FORMULAS, workspace, FORMULAS_SOURCE, FORMULAS_REVISION, MAX_CPU_PER_JOB.toInteger())
+      stage("test node") {
+        if (checkouted) {
+          def workspace = common.getWorkspace()
+          common.infoMsg("Running salt model test for node ${NODE_TARGET} in cluster ${CLUSTER_NAME}")
+          saltModelTesting.setupAndTestNode(NODE_TARGET, CLUSTER_NAME, EXTRA_FORMULAS, workspace, FORMULAS_SOURCE, FORMULAS_REVISION, MAX_CPU_PER_JOB.toInteger())
+        }
       }
+    } catch (Throwable e) {
+       // If there was an error or exception thrown, the build failed
+       currentBuild.result = "FAILURE"
+       currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+       throw e
     }
-  } catch (Throwable e) {
-     // If there was an error or exception thrown, the build failed
-     currentBuild.result = "FAILURE"
-     throw e
   }
 }
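The throttle step wrapping the node block above comes from the Throttle Concurrent Builds plugin; the category names used here ('test-model', and 'test-formula' in test-salt-formulas-env.groovy) have to exist in the Jenkins global configuration, where their concurrency limits are defined. A minimal usage sketch:

    // The numeric limit lives in the global Jenkins throttle-category settings,
    // not in the pipeline itself.
    throttle(['test-model']) {
        node('python') {
            echo 'runs only while a slot in the test-model category is free'
        }
    }
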
diff --git a/test-salt-models-pipeline.groovy b/test-salt-models-pipeline.groovy
index 9bd2ca5..2ff76d2 100644
--- a/test-salt-models-pipeline.groovy
+++ b/test-salt-models-pipeline.groovy
@@ -80,12 +80,24 @@
 
     stage("test-nodes") {
       if(checkouted) {
-        def nodes = sh(script: "find ./nodes -type f -name '${config_node_name_pattern}*.yml'", returnStdout: true).tokenize()
+        def infraYMLs = sh(script: "find ./classes/ -regex '.*cluster/[-_a-zA-Z0-9]*/[infra/]*init\\.yml' -exec grep -il 'cluster_name' {} \\;", returnStdout: true).tokenize()
         def branches = [:]
         def acc = 0
-        for (int i = 0; i < nodes.size(); i++) {
-          def testTarget = sh(script: "basename ${nodes[i]} .yml", returnStdout: true).trim()
-          def clusterName = testTarget.substring(testTarget.indexOf(".") + 1, testTarget.lastIndexOf("."))
+        for (int i = 0; i < infraYMLs.size(); i++) {
+          def infraYMLConfig = readYaml(file: infraYMLs[i])
+          if(!infraYMLConfig["parameters"].containsKey("_param")){
+              common.warningMsg("ERROR: Cannot find soft params (_param) in file " + infraYMLs[i] + " for obtain a cluster info. Skipping test.")
+              continue
+          }
+          def infraParams = infraYMLConfig["parameters"]["_param"];
+          if(!infraParams.containsKey("infra_config_hostname") || !infraParams.containsKey("cluster_name") || !infraParams.containsKey("cluster_domain")){
+              common.warningMsg("ERROR: Cannot find _param:infra_config_hostname or _param:cluster_name or _param:cluster_domain  in file " + infraYMLs[i] + " for obtain a cluster info. Skipping test.")
+              continue
+          }
+          def clusterName = infraParams["cluster_name"]
+          def clusterDomain = infraParams["cluster_domain"]
+          def configHostname = infraParams["infra_config_hostname"]
+          def testTarget = String.format("%s.%s", configHostname, clusterDomain)
           if (acc >= PARALLEL_NODE_GROUP_SIZE.toInteger()) {
             parallel branches
             branches = [:]
@@ -115,6 +127,7 @@
     }
   } catch (Throwable e) {
      currentBuild.result = "FAILURE"
+     currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
      throw e
   } finally {
      common.sendNotification(currentBuild.result,"",["slack"])
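The discovery logic above assumes every matched cluster init.yml exposes the three _param keys it checks for. A small sketch with hypothetical values shows how the test target is derived from such a file once readYaml has parsed it:

    // Hypothetical parsed init.yml; only the keys checked above are shown.
    def infraYMLConfig = [
        parameters: [
            _param: [
                cluster_name         : 'example-cluster',  // hypothetical value
                cluster_domain       : 'example.local',    // hypothetical value
                infra_config_hostname: 'cfg01'             // hypothetical value
            ]
        ]
    ]
    def infraParams = infraYMLConfig['parameters']['_param']
    def testTarget = String.format("%s.%s", infraParams['infra_config_hostname'], infraParams['cluster_domain'])
    assert testTarget == 'cfg01.example.local'
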
diff --git a/test-system-reclass-pipeline.groovy b/test-system-reclass-pipeline.groovy
index 8a8fbac..411edfc 100644
--- a/test-system-reclass-pipeline.groovy
+++ b/test-system-reclass-pipeline.groovy
@@ -76,6 +76,7 @@
 } catch (Throwable e) {
     // If there was an error or exception thrown, the build failed
     currentBuild.result = "FAILURE"
+    currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
     throw e
 } finally {
     common.sendNotification(currentBuild.result,"",["slack"])
diff --git a/update-jenkins-master-jobs.groovy b/update-jenkins-master-jobs.groovy
new file mode 100644
index 0000000..56edb10
--- /dev/null
+++ b/update-jenkins-master-jobs.groovy
@@ -0,0 +1,34 @@
+/**
+ * Update Jenkins master jobs
+ *
+ * Expected parameters:
+ *   SALT_MASTER_CREDENTIALS    Credentials to the Salt API.
+ *   SALT_MASTER_URL            Full Salt API address [https://10.10.10.1:8000].
+ *   TARGET_SERVERS             Servers to update
+ *
+**/
+
+def common = new com.mirantis.mk.Common()
+def salt = new com.mirantis.mk.Salt()
+
+def saltMaster
+def target = ['expression': TARGET_SERVERS, 'type': 'compound']
+def result
+
+node("python") {
+    try {
+
+        stage('Connect to Salt master') {
+            saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+        }
+
+        stage('Update Jenkins jobs') {
+            result = salt.runSaltCommand(saltMaster, 'local', target, 'state.apply', null, 'jenkins.client')
+            salt.checkResult(result)
+        }
+
+    } catch (Throwable e) {
+        currentBuild.result = 'FAILURE'
+        throw e
+    }
+}
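A hedged usage sketch: an upstream job could trigger the pipeline defined above through the standard build step, in the same style as the kitchen jobs earlier in this change. The job name follows the file name and the parameter values are placeholders only:

    build job: 'update-jenkins-master-jobs', parameters: [
        [$class: 'StringParameterValue', name: 'SALT_MASTER_URL', value: 'https://10.10.10.1:8000'],
        [$class: 'StringParameterValue', name: 'SALT_MASTER_CREDENTIALS', value: 'salt-api-credentials'],  // placeholder credentials id
        [$class: 'StringParameterValue', name: 'TARGET_SERVERS', value: 'I@jenkins:client']                // placeholder compound target
    ]
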
diff --git a/update-package.groovy b/update-package.groovy
index ea2259c..c946123 100644
--- a/update-package.groovy
+++ b/update-package.groovy
@@ -53,7 +53,11 @@
         }
 
         stage("List package upgrades") {
+            common.infoMsg("Listing all the packages that have a new update available on test nodes: ${targetTestSubset}")
             salt.runSaltProcessStep(saltMaster, targetTestSubset, 'pkg.list_upgrades', [], null, true)
+            if(TARGET_PACKAGES != "" && TARGET_PACKAGES != "*"){
+                common.infoMsg("Note that only the ${TARGET_PACKAGES} would be installed from the above list of available updates on the ${targetTestSubset}")
+            }
         }
 
         stage('Confirm live package upgrades on sample') {
@@ -102,6 +106,7 @@
     } catch (Throwable e) {
         // If there was an error or exception thrown, the build failed
         currentBuild.result = "FAILURE"
+        currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
         throw e
     }
 }
diff --git a/update-reclass-metadata.groovy b/update-reclass-metadata.groovy
new file mode 100644
index 0000000..6fb539a
--- /dev/null
+++ b/update-reclass-metadata.groovy
@@ -0,0 +1,35 @@
+/**
+ * Update reclass model on salt master
+ *
+ * Expected parameters:
+ *   SALT_MASTER_CREDENTIALS    Credentials to the Salt API.
+ *   SALT_MASTER_URL            Full Salt API address [https://10.10.10.1:8000].
+ *   TARGET_SERVERS             Servers to update
+ *
+**/
+
+def common = new com.mirantis.mk.Common()
+def salt = new com.mirantis.mk.Salt()
+
+def saltMaster
+def target = ['expression': TARGET_SERVERS, 'type': 'compound']
+def result
+
+node("python") {
+    try {
+
+        stage('Connect to Salt master') {
+            saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+        }
+
+        stage('Update Reclass model') {
+            result = salt.runSaltCommand(saltMaster, 'local', target, 'state.apply', null, 'reclass.storage')
+            result = salt.runSaltCommand(saltMaster, 'local', target, 'state.apply', null, 'reclass.storage.node')
+            salt.checkResult(result)
+        }
+
+    } catch (Throwable e) {
+        currentBuild.result = 'FAILURE'
+        throw e
+    }
+}
diff --git a/update-salt-master-formulas.groovy b/update-salt-master-formulas.groovy
new file mode 100644
index 0000000..f3e7d1c
--- /dev/null
+++ b/update-salt-master-formulas.groovy
@@ -0,0 +1,34 @@
+/**
+ * Update formulas on salt master
+ *
+ * Expected parameters:
+ *   SALT_MASTER_CREDENTIALS    Credentials to the Salt API.
+ *   SALT_MASTER_URL            Full Salt API address [https://10.10.10.1:8000].
+ *   TARGET_SERVERS             Servers to update
+ *
+**/
+
+def common = new com.mirantis.mk.Common()
+def salt = new com.mirantis.mk.Salt()
+
+def saltMaster
+def target = ['expression': TARGET_SERVERS, 'type': 'compound']
+def result
+
+node("python") {
+    try {
+
+        stage('Connect to Salt master') {
+            saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+        }
+
+        stage('Update Salt formulas') {
+            result = salt.runSaltCommand(saltMaster, 'local', target, 'state.apply', null, 'salt.master.env')
+            salt.checkResult(result)
+        }
+
+    } catch (Throwable e) {
+        currentBuild.result = 'FAILURE'
+        throw e
+    }
+}
diff --git a/validate-cloud.groovy b/validate-cloud.groovy
index 8381d6e..5768f59 100644
--- a/validate-cloud.groovy
+++ b/validate-cloud.groovy
@@ -12,6 +12,13 @@
  *   RUN_TEMPEST_TESTS           If not false, run Tempest tests
  *   RUN_RALLY_TESTS             If not false, run Rally tests
  *   RUN_K8S_TESTS               If not false, run Kubernetes tests
+ *   RUN_SPT_TESTS               If not false, run SPT tests
+ *   SPT_SSH_USER                The name of the user to use for SSH to the nodes
+ *   SPT_FLOATING_NETWORK        The name of the external (floating) network
+ *   SPT_IMAGE                   The name of the image for SPT tests
+ *   SPT_USER                    The name of the user for the SPT image
+ *   SPT_FLAVOR                  The name of the flavor for the SPT image
+ *   SPT_AVAILABILITY_ZONE       The name of the availability zone
  *   TEST_K8S_API_SERVER         Kubernetes API address
  *   TEST_K8S_CONFORMANCE_IMAGE  Path to docker image with conformance e2e tests
  *
@@ -34,7 +41,11 @@
         stage('Configure') {
             validate.installDocker(saltMaster, TARGET_NODE)
             sh "mkdir -p ${artifacts_dir}"
-            validate.runContainerConfiguration(saltMaster, TEST_IMAGE, TARGET_NODE, artifacts_dir)
+            def spt_variables = "-e spt_ssh_user=${SPT_SSH_USER} " +
+                    "-e spt_floating_network=${SPT_FLOATING_NETWORK} " +
+                    "-e spt_image=${SPT_IMAGE} -e spt_user=${SPT_USER} " +
+                    "-e spt_flavor=${SPT_FLAVOR} -e spt_availability_zone=${SPT_AVAILABILITY_ZONE} "
+            validate.runContainerConfiguration(saltMaster, TEST_IMAGE, TARGET_NODE, artifacts_dir, spt_variables)
         }
 
         stage('Run Tempest tests') {
@@ -53,6 +64,14 @@
             }
         }
 
+        stage('Run SPT tests') {
+            if (RUN_SPT_TESTS.toBoolean() == true) {
+                validate.runSptTests(saltMaster, TARGET_NODE, artifacts_dir)
+            } else {
+                common.infoMsg("Skipping SPT tests")
+            }
+        }
+
         stage('Run k8s bootstrap tests') {
             if (RUN_K8S_TESTS.toBoolean() == true) {
                 def image = 'tomkukral/k8s-scripts'
@@ -91,6 +110,7 @@
     } catch (Throwable e) {
         // If there was an error or exception thrown, the build failed
         currentBuild.result = "FAILURE"
+        currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
         throw e
     } finally {
         validate.runCleanup(saltMaster, TARGET_NODE, artifacts_dir)