Merge "Revert adding TEMPEST_VERSION Change workflow for cvp-job. Oleksii Zhurba, Denis Meltsaykin and I decided that all custom settings in cvp jobs should be taken out of Jenkins parameters. These custom values (e.g. the Tempest version) will be indicated in a bash script from the 'cvp-configuration' repository. "
diff --git a/ceph-replace-failed-osd.groovy b/ceph-replace-failed-osd.groovy
index 2361098..601d74d 100644
--- a/ceph-replace-failed-osd.groovy
+++ b/ceph-replace-failed-osd.groovy
@@ -11,6 +11,7 @@
  *  OSD                                 Failed OSD ids to be replaced (comma-separated list - 1,2,3)
  *  DEVICE                              Comma separated list of failed devices that will be replaced at HOST (/dev/sdb,/dev/sdc)
  *  JOURNAL_BLOCKDB_BLOCKWAL_PARTITION  Comma separated list of partitions where journal or block_db or block_wal for the failed devices on this HOST were stored (/dev/sdh2,/dev/sdh3)
+ *  DATA_PARTITION                      Comma separated list of mounted partitions of failed device. These partitions will be unmounted. For ex. /dev/sdb1,/dev/sdb3
  *  CLUSTER_FLAGS                       Comma separated list of tags to apply to cluster
  *  WAIT_FOR_HEALTHY                    Wait for cluster rebalance before stoping daemons
  *  DMCRYPT                             Set to True if replacing osds are/were encrypted
@@ -26,6 +27,7 @@
 def osds = OSD.tokenize(',')
 def devices = DEVICE.tokenize(',')
 def journals_blockdbs_blockwals = JOURNAL_BLOCKDB_BLOCKWAL_PARTITION.tokenize(',')
+def mounted_partitions = DATA_PARTITION.tokenize(',')
 
 
 def runCephCommand(master, target, cmd) {
@@ -158,7 +160,6 @@
             }
 
 
-
             // zap disks `ceph-disk zap /dev/sdi`
             stage('Zap devices') {
                 for (dev in devices) {
@@ -174,9 +175,23 @@
         } else {
 
             // umount `umount /dev/sdi1`
-            stage('Umount devices') {
-                for (dev in devices) {
-                    runCephCommand(pepperEnv, HOST, 'umount ' + dev + '1')
+            stage('Umount partitions') {
+                if (mounted_partitions == null || mounted_partitions.empty) {
+                    for (dev in devices) {
+                        try {
+                            runCephCommand(pepperEnv, HOST, 'umount ' + dev + '1')
+                        } catch (Exception e) {
+                            common.warningMsg(e)
+                        }
+                    }
+                } else {
+                    for (part in mounted_partitions) {
+                        try {
+                            runCephCommand(pepperEnv, HOST, 'umount ' + part)
+                        } catch (Exception e) {
+                            common.warningMsg(e)
+                        }
+                    }
                 }
             }
 
diff --git a/cloud-deploy-pipeline.groovy b/cloud-deploy-pipeline.groovy
index a7f35d5..a4ed1d6 100644
--- a/cloud-deploy-pipeline.groovy
+++ b/cloud-deploy-pipeline.groovy
@@ -224,6 +224,17 @@
                             envParams.put('cfg_saltversion', SALT_VERSION)
                         }
 
+                        // If stack wasn't removed by the same user which has created it,
+                        // nova key pair won't be removed, so need to make sure that no
+                        // key pair with the same name exists before creating the stack.
+                        if (openstack.getKeyPair(openstackCloud, STACK_NAME, venv)){
+                            try {
+                                openstack.deleteKeyPair(openstackCloud, STACK_NAME, venv)
+                            } catch (Exception e) {
+                                common.errorMsg("Key pair failed to remove with error ${e.message}")
+                            }
+                        }
+
                         openstack.createHeatStack(openstackCloud, STACK_NAME, STACK_TEMPLATE, envParams, HEAT_STACK_ENVIRONMENT, venv)
                     }
 
@@ -329,7 +340,7 @@
 
                     if (common.checkContains('STACK_INSTALL', 'kvm')) {
                         orchestrate.installInfraKvm(venvPepper, extra_tgt)
-                        orchestrate.installFoundationInfra(venvPepper, staticMgmtNetwork, extra_target)
+                        orchestrate.installFoundationInfra(venvPepper, staticMgmtNetwork, extra_tgt)
                     }
 
                     orchestrate.validateFoundationInfra(venvPepper, extra_tgt)
diff --git a/deploy-virtual-edge-mom.groovy b/deploy-virtual-edge-mom.groovy
index af0d8a7..875195b 100644
--- a/deploy-virtual-edge-mom.groovy
+++ b/deploy-virtual-edge-mom.groovy
@@ -14,7 +14,7 @@
  *   STACK_TEMPLATE             File with stack template
  *   STACK_TEST                 Run tests (bool)
  *   EDGE_DEPLOY_SCHEMAS        Env schemas to deploy as edge clouds
- *   MOM_JOB                    Type of Master-of-Masters stack
+ *   MOM_JOB                    Job name to deploy Master-of-Masters stack
  */
 
 common = new com.mirantis.mk.Common()
@@ -31,7 +31,7 @@
     deployMoMJob = MOM_JOB
 }
 
-def deploy_schemas = '{os_ha_ovs: {deploy_job_name: "deploy-heat-os_ha_ovs", properties: {SLAVE_NODE: "python", STACK_INSTALL: "openstack,ovs", STACK_TEMPLATE: "os_ha_ovs", STACK_TYPE: "heat", FORMULA_PKG_REVISION: "testing", STACK_DELETE: false, STACK_CLUSTER_NAME: "os-ha-ovs"}}}'
+def deploy_schemas = '{os_ha_ovs: {deploy_job_name: "deploy-heat-os_ha_ovs", properties: {SLAVE_NODE: "python", STACK_INSTALL: "openstack,ovs", STACK_TEMPLATE: "os_ha_ovs", STACK_TYPE: "heat", FORMULA_PKG_REVISION: "testing", STACK_DELETE: false, STACK_CLUSTER_NAME: "os-ha-ovs", STACK_RECLASS_ADDRESS: "", STACK_RECLASS_BRANCH: ""}}}'
 if (common.validInputParam('EDGE_DEPLOY_SCHEMAS')) {
     deploy_schemas = EDGE_DEPLOY_SCHEMAS
 }
@@ -142,6 +142,8 @@
                             [$class: 'StringParameterValue', name: 'STACK_TEMPLATE_URL', value: STACK_TEMPLATE_URL],
                             [$class: 'StringParameterValue', name: 'STACK_TEMPLATE_BRANCH', value: 'master'],
                             [$class: 'StringParameterValue', name: 'STACK_TYPE', value: 'heat'],
+                            [$class: 'StringParameterValue', name: 'STACK_RECLASS_ADDRESS', value: props['STACK_RECLASS_ADDRESS']],
+                            [$class: 'StringParameterValue', name: 'STACK_RECLASS_BRANCH', value: props['STACK_RECLASS_BRANCH']],
                             [$class: 'StringParameterValue', name: 'FORMULA_PKG_REVISION', value: props['FORMULA_PKG_REVISION']],
                             [$class: 'StringParameterValue', name: 'STACK_CLUSTER_NAME', value: props['STACK_CLUSTER_NAME']],
                             [$class: 'StringParameterValue', name: 'STACK_TEST', value: ''],
diff --git a/docker-mirror-images.groovy b/docker-mirror-images.groovy
index 08ac439..ebbfc86 100644
--- a/docker-mirror-images.groovy
+++ b/docker-mirror-images.groovy
@@ -7,6 +7,7 @@
  *   TARGET_REGISTRY                           Target Docker Registry name
  *   REGISTRY_URL                              Target Docker Registry URL
  *   IMAGE_TAG                                 Tag to use when pushing images
+ *   SOURCE_IMAGE_TAG                          Tag to use when pulling images (optional; used if the SUBS_SOURCE_IMAGE_TAG placeholder is found in the image path)
  *   IMAGE_LIST                                List of images to mirror
  *
  */
@@ -39,6 +40,10 @@
                     }
                     imageArray = image.trim().tokenize(' ')
                     imagePath = imageArray[0]
+                    if (imagePath.contains('SUBS_SOURCE_IMAGE_TAG')) {
+                        common.warningMsg("Replacing SUBS_SOURCE_IMAGE_TAG => ${SOURCE_IMAGE_TAG}")
+                        imagePath.replace('SUBS_SOURCE_IMAGE_TAG', SOURCE_IMAGE_TAG)
+                    }
                     targetRegistry = imageArray[1]
                     imageName = getImageName(imagePath)
                     sh """docker pull ${imagePath}
@@ -52,4 +57,4 @@
             throw e
         }
     }
-}
\ No newline at end of file
+}
diff --git a/generate-cookiecutter-products.groovy b/generate-cookiecutter-products.groovy
index 6964653..12dc88d 100644
--- a/generate-cookiecutter-products.groovy
+++ b/generate-cookiecutter-products.groovy
@@ -5,7 +5,7 @@
  *   COOKIECUTTER_TEMPLATE_CONTEXT      Context parameters for the template generation.
  *   EMAIL_ADDRESS                      Email to send a created tar file
  *
-**/
+ **/
 
 common = new com.mirantis.mk.Common()
 git = new com.mirantis.mk.Git()
@@ -13,143 +13,148 @@
 saltModelTesting = new com.mirantis.mk.SaltModelTesting()
 ssh = new com.mirantis.mk.Ssh()
 
+def reclassVersion = 'v1.5.4'
+if (common.validInputParam('RECLASS_VERSION')) {
+  reclassVersion = RECLASS_VERSION
+}
+
 timeout(time: 12, unit: 'HOURS') {
-    node("python&&docker") {
-        def templateEnv = "${env.WORKSPACE}/template"
-        def modelEnv = "${env.WORKSPACE}/model"
-        def testEnv = "${env.WORKSPACE}/test"
-        def pipelineEnv = "${env.WORKSPACE}/pipelines"
+  node("python&&docker") {
+    def templateEnv = "${env.WORKSPACE}/template"
+    def modelEnv = "${env.WORKSPACE}/model"
+    def testEnv = "${env.WORKSPACE}/test"
+    def pipelineEnv = "${env.WORKSPACE}/pipelines"
 
-        try {
-            def templateContext = readYaml text: COOKIECUTTER_TEMPLATE_CONTEXT
-            def mcpVersion = templateContext.default_context.mcp_version
-            def sharedReclassUrl = templateContext.default_context.shared_reclass_url
-            def clusterDomain = templateContext.default_context.cluster_domain
-            def clusterName = templateContext.default_context.cluster_name
-            def saltMaster = templateContext.default_context.salt_master_hostname
-            def localRepositories = templateContext.default_context.local_repositories.toBoolean()
-            def offlineDeployment = templateContext.default_context.offline_deployment.toBoolean()
-            def cutterEnv = "${env.WORKSPACE}/cutter"
-            def jinjaEnv = "${env.WORKSPACE}/jinja"
-            def outputDestination = "${modelEnv}/classes/cluster/${clusterName}"
-            def systemEnv = "${modelEnv}/classes/system"
-            def targetBranch = "feature/${clusterName}"
-            def templateBaseDir = "${env.WORKSPACE}/template"
-            def templateDir = "${templateEnv}/template/dir"
-            def templateOutputDir = templateBaseDir
-            def user
-            wrap([$class: 'BuildUser']) {
-                user = env.BUILD_USER_ID
+    try {
+      def templateContext = readYaml text: COOKIECUTTER_TEMPLATE_CONTEXT
+      def mcpVersion = templateContext.default_context.mcp_version
+      def sharedReclassUrl = templateContext.default_context.shared_reclass_url
+      def clusterDomain = templateContext.default_context.cluster_domain
+      def clusterName = templateContext.default_context.cluster_name
+      def saltMaster = templateContext.default_context.salt_master_hostname
+      def localRepositories = templateContext.default_context.local_repositories.toBoolean()
+      def offlineDeployment = templateContext.default_context.offline_deployment.toBoolean()
+      def cutterEnv = "${env.WORKSPACE}/cutter"
+      def jinjaEnv = "${env.WORKSPACE}/jinja"
+      def outputDestination = "${modelEnv}/classes/cluster/${clusterName}"
+      def systemEnv = "${modelEnv}/classes/system"
+      def targetBranch = "feature/${clusterName}"
+      def templateBaseDir = "${env.WORKSPACE}/template"
+      def templateDir = "${templateEnv}/template/dir"
+      def templateOutputDir = templateBaseDir
+      def user
+      wrap([$class: 'BuildUser']) {
+        user = env.BUILD_USER_ID
+      }
+
+      currentBuild.description = clusterName
+      print("Using context:\n" + COOKIECUTTER_TEMPLATE_CONTEXT)
+
+      stage ('Download Cookiecutter template') {
+        def cookiecutterTemplateUrl = templateContext.default_context.cookiecutter_template_url
+        def cookiecutterTemplateBranch = templateContext.default_context.cookiecutter_template_branch
+        git.checkoutGitRepository(templateEnv, cookiecutterTemplateUrl, 'master')
+        // Use refspec if exists first of all
+        if (cookiecutterTemplateBranch.toString().startsWith('refs/')) {
+          dir(templateEnv) {
+            ssh.agentSh("git fetch ${cookiecutterTemplateUrl} ${cookiecutterTemplateBranch} && git checkout FETCH_HEAD")
+          }
+        } else {
+          // Use mcpVersion git tag if not specified branch for cookiecutter-templates
+          if (cookiecutterTemplateBranch == '') {
+            cookiecutterTemplateBranch = mcpVersion
+            // Don't have nightly/testing/stable for cookiecutter-templates repo, therefore use master
+            if(mcpVersion == "nightly" || mcpVersion == "testing" || mcpVersion == "stable"){
+              cookiecutterTemplateBranch = 'master'
             }
+          }
+          git.changeGitBranch(templateEnv, cookiecutterTemplateBranch)
+        }
+      }
 
-            currentBuild.description = clusterName
-            print("Using context:\n" + COOKIECUTTER_TEMPLATE_CONTEXT)
+      stage ('Create empty reclass model') {
+        dir(path: modelEnv) {
+          sh "rm -rfv .git"
+          sh "git init"
+          ssh.agentSh("git submodule add ${sharedReclassUrl} 'classes/system'")
+        }
 
-            stage ('Download Cookiecutter template') {
-                def cookiecutterTemplateUrl = templateContext.default_context.cookiecutter_template_url
-                def cookiecutterTemplateBranch = templateContext.default_context.cookiecutter_template_branch
-                git.checkoutGitRepository(templateEnv, cookiecutterTemplateUrl, 'master')
-                // Use refspec if exists first of all
-                if (cookiecutterTemplateBranch.toString().startsWith('refs/')) {
-                    dir(templateEnv) {
-                        ssh.agentSh("git fetch ${cookiecutterTemplateUrl} ${cookiecutterTemplateBranch} && git checkout FETCH_HEAD")
-                    }
-                } else {
-                    // Use mcpVersion git tag if not specified branch for cookiecutter-templates
-                    if (cookiecutterTemplateBranch == '') {
-                        cookiecutterTemplateBranch = mcpVersion
-                        // Don't have nightly/testing/stable for cookiecutter-templates repo, therefore use master
-                        if(mcpVersion == "nightly" || mcpVersion == "testing" || mcpVersion == "stable"){
-                            cookiecutterTemplateBranch = 'master'
-                        }
-                    }
-                    git.changeGitBranch(templateEnv, cookiecutterTemplateBranch)
-                }
+        def sharedReclassBranch = templateContext.default_context.shared_reclass_branch
+        // Use refspec if exists first of all
+        if (sharedReclassBranch.toString().startsWith('refs/')) {
+          dir(systemEnv) {
+            ssh.agentSh("git fetch ${sharedReclassUrl} ${sharedReclassBranch} && git checkout FETCH_HEAD")
+          }
+        } else {
+          // Use mcpVersion git tag if not specified branch for reclass-system
+          if (sharedReclassBranch == '') {
+            sharedReclassBranch = mcpVersion
+            // Don't have nightly/testing/stable for reclass-system repo, therefore use master
+            if(mcpVersion == "nightly" || mcpVersion == "testing" || mcpVersion == "stable"){
+              sharedReclassBranch = 'master'
             }
+          }
+          git.changeGitBranch(systemEnv, sharedReclassBranch)
+        }
+        git.commitGitChanges(modelEnv, "Added new shared reclass submodule", "${user}@localhost", "${user}")
+      }
 
-            stage ('Create empty reclass model') {
-                dir(path: modelEnv) {
-                    sh "rm -rfv .git"
-                    sh "git init"
-                    ssh.agentSh("git submodule add ${sharedReclassUrl} 'classes/system'")
-                }
+      def productList = ["infra", "cicd", "opencontrail", "kubernetes", "openstack", "oss", "stacklight", "ceph"]
+      for (product in productList) {
 
-                def sharedReclassBranch = templateContext.default_context.shared_reclass_branch
-                // Use refspec if exists first of all
-                if (sharedReclassBranch.toString().startsWith('refs/')) {
-                    dir(systemEnv) {
-                        ssh.agentSh("git fetch ${sharedReclassUrl} ${sharedReclassBranch} && git checkout FETCH_HEAD")
-                    }
-                } else {
-                    // Use mcpVersion git tag if not specified branch for reclass-system
-                    if (sharedReclassBranch == '') {
-                        sharedReclassBranch = mcpVersion
-                        // Don't have nightly/testing/stable for reclass-system repo, therefore use master
-                        if(mcpVersion == "nightly" || mcpVersion == "testing" || mcpVersion == "stable"){
-                            sharedReclassBranch = 'master'
-                        }
-                    }
-                    git.changeGitBranch(systemEnv, sharedReclassBranch)
-                }
-                git.commitGitChanges(modelEnv, "Added new shared reclass submodule", "${user}@localhost", "${user}")
-            }
+        // get templateOutputDir and productDir
+        if (product.startsWith("stacklight")) {
+          templateOutputDir = "${env.WORKSPACE}/output/stacklight"
 
-            def productList = ["infra", "cicd", "opencontrail", "kubernetes", "openstack", "oss", "stacklight", "ceph"]
-            for (product in productList) {
+          def stacklightVersion
+          try {
+            stacklightVersion = templateContext.default_context['stacklight_version']
+          } catch (Throwable e) {
+            common.warningMsg('Stacklight version loading failed')
+          }
 
-                // get templateOutputDir and productDir
-                if (product.startsWith("stacklight")) {
-                    templateOutputDir = "${env.WORKSPACE}/output/stacklight"
+          if (stacklightVersion) {
+            productDir = "stacklight" + stacklightVersion
+          } else {
+            productDir = "stacklight1"
+          }
 
-                    def stacklightVersion
-                    try {
-                        stacklightVersion = templateContext.default_context['stacklight_version']
-                    } catch (Throwable e) {
-                        common.warningMsg('Stacklight version loading failed')
-                    }
+        } else {
+          templateOutputDir = "${env.WORKSPACE}/output/${product}"
+          productDir = product
+        }
 
-                    if (stacklightVersion) {
-                        productDir = "stacklight" + stacklightVersion
-                    } else {
-                        productDir = "stacklight1"
-                    }
+        if (product == "infra" || (templateContext.default_context["${product}_enabled"]
+            && templateContext.default_context["${product}_enabled"].toBoolean())) {
 
-                } else {
-                    templateOutputDir = "${env.WORKSPACE}/output/${product}"
-                    productDir = product
-                }
+          templateDir = "${templateEnv}/cluster_product/${productDir}"
+          common.infoMsg("Generating product " + product + " from " + templateDir + " to " + templateOutputDir)
 
-                if (product == "infra" || (templateContext.default_context["${product}_enabled"]
-                    && templateContext.default_context["${product}_enabled"].toBoolean())) {
+          sh "rm -rf ${templateOutputDir} || true"
+          sh "mkdir -p ${templateOutputDir}"
+          sh "mkdir -p ${outputDestination}"
 
-                    templateDir = "${templateEnv}/cluster_product/${productDir}"
-                    common.infoMsg("Generating product " + product + " from " + templateDir + " to " + templateOutputDir)
+          python.setupCookiecutterVirtualenv(cutterEnv)
+          python.buildCookiecutterTemplate(templateDir, COOKIECUTTER_TEMPLATE_CONTEXT, templateOutputDir, cutterEnv, templateBaseDir)
+          sh "mv -v ${templateOutputDir}/${clusterName}/* ${outputDestination}"
+        } else {
+          common.warningMsg("Product " + product + " is disabled")
+        }
+      }
 
-                    sh "rm -rf ${templateOutputDir} || true"
-                    sh "mkdir -p ${templateOutputDir}"
-                    sh "mkdir -p ${outputDestination}"
+      if(localRepositories && !offlineDeployment){
+        def aptlyModelUrl = templateContext.default_context.local_model_url
+        dir(path: modelEnv) {
+          ssh.agentSh "git submodule add \"${aptlyModelUrl}\" \"classes/cluster/${clusterName}/cicd/aptly\""
+          if(!(mcpVersion in ["nightly", "testing", "stable"])){
+            ssh.agentSh "cd \"classes/cluster/${clusterName}/cicd/aptly\";git fetch --tags;git checkout ${mcpVersion}"
+          }
+        }
+      }
 
-                    python.setupCookiecutterVirtualenv(cutterEnv)
-                    python.buildCookiecutterTemplate(templateDir, COOKIECUTTER_TEMPLATE_CONTEXT, templateOutputDir, cutterEnv, templateBaseDir)
-                    sh "mv -v ${templateOutputDir}/${clusterName}/* ${outputDestination}"
-                } else {
-                    common.warningMsg("Product " + product + " is disabled")
-                }
-            }
-
-            if(localRepositories && !offlineDeployment){
-                def aptlyModelUrl = templateContext.default_context.local_model_url
-                dir(path: modelEnv) {
-                    ssh.agentSh "git submodule add \"${aptlyModelUrl}\" \"classes/cluster/${clusterName}/cicd/aptly\""
-                        if(!(mcpVersion in ["nightly", "testing", "stable"])){
-                        ssh.agentSh "cd \"classes/cluster/${clusterName}/cicd/aptly\";git fetch --tags;git checkout ${mcpVersion}"
-                    }
-                }
-            }
-
-            stage('Generate new SaltMaster node') {
-                def nodeFile = "${modelEnv}/nodes/${saltMaster}.${clusterDomain}.yml"
-                def nodeString = """classes:
+      stage('Generate new SaltMaster node') {
+        def nodeFile = "${modelEnv}/nodes/${saltMaster}.${clusterDomain}.yml"
+        def nodeString = """classes:
 - cluster.${clusterName}.infra.config
 parameters:
   _param:
@@ -160,145 +165,153 @@
       name: ${saltMaster}
       domain: ${clusterDomain}
     """
-                sh "mkdir -p ${modelEnv}/nodes/"
-                writeFile(file: nodeFile, text: nodeString)
+        sh "mkdir -p ${modelEnv}/nodes/"
+        writeFile(file: nodeFile, text: nodeString)
 
-                git.commitGitChanges(modelEnv, "Create model ${clusterName}", "${user}@localhost", "${user}")
-            }
+        git.commitGitChanges(modelEnv, "Create model ${clusterName}", "${user}@localhost", "${user}")
+      }
 
-            stage("Test") {
-                if (sharedReclassUrl != "" && TEST_MODEL && TEST_MODEL.toBoolean()) {
-                    sh("cp -r ${modelEnv} ${testEnv}")
-                    def DockerCName = "${env.JOB_NAME.toLowerCase()}_${env.BUILD_TAG.toLowerCase()}"
-                    saltModelTesting.setupAndTestNode(
-                            "${saltMaster}.${clusterDomain}",
-                            "",
-                            "",
-                            testEnv,
-                            'pkg',
-                            'stable',
-                            'master',
-                            0,
-                            false,
-                            false,
-                            '',
-                            '',
-                            DockerCName)
-                }
-            }
-
-            stage("Generate config drives") {
-                // apt package genisoimage is required for this stage
-
-                // download create-config-drive
-                // FIXME: that should be refactored, to use git clone - to be able download it from custom repo.
-                def mcpCommonScriptsBranch = templateContext.default_context.mcp_common_scripts_branch
-                if (mcpCommonScriptsBranch == '') {
-                    mcpCommonScriptsBranch = mcpVersion
-                    // Don't have nightly for mcp-common-scripts repo, therefore use master
-                    if(mcpVersion == "nightly"){
-                        mcpCommonScriptsBranch = 'master'
-                    }
-                }
-                def config_drive_script_url = "https://raw.githubusercontent.com/Mirantis/mcp-common-scripts/${mcpCommonScriptsBranch}/config-drive/create_config_drive.sh"
-                def user_data_script_url = "https://raw.githubusercontent.com/Mirantis/mcp-common-scripts/${mcpCommonScriptsBranch}/config-drive/master_config.sh"
-
-                sh "wget -O create-config-drive ${config_drive_script_url} && chmod +x create-config-drive"
-                sh "wget -O user_data.sh ${user_data_script_url}"
-
-                sh "git clone --mirror https://github.com/Mirantis/mk-pipelines.git ${pipelineEnv}/mk-pipelines"
-                sh "git clone --mirror https://github.com/Mirantis/pipeline-library.git ${pipelineEnv}/pipeline-library"
-                args = "--user-data user_data.sh --hostname ${saltMaster} --model ${modelEnv} --mk-pipelines ${pipelineEnv}/mk-pipelines/ --pipeline-library ${pipelineEnv}/pipeline-library/ ${saltMaster}.${clusterDomain}-config.iso"
-
-                // load data from model
-                def smc = [:]
-                smc['SALT_MASTER_MINION_ID'] = "${saltMaster}.${clusterDomain}"
-                smc['SALT_MASTER_DEPLOY_IP'] = templateContext['default_context']['salt_master_management_address']
-                smc['DEPLOY_NETWORK_GW'] = templateContext['default_context']['deploy_network_gateway']
-                smc['DEPLOY_NETWORK_NETMASK'] = templateContext['default_context']['deploy_network_netmask']
-                smc['DNS_SERVERS'] = templateContext['default_context']['dns_server01']
-                smc['MCP_VERSION'] = "${mcpVersion}"
-                if (templateContext['default_context']['local_repositories'] == 'True'){
-                    def localRepoIP = templateContext['default_context']['local_repo_url']
-                    smc['MCP_SALT_REPO_KEY'] = "http://${localRepoIP}/public.gpg"
-                    smc['MCP_SALT_REPO_URL'] = "http://${localRepoIP}/ubuntu-xenial"
-                    smc['PIPELINES_FROM_ISO'] = 'false'
-                    smc['PIPELINE_REPO_URL'] = "http://${localRepoIP}:8088"
-                    smc['LOCAL_REPOS'] = 'true'
-                }
-                if (templateContext['default_context']['upstream_proxy_enabled'] == 'True'){
-                    if (templateContext['default_context']['upstream_proxy_auth_enabled'] == 'True'){
-                        smc['http_proxy'] = 'http://' + templateContext['default_context']['upstream_proxy_user'] + ':' + templateContext['default_context']['upstream_proxy_password'] + '@' + templateContext['default_context']['upstream_proxy_address'] + ':' + templateContext['default_context']['upstream_proxy_port']
-                        smc['https_proxy'] = 'http://' + templateContext['default_context']['upstream_proxy_user'] + ':' + templateContext['default_context']['upstream_proxy_password'] + '@' + templateContext['default_context']['upstream_proxy_address'] + ':' + templateContext['default_context']['upstream_proxy_port']
-                    } else {
-                        smc['http_proxy'] = 'http://' + templateContext['default_context']['upstream_proxy_address'] + ':' + templateContext['default_context']['upstream_proxy_port']
-                        smc['https_proxy'] = 'http://' + templateContext['default_context']['upstream_proxy_address'] + ':' + templateContext['default_context']['upstream_proxy_port']
-                    }
-                }
-
-                for (i in common.entries(smc)) {
-                    sh "sed -i 's,export ${i[0]}=.*,export ${i[0]}=${i[1]},' user_data.sh"
-                }
-
-                // create cfg config-drive
-                sh "./create-config-drive ${args}"
-                sh("mkdir output-${clusterName} && mv ${saltMaster}.${clusterDomain}-config.iso output-${clusterName}/")
-
-                // save cfg iso to artifacts
-                archiveArtifacts artifacts: "output-${clusterName}/${saltMaster}.${clusterDomain}-config.iso"
-
-                if (templateContext['default_context']['local_repositories'] == 'True'){
-                    def aptlyServerHostname = templateContext.default_context.aptly_server_hostname
-                    def user_data_script_apt_url = "https://raw.githubusercontent.com/Mirantis/mcp-common-scripts/master/config-drive/mirror_config.sh"
-                    sh "wget -O mirror_config.sh ${user_data_script_apt_url}"
-
-                    def smc_apt = [:]
-                    smc_apt['SALT_MASTER_DEPLOY_IP'] = templateContext['default_context']['salt_master_management_address']
-                    smc_apt['APTLY_DEPLOY_IP'] = templateContext['default_context']['aptly_server_deploy_address']
-                    smc_apt['APTLY_DEPLOY_NETMASK'] = templateContext['default_context']['deploy_network_netmask']
-                    smc_apt['APTLY_MINION_ID'] = "${aptlyServerHostname}.${clusterDomain}"
-
-                    for (i in common.entries(smc_apt)) {
-                        sh "sed -i \"s,export ${i[0]}=.*,export ${i[0]}=${i[1]},\" mirror_config.sh"
-                    }
-
-                    // create apt config-drive
-                    sh "./create-config-drive --user-data mirror_config.sh --hostname ${aptlyServerHostname} ${aptlyServerHostname}.${clusterDomain}-config.iso"
-                    sh("mv ${aptlyServerHostname}.${clusterDomain}-config.iso output-${clusterName}/")
-
-                    // save apt iso to artifacts
-                    archiveArtifacts artifacts: "output-${clusterName}/${aptlyServerHostname}.${clusterDomain}-config.iso"
-                }
-            }
-
-            stage ('Save changes reclass model') {
-                sh(returnStatus: true, script: "tar -zcf output-${clusterName}/${clusterName}.tar.gz -C ${modelEnv} .")
-                archiveArtifacts artifacts: "output-${clusterName}/${clusterName}.tar.gz"
-
-
-                if (EMAIL_ADDRESS != null && EMAIL_ADDRESS != "") {
-                     emailext(to: EMAIL_ADDRESS,
-                              attachmentsPattern: "output-${clusterName}/*",
-                              body: "Mirantis Jenkins\n\nRequested reclass model ${clusterName} has been created and attached to this email.\nEnjoy!\n\nMirantis",
-                              subject: "Your Salt model ${clusterName}")
-                }
-                dir("output-${clusterName}"){
-                    deleteDir()
-                }
-            }
-
-        } catch (Throwable e) {
-             // If there was an error or exception thrown, the build failed
-             currentBuild.result = "FAILURE"
-             currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
-             throw e
-        } finally {
-            stage ('Clean workspace directories') {
-                sh(returnStatus: true, script: "rm -rf ${templateEnv}")
-                sh(returnStatus: true, script: "rm -rf ${modelEnv}")
-                sh(returnStatus: true, script: "rm -rf ${pipelineEnv}")
-            }
-             // common.sendNotification(currentBuild.result,"",["slack"])
+      stage("Test") {
+        if (TEST_MODEL.toBoolean() && sharedReclassUrl != '') {
+          def testResult = false
+          sh("cp -r ${modelEnv} ${testEnv}")
+          def DockerCName = "${env.JOB_NAME.toLowerCase()}_${env.BUILD_TAG.toLowerCase()}"
+          testResult = saltModelTesting.setupAndTestNode(
+              "${saltMaster}.${clusterDomain}",
+              "",
+              "",
+              testEnv,
+              'pkg',
+              'stable',
+              reclassVersion,
+              0,
+              false,
+              false,
+              '',
+              '',
+              DockerCName)
+          if (testResult) {
+            common.infoMsg("Test finished: SUCCESS")
+          } else {
+            common.infoMsg('Test finished: FAILURE')
+            throw new RuntimeException('Test stage finished: FAILURE')
+          }
+        } else {
+          common.warningMsg("Test stage has been skipped!")
         }
+      }
+      stage("Generate config drives") {
+        // apt package genisoimage is required for this stage
+
+        // download create-config-drive
+        // FIXME: that should be refactored, to use git clone - to be able download it from custom repo.
+        def mcpCommonScriptsBranch = templateContext.default_context.mcp_common_scripts_branch
+        if (mcpCommonScriptsBranch == '') {
+          mcpCommonScriptsBranch = mcpVersion
+          // Don't have nightly for mcp-common-scripts repo, therefore use master
+          if(mcpVersion == "nightly"){
+            mcpCommonScriptsBranch = 'master'
+          }
+        }
+        def config_drive_script_url = "https://raw.githubusercontent.com/Mirantis/mcp-common-scripts/${mcpCommonScriptsBranch}/config-drive/create_config_drive.sh"
+        def user_data_script_url = "https://raw.githubusercontent.com/Mirantis/mcp-common-scripts/${mcpCommonScriptsBranch}/config-drive/master_config.sh"
+
+        sh "wget -O create-config-drive ${config_drive_script_url} && chmod +x create-config-drive"
+        sh "wget -O user_data.sh ${user_data_script_url}"
+
+        sh "git clone --mirror https://github.com/Mirantis/mk-pipelines.git ${pipelineEnv}/mk-pipelines"
+        sh "git clone --mirror https://github.com/Mirantis/pipeline-library.git ${pipelineEnv}/pipeline-library"
+        args = "--user-data user_data.sh --hostname ${saltMaster} --model ${modelEnv} --mk-pipelines ${pipelineEnv}/mk-pipelines/ --pipeline-library ${pipelineEnv}/pipeline-library/ ${saltMaster}.${clusterDomain}-config.iso"
+
+        // load data from model
+        def smc = [:]
+        smc['SALT_MASTER_MINION_ID'] = "${saltMaster}.${clusterDomain}"
+        smc['SALT_MASTER_DEPLOY_IP'] = templateContext['default_context']['salt_master_management_address']
+        smc['DEPLOY_NETWORK_GW'] = templateContext['default_context']['deploy_network_gateway']
+        smc['DEPLOY_NETWORK_NETMASK'] = templateContext['default_context']['deploy_network_netmask']
+        smc['DNS_SERVERS'] = templateContext['default_context']['dns_server01']
+        smc['MCP_VERSION'] = "${mcpVersion}"
+        if (templateContext['default_context']['local_repositories'] == 'True'){
+          def localRepoIP = templateContext['default_context']['local_repo_url']
+          smc['MCP_SALT_REPO_KEY'] = "http://${localRepoIP}/public.gpg"
+          smc['MCP_SALT_REPO_URL'] = "http://${localRepoIP}/ubuntu-xenial"
+          smc['PIPELINES_FROM_ISO'] = 'false'
+          smc['PIPELINE_REPO_URL'] = "http://${localRepoIP}:8088"
+          smc['LOCAL_REPOS'] = 'true'
+        }
+        if (templateContext['default_context']['upstream_proxy_enabled'] == 'True'){
+          if (templateContext['default_context']['upstream_proxy_auth_enabled'] == 'True'){
+            smc['http_proxy'] = 'http://' + templateContext['default_context']['upstream_proxy_user'] + ':' + templateContext['default_context']['upstream_proxy_password'] + '@' + templateContext['default_context']['upstream_proxy_address'] + ':' + templateContext['default_context']['upstream_proxy_port']
+            smc['https_proxy'] = 'http://' + templateContext['default_context']['upstream_proxy_user'] + ':' + templateContext['default_context']['upstream_proxy_password'] + '@' + templateContext['default_context']['upstream_proxy_address'] + ':' + templateContext['default_context']['upstream_proxy_port']
+          } else {
+            smc['http_proxy'] = 'http://' + templateContext['default_context']['upstream_proxy_address'] + ':' + templateContext['default_context']['upstream_proxy_port']
+            smc['https_proxy'] = 'http://' + templateContext['default_context']['upstream_proxy_address'] + ':' + templateContext['default_context']['upstream_proxy_port']
+          }
+        }
+
+        for (i in common.entries(smc)) {
+          sh "sed -i 's,export ${i[0]}=.*,export ${i[0]}=${i[1]},' user_data.sh"
+        }
+
+        // create cfg config-drive
+        sh "./create-config-drive ${args}"
+        sh("mkdir output-${clusterName} && mv ${saltMaster}.${clusterDomain}-config.iso output-${clusterName}/")
+
+        // save cfg iso to artifacts
+        archiveArtifacts artifacts: "output-${clusterName}/${saltMaster}.${clusterDomain}-config.iso"
+
+        if (templateContext['default_context']['local_repositories'] == 'True'){
+          def aptlyServerHostname = templateContext.default_context.aptly_server_hostname
+          def user_data_script_apt_url = "https://raw.githubusercontent.com/Mirantis/mcp-common-scripts/master/config-drive/mirror_config.sh"
+          sh "wget -O mirror_config.sh ${user_data_script_apt_url}"
+
+          def smc_apt = [:]
+          smc_apt['SALT_MASTER_DEPLOY_IP'] = templateContext['default_context']['salt_master_management_address']
+          smc_apt['APTLY_DEPLOY_IP'] = templateContext['default_context']['aptly_server_deploy_address']
+          smc_apt['APTLY_DEPLOY_NETMASK'] = templateContext['default_context']['deploy_network_netmask']
+          smc_apt['APTLY_MINION_ID'] = "${aptlyServerHostname}.${clusterDomain}"
+
+          for (i in common.entries(smc_apt)) {
+            sh "sed -i \"s,export ${i[0]}=.*,export ${i[0]}=${i[1]},\" mirror_config.sh"
+          }
+
+          // create apt config-drive
+          sh "./create-config-drive --user-data mirror_config.sh --hostname ${aptlyServerHostname} ${aptlyServerHostname}.${clusterDomain}-config.iso"
+          sh("mv ${aptlyServerHostname}.${clusterDomain}-config.iso output-${clusterName}/")
+
+          // save apt iso to artifacts
+          archiveArtifacts artifacts: "output-${clusterName}/${aptlyServerHostname}.${clusterDomain}-config.iso"
+        }
+      }
+
+      stage ('Save changes reclass model') {
+        sh(returnStatus: true, script: "tar -zcf output-${clusterName}/${clusterName}.tar.gz -C ${modelEnv} .")
+        archiveArtifacts artifacts: "output-${clusterName}/${clusterName}.tar.gz"
+
+
+        if (EMAIL_ADDRESS != null && EMAIL_ADDRESS != "") {
+          emailext(to: EMAIL_ADDRESS,
+              attachmentsPattern: "output-${clusterName}/*",
+              body: "Mirantis Jenkins\n\nRequested reclass model ${clusterName} has been created and attached to this email.\nEnjoy!\n\nMirantis",
+              subject: "Your Salt model ${clusterName}")
+        }
+        dir("output-${clusterName}"){
+          deleteDir()
+        }
+      }
+
+    } catch (Throwable e) {
+      // If there was an error or exception thrown, the build failed
+      currentBuild.result = "FAILURE"
+      currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+      throw e
+    } finally {
+      stage ('Clean workspace directories') {
+        sh(returnStatus: true, script: "rm -rf ${templateEnv}")
+        sh(returnStatus: true, script: "rm -rf ${modelEnv}")
+        sh(returnStatus: true, script: "rm -rf ${pipelineEnv}")
+      }
+      // common.sendNotification(currentBuild.result,"",["slack"])
     }
+  }
 }
diff --git a/release-mcp-version.groovy b/release-mcp-version.groovy
index 8af3fbe..b1b3d77 100644
--- a/release-mcp-version.groovy
+++ b/release-mcp-version.groovy
@@ -20,7 +20,7 @@
  *   NOTIFY_RECIPIENTS
  *   NOTIFY_TEXT
  *
- */
+*/
 
 common = new com.mirantis.mk.Common()
 git = new com.mirantis.mk.Git()
@@ -35,71 +35,73 @@
     [$class: 'BooleanParameterValue', name: 'RECREATE', value: recreate],
     [$class: 'StringParameterValue', name: 'SOURCE', value: source],
     [$class: 'StringParameterValue', name: 'STORAGES', value: storages],
-    [$class: 'StringParameterValue', name: 'TARGET', value: target]
+    [$class: 'StringParameterValue', name: 'TARGET', value: target],
   ]
 }
 
-def triggerDockerMirrorJob(dockerCredentials, dockerRegistryUrl, targetTag, imageList) {
+def triggerDockerMirrorJob(dockerCredentials, dockerRegistryUrl, targetTag, imageList, sourceImageTag) {
   build job: "docker-images-mirror", parameters: [
     [$class: 'StringParameterValue', name: 'TARGET_REGISTRY_CREDENTIALS_ID', value: dockerCredentials],
     [$class: 'StringParameterValue', name: 'REGISTRY_URL', value: dockerRegistryUrl],
     [$class: 'StringParameterValue', name: 'IMAGE_TAG', value: targetTag],
-    [$class: 'StringParameterValue', name: 'IMAGE_LIST', value: imageList]
+    [$class: 'StringParameterValue', name: 'IMAGE_LIST', value: imageList],
+    [$class: 'StringParameterValue', name: 'SOURCE_IMAGE_TAG', value: sourceImageTag],
   ]
 }
 
 def triggerMirrorRepoJob(snapshotId, snapshotName) {
   build job: "mirror-snapshot-name-all", parameters: [
     [$class: 'StringParameterValue', name: 'SNAPSHOT_NAME', value: snapshotName],
-    [$class: 'StringParameterValue', name: 'SNAPSHOT_ID', value: snapshotId]
+    [$class: 'StringParameterValue', name: 'SNAPSHOT_ID', value: snapshotId],
   ]
 }
 
-def triggerGitTagJob(gitRepoList, gitCredentials, tag) {
+def triggerGitTagJob(gitRepoList, gitCredentials, tag, sourceTag) {
   build job: "tag-git-repos-stable", parameters: [
     [$class: 'StringParameterValue', name: 'GIT_REPO_LIST', value: gitRepoList],
     [$class: 'StringParameterValue', name: 'GIT_CREDENTIALS', value: gitCredentials],
-    [$class: 'StringParameterValue', name: 'TAG', value: tag]
+    [$class: 'StringParameterValue', name: 'TAG', value: tag],
+    [$class: 'StringParameterValue', name: 'SOURCE_TAG', value: sourceTag],
   ]
 }
 
 timeout(time: 12, unit: 'HOURS') {
-    node() {
-        try {
-            stage("Promote"){
-                if(RELEASE_APTLY.toBoolean())
-                {
-                    common.infoMsg("Promoting Aptly")
-                    triggerAptlyPromoteJob(APTLY_URL, 'all', false, true, 'all', false, "(.*)/${SOURCE_REVISION}", APTLY_STORAGES, "{0}/${TARGET_REVISION}")
-                }
+  node() {
+    try {
+      stage("Promote"){
+        if(RELEASE_APTLY.toBoolean())
+        {
+          common.infoMsg("Promoting Aptly")
+          triggerAptlyPromoteJob(APTLY_URL, 'all', false, true, 'all', false, "(.*)/${SOURCE_REVISION}", APTLY_STORAGES, "{0}/${TARGET_REVISION}")
+        }
 
-                if(RELEASE_DEB_MIRRORS.toBoolean()){
-                    common.infoMsg("Promoting Debmirrors")
-                    triggerMirrorRepoJob(SOURCE_REVISION, TARGET_REVISION)
-                }
+        if(RELEASE_DEB_MIRRORS.toBoolean()){
+          common.infoMsg("Promoting Debmirrors")
+          triggerMirrorRepoJob(SOURCE_REVISION, TARGET_REVISION)
+        }
 
-                if(RELEASE_DOCKER.toBoolean())
-                {
-                    common.infoMsg("Promoting Docker images")
-                    triggerDockerMirrorJob(DOCKER_CREDENTIALS, DOCKER_URL, TARGET_REVISION, DOCKER_IMAGES)
-                }
+        if(RELEASE_DOCKER.toBoolean())
+        {
+          common.infoMsg("Promoting Docker images")
+          triggerDockerMirrorJob(DOCKER_CREDENTIALS, DOCKER_URL, TARGET_REVISION, DOCKER_IMAGES, SOURCE_REVISION)
+        }
 
-                if(RELEASE_GIT.toBoolean())
-                {
-                    common.infoMsg("Promoting Git repositories")
-                    triggerGitTagJob(GIT_REPO_LIST, GIT_CREDENTIALS, TARGET_REVISION)
+        if(RELEASE_GIT.toBoolean())
+        {
+          common.infoMsg("Promoting Git repositories")
+          triggerGitTagJob(GIT_REPO_LIST, GIT_CREDENTIALS, TARGET_REVISION, SOURCE_REVISION)
 
-                }
-                if (EMAIL_NOTIFY.toBoolean()) {
-                    emailext(to: NOTIFY_RECIPIENTS,
-                        body: NOTIFY_TEXT,
-                        subject: "MCP Promotion has been done")
-                }
-            }
-        } catch (Throwable e) {
+        }
+        if (EMAIL_NOTIFY.toBoolean()) {
+          emailext(to: NOTIFY_RECIPIENTS,
+            body: NOTIFY_TEXT,
+            subject: "MCP Promotion has been done")
+        }
+      }
+      } catch (Throwable e) {
             // If there was an error or exception thrown, the build failed
             currentBuild.result = "FAILURE"
             throw e
+          }
         }
-    }
-}
\ No newline at end of file
+      }
diff --git a/tag-git-repos.groovy b/tag-git-repos.groovy
index 52344d1..dabbb7f 100644
--- a/tag-git-repos.groovy
+++ b/tag-git-repos.groovy
@@ -1,46 +1,58 @@
+
 /**
- *
- * Tag Git repositories
- *
- * Expected parameters:
- *   GIT_REPO_LIST
- *   GIT_CREDENTIALS
- *   TAG
- *
- */
+*
+* Tag Git repositories
+*
+* Expected parameters:
+*   GIT_REPO_LIST
+*   GIT_CREDENTIALS
+*   TAG
+*   SOURCE_TAG initial commit/tag to be tagged with TAG
+*
+*/
 
 common = new com.mirantis.mk.Common()
 git = new com.mirantis.mk.Git()
 
 def gitRepoAddTag(repoURL, repoName, tag, credentials, ref = "HEAD"){
-    git.checkoutGitRepository(repoName, repoURL, "master", credentials)
-    dir(repoName) {
-        sh "git tag -f -a ${tag} ${ref} -m \"Release of mcp version ${tag}\""
-        sshagent([credentials]) {
-            sh "git push -f origin ${tag}:refs/tags/${tag}"
-        }
+  common.infoMsg("Tagging: ${repoURL} ${ref} => ${tag}")
+  git.checkoutGitRepository(repoName, repoURL, "master", credentials)
+  dir(repoName) {
+    sh "git tag -f -a ${tag} ${ref} -m \"Release of mcp version ${tag}\""
+    sshagent([credentials]) {
+      sh "git push -f origin ${tag}:refs/tags/${tag}"
     }
+  }
 }
 
 timeout(time: 12, unit: 'HOURS') {
-    node() {
-        try {
-            def repos = GIT_REPO_LIST.tokenize('\n')
-            def repoUrl, repoName, repoCommit, repoArray
-            for (repo in repos){
-                if(repo.trim().indexOf(' ') == -1){
-                    throw new IllegalArgumentException("Wrong format of repository and commit input")
-                }
-                repoArray = repo.trim().tokenize(' ')
-                repoName = repoArray[0]
-                repoUrl = repoArray[1]
-                repoCommit = repoArray[2]
-                gitRepoAddTag(repoUrl, repoName, TAG, GIT_CREDENTIALS, repoCommit)
-            }
-        } catch (Throwable e) {
+  node() {
+    try {
+      def repos = GIT_REPO_LIST.tokenize('\n')
+      def repoUrl, repoName, repoCommit, repoArray
+      for (repo in repos){
+        if(repo.startsWith('#')){
+          common.warningMsg("Skipping repo ${repo}")
+          continue
+        }
+        if(repo.trim().indexOf(' ') == -1){
+          throw new IllegalArgumentException("Wrong format of repository and commit input")
+        }
+        repoArray = repo.trim().tokenize(' ')
+        repoName = repoArray[0]
+        repoUrl = repoArray[1]
+        repoCommit = repoArray[2]
+        if (repoCommit.contains('SUBS_SOURCE_REF')) {
+          common.warningMsg("Replacing SUBS_SOURCE_REF => ${SOURCE_TAG}")
+          repoCommit = repoCommit.replace('SUBS_SOURCE_REF',
+              SOURCE_TAG)
+        }
+        gitRepoAddTag(repoUrl, repoName, TAG, GIT_CREDENTIALS, repoCommit)
+      }
+      } catch (Throwable e) {
             // If there was an error or exception thrown, the build failed
             currentBuild.result = "FAILURE"
             throw e
+          }
         }
-    }
-}
\ No newline at end of file
+      }
diff --git a/test-cookiecutter-reclass.groovy b/test-cookiecutter-reclass.groovy
index fedbd14..66961f8 100644
--- a/test-cookiecutter-reclass.groovy
+++ b/test-cookiecutter-reclass.groovy
@@ -4,6 +4,11 @@
 python = new com.mirantis.mk.Python()
 saltModelTesting = new com.mirantis.mk.SaltModelTesting()
 
+def reclassVersion = 'v1.5.4'
+if (common.validInputParam('RECLASS_VERSION')) {
+  reclassVersion = RECLASS_VERSION
+}
+
 def generateSaltMaster(modEnv, clusterDomain, clusterName) {
     def nodeFile = "${modEnv}/nodes/cfg01.${clusterDomain}.yml"
     def nodeString = """classes:
@@ -76,50 +81,44 @@
     generateSaltMaster(generatedModel, clusterDomain, clusterName)
 }
 
-def testModel(modelFile, testEnv) {
-    def templateEnv = "${env.WORKSPACE}"
-    def content = readFile(file: "${templateEnv}/contexts/${modelFile}.yml")
-    def templateContext = readYaml text: content
-    def clusterName = templateContext.default_context.cluster_name
-    def clusterDomain = templateContext.default_context.cluster_domain
-    if (SYSTEM_GIT_URL == "") {
-        git.checkoutGitRepository("${testEnv}/classes/system", RECLASS_MODEL_URL, RECLASS_MODEL_BRANCH, CREDENTIALS_ID)
-    } else {
-        dir("${testEnv}/classes/system") {
-            if (!gerrit.gerritPatchsetCheckout(SYSTEM_GIT_URL, SYSTEM_GIT_REF, "HEAD", CREDENTIALS_ID)) {
-              common.errorMsg("Failed to obtain system reclass with url: ${SYSTEM_GIT_URL} and ${SYSTEM_GIT_REF}")
-            }
-        }
+def testModel(modelFile, testEnv, reclassVersion='v1.5.4') {
+  def templateEnv = "${env.WORKSPACE}"
+  def content = readFile(file: "${templateEnv}/contexts/${modelFile}.yml")
+  def templateContext = readYaml text: content
+  def clusterName = templateContext.default_context.cluster_name
+  def clusterDomain = templateContext.default_context.cluster_domain
+  if (SYSTEM_GIT_URL == "") {
+    git.checkoutGitRepository("${testEnv}/classes/system", RECLASS_MODEL_URL, RECLASS_MODEL_BRANCH, CREDENTIALS_ID)
+  } else {
+    dir("${testEnv}/classes/system") {
+      if (!gerrit.gerritPatchsetCheckout(SYSTEM_GIT_URL, SYSTEM_GIT_REF, "HEAD", CREDENTIALS_ID)) {
+        common.errorMsg("Failed to obtain system reclass with url: ${SYSTEM_GIT_URL} and ${SYSTEM_GIT_REF}")
+      }
     }
+  }
 
-    def nbTry = 0
-    while (nbTry < 5) {
-        nbTry++
-        try {
-            def DockerCName = "${env.JOB_NAME.toLowerCase()}_${env.BUILD_TAG.toLowerCase()}"
-            saltModelTesting.setupAndTestNode(
-                    "cfg01.${clusterDomain}",
-                    clusterName,
-                    EXTRA_FORMULAS,
-                    testEnv,
-                    'pkg',
-                    DISTRIB_REVISION,
-                    'master',
-                    0,
-                    false,
-                    false,
-                    '',
-                    '',
-                    DockerCName)
-            break
-        } catch (Exception e) {
-            if (e.getMessage() == "script returned exit code 124") {
-                common.errorMsg("Impossible to test node due to timeout of salt-master, retriggering")
-            } else {
-                throw e
-            }
-        }
-    }
+  def testResult = false
+  def DockerCName = "${env.JOB_NAME.toLowerCase()}_${env.BUILD_TAG.toLowerCase()}"
+  testResult = saltModelTesting.setupAndTestNode(
+      "cfg01.${clusterDomain}",
+      clusterName,
+      EXTRA_FORMULAS,
+      testEnv,
+      'pkg',
+      DISTRIB_REVISION,
+      reclassVersion,
+      0,
+      false,
+      false,
+      '',
+      '',
+      DockerCName)
+  if (testResult) {
+    common.infoMsg("testModel finished: SUCCESS")
+  } else {
+    error('testModel finished: FAILURE')
+    throw new RuntimeException('Test stage finished: FAILURE')
+  }
 
 }
 
@@ -193,7 +192,7 @@
                     for(part in partition){
                         def basename = sh(script: "basename ${part} .yml", returnStdout: true).trim()
                         def testEnv = "${env.WORKSPACE}/model/${basename}"
-                        buildSteps.get("partition-${i}").put(basename, { testModel(basename, testEnv) })
+                        buildSteps.get("partition-${i}").put(basename, { testModel(basename, testEnv, reclassVersion) })
                     }
                 }
                 common.serial(buildSteps)
diff --git a/test-drivetrain.groovy b/test-drivetrain.groovy
new file mode 100644
index 0000000..fe7c87c
--- /dev/null
+++ b/test-drivetrain.groovy
@@ -0,0 +1,129 @@
+/**
+ *
+ * Test Drivetrain pipeline
+ *
+ * Expected parameters:
+ *   COOKIECUTTER_TEMPLATE_CONTEXT                 Template context for CookieCutter
+ *   SOURCE_MCP_VERSION                            MCP version to start with
+ *   TARGET_MCP_VERSION                            MCP version to upgrade to
+ *   FUNC_TEST_SETTINGS                            Settings for functional tests
+ *   ENVIRONMENT_IP                                IP of already deployed environment
+ *   DELETE_STACK                                  Option to delete Heat Stack
+ */
+
+
+common = new com.mirantis.mk.Common()
+salt = new com.mirantis.mk.Salt()
+test = new com.mirantis.mk.Test()
+python = new com.mirantis.mk.Python()
+
+pepperEnv = "pepperEnv"
+
+def setupPepperVirtualenv(path, url, creds) {
+    requirements = ['salt-pepper>=0.5.2,<0.5.4']
+    python.setupVirtualenv(path, 'python2', requirements, null, true, true)
+    rcFile = "${path}/pepperrc"
+    rc = """\
+[main]
+SALTAPI_EAUTH=pam
+SALTAPI_URL=${url}
+SALTAPI_USER=${creds.username}
+SALTAPI_PASS=${creds.password}
+"""
+    writeFile file: rcFile, text: rc
+    return rcFile
+}
+
+def runJobOnJenkins(jenkinsUrl, userName, password, jobName, parameters){
+    def status = "null"
+    def jenkinsDownCmd = "curl -OL ${jenkinsUrl}/jnlpJars/jenkins-cli.jar --output ./jenkins-cli.jar"
+    def runJobFromSaltMasterCmd = "java -jar jenkins-cli.jar -s ${jenkinsUrl} -noKeyAuth -auth ${userName}:${password} build ${jobName} ${parameters} -w"
+    def waitJobFromSaltMasterCmd = "curl -s -X GET '${jenkinsUrl}/job/${jobName}/lastBuild/api/json?tree=result' --user ${userName}:${password} | jq -r '.result'"
+    salt.cmdRun(pepperEnv, "I@salt:master", jenkinsDownCmd)
+    salt.cmdRun(pepperEnv, "I@salt:master", runJobFromSaltMasterCmd)
+    while (status == "null" || status.contains("parse error")){
+        status = salt.cmdRun(pepperEnv, "I@salt:master", waitJobFromSaltMasterCmd, false)
+        status = status.get("return")[0].values()[0].trim()
+        println("The job ${jobName} result is $status")
+        if(status == "FAILURE"){
+            throw new Exception("The job ${jobName} result is FAILURE.")
+        }
+        sleep(10)
+    }
+}
+
+timeout(time: 12, unit: 'HOURS') {
+    node("python") {
+        try {
+            def mcpEnvJob
+            def saltReturn
+            def saltCreds = [:]
+            def mcpEnvJobIP
+
+            stage('Trigger deploy job') {
+                if(ENVIRONMENT_IP == ""){
+                    mcpEnvJob = build(job: "create-mcp-env", parameters: [
+                        [$class: 'StringParameterValue', name: 'OS_AZ', value: 'mcp-mk'],
+                        [$class: 'StringParameterValue', name: 'OS_PROJECT_NAME', value: 'mcp-mk'],
+                        [$class: 'StringParameterValue', name: 'STACK_NAME', value: 'jenkins-drivetrain-test-' + currentBuild.number],
+                        [$class: 'StringParameterValue', name: 'STACK_INSTALL', value: 'core,cicd'],
+                        [$class: 'BooleanParameterValue', name: 'STACK_FULL', value: false],
+                        [$class: 'BooleanParameterValue', name: 'RUN_TESTS', value: false],
+                        [$class: 'TextParameterValue', name: 'COOKIECUTTER_TEMPLATE_CONTEXT', value: COOKIECUTTER_TEMPLATE_CONTEXT]
+                    ])
+                    def mcpEnvJobDesc = mcpEnvJob.getDescription().tokenize(" ")
+                    mcpEnvJobIP = mcpEnvJobDesc[2]
+                }else{
+                    mcpEnvJobIP = ENVIRONMENT_IP
+                }
+            }
+
+            def saltMasterUrl = "http://${mcpEnvJobIP}:6969"
+            def script = "println(com.cloudbees.plugins.credentials.CredentialsProvider.lookupCredentials(com.cloudbees.plugins.credentials.common.StandardUsernamePasswordCredentials.class,jenkins.model.Jenkins.instance).findAll {cred -> cred.id == 'salt'}[0].password)"
+            def saltPasswd = sh(returnStdout: true, script: "curl -d \"script=${script}\" --user admin:r00tme http://${mcpEnvJobIP}:8081/scriptText")
+            saltPasswd = saltPasswd.trim()
+            saltCreds.put("username", "salt")
+            saltCreds.put("password", saltPasswd)
+            setupPepperVirtualenv(pepperEnv, saltMasterUrl, saltCreds)
+            saltReturn = salt.getPillar(pepperEnv, 'I@jenkins:client and not I@salt:master', '_param:openldap_admin_password')
+            def stackCicdPassword = saltReturn.get("return")[0].values()[0]
+            saltReturn = salt.getPillar(pepperEnv, 'I@jenkins:client and not I@salt:master', 'jenkins:client:master:host')
+            def stackCicdAddr = saltReturn.get("return")[0].values()[0]
+            def jenkinsUrl = "http://${stackCicdAddr}:8081"
+
+            salt.cmdRun(pepperEnv, "I@salt:master", 'cd /srv/salt/reclass && echo -e ".gitignore\\nclasses/service/\\nnodes/_generated/" >> .gitignore')
+            salt.cmdRun(pepperEnv, "I@salt:master", "cd /srv/salt/reclass && git reset --hard")
+            salt.cmdRun(pepperEnv, "I@salt:master", "cd /srv/salt/reclass/classes/system && git reset --hard && git clean -fd")
+
+            //TODO: Temporary fix. Remove the line below after 2a3757a (reclass-system) is in stable tag.
+            salt.cmdRun(pepperEnv, "cid*", "mkdir /etc/aptly", false)
+
+            stage('Run CVP before upgrade') {
+                runJobOnJenkins(jenkinsUrl, "admin", stackCicdPassword, "cvp-sanity", "-p TESTS_SET=cvp-sanity-checks/cvp_checks/tests/test_drivetrain.py -p TESTS_SETTINGS='drivetrain_version=\"${SOURCE_MCP_VERSION}\"'")
+                //TODO: Enable functional tests after they become implemented.
+                //runJobOnJenkins(jenkinsUrl, "admin", stackCicdPassword, "cvp-dt-func", "-p SETTINGS=${FUNC_TEST_SETTINGS}")
+            }
+
+            stage('Run Upgrade on DriveTrain') {
+                runJobOnJenkins(jenkinsUrl, "admin", stackCicdPassword, "upgrade-mcp-release", "-p MCP_VERSION=${TARGET_MCP_VERSION}")
+            }
+
+            stage('Run CVP after upgrade') {
+                runJobOnJenkins(jenkinsUrl, "admin", stackCicdPassword, "cvp-sanity", "-p TESTS_SET=cvp-sanity-checks/cvp_checks/tests/test_drivetrain.py -p TESTS_SETTINGS='drivetrain_version=\"${TARGET_MCP_VERSION}\"'")
+                //TODO: Enable functional tests after they become implemented.
+                //runJobOnJenkins(jenkinsUrl, "admin", stackCicdPassword, "cvp-dt-func", "-p SETTINGS=${FUNC_TEST_SETTINGS}")
+            }
+
+        } catch (Throwable e) {
+            currentBuild.result = 'FAILURE'
+            throw e
+        } finally{
+            if(DELETE_STACK.toBoolean() && ENVIRONMENT_IP == ""){
+                mcpEnvJob = build(job: "delete-heat-stack-for-mcp-env", parameters: [
+                    [$class: 'StringParameterValue', name: 'OS_PROJECT_NAME', value: 'mcp-mk'],
+                    [$class: 'StringParameterValue', name: 'STACK_NAME', value: 'jenkins-drivetrain-test-' + currentBuild.number],
+                ])
+            }
+        }
+    }
+}
\ No newline at end of file
diff --git a/test-salt-model-node.groovy b/test-salt-model-node.groovy
index 694f048..ed525bd 100644
--- a/test-salt-model-node.groovy
+++ b/test-salt-model-node.groovy
@@ -29,6 +29,11 @@
 
 def checkouted = false
 
+def reclassVersion = 'v1.5.4'
+if (common.validInputParam('RECLASS_VERSION')) {
+  reclassVersion = RECLASS_VERSION
+}
+
 throttle(['test-model']) {
   timeout(time: 1, unit: 'HOURS') {
     node("python&&docker") {
@@ -62,18 +67,18 @@
         stage("test node") {
           if (checkouted) {
             def workspace = common.getWorkspace()
+            def testResult = false
             common.infoMsg("Running salt model test for node ${NODE_TARGET} in cluster ${CLUSTER_NAME}")
             try {
               def DockerCName = "${env.JOB_NAME.toLowerCase()}_${env.BUILD_TAG.toLowerCase()}"
-
-              test_result = saltModelTesting.setupAndTestNode(
+              testResult = saltModelTesting.setupAndTestNode(
                   NODE_TARGET,
                   CLUSTER_NAME,
                   EXTRA_FORMULAS,
                   workspace,
                   FORMULAS_SOURCE,
                   FORMULAS_REVISION,
-                  RECLASS_VERSION,
+                  reclassVersion,
                   MAX_CPU_PER_JOB.toInteger(),
                   RECLASS_IGNORE_CLASS_NOTFOUND,
                   LEGACY_TEST_MODE,
@@ -88,11 +93,11 @@
                 throw e
               }
             }
-            if (test_result) {
+            if (testResult) {
               common.infoMsg("Test finished: SUCCESS")
             } else {
-              common.warningMsg("Test finished: FAILURE")
-              currentBuild.result = "FAILURE"
+              error('Test node finished: FAILURE')
+              throw new RuntimeException('Test node stage finished: FAILURE')
             }
           }
         }
diff --git a/upgrade-mcp-release.groovy b/upgrade-mcp-release.groovy
index 08796c9..62e5622 100644
--- a/upgrade-mcp-release.groovy
+++ b/upgrade-mcp-release.groovy
@@ -67,12 +67,16 @@
 timeout(time: 12, unit: 'HOURS') {
     node("python") {
         try {
+            def gitMcpVersion = MCP_VERSION
             workspace = common.getWorkspace()
             python.setupPepperVirtualenv(venvPepper, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
 
             if(MCP_VERSION == ""){
                 error("You must specify MCP version")
             }
+            if(MCP_VERSION == "testing"){
+                gitMcpVersion = "master"
+            }
 
             stage("Update Reclass"){
                 def cluster_name = salt.getPillar(venvPepper, 'I@salt:master', "_param:cluster_name").get("return")[0].values()[0]
@@ -92,7 +96,7 @@
                 catch(Exception ex){
                     error("You have unstaged changes in your Reclass system model repository. Please reset them and rerun the pipeline.")
                 }
-                salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/system && git checkout $MCP_VERSION")
+                salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/system && git checkout $gitMcpVersion")
             }
 
             if(UPDATE_LOCAL_REPOS.toBoolean()){