Merge "Add TEMPEST_VERSION parameter and fix condition PROD-21521 Change-Id: I68f3536cb6762862f1608bdb49c709d170650023"
diff --git a/ceph-replace-failed-osd.groovy b/ceph-replace-failed-osd.groovy
index 2361098..601d74d 100644
--- a/ceph-replace-failed-osd.groovy
+++ b/ceph-replace-failed-osd.groovy
@@ -11,6 +11,7 @@
  *  OSD                                 Failed OSD ids to be replaced (comma-separated list - 1,2,3)
  *  DEVICE                              Comma separated list of failed devices that will be replaced at HOST (/dev/sdb,/dev/sdc)
  *  JOURNAL_BLOCKDB_BLOCKWAL_PARTITION  Comma separated list of partitions where journal or block_db or block_wal for the failed devices on this HOST were stored (/dev/sdh2,/dev/sdh3)
+ *  DATA_PARTITION                      Comma-separated list of mounted partitions on the failed device; these partitions will be unmounted (e.g. /dev/sdb1,/dev/sdb3)
  *  CLUSTER_FLAGS                       Comma separated list of tags to apply to cluster
 *  WAIT_FOR_HEALTHY                    Wait for cluster rebalance before stopping daemons
 *  DMCRYPT                             Set to True if the OSDs being replaced are/were encrypted
@@ -26,6 +27,7 @@
 def osds = OSD.tokenize(',')
 def devices = DEVICE.tokenize(',')
 def journals_blockdbs_blockwals = JOURNAL_BLOCKDB_BLOCKWAL_PARTITION.tokenize(',')
+def mounted_partitions = DATA_PARTITION.tokenize(',')
 
 
 def runCephCommand(master, target, cmd) {
@@ -158,7 +160,6 @@
             }
 
 
-
             // zap disks `ceph-disk zap /dev/sdi`
             stage('Zap devices') {
                 for (dev in devices) {
@@ -174,9 +175,23 @@
         } else {
 
             // umount `umount /dev/sdi1`
-            stage('Umount devices') {
-                for (dev in devices) {
-                    runCephCommand(pepperEnv, HOST, 'umount ' + dev + '1')
+            stage('Umount partitions') {
+                if (mounted_partitions == null || mounted_partitions.empty) {
+                    for (dev in devices) {
+                        try {
+                            runCephCommand(pepperEnv, HOST, 'umount ' + dev + '1')
+                        } catch (Exception e) {
+                            common.warningMsg(e)
+                        }
+                    }
+                } else {
+                    for (part in mounted_partitions) {
+                        try {
+                            runCephCommand(pepperEnv, HOST, 'umount ' + part)
+                        } catch (Exception e) {
+                            common.warningMsg(e)
+                        }
+                    }
                 }
             }
 
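The fallback above relies on Groovy's tokenize() returning an empty list for an empty string, so leaving DATA_PARTITION blank keeps the previous behaviour of unmounting the first partition of each failed device. A minimal standalone sketch of that branch choice, with illustrative device names:

    def DATA_PARTITION = ''                               // parameter left blank in Jenkins
    def devices = '/dev/sdb,/dev/sdc'.tokenize(',')
    def mounted_partitions = DATA_PARTITION.tokenize(',')

    assert mounted_partitions.empty                       // ''.tokenize(',') yields []
    // same decision as the Umount stage: explicit partitions win, otherwise <device>1
    def targets = mounted_partitions ?: devices.collect { it + '1' }
    assert targets == ['/dev/sdb1', '/dev/sdc1']
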
diff --git a/cloud-deploy-pipeline.groovy b/cloud-deploy-pipeline.groovy
index a7f35d5..1eecbc0 100644
--- a/cloud-deploy-pipeline.groovy
+++ b/cloud-deploy-pipeline.groovy
@@ -329,7 +329,7 @@
 
                     if (common.checkContains('STACK_INSTALL', 'kvm')) {
                         orchestrate.installInfraKvm(venvPepper, extra_tgt)
-                        orchestrate.installFoundationInfra(venvPepper, staticMgmtNetwork, extra_target)
+                        orchestrate.installFoundationInfra(venvPepper, staticMgmtNetwork, extra_tgt)
                     }
 
                     orchestrate.validateFoundationInfra(venvPepper, extra_tgt)
diff --git a/deploy-virtual-edge-mom.groovy b/deploy-virtual-edge-mom.groovy
index af0d8a7..875195b 100644
--- a/deploy-virtual-edge-mom.groovy
+++ b/deploy-virtual-edge-mom.groovy
@@ -14,7 +14,7 @@
  *   STACK_TEMPLATE             File with stack template
  *   STACK_TEST                 Run tests (bool)
  *   EDGE_DEPLOY_SCHEMAS        Env schemas to deploy as edge clouds
- *   MOM_JOB                    Type of Master-of-Masters stack
+ *   MOM_JOB                    Name of the job used to deploy the Master-of-Masters stack
  */
 
 common = new com.mirantis.mk.Common()
@@ -31,7 +31,7 @@
     deployMoMJob = MOM_JOB
 }
 
-def deploy_schemas = '{os_ha_ovs: {deploy_job_name: "deploy-heat-os_ha_ovs", properties: {SLAVE_NODE: "python", STACK_INSTALL: "openstack,ovs", STACK_TEMPLATE: "os_ha_ovs", STACK_TYPE: "heat", FORMULA_PKG_REVISION: "testing", STACK_DELETE: false, STACK_CLUSTER_NAME: "os-ha-ovs"}}}'
+def deploy_schemas = '{os_ha_ovs: {deploy_job_name: "deploy-heat-os_ha_ovs", properties: {SLAVE_NODE: "python", STACK_INSTALL: "openstack,ovs", STACK_TEMPLATE: "os_ha_ovs", STACK_TYPE: "heat", FORMULA_PKG_REVISION: "testing", STACK_DELETE: false, STACK_CLUSTER_NAME: "os-ha-ovs", STACK_RECLASS_ADDRESS: "", STACK_RECLASS_BRANCH: ""}}}'
 if (common.validInputParam('EDGE_DEPLOY_SCHEMAS')) {
     deploy_schemas = EDGE_DEPLOY_SCHEMAS
 }
@@ -142,6 +142,8 @@
                             [$class: 'StringParameterValue', name: 'STACK_TEMPLATE_URL', value: STACK_TEMPLATE_URL],
                             [$class: 'StringParameterValue', name: 'STACK_TEMPLATE_BRANCH', value: 'master'],
                             [$class: 'StringParameterValue', name: 'STACK_TYPE', value: 'heat'],
+                            [$class: 'StringParameterValue', name: 'STACK_RECLASS_ADDRESS', value: props['STACK_RECLASS_ADDRESS']],
+                            [$class: 'StringParameterValue', name: 'STACK_RECLASS_BRANCH', value: props['STACK_RECLASS_BRANCH']],
                             [$class: 'StringParameterValue', name: 'FORMULA_PKG_REVISION', value: props['FORMULA_PKG_REVISION']],
                             [$class: 'StringParameterValue', name: 'STACK_CLUSTER_NAME', value: props['STACK_CLUSTER_NAME']],
                             [$class: 'StringParameterValue', name: 'STACK_TEST', value: ''],
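Because each edge job now receives STACK_RECLASS_ADDRESS and STACK_RECLASS_BRANCH from its schema entry, both keys must be present in every entry, which is why the default deploy_schemas string above carries them as empty strings. A hedged sketch of how an entry is consumed, assuming the string is parsed as YAML (e.g. via readYaml) into the props map used for the job parameters:

    def schemas = readYaml(text: deploy_schemas)          // assumption: YAML flow-mapping syntax
    def props = schemas['os_ha_ovs']['properties']
    // empty strings fall through to the downstream job's defaults
    assert props['STACK_RECLASS_ADDRESS'] == ''
    assert props['STACK_RECLASS_BRANCH'] == ''
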
diff --git a/generate-cookiecutter-products.groovy b/generate-cookiecutter-products.groovy
index 6964653..703620d 100644
--- a/generate-cookiecutter-products.groovy
+++ b/generate-cookiecutter-products.groovy
@@ -5,7 +5,7 @@
  *   COOKIECUTTER_TEMPLATE_CONTEXT      Context parameters for the template generation.
  *   EMAIL_ADDRESS                      Email to send a created tar file
  *
-**/
+ **/
 
 common = new com.mirantis.mk.Common()
 git = new com.mirantis.mk.Git()
@@ -14,142 +14,142 @@
 ssh = new com.mirantis.mk.Ssh()
 
 timeout(time: 12, unit: 'HOURS') {
-    node("python&&docker") {
-        def templateEnv = "${env.WORKSPACE}/template"
-        def modelEnv = "${env.WORKSPACE}/model"
-        def testEnv = "${env.WORKSPACE}/test"
-        def pipelineEnv = "${env.WORKSPACE}/pipelines"
+  node("python&&docker") {
+    def templateEnv = "${env.WORKSPACE}/template"
+    def modelEnv = "${env.WORKSPACE}/model"
+    def testEnv = "${env.WORKSPACE}/test"
+    def pipelineEnv = "${env.WORKSPACE}/pipelines"
 
-        try {
-            def templateContext = readYaml text: COOKIECUTTER_TEMPLATE_CONTEXT
-            def mcpVersion = templateContext.default_context.mcp_version
-            def sharedReclassUrl = templateContext.default_context.shared_reclass_url
-            def clusterDomain = templateContext.default_context.cluster_domain
-            def clusterName = templateContext.default_context.cluster_name
-            def saltMaster = templateContext.default_context.salt_master_hostname
-            def localRepositories = templateContext.default_context.local_repositories.toBoolean()
-            def offlineDeployment = templateContext.default_context.offline_deployment.toBoolean()
-            def cutterEnv = "${env.WORKSPACE}/cutter"
-            def jinjaEnv = "${env.WORKSPACE}/jinja"
-            def outputDestination = "${modelEnv}/classes/cluster/${clusterName}"
-            def systemEnv = "${modelEnv}/classes/system"
-            def targetBranch = "feature/${clusterName}"
-            def templateBaseDir = "${env.WORKSPACE}/template"
-            def templateDir = "${templateEnv}/template/dir"
-            def templateOutputDir = templateBaseDir
-            def user
-            wrap([$class: 'BuildUser']) {
-                user = env.BUILD_USER_ID
+    try {
+      def templateContext = readYaml text: COOKIECUTTER_TEMPLATE_CONTEXT
+      def mcpVersion = templateContext.default_context.mcp_version
+      def sharedReclassUrl = templateContext.default_context.shared_reclass_url
+      def clusterDomain = templateContext.default_context.cluster_domain
+      def clusterName = templateContext.default_context.cluster_name
+      def saltMaster = templateContext.default_context.salt_master_hostname
+      def localRepositories = templateContext.default_context.local_repositories.toBoolean()
+      def offlineDeployment = templateContext.default_context.offline_deployment.toBoolean()
+      def cutterEnv = "${env.WORKSPACE}/cutter"
+      def jinjaEnv = "${env.WORKSPACE}/jinja"
+      def outputDestination = "${modelEnv}/classes/cluster/${clusterName}"
+      def systemEnv = "${modelEnv}/classes/system"
+      def targetBranch = "feature/${clusterName}"
+      def templateBaseDir = "${env.WORKSPACE}/template"
+      def templateDir = "${templateEnv}/template/dir"
+      def templateOutputDir = templateBaseDir
+      def user
+      wrap([$class: 'BuildUser']) {
+        user = env.BUILD_USER_ID
+      }
+
+      currentBuild.description = clusterName
+      print("Using context:\n" + COOKIECUTTER_TEMPLATE_CONTEXT)
+
+      stage ('Download Cookiecutter template') {
+        def cookiecutterTemplateUrl = templateContext.default_context.cookiecutter_template_url
+        def cookiecutterTemplateBranch = templateContext.default_context.cookiecutter_template_branch
+        git.checkoutGitRepository(templateEnv, cookiecutterTemplateUrl, 'master')
+        // Prefer an explicit refspec, if one is given
+        if (cookiecutterTemplateBranch.toString().startsWith('refs/')) {
+          dir(templateEnv) {
+            ssh.agentSh("git fetch ${cookiecutterTemplateUrl} ${cookiecutterTemplateBranch} && git checkout FETCH_HEAD")
+          }
+        } else {
+          // Use the mcpVersion git tag if no branch is specified for cookiecutter-templates
+          if (cookiecutterTemplateBranch == '') {
+            cookiecutterTemplateBranch = mcpVersion
+            // The cookiecutter-templates repo has no nightly/testing/stable branches, so use master
+            if(mcpVersion == "nightly" || mcpVersion == "testing" || mcpVersion == "stable"){
+              cookiecutterTemplateBranch = 'master'
             }
+          }
+          git.changeGitBranch(templateEnv, cookiecutterTemplateBranch)
+        }
+      }
 
-            currentBuild.description = clusterName
-            print("Using context:\n" + COOKIECUTTER_TEMPLATE_CONTEXT)
+      stage ('Create empty reclass model') {
+        dir(path: modelEnv) {
+          sh "rm -rfv .git"
+          sh "git init"
+          ssh.agentSh("git submodule add ${sharedReclassUrl} 'classes/system'")
+        }
 
-            stage ('Download Cookiecutter template') {
-                def cookiecutterTemplateUrl = templateContext.default_context.cookiecutter_template_url
-                def cookiecutterTemplateBranch = templateContext.default_context.cookiecutter_template_branch
-                git.checkoutGitRepository(templateEnv, cookiecutterTemplateUrl, 'master')
-                // Use refspec if exists first of all
-                if (cookiecutterTemplateBranch.toString().startsWith('refs/')) {
-                    dir(templateEnv) {
-                        ssh.agentSh("git fetch ${cookiecutterTemplateUrl} ${cookiecutterTemplateBranch} && git checkout FETCH_HEAD")
-                    }
-                } else {
-                    // Use mcpVersion git tag if not specified branch for cookiecutter-templates
-                    if (cookiecutterTemplateBranch == '') {
-                        cookiecutterTemplateBranch = mcpVersion
-                        // Don't have nightly/testing/stable for cookiecutter-templates repo, therefore use master
-                        if(mcpVersion == "nightly" || mcpVersion == "testing" || mcpVersion == "stable"){
-                            cookiecutterTemplateBranch = 'master'
-                        }
-                    }
-                    git.changeGitBranch(templateEnv, cookiecutterTemplateBranch)
-                }
+        def sharedReclassBranch = templateContext.default_context.shared_reclass_branch
+        // Prefer an explicit refspec, if one is given
+        if (sharedReclassBranch.toString().startsWith('refs/')) {
+          dir(systemEnv) {
+            ssh.agentSh("git fetch ${sharedReclassUrl} ${sharedReclassBranch} && git checkout FETCH_HEAD")
+          }
+        } else {
+          // Use the mcpVersion git tag if no branch is specified for reclass-system
+          if (sharedReclassBranch == '') {
+            sharedReclassBranch = mcpVersion
+            // The reclass-system repo has no nightly/testing/stable branches, so use master
+            if(mcpVersion == "nightly" || mcpVersion == "testing" || mcpVersion == "stable"){
+              sharedReclassBranch = 'master'
             }
+          }
+          git.changeGitBranch(systemEnv, sharedReclassBranch)
+        }
+        git.commitGitChanges(modelEnv, "Added new shared reclass submodule", "${user}@localhost", "${user}")
+      }
 
-            stage ('Create empty reclass model') {
-                dir(path: modelEnv) {
-                    sh "rm -rfv .git"
-                    sh "git init"
-                    ssh.agentSh("git submodule add ${sharedReclassUrl} 'classes/system'")
-                }
+      def productList = ["infra", "cicd", "opencontrail", "kubernetes", "openstack", "oss", "stacklight", "ceph"]
+      for (product in productList) {
 
-                def sharedReclassBranch = templateContext.default_context.shared_reclass_branch
-                // Use refspec if exists first of all
-                if (sharedReclassBranch.toString().startsWith('refs/')) {
-                    dir(systemEnv) {
-                        ssh.agentSh("git fetch ${sharedReclassUrl} ${sharedReclassBranch} && git checkout FETCH_HEAD")
-                    }
-                } else {
-                    // Use mcpVersion git tag if not specified branch for reclass-system
-                    if (sharedReclassBranch == '') {
-                        sharedReclassBranch = mcpVersion
-                        // Don't have nightly/testing/stable for reclass-system repo, therefore use master
-                        if(mcpVersion == "nightly" || mcpVersion == "testing" || mcpVersion == "stable"){
-                            sharedReclassBranch = 'master'
-                        }
-                    }
-                    git.changeGitBranch(systemEnv, sharedReclassBranch)
-                }
-                git.commitGitChanges(modelEnv, "Added new shared reclass submodule", "${user}@localhost", "${user}")
-            }
+        // get templateOutputDir and productDir
+        if (product.startsWith("stacklight")) {
+          templateOutputDir = "${env.WORKSPACE}/output/stacklight"
 
-            def productList = ["infra", "cicd", "opencontrail", "kubernetes", "openstack", "oss", "stacklight", "ceph"]
-            for (product in productList) {
+          def stacklightVersion
+          try {
+            stacklightVersion = templateContext.default_context['stacklight_version']
+          } catch (Throwable e) {
+            common.warningMsg('Stacklight version loading failed')
+          }
 
-                // get templateOutputDir and productDir
-                if (product.startsWith("stacklight")) {
-                    templateOutputDir = "${env.WORKSPACE}/output/stacklight"
+          if (stacklightVersion) {
+            productDir = "stacklight" + stacklightVersion
+          } else {
+            productDir = "stacklight1"
+          }
 
-                    def stacklightVersion
-                    try {
-                        stacklightVersion = templateContext.default_context['stacklight_version']
-                    } catch (Throwable e) {
-                        common.warningMsg('Stacklight version loading failed')
-                    }
+        } else {
+          templateOutputDir = "${env.WORKSPACE}/output/${product}"
+          productDir = product
+        }
 
-                    if (stacklightVersion) {
-                        productDir = "stacklight" + stacklightVersion
-                    } else {
-                        productDir = "stacklight1"
-                    }
+        if (product == "infra" || (templateContext.default_context["${product}_enabled"]
+            && templateContext.default_context["${product}_enabled"].toBoolean())) {
 
-                } else {
-                    templateOutputDir = "${env.WORKSPACE}/output/${product}"
-                    productDir = product
-                }
+          templateDir = "${templateEnv}/cluster_product/${productDir}"
+          common.infoMsg("Generating product " + product + " from " + templateDir + " to " + templateOutputDir)
 
-                if (product == "infra" || (templateContext.default_context["${product}_enabled"]
-                    && templateContext.default_context["${product}_enabled"].toBoolean())) {
+          sh "rm -rf ${templateOutputDir} || true"
+          sh "mkdir -p ${templateOutputDir}"
+          sh "mkdir -p ${outputDestination}"
 
-                    templateDir = "${templateEnv}/cluster_product/${productDir}"
-                    common.infoMsg("Generating product " + product + " from " + templateDir + " to " + templateOutputDir)
+          python.setupCookiecutterVirtualenv(cutterEnv)
+          python.buildCookiecutterTemplate(templateDir, COOKIECUTTER_TEMPLATE_CONTEXT, templateOutputDir, cutterEnv, templateBaseDir)
+          sh "mv -v ${templateOutputDir}/${clusterName}/* ${outputDestination}"
+        } else {
+          common.warningMsg("Product " + product + " is disabled")
+        }
+      }
 
-                    sh "rm -rf ${templateOutputDir} || true"
-                    sh "mkdir -p ${templateOutputDir}"
-                    sh "mkdir -p ${outputDestination}"
+      if(localRepositories && !offlineDeployment){
+        def aptlyModelUrl = templateContext.default_context.local_model_url
+        dir(path: modelEnv) {
+          ssh.agentSh "git submodule add \"${aptlyModelUrl}\" \"classes/cluster/${clusterName}/cicd/aptly\""
+          if(!(mcpVersion in ["nightly", "testing", "stable"])){
+            ssh.agentSh "cd \"classes/cluster/${clusterName}/cicd/aptly\";git fetch --tags;git checkout ${mcpVersion}"
+          }
+        }
+      }
 
-                    python.setupCookiecutterVirtualenv(cutterEnv)
-                    python.buildCookiecutterTemplate(templateDir, COOKIECUTTER_TEMPLATE_CONTEXT, templateOutputDir, cutterEnv, templateBaseDir)
-                    sh "mv -v ${templateOutputDir}/${clusterName}/* ${outputDestination}"
-                } else {
-                    common.warningMsg("Product " + product + " is disabled")
-                }
-            }
-
-            if(localRepositories && !offlineDeployment){
-                def aptlyModelUrl = templateContext.default_context.local_model_url
-                dir(path: modelEnv) {
-                    ssh.agentSh "git submodule add \"${aptlyModelUrl}\" \"classes/cluster/${clusterName}/cicd/aptly\""
-                        if(!(mcpVersion in ["nightly", "testing", "stable"])){
-                        ssh.agentSh "cd \"classes/cluster/${clusterName}/cicd/aptly\";git fetch --tags;git checkout ${mcpVersion}"
-                    }
-                }
-            }
-
-            stage('Generate new SaltMaster node') {
-                def nodeFile = "${modelEnv}/nodes/${saltMaster}.${clusterDomain}.yml"
-                def nodeString = """classes:
+      stage('Generate new SaltMaster node') {
+        def nodeFile = "${modelEnv}/nodes/${saltMaster}.${clusterDomain}.yml"
+        def nodeString = """classes:
 - cluster.${clusterName}.infra.config
 parameters:
   _param:
@@ -160,145 +160,153 @@
       name: ${saltMaster}
       domain: ${clusterDomain}
     """
-                sh "mkdir -p ${modelEnv}/nodes/"
-                writeFile(file: nodeFile, text: nodeString)
+        sh "mkdir -p ${modelEnv}/nodes/"
+        writeFile(file: nodeFile, text: nodeString)
 
-                git.commitGitChanges(modelEnv, "Create model ${clusterName}", "${user}@localhost", "${user}")
-            }
+        git.commitGitChanges(modelEnv, "Create model ${clusterName}", "${user}@localhost", "${user}")
+      }
 
-            stage("Test") {
-                if (sharedReclassUrl != "" && TEST_MODEL && TEST_MODEL.toBoolean()) {
-                    sh("cp -r ${modelEnv} ${testEnv}")
-                    def DockerCName = "${env.JOB_NAME.toLowerCase()}_${env.BUILD_TAG.toLowerCase()}"
-                    saltModelTesting.setupAndTestNode(
-                            "${saltMaster}.${clusterDomain}",
-                            "",
-                            "",
-                            testEnv,
-                            'pkg',
-                            'stable',
-                            'master',
-                            0,
-                            false,
-                            false,
-                            '',
-                            '',
-                            DockerCName)
-                }
-            }
-
-            stage("Generate config drives") {
-                // apt package genisoimage is required for this stage
-
-                // download create-config-drive
-                // FIXME: that should be refactored, to use git clone - to be able download it from custom repo.
-                def mcpCommonScriptsBranch = templateContext.default_context.mcp_common_scripts_branch
-                if (mcpCommonScriptsBranch == '') {
-                    mcpCommonScriptsBranch = mcpVersion
-                    // Don't have nightly for mcp-common-scripts repo, therefore use master
-                    if(mcpVersion == "nightly"){
-                        mcpCommonScriptsBranch = 'master'
-                    }
-                }
-                def config_drive_script_url = "https://raw.githubusercontent.com/Mirantis/mcp-common-scripts/${mcpCommonScriptsBranch}/config-drive/create_config_drive.sh"
-                def user_data_script_url = "https://raw.githubusercontent.com/Mirantis/mcp-common-scripts/${mcpCommonScriptsBranch}/config-drive/master_config.sh"
-
-                sh "wget -O create-config-drive ${config_drive_script_url} && chmod +x create-config-drive"
-                sh "wget -O user_data.sh ${user_data_script_url}"
-
-                sh "git clone --mirror https://github.com/Mirantis/mk-pipelines.git ${pipelineEnv}/mk-pipelines"
-                sh "git clone --mirror https://github.com/Mirantis/pipeline-library.git ${pipelineEnv}/pipeline-library"
-                args = "--user-data user_data.sh --hostname ${saltMaster} --model ${modelEnv} --mk-pipelines ${pipelineEnv}/mk-pipelines/ --pipeline-library ${pipelineEnv}/pipeline-library/ ${saltMaster}.${clusterDomain}-config.iso"
-
-                // load data from model
-                def smc = [:]
-                smc['SALT_MASTER_MINION_ID'] = "${saltMaster}.${clusterDomain}"
-                smc['SALT_MASTER_DEPLOY_IP'] = templateContext['default_context']['salt_master_management_address']
-                smc['DEPLOY_NETWORK_GW'] = templateContext['default_context']['deploy_network_gateway']
-                smc['DEPLOY_NETWORK_NETMASK'] = templateContext['default_context']['deploy_network_netmask']
-                smc['DNS_SERVERS'] = templateContext['default_context']['dns_server01']
-                smc['MCP_VERSION'] = "${mcpVersion}"
-                if (templateContext['default_context']['local_repositories'] == 'True'){
-                    def localRepoIP = templateContext['default_context']['local_repo_url']
-                    smc['MCP_SALT_REPO_KEY'] = "http://${localRepoIP}/public.gpg"
-                    smc['MCP_SALT_REPO_URL'] = "http://${localRepoIP}/ubuntu-xenial"
-                    smc['PIPELINES_FROM_ISO'] = 'false'
-                    smc['PIPELINE_REPO_URL'] = "http://${localRepoIP}:8088"
-                    smc['LOCAL_REPOS'] = 'true'
-                }
-                if (templateContext['default_context']['upstream_proxy_enabled'] == 'True'){
-                    if (templateContext['default_context']['upstream_proxy_auth_enabled'] == 'True'){
-                        smc['http_proxy'] = 'http://' + templateContext['default_context']['upstream_proxy_user'] + ':' + templateContext['default_context']['upstream_proxy_password'] + '@' + templateContext['default_context']['upstream_proxy_address'] + ':' + templateContext['default_context']['upstream_proxy_port']
-                        smc['https_proxy'] = 'http://' + templateContext['default_context']['upstream_proxy_user'] + ':' + templateContext['default_context']['upstream_proxy_password'] + '@' + templateContext['default_context']['upstream_proxy_address'] + ':' + templateContext['default_context']['upstream_proxy_port']
-                    } else {
-                        smc['http_proxy'] = 'http://' + templateContext['default_context']['upstream_proxy_address'] + ':' + templateContext['default_context']['upstream_proxy_port']
-                        smc['https_proxy'] = 'http://' + templateContext['default_context']['upstream_proxy_address'] + ':' + templateContext['default_context']['upstream_proxy_port']
-                    }
-                }
-
-                for (i in common.entries(smc)) {
-                    sh "sed -i 's,export ${i[0]}=.*,export ${i[0]}=${i[1]},' user_data.sh"
-                }
-
-                // create cfg config-drive
-                sh "./create-config-drive ${args}"
-                sh("mkdir output-${clusterName} && mv ${saltMaster}.${clusterDomain}-config.iso output-${clusterName}/")
-
-                // save cfg iso to artifacts
-                archiveArtifacts artifacts: "output-${clusterName}/${saltMaster}.${clusterDomain}-config.iso"
-
-                if (templateContext['default_context']['local_repositories'] == 'True'){
-                    def aptlyServerHostname = templateContext.default_context.aptly_server_hostname
-                    def user_data_script_apt_url = "https://raw.githubusercontent.com/Mirantis/mcp-common-scripts/master/config-drive/mirror_config.sh"
-                    sh "wget -O mirror_config.sh ${user_data_script_apt_url}"
-
-                    def smc_apt = [:]
-                    smc_apt['SALT_MASTER_DEPLOY_IP'] = templateContext['default_context']['salt_master_management_address']
-                    smc_apt['APTLY_DEPLOY_IP'] = templateContext['default_context']['aptly_server_deploy_address']
-                    smc_apt['APTLY_DEPLOY_NETMASK'] = templateContext['default_context']['deploy_network_netmask']
-                    smc_apt['APTLY_MINION_ID'] = "${aptlyServerHostname}.${clusterDomain}"
-
-                    for (i in common.entries(smc_apt)) {
-                        sh "sed -i \"s,export ${i[0]}=.*,export ${i[0]}=${i[1]},\" mirror_config.sh"
-                    }
-
-                    // create apt config-drive
-                    sh "./create-config-drive --user-data mirror_config.sh --hostname ${aptlyServerHostname} ${aptlyServerHostname}.${clusterDomain}-config.iso"
-                    sh("mv ${aptlyServerHostname}.${clusterDomain}-config.iso output-${clusterName}/")
-
-                    // save apt iso to artifacts
-                    archiveArtifacts artifacts: "output-${clusterName}/${aptlyServerHostname}.${clusterDomain}-config.iso"
-                }
-            }
-
-            stage ('Save changes reclass model') {
-                sh(returnStatus: true, script: "tar -zcf output-${clusterName}/${clusterName}.tar.gz -C ${modelEnv} .")
-                archiveArtifacts artifacts: "output-${clusterName}/${clusterName}.tar.gz"
-
-
-                if (EMAIL_ADDRESS != null && EMAIL_ADDRESS != "") {
-                     emailext(to: EMAIL_ADDRESS,
-                              attachmentsPattern: "output-${clusterName}/*",
-                              body: "Mirantis Jenkins\n\nRequested reclass model ${clusterName} has been created and attached to this email.\nEnjoy!\n\nMirantis",
-                              subject: "Your Salt model ${clusterName}")
-                }
-                dir("output-${clusterName}"){
-                    deleteDir()
-                }
-            }
-
-        } catch (Throwable e) {
-             // If there was an error or exception thrown, the build failed
-             currentBuild.result = "FAILURE"
-             currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
-             throw e
-        } finally {
-            stage ('Clean workspace directories') {
-                sh(returnStatus: true, script: "rm -rf ${templateEnv}")
-                sh(returnStatus: true, script: "rm -rf ${modelEnv}")
-                sh(returnStatus: true, script: "rm -rf ${pipelineEnv}")
-            }
-             // common.sendNotification(currentBuild.result,"",["slack"])
+      stage("Test") {
+        if (TEST_MODEL.toBoolean() && sharedReclassUrl != '') {
+          def testResult = false
+          sh("cp -r ${modelEnv} ${testEnv}")
+          def DockerCName = "${env.JOB_NAME.toLowerCase()}_${env.BUILD_TAG.toLowerCase()}"
+          testResult = saltModelTesting.setupAndTestNode(
+              "${saltMaster}.${clusterDomain}",
+              "",
+              "",
+              testEnv,
+              'pkg',
+              'stable',
+              'master',
+              0,
+              false,
+              false,
+              '',
+              '',
+              DockerCName)
+          if (testResult) {
+            common.infoMsg("Test finished: SUCCESS")
+          } else {
+            common.errorMsg('Test finished: FAILURE')
+            throw new RuntimeException('Test stage finished: FAILURE')
+          }
+        } else {
+          common.warningMsg("Test stage has been skipped!")
         }
+      }
+      stage("Generate config drives") {
+        // apt package genisoimage is required for this stage
+
+        // download create-config-drive
+        // FIXME: this should be refactored to use git clone, so the script can be downloaded from a custom repo.
+        def mcpCommonScriptsBranch = templateContext.default_context.mcp_common_scripts_branch
+        if (mcpCommonScriptsBranch == '') {
+          mcpCommonScriptsBranch = mcpVersion
+          // The mcp-common-scripts repo has no nightly branch, so use master
+          if(mcpVersion == "nightly"){
+            mcpCommonScriptsBranch = 'master'
+          }
+        }
+        def config_drive_script_url = "https://raw.githubusercontent.com/Mirantis/mcp-common-scripts/${mcpCommonScriptsBranch}/config-drive/create_config_drive.sh"
+        def user_data_script_url = "https://raw.githubusercontent.com/Mirantis/mcp-common-scripts/${mcpCommonScriptsBranch}/config-drive/master_config.sh"
+
+        sh "wget -O create-config-drive ${config_drive_script_url} && chmod +x create-config-drive"
+        sh "wget -O user_data.sh ${user_data_script_url}"
+
+        sh "git clone --mirror https://github.com/Mirantis/mk-pipelines.git ${pipelineEnv}/mk-pipelines"
+        sh "git clone --mirror https://github.com/Mirantis/pipeline-library.git ${pipelineEnv}/pipeline-library"
+        args = "--user-data user_data.sh --hostname ${saltMaster} --model ${modelEnv} --mk-pipelines ${pipelineEnv}/mk-pipelines/ --pipeline-library ${pipelineEnv}/pipeline-library/ ${saltMaster}.${clusterDomain}-config.iso"
+
+        // load data from model
+        def smc = [:]
+        smc['SALT_MASTER_MINION_ID'] = "${saltMaster}.${clusterDomain}"
+        smc['SALT_MASTER_DEPLOY_IP'] = templateContext['default_context']['salt_master_management_address']
+        smc['DEPLOY_NETWORK_GW'] = templateContext['default_context']['deploy_network_gateway']
+        smc['DEPLOY_NETWORK_NETMASK'] = templateContext['default_context']['deploy_network_netmask']
+        smc['DNS_SERVERS'] = templateContext['default_context']['dns_server01']
+        smc['MCP_VERSION'] = "${mcpVersion}"
+        if (templateContext['default_context']['local_repositories'] == 'True'){
+          def localRepoIP = templateContext['default_context']['local_repo_url']
+          smc['MCP_SALT_REPO_KEY'] = "http://${localRepoIP}/public.gpg"
+          smc['MCP_SALT_REPO_URL'] = "http://${localRepoIP}/ubuntu-xenial"
+          smc['PIPELINES_FROM_ISO'] = 'false'
+          smc['PIPELINE_REPO_URL'] = "http://${localRepoIP}:8088"
+          smc['LOCAL_REPOS'] = 'true'
+        }
+        if (templateContext['default_context']['upstream_proxy_enabled'] == 'True'){
+          if (templateContext['default_context']['upstream_proxy_auth_enabled'] == 'True'){
+            smc['http_proxy'] = 'http://' + templateContext['default_context']['upstream_proxy_user'] + ':' + templateContext['default_context']['upstream_proxy_password'] + '@' + templateContext['default_context']['upstream_proxy_address'] + ':' + templateContext['default_context']['upstream_proxy_port']
+            smc['https_proxy'] = 'http://' + templateContext['default_context']['upstream_proxy_user'] + ':' + templateContext['default_context']['upstream_proxy_password'] + '@' + templateContext['default_context']['upstream_proxy_address'] + ':' + templateContext['default_context']['upstream_proxy_port']
+          } else {
+            smc['http_proxy'] = 'http://' + templateContext['default_context']['upstream_proxy_address'] + ':' + templateContext['default_context']['upstream_proxy_port']
+            smc['https_proxy'] = 'http://' + templateContext['default_context']['upstream_proxy_address'] + ':' + templateContext['default_context']['upstream_proxy_port']
+          }
+        }
+
+        for (i in common.entries(smc)) {
+          sh "sed -i 's,export ${i[0]}=.*,export ${i[0]}=${i[1]},' user_data.sh"
+        }
+
+        // create cfg config-drive
+        sh "./create-config-drive ${args}"
+        sh("mkdir output-${clusterName} && mv ${saltMaster}.${clusterDomain}-config.iso output-${clusterName}/")
+
+        // save cfg iso to artifacts
+        archiveArtifacts artifacts: "output-${clusterName}/${saltMaster}.${clusterDomain}-config.iso"
+
+        if (templateContext['default_context']['local_repositories'] == 'True'){
+          def aptlyServerHostname = templateContext.default_context.aptly_server_hostname
+          def user_data_script_apt_url = "https://raw.githubusercontent.com/Mirantis/mcp-common-scripts/master/config-drive/mirror_config.sh"
+          sh "wget -O mirror_config.sh ${user_data_script_apt_url}"
+
+          def smc_apt = [:]
+          smc_apt['SALT_MASTER_DEPLOY_IP'] = templateContext['default_context']['salt_master_management_address']
+          smc_apt['APTLY_DEPLOY_IP'] = templateContext['default_context']['aptly_server_deploy_address']
+          smc_apt['APTLY_DEPLOY_NETMASK'] = templateContext['default_context']['deploy_network_netmask']
+          smc_apt['APTLY_MINION_ID'] = "${aptlyServerHostname}.${clusterDomain}"
+
+          for (i in common.entries(smc_apt)) {
+            sh "sed -i \"s,export ${i[0]}=.*,export ${i[0]}=${i[1]},\" mirror_config.sh"
+          }
+
+          // create apt config-drive
+          sh "./create-config-drive --user-data mirror_config.sh --hostname ${aptlyServerHostname} ${aptlyServerHostname}.${clusterDomain}-config.iso"
+          sh("mv ${aptlyServerHostname}.${clusterDomain}-config.iso output-${clusterName}/")
+
+          // save apt iso to artifacts
+          archiveArtifacts artifacts: "output-${clusterName}/${aptlyServerHostname}.${clusterDomain}-config.iso"
+        }
+      }
+
+      stage ('Save reclass model changes') {
+        sh(returnStatus: true, script: "tar -zcf output-${clusterName}/${clusterName}.tar.gz -C ${modelEnv} .")
+        archiveArtifacts artifacts: "output-${clusterName}/${clusterName}.tar.gz"
+
+
+        if (EMAIL_ADDRESS != null && EMAIL_ADDRESS != "") {
+          emailext(to: EMAIL_ADDRESS,
+              attachmentsPattern: "output-${clusterName}/*",
+              body: "Mirantis Jenkins\n\nRequested reclass model ${clusterName} has been created and attached to this email.\nEnjoy!\n\nMirantis",
+              subject: "Your Salt model ${clusterName}")
+        }
+        dir("output-${clusterName}"){
+          deleteDir()
+        }
+      }
+
+    } catch (Throwable e) {
+      // If there was an error or exception thrown, the build failed
+      currentBuild.result = "FAILURE"
+      currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+      throw e
+    } finally {
+      stage ('Clean workspace directories') {
+        sh(returnStatus: true, script: "rm -rf ${templateEnv}")
+        sh(returnStatus: true, script: "rm -rf ${modelEnv}")
+        sh(returnStatus: true, script: "rm -rf ${pipelineEnv}")
+      }
+      // common.sendNotification(currentBuild.result,"",["slack"])
     }
+  }
 }
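Both checkout stages above follow one branch-resolution rule: an explicit refs/ refspec is fetched directly and checked out as FETCH_HEAD, an empty branch falls back to the mcp_version tag, and the versionless nightly/testing/stable channels map to master because neither repo carries such branches. A standalone sketch of that rule (the helper name and version strings are illustrative):

    def resolveBranch(String requested, String mcpVersion) {
        if (requested.toString().startsWith('refs/')) {
            return requested                              // fetched explicitly, used via FETCH_HEAD
        }
        if (requested == '') {
            // no nightly/testing/stable branches exist in these repos
            return (mcpVersion in ['nightly', 'testing', 'stable']) ? 'master' : mcpVersion
        }
        return requested
    }

    assert resolveBranch('', 'nightly') == 'master'
    assert resolveBranch('', '2018.8.1') == '2018.8.1'
    assert resolveBranch('refs/changes/42/12342/1', 'stable') == 'refs/changes/42/12342/1'
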
diff --git a/test-cookiecutter-reclass.groovy b/test-cookiecutter-reclass.groovy
index fedbd14..5fe02db 100644
--- a/test-cookiecutter-reclass.groovy
+++ b/test-cookiecutter-reclass.groovy
@@ -77,49 +77,43 @@
 }
 
 def testModel(modelFile, testEnv) {
-    def templateEnv = "${env.WORKSPACE}"
-    def content = readFile(file: "${templateEnv}/contexts/${modelFile}.yml")
-    def templateContext = readYaml text: content
-    def clusterName = templateContext.default_context.cluster_name
-    def clusterDomain = templateContext.default_context.cluster_domain
-    if (SYSTEM_GIT_URL == "") {
-        git.checkoutGitRepository("${testEnv}/classes/system", RECLASS_MODEL_URL, RECLASS_MODEL_BRANCH, CREDENTIALS_ID)
-    } else {
-        dir("${testEnv}/classes/system") {
-            if (!gerrit.gerritPatchsetCheckout(SYSTEM_GIT_URL, SYSTEM_GIT_REF, "HEAD", CREDENTIALS_ID)) {
-              common.errorMsg("Failed to obtain system reclass with url: ${SYSTEM_GIT_URL} and ${SYSTEM_GIT_REF}")
-            }
-        }
+  def templateEnv = "${env.WORKSPACE}"
+  def content = readFile(file: "${templateEnv}/contexts/${modelFile}.yml")
+  def templateContext = readYaml text: content
+  def clusterName = templateContext.default_context.cluster_name
+  def clusterDomain = templateContext.default_context.cluster_domain
+  if (SYSTEM_GIT_URL == "") {
+    git.checkoutGitRepository("${testEnv}/classes/system", RECLASS_MODEL_URL, RECLASS_MODEL_BRANCH, CREDENTIALS_ID)
+  } else {
+    dir("${testEnv}/classes/system") {
+      if (!gerrit.gerritPatchsetCheckout(SYSTEM_GIT_URL, SYSTEM_GIT_REF, "HEAD", CREDENTIALS_ID)) {
+        common.errorMsg("Failed to obtain system reclass with url: ${SYSTEM_GIT_URL} and ${SYSTEM_GIT_REF}")
+      }
     }
+  }
 
-    def nbTry = 0
-    while (nbTry < 5) {
-        nbTry++
-        try {
-            def DockerCName = "${env.JOB_NAME.toLowerCase()}_${env.BUILD_TAG.toLowerCase()}"
-            saltModelTesting.setupAndTestNode(
-                    "cfg01.${clusterDomain}",
-                    clusterName,
-                    EXTRA_FORMULAS,
-                    testEnv,
-                    'pkg',
-                    DISTRIB_REVISION,
-                    'master',
-                    0,
-                    false,
-                    false,
-                    '',
-                    '',
-                    DockerCName)
-            break
-        } catch (Exception e) {
-            if (e.getMessage() == "script returned exit code 124") {
-                common.errorMsg("Impossible to test node due to timeout of salt-master, retriggering")
-            } else {
-                throw e
-            }
-        }
-    }
+  def testResult = false
+  def DockerCName = "${env.JOB_NAME.toLowerCase()}_${env.BUILD_TAG.toLowerCase()}"
+  testResult = saltModelTesting.setupAndTestNode(
+      "cfg01.${clusterDomain}",
+      clusterName,
+      EXTRA_FORMULAS,
+      testEnv,
+      'pkg',
+      DISTRIB_REVISION,
+      'master',
+      0,
+      false,
+      false,
+      '',
+      '',
+      DockerCName)
+  if (testResult) {
+    common.infoMsg("testModel finished: SUCCESS")
+  } else {
+    common.errorMsg('testModel finished: FAILURE')
+    throw new RuntimeException('testModel finished: FAILURE')
+  }
 
 }
 
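The retry-on-timeout loop is gone: the job now trusts the boolean returned by setupAndTestNode, the same verdict pattern used in generate-cookiecutter-products.groovy and test-salt-model-node.groovy. A minimal sketch of that shared convention (the helper name is illustrative; common is the pipeline-library helper already imported above):

    // Turn a boolean test verdict into a logged result plus a hard failure.
    def assertTestPassed(boolean testResult, String what) {
        if (testResult) {
            common.infoMsg("${what} finished: SUCCESS")
        } else {
            common.errorMsg("${what} finished: FAILURE")
            throw new RuntimeException("${what} finished: FAILURE")
        }
    }
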
diff --git a/test-drivetrain.groovy b/test-drivetrain.groovy
new file mode 100644
index 0000000..515ab71
--- /dev/null
+++ b/test-drivetrain.groovy
@@ -0,0 +1,102 @@
+/**
+ *
+ * Test Drivetrain pipeline
+ *
+ * Expected parameters:
+ *   COOKIECUTTER_TEMPLATE_CONTEXT                 Template context for CookieCutter
+ *   SOURCE_MCP_VERSION                            MCP version to start with
+ *   TARGET_MCP_VERSION                            MCP version to upgrade to
+ *   FUNC_TEST_SETTINGS                            Settings for functional tests
+ *   ENVIRONMENT_IP                                IP of already deployed environment
+ */
+
+
+common = new com.mirantis.mk.Common()
+salt = new com.mirantis.mk.Salt()
+test = new com.mirantis.mk.Test()
+python = new com.mirantis.mk.Python()
+
+pepperEnv = "pepperEnv"
+
+def setupPepperVirtualenv(path, url, creds) {
+    requirements = ['salt-pepper>=0.5.2,<0.5.4']
+    python.setupVirtualenv(path, 'python2', requirements, null, true, true)
+    rcFile = "${path}/pepperrc"
+    rc = """\
+[main]
+SALTAPI_EAUTH=pam
+SALTAPI_URL=${url}
+SALTAPI_USER=${creds.username}
+SALTAPI_PASS=${creds.password}
+"""
+    writeFile file: rcFile, text: rc
+    return rcFile
+}
+
+def runJobOnJenkins(jenkinsUrl, userName, password, jobName, parameters){
+    def jenkinsDownCmd = "curl -L ${jenkinsUrl}/jnlpJars/jenkins-cli.jar --output ./jenkins-cli.jar"
+    def runJobFromSaltMasterCmd = "java -jar jenkins-cli.jar -s ${jenkinsUrl} -noKeyAuth -auth ${userName}:${password} build ${jobName} ${parameters} -s | grep -E 'SUCCESS|UNSTABLE'"
+    salt.cmdRun(pepperEnv, "I@salt:master", jenkinsDownCmd)
+    salt.cmdRun(pepperEnv, "I@salt:master", runJobFromSaltMasterCmd)
+}
+
+timeout(time: 12, unit: 'HOURS') {
+    node("python") {
+        try {
+            def mcpEnvJob
+            def saltReturn
+            def saltCreds = [:]
+            def mcpEnvJobIP
+
+            if (ENVIRONMENT_IP == "") {
+                stage('Trigger deploy job') {
+                    mcpEnvJob = build(job: "create-mcp-env", parameters: [
+                        [$class: 'StringParameterValue', name: 'OS_AZ', value: 'mcp-mk'],
+                        [$class: 'StringParameterValue', name: 'OS_PROJECT_NAME', value: 'mcp-mk'],
+                        [$class: 'StringParameterValue', name: 'STACK_NAME', value: 'jenkins-drivetrain-test-' + currentBuild.number],
+                        [$class: 'StringParameterValue', name: 'STACK_INSTALL', value: 'core,cicd'],
+                        [$class: 'BooleanParameterValue', name: 'STACK_FULL', value: false],
+                        [$class: 'BooleanParameterValue', name: 'RUN_TESTS', value: false],
+                        [$class: 'TextParameterValue', name: 'COOKIECUTTER_TEMPLATE_CONTEXT', value: COOKIECUTTER_TEMPLATE_CONTEXT]
+                    ])
+                }
+
+                def mcpEnvJobDesc = mcpEnvJob.getDescription().tokenize(" ")
+                mcpEnvJobIP = mcpEnvJobDesc[2]
+            } else {
+                mcpEnvJobIP = ENVIRONMENT_IP
+            }
+
+            def saltMasterUrl = "http://${mcpEnvJobIP}:6969"
+            def script = "println(com.cloudbees.plugins.credentials.CredentialsProvider.lookupCredentials(com.cloudbees.plugins.credentials.common.StandardUsernamePasswordCredentials.class,jenkins.model.Jenkins.instance).findAll {cred -> cred.id == 'salt'}[0].password)"
+            def saltPasswd = sh(returnStdout: true, script: "curl -d \"script=${script}\" --user admin:r00tme http://${mcpEnvJobIP}:8081/scriptText")
+            saltPasswd = saltPasswd.trim()
+            saltCreds.put("username", "salt")
+            saltCreds.put("password", saltPasswd)
+            setupPepperVirtualenv(pepperEnv, saltMasterUrl, saltCreds)
+            saltReturn = salt.getPillar(pepperEnv, 'I@jenkins:client and not I@salt:master', '_param:openldap_admin_password')
+            def stackCicdPassword = saltReturn.get("return")[0].values()[0]
+            saltReturn = salt.getPillar(pepperEnv, 'I@jenkins:client and not I@salt:master', 'jenkins:client:master:host')
+            def stackCicdAddr = saltReturn.get("return")[0].values()[0]
+            def jenkinsUrl = "http://${stackCicdAddr}:8081"
+
+            stage('Run CVP before upgrade') {
+                runJobOnJenkins(jenkinsUrl, "admin", stackCicdPassword, "cvp-sanity", "-p SANITY_TESTS_SET=test_drivetrain.py -p SANITY_TESTS_SETTINGS='drivetrain_version=\"${SOURCE_MCP_VERSION}\"'")
+                //runJobOnJenkins(jenkinsUrl, "admin", stackCicdPassword, "cvp-dt-func", "-p SETTINGS=${FUNC_TEST_SETTINGS}")
+            }
+
+            stage('Run Upgrade on DriveTrain') {
+                runJobOnJenkins(jenkinsUrl, "admin", stackCicdPassword, "upgrade-mcp-release", "-p MCP_VERSION=${TARGET_MCP_VERSION}")
+            }
+
+            stage('Run CVP after upgrade') {
+                runJobOnJenkins(jenkinsUrl, "admin", stackCicdPassword, "cvp-sanity", "-p SANITY_TESTS_SET=test_drivetrain.py -p SANITY_TESTS_SETTINGS='drivetrain_version=\"${TARGET_MCP_VERSION}\"'")
+                //runJobOnJenkins(jenkinsUrl, "admin", stackCicdPassword, "cvp-dt-func", "-p SETTINGS=${FUNC_TEST_SETTINGS}")
+            }
+
+        } catch (Throwable e) {
+            currentBuild.result = 'FAILURE'
+            throw e
+        }
+    }
+}
\ No newline at end of file
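runJobOnJenkins drives the deployed DriveTrain Jenkins through jenkins-cli.jar from the Salt master: the CLI's -s flag makes the build call block until the job finishes, and the trailing grep exits non-zero when neither SUCCESS nor UNSTABLE appears in the CLI output, failing the salt.cmdRun step. A hedged usage sketch, with a hypothetical host, password, and parameter string:

    def cicdJenkinsUrl = 'http://10.20.0.2:8081'          // hypothetical CI/CD address
    runJobOnJenkins(cicdJenkinsUrl, 'admin', 'secret',    // hypothetical credentials
                    'cvp-sanity',
                    '-p SANITY_TESTS_SET=test_drivetrain.py')
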
diff --git a/test-salt-model-node.groovy b/test-salt-model-node.groovy
index 694f048..9b31168 100644
--- a/test-salt-model-node.groovy
+++ b/test-salt-model-node.groovy
@@ -62,11 +62,11 @@
         stage("test node") {
           if (checkouted) {
             def workspace = common.getWorkspace()
+            def testResult = false
             common.infoMsg("Running salt model test for node ${NODE_TARGET} in cluster ${CLUSTER_NAME}")
             try {
               def DockerCName = "${env.JOB_NAME.toLowerCase()}_${env.BUILD_TAG.toLowerCase()}"
-
-              test_result = saltModelTesting.setupAndTestNode(
+              testResult = saltModelTesting.setupAndTestNode(
                   NODE_TARGET,
                   CLUSTER_NAME,
                   EXTRA_FORMULAS,
@@ -88,11 +88,11 @@
                 throw e
               }
             }
-            if (test_result) {
+            if (testResult) {
               common.infoMsg("Test finished: SUCCESS")
             } else {
-              common.warningMsg("Test finished: FAILURE")
-              currentBuild.result = "FAILURE"
+              common.errorMsg('Test node finished: FAILURE')
+              throw new RuntimeException('Test node finished: FAILURE')
             }
           }
         }