Fix cookiecutter (CC) test stages: fail the build by throwing on test failure instead of only setting currentBuild.result

Change-Id: I1ce324cd83460723a740386788fe79b47593a40b
diff --git a/generate-cookiecutter-products.groovy b/generate-cookiecutter-products.groovy
index 656c22d..703620d 100644
--- a/generate-cookiecutter-products.groovy
+++ b/generate-cookiecutter-products.groovy
@@ -5,7 +5,7 @@
  *   COOKIECUTTER_TEMPLATE_CONTEXT      Context parameters for the template generation.
  *   EMAIL_ADDRESS                      Email to send a created tar file
  *
-**/
+ **/
 
 common = new com.mirantis.mk.Common()
 git = new com.mirantis.mk.Git()
@@ -14,142 +14,142 @@
 ssh = new com.mirantis.mk.Ssh()
 
 timeout(time: 12, unit: 'HOURS') {
-    node("python&&docker") {
-        def templateEnv = "${env.WORKSPACE}/template"
-        def modelEnv = "${env.WORKSPACE}/model"
-        def testEnv = "${env.WORKSPACE}/test"
-        def pipelineEnv = "${env.WORKSPACE}/pipelines"
+  node("python&&docker") {
+    def templateEnv = "${env.WORKSPACE}/template"
+    def modelEnv = "${env.WORKSPACE}/model"
+    def testEnv = "${env.WORKSPACE}/test"
+    def pipelineEnv = "${env.WORKSPACE}/pipelines"
 
-        try {
-            def templateContext = readYaml text: COOKIECUTTER_TEMPLATE_CONTEXT
-            def mcpVersion = templateContext.default_context.mcp_version
-            def sharedReclassUrl = templateContext.default_context.shared_reclass_url
-            def clusterDomain = templateContext.default_context.cluster_domain
-            def clusterName = templateContext.default_context.cluster_name
-            def saltMaster = templateContext.default_context.salt_master_hostname
-            def localRepositories = templateContext.default_context.local_repositories.toBoolean()
-            def offlineDeployment = templateContext.default_context.offline_deployment.toBoolean()
-            def cutterEnv = "${env.WORKSPACE}/cutter"
-            def jinjaEnv = "${env.WORKSPACE}/jinja"
-            def outputDestination = "${modelEnv}/classes/cluster/${clusterName}"
-            def systemEnv = "${modelEnv}/classes/system"
-            def targetBranch = "feature/${clusterName}"
-            def templateBaseDir = "${env.WORKSPACE}/template"
-            def templateDir = "${templateEnv}/template/dir"
-            def templateOutputDir = templateBaseDir
-            def user
-            wrap([$class: 'BuildUser']) {
-                user = env.BUILD_USER_ID
+    try {
+      def templateContext = readYaml text: COOKIECUTTER_TEMPLATE_CONTEXT
+      def mcpVersion = templateContext.default_context.mcp_version
+      def sharedReclassUrl = templateContext.default_context.shared_reclass_url
+      def clusterDomain = templateContext.default_context.cluster_domain
+      def clusterName = templateContext.default_context.cluster_name
+      def saltMaster = templateContext.default_context.salt_master_hostname
+      def localRepositories = templateContext.default_context.local_repositories.toBoolean()
+      def offlineDeployment = templateContext.default_context.offline_deployment.toBoolean()
+      def cutterEnv = "${env.WORKSPACE}/cutter"
+      def jinjaEnv = "${env.WORKSPACE}/jinja"
+      def outputDestination = "${modelEnv}/classes/cluster/${clusterName}"
+      def systemEnv = "${modelEnv}/classes/system"
+      def targetBranch = "feature/${clusterName}"
+      def templateBaseDir = "${env.WORKSPACE}/template"
+      def templateDir = "${templateEnv}/template/dir"
+      def templateOutputDir = templateBaseDir
+      def user
+      wrap([$class: 'BuildUser']) {
+        user = env.BUILD_USER_ID
+      }
+
+      currentBuild.description = clusterName
+      print("Using context:\n" + COOKIECUTTER_TEMPLATE_CONTEXT)
+
+      stage ('Download Cookiecutter template') {
+        def cookiecutterTemplateUrl = templateContext.default_context.cookiecutter_template_url
+        def cookiecutterTemplateBranch = templateContext.default_context.cookiecutter_template_branch
+        git.checkoutGitRepository(templateEnv, cookiecutterTemplateUrl, 'master')
+        // Use refspec if exists first of all
+        if (cookiecutterTemplateBranch.toString().startsWith('refs/')) {
+          dir(templateEnv) {
+            ssh.agentSh("git fetch ${cookiecutterTemplateUrl} ${cookiecutterTemplateBranch} && git checkout FETCH_HEAD")
+          }
+        } else {
+          // Use mcpVersion git tag if not specified branch for cookiecutter-templates
+          if (cookiecutterTemplateBranch == '') {
+            cookiecutterTemplateBranch = mcpVersion
+            // Don't have nightly/testing/stable for cookiecutter-templates repo, therefore use master
+            if(mcpVersion == "nightly" || mcpVersion == "testing" || mcpVersion == "stable"){
+              cookiecutterTemplateBranch = 'master'
             }
+          }
+          git.changeGitBranch(templateEnv, cookiecutterTemplateBranch)
+        }
+      }
 
-            currentBuild.description = clusterName
-            print("Using context:\n" + COOKIECUTTER_TEMPLATE_CONTEXT)
+      stage ('Create empty reclass model') {
+        dir(path: modelEnv) {
+          sh "rm -rfv .git"
+          sh "git init"
+          ssh.agentSh("git submodule add ${sharedReclassUrl} 'classes/system'")
+        }
 
-            stage ('Download Cookiecutter template') {
-                def cookiecutterTemplateUrl = templateContext.default_context.cookiecutter_template_url
-                def cookiecutterTemplateBranch = templateContext.default_context.cookiecutter_template_branch
-                git.checkoutGitRepository(templateEnv, cookiecutterTemplateUrl, 'master')
-                // Use refspec if exists first of all
-                if (cookiecutterTemplateBranch.toString().startsWith('refs/')) {
-                    dir(templateEnv) {
-                        ssh.agentSh("git fetch ${cookiecutterTemplateUrl} ${cookiecutterTemplateBranch} && git checkout FETCH_HEAD")
-                    }
-                } else {
-                    // Use mcpVersion git tag if not specified branch for cookiecutter-templates
-                    if (cookiecutterTemplateBranch == '') {
-                        cookiecutterTemplateBranch = mcpVersion
-                        // Don't have nightly/testing/stable for cookiecutter-templates repo, therefore use master
-                        if(mcpVersion == "nightly" || mcpVersion == "testing" || mcpVersion == "stable"){
-                            cookiecutterTemplateBranch = 'master'
-                        }
-                    }
-                    git.changeGitBranch(templateEnv, cookiecutterTemplateBranch)
-                }
+        def sharedReclassBranch = templateContext.default_context.shared_reclass_branch
+        // Use refspec if exists first of all
+        if (sharedReclassBranch.toString().startsWith('refs/')) {
+          dir(systemEnv) {
+            ssh.agentSh("git fetch ${sharedReclassUrl} ${sharedReclassBranch} && git checkout FETCH_HEAD")
+          }
+        } else {
+          // Use mcpVersion git tag if not specified branch for reclass-system
+          if (sharedReclassBranch == '') {
+            sharedReclassBranch = mcpVersion
+            // Don't have nightly/testing/stable for reclass-system repo, therefore use master
+            if(mcpVersion == "nightly" || mcpVersion == "testing" || mcpVersion == "stable"){
+              sharedReclassBranch = 'master'
             }
+          }
+          git.changeGitBranch(systemEnv, sharedReclassBranch)
+        }
+        git.commitGitChanges(modelEnv, "Added new shared reclass submodule", "${user}@localhost", "${user}")
+      }
 
-            stage ('Create empty reclass model') {
-                dir(path: modelEnv) {
-                    sh "rm -rfv .git"
-                    sh "git init"
-                    ssh.agentSh("git submodule add ${sharedReclassUrl} 'classes/system'")
-                }
+      def productList = ["infra", "cicd", "opencontrail", "kubernetes", "openstack", "oss", "stacklight", "ceph"]
+      for (product in productList) {
 
-                def sharedReclassBranch = templateContext.default_context.shared_reclass_branch
-                // Use refspec if exists first of all
-                if (sharedReclassBranch.toString().startsWith('refs/')) {
-                    dir(systemEnv) {
-                        ssh.agentSh("git fetch ${sharedReclassUrl} ${sharedReclassBranch} && git checkout FETCH_HEAD")
-                    }
-                } else {
-                    // Use mcpVersion git tag if not specified branch for reclass-system
-                    if (sharedReclassBranch == '') {
-                        sharedReclassBranch = mcpVersion
-                        // Don't have nightly/testing/stable for reclass-system repo, therefore use master
-                        if(mcpVersion == "nightly" || mcpVersion == "testing" || mcpVersion == "stable"){
-                            sharedReclassBranch = 'master'
-                        }
-                    }
-                    git.changeGitBranch(systemEnv, sharedReclassBranch)
-                }
-                git.commitGitChanges(modelEnv, "Added new shared reclass submodule", "${user}@localhost", "${user}")
-            }
+        // get templateOutputDir and productDir
+        if (product.startsWith("stacklight")) {
+          templateOutputDir = "${env.WORKSPACE}/output/stacklight"
 
-            def productList = ["infra", "cicd", "opencontrail", "kubernetes", "openstack", "oss", "stacklight", "ceph"]
-            for (product in productList) {
+          def stacklightVersion
+          try {
+            stacklightVersion = templateContext.default_context['stacklight_version']
+          } catch (Throwable e) {
+            common.warningMsg('Stacklight version loading failed')
+          }
 
-                // get templateOutputDir and productDir
-                if (product.startsWith("stacklight")) {
-                    templateOutputDir = "${env.WORKSPACE}/output/stacklight"
+          if (stacklightVersion) {
+            productDir = "stacklight" + stacklightVersion
+          } else {
+            productDir = "stacklight1"
+          }
 
-                    def stacklightVersion
-                    try {
-                        stacklightVersion = templateContext.default_context['stacklight_version']
-                    } catch (Throwable e) {
-                        common.warningMsg('Stacklight version loading failed')
-                    }
+        } else {
+          templateOutputDir = "${env.WORKSPACE}/output/${product}"
+          productDir = product
+        }
 
-                    if (stacklightVersion) {
-                        productDir = "stacklight" + stacklightVersion
-                    } else {
-                        productDir = "stacklight1"
-                    }
+        if (product == "infra" || (templateContext.default_context["${product}_enabled"]
+            && templateContext.default_context["${product}_enabled"].toBoolean())) {
 
-                } else {
-                    templateOutputDir = "${env.WORKSPACE}/output/${product}"
-                    productDir = product
-                }
+          templateDir = "${templateEnv}/cluster_product/${productDir}"
+          common.infoMsg("Generating product " + product + " from " + templateDir + " to " + templateOutputDir)
 
-                if (product == "infra" || (templateContext.default_context["${product}_enabled"]
-                    && templateContext.default_context["${product}_enabled"].toBoolean())) {
+          sh "rm -rf ${templateOutputDir} || true"
+          sh "mkdir -p ${templateOutputDir}"
+          sh "mkdir -p ${outputDestination}"
 
-                    templateDir = "${templateEnv}/cluster_product/${productDir}"
-                    common.infoMsg("Generating product " + product + " from " + templateDir + " to " + templateOutputDir)
+          python.setupCookiecutterVirtualenv(cutterEnv)
+          python.buildCookiecutterTemplate(templateDir, COOKIECUTTER_TEMPLATE_CONTEXT, templateOutputDir, cutterEnv, templateBaseDir)
+          sh "mv -v ${templateOutputDir}/${clusterName}/* ${outputDestination}"
+        } else {
+          common.warningMsg("Product " + product + " is disabled")
+        }
+      }
 
-                    sh "rm -rf ${templateOutputDir} || true"
-                    sh "mkdir -p ${templateOutputDir}"
-                    sh "mkdir -p ${outputDestination}"
+      if(localRepositories && !offlineDeployment){
+        def aptlyModelUrl = templateContext.default_context.local_model_url
+        dir(path: modelEnv) {
+          ssh.agentSh "git submodule add \"${aptlyModelUrl}\" \"classes/cluster/${clusterName}/cicd/aptly\""
+          if(!(mcpVersion in ["nightly", "testing", "stable"])){
+            ssh.agentSh "cd \"classes/cluster/${clusterName}/cicd/aptly\";git fetch --tags;git checkout ${mcpVersion}"
+          }
+        }
+      }
 
-                    python.setupCookiecutterVirtualenv(cutterEnv)
-                    python.buildCookiecutterTemplate(templateDir, COOKIECUTTER_TEMPLATE_CONTEXT, templateOutputDir, cutterEnv, templateBaseDir)
-                    sh "mv -v ${templateOutputDir}/${clusterName}/* ${outputDestination}"
-                } else {
-                    common.warningMsg("Product " + product + " is disabled")
-                }
-            }
-
-            if(localRepositories && !offlineDeployment){
-                def aptlyModelUrl = templateContext.default_context.local_model_url
-                dir(path: modelEnv) {
-                    ssh.agentSh "git submodule add \"${aptlyModelUrl}\" \"classes/cluster/${clusterName}/cicd/aptly\""
-                        if(!(mcpVersion in ["nightly", "testing", "stable"])){
-                        ssh.agentSh "cd \"classes/cluster/${clusterName}/cicd/aptly\";git fetch --tags;git checkout ${mcpVersion}"
-                    }
-                }
-            }
-
-            stage('Generate new SaltMaster node') {
-                def nodeFile = "${modelEnv}/nodes/${saltMaster}.${clusterDomain}.yml"
-                def nodeString = """classes:
+      stage('Generate new SaltMaster node') {
+        def nodeFile = "${modelEnv}/nodes/${saltMaster}.${clusterDomain}.yml"
+        def nodeString = """classes:
 - cluster.${clusterName}.infra.config
 parameters:
   _param:
@@ -160,151 +160,153 @@
       name: ${saltMaster}
       domain: ${clusterDomain}
     """
-                sh "mkdir -p ${modelEnv}/nodes/"
-                writeFile(file: nodeFile, text: nodeString)
+        sh "mkdir -p ${modelEnv}/nodes/"
+        writeFile(file: nodeFile, text: nodeString)
 
-                git.commitGitChanges(modelEnv, "Create model ${clusterName}", "${user}@localhost", "${user}")
-            }
+        git.commitGitChanges(modelEnv, "Create model ${clusterName}", "${user}@localhost", "${user}")
+      }
 
-          stage("Test") {
-            def testResult = false
-            if (sharedReclassUrl != "" && TEST_MODEL && TEST_MODEL.toBoolean()) {
-              sh("cp -r ${modelEnv} ${testEnv}")
-              def DockerCName = "${env.JOB_NAME.toLowerCase()}_${env.BUILD_TAG.toLowerCase()}"
-              testResult = saltModelTesting.setupAndTestNode(
-                  "${saltMaster}.${clusterDomain}",
-                  "",
-                  "",
-                  testEnv,
-                  'pkg',
-                  'stable',
-                  'master',
-                  0,
-                  false,
-                  false,
-                  '',
-                  '',
-                  DockerCName)
-            }
-            if (testResult) {
-              common.infoMsg("Test finished: SUCCESS")
-            } else {
-              error('Test finished: FAILURE')
-              currentBuild.result = "FAILURE"
-            }
+      stage("Test") {
+        if (TEST_MODEL.toBoolean() && sharedReclassUrl != '') {
+          def testResult = false
+          sh("cp -r ${modelEnv} ${testEnv}")
+          def DockerCName = "${env.JOB_NAME.toLowerCase()}_${env.BUILD_TAG.toLowerCase()}"
+          testResult = saltModelTesting.setupAndTestNode(
+              "${saltMaster}.${clusterDomain}",
+              "",
+              "",
+              testEnv,
+              'pkg',
+              'stable',
+              'master',
+              0,
+              false,
+              false,
+              '',
+              '',
+              DockerCName)
+          if (testResult) {
+            common.infoMsg("Test finished: SUCCESS")
+          } else {
+            common.infoMsg('Test finished: FAILURE')
+            throw new RuntimeException('Test stage finished: FAILURE')
           }
-            stage("Generate config drives") {
-                // apt package genisoimage is required for this stage
-
-                // download create-config-drive
-                // FIXME: that should be refactored, to use git clone - to be able download it from custom repo.
-                def mcpCommonScriptsBranch = templateContext.default_context.mcp_common_scripts_branch
-                if (mcpCommonScriptsBranch == '') {
-                    mcpCommonScriptsBranch = mcpVersion
-                    // Don't have nightly for mcp-common-scripts repo, therefore use master
-                    if(mcpVersion == "nightly"){
-                        mcpCommonScriptsBranch = 'master'
-                    }
-                }
-                def config_drive_script_url = "https://raw.githubusercontent.com/Mirantis/mcp-common-scripts/${mcpCommonScriptsBranch}/config-drive/create_config_drive.sh"
-                def user_data_script_url = "https://raw.githubusercontent.com/Mirantis/mcp-common-scripts/${mcpCommonScriptsBranch}/config-drive/master_config.sh"
-
-                sh "wget -O create-config-drive ${config_drive_script_url} && chmod +x create-config-drive"
-                sh "wget -O user_data.sh ${user_data_script_url}"
-
-                sh "git clone --mirror https://github.com/Mirantis/mk-pipelines.git ${pipelineEnv}/mk-pipelines"
-                sh "git clone --mirror https://github.com/Mirantis/pipeline-library.git ${pipelineEnv}/pipeline-library"
-                args = "--user-data user_data.sh --hostname ${saltMaster} --model ${modelEnv} --mk-pipelines ${pipelineEnv}/mk-pipelines/ --pipeline-library ${pipelineEnv}/pipeline-library/ ${saltMaster}.${clusterDomain}-config.iso"
-
-                // load data from model
-                def smc = [:]
-                smc['SALT_MASTER_MINION_ID'] = "${saltMaster}.${clusterDomain}"
-                smc['SALT_MASTER_DEPLOY_IP'] = templateContext['default_context']['salt_master_management_address']
-                smc['DEPLOY_NETWORK_GW'] = templateContext['default_context']['deploy_network_gateway']
-                smc['DEPLOY_NETWORK_NETMASK'] = templateContext['default_context']['deploy_network_netmask']
-                smc['DNS_SERVERS'] = templateContext['default_context']['dns_server01']
-                smc['MCP_VERSION'] = "${mcpVersion}"
-                if (templateContext['default_context']['local_repositories'] == 'True'){
-                    def localRepoIP = templateContext['default_context']['local_repo_url']
-                    smc['MCP_SALT_REPO_KEY'] = "http://${localRepoIP}/public.gpg"
-                    smc['MCP_SALT_REPO_URL'] = "http://${localRepoIP}/ubuntu-xenial"
-                    smc['PIPELINES_FROM_ISO'] = 'false'
-                    smc['PIPELINE_REPO_URL'] = "http://${localRepoIP}:8088"
-                    smc['LOCAL_REPOS'] = 'true'
-                }
-                if (templateContext['default_context']['upstream_proxy_enabled'] == 'True'){
-                    if (templateContext['default_context']['upstream_proxy_auth_enabled'] == 'True'){
-                        smc['http_proxy'] = 'http://' + templateContext['default_context']['upstream_proxy_user'] + ':' + templateContext['default_context']['upstream_proxy_password'] + '@' + templateContext['default_context']['upstream_proxy_address'] + ':' + templateContext['default_context']['upstream_proxy_port']
-                        smc['https_proxy'] = 'http://' + templateContext['default_context']['upstream_proxy_user'] + ':' + templateContext['default_context']['upstream_proxy_password'] + '@' + templateContext['default_context']['upstream_proxy_address'] + ':' + templateContext['default_context']['upstream_proxy_port']
-                    } else {
-                        smc['http_proxy'] = 'http://' + templateContext['default_context']['upstream_proxy_address'] + ':' + templateContext['default_context']['upstream_proxy_port']
-                        smc['https_proxy'] = 'http://' + templateContext['default_context']['upstream_proxy_address'] + ':' + templateContext['default_context']['upstream_proxy_port']
-                    }
-                }
-
-                for (i in common.entries(smc)) {
-                    sh "sed -i 's,export ${i[0]}=.*,export ${i[0]}=${i[1]},' user_data.sh"
-                }
-
-                // create cfg config-drive
-                sh "./create-config-drive ${args}"
-                sh("mkdir output-${clusterName} && mv ${saltMaster}.${clusterDomain}-config.iso output-${clusterName}/")
-
-                // save cfg iso to artifacts
-                archiveArtifacts artifacts: "output-${clusterName}/${saltMaster}.${clusterDomain}-config.iso"
-
-                if (templateContext['default_context']['local_repositories'] == 'True'){
-                    def aptlyServerHostname = templateContext.default_context.aptly_server_hostname
-                    def user_data_script_apt_url = "https://raw.githubusercontent.com/Mirantis/mcp-common-scripts/master/config-drive/mirror_config.sh"
-                    sh "wget -O mirror_config.sh ${user_data_script_apt_url}"
-
-                    def smc_apt = [:]
-                    smc_apt['SALT_MASTER_DEPLOY_IP'] = templateContext['default_context']['salt_master_management_address']
-                    smc_apt['APTLY_DEPLOY_IP'] = templateContext['default_context']['aptly_server_deploy_address']
-                    smc_apt['APTLY_DEPLOY_NETMASK'] = templateContext['default_context']['deploy_network_netmask']
-                    smc_apt['APTLY_MINION_ID'] = "${aptlyServerHostname}.${clusterDomain}"
-
-                    for (i in common.entries(smc_apt)) {
-                        sh "sed -i \"s,export ${i[0]}=.*,export ${i[0]}=${i[1]},\" mirror_config.sh"
-                    }
-
-                    // create apt config-drive
-                    sh "./create-config-drive --user-data mirror_config.sh --hostname ${aptlyServerHostname} ${aptlyServerHostname}.${clusterDomain}-config.iso"
-                    sh("mv ${aptlyServerHostname}.${clusterDomain}-config.iso output-${clusterName}/")
-
-                    // save apt iso to artifacts
-                    archiveArtifacts artifacts: "output-${clusterName}/${aptlyServerHostname}.${clusterDomain}-config.iso"
-                }
-            }
-
-            stage ('Save changes reclass model') {
-                sh(returnStatus: true, script: "tar -zcf output-${clusterName}/${clusterName}.tar.gz -C ${modelEnv} .")
-                archiveArtifacts artifacts: "output-${clusterName}/${clusterName}.tar.gz"
-
-
-                if (EMAIL_ADDRESS != null && EMAIL_ADDRESS != "") {
-                     emailext(to: EMAIL_ADDRESS,
-                              attachmentsPattern: "output-${clusterName}/*",
-                              body: "Mirantis Jenkins\n\nRequested reclass model ${clusterName} has been created and attached to this email.\nEnjoy!\n\nMirantis",
-                              subject: "Your Salt model ${clusterName}")
-                }
-                dir("output-${clusterName}"){
-                    deleteDir()
-                }
-            }
-
-        } catch (Throwable e) {
-             // If there was an error or exception thrown, the build failed
-             currentBuild.result = "FAILURE"
-             currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
-             throw e
-        } finally {
-            stage ('Clean workspace directories') {
-                sh(returnStatus: true, script: "rm -rf ${templateEnv}")
-                sh(returnStatus: true, script: "rm -rf ${modelEnv}")
-                sh(returnStatus: true, script: "rm -rf ${pipelineEnv}")
-            }
-             // common.sendNotification(currentBuild.result,"",["slack"])
+        } else {
+          common.warningMsg("Test stage has been skipped!")
         }
+      }
+      stage("Generate config drives") {
+        // apt package genisoimage is required for this stage
+
+        // download create-config-drive
+        // FIXME: that should be refactored, to use git clone - to be able download it from custom repo.
+        def mcpCommonScriptsBranch = templateContext.default_context.mcp_common_scripts_branch
+        if (mcpCommonScriptsBranch == '') {
+          mcpCommonScriptsBranch = mcpVersion
+          // Don't have nightly for mcp-common-scripts repo, therefore use master
+          if(mcpVersion == "nightly"){
+            mcpCommonScriptsBranch = 'master'
+          }
+        }
+        def config_drive_script_url = "https://raw.githubusercontent.com/Mirantis/mcp-common-scripts/${mcpCommonScriptsBranch}/config-drive/create_config_drive.sh"
+        def user_data_script_url = "https://raw.githubusercontent.com/Mirantis/mcp-common-scripts/${mcpCommonScriptsBranch}/config-drive/master_config.sh"
+
+        sh "wget -O create-config-drive ${config_drive_script_url} && chmod +x create-config-drive"
+        sh "wget -O user_data.sh ${user_data_script_url}"
+
+        sh "git clone --mirror https://github.com/Mirantis/mk-pipelines.git ${pipelineEnv}/mk-pipelines"
+        sh "git clone --mirror https://github.com/Mirantis/pipeline-library.git ${pipelineEnv}/pipeline-library"
+        args = "--user-data user_data.sh --hostname ${saltMaster} --model ${modelEnv} --mk-pipelines ${pipelineEnv}/mk-pipelines/ --pipeline-library ${pipelineEnv}/pipeline-library/ ${saltMaster}.${clusterDomain}-config.iso"
+
+        // load data from model
+        def smc = [:]
+        smc['SALT_MASTER_MINION_ID'] = "${saltMaster}.${clusterDomain}"
+        smc['SALT_MASTER_DEPLOY_IP'] = templateContext['default_context']['salt_master_management_address']
+        smc['DEPLOY_NETWORK_GW'] = templateContext['default_context']['deploy_network_gateway']
+        smc['DEPLOY_NETWORK_NETMASK'] = templateContext['default_context']['deploy_network_netmask']
+        smc['DNS_SERVERS'] = templateContext['default_context']['dns_server01']
+        smc['MCP_VERSION'] = "${mcpVersion}"
+        if (templateContext['default_context']['local_repositories'] == 'True'){
+          def localRepoIP = templateContext['default_context']['local_repo_url']
+          smc['MCP_SALT_REPO_KEY'] = "http://${localRepoIP}/public.gpg"
+          smc['MCP_SALT_REPO_URL'] = "http://${localRepoIP}/ubuntu-xenial"
+          smc['PIPELINES_FROM_ISO'] = 'false'
+          smc['PIPELINE_REPO_URL'] = "http://${localRepoIP}:8088"
+          smc['LOCAL_REPOS'] = 'true'
+        }
+        if (templateContext['default_context']['upstream_proxy_enabled'] == 'True'){
+          if (templateContext['default_context']['upstream_proxy_auth_enabled'] == 'True'){
+            smc['http_proxy'] = 'http://' + templateContext['default_context']['upstream_proxy_user'] + ':' + templateContext['default_context']['upstream_proxy_password'] + '@' + templateContext['default_context']['upstream_proxy_address'] + ':' + templateContext['default_context']['upstream_proxy_port']
+            smc['https_proxy'] = 'http://' + templateContext['default_context']['upstream_proxy_user'] + ':' + templateContext['default_context']['upstream_proxy_password'] + '@' + templateContext['default_context']['upstream_proxy_address'] + ':' + templateContext['default_context']['upstream_proxy_port']
+          } else {
+            smc['http_proxy'] = 'http://' + templateContext['default_context']['upstream_proxy_address'] + ':' + templateContext['default_context']['upstream_proxy_port']
+            smc['https_proxy'] = 'http://' + templateContext['default_context']['upstream_proxy_address'] + ':' + templateContext['default_context']['upstream_proxy_port']
+          }
+        }
+
+        for (i in common.entries(smc)) {
+          sh "sed -i 's,export ${i[0]}=.*,export ${i[0]}=${i[1]},' user_data.sh"
+        }
+
+        // create cfg config-drive
+        sh "./create-config-drive ${args}"
+        sh("mkdir output-${clusterName} && mv ${saltMaster}.${clusterDomain}-config.iso output-${clusterName}/")
+
+        // save cfg iso to artifacts
+        archiveArtifacts artifacts: "output-${clusterName}/${saltMaster}.${clusterDomain}-config.iso"
+
+        if (templateContext['default_context']['local_repositories'] == 'True'){
+          def aptlyServerHostname = templateContext.default_context.aptly_server_hostname
+          def user_data_script_apt_url = "https://raw.githubusercontent.com/Mirantis/mcp-common-scripts/master/config-drive/mirror_config.sh"
+          sh "wget -O mirror_config.sh ${user_data_script_apt_url}"
+
+          def smc_apt = [:]
+          smc_apt['SALT_MASTER_DEPLOY_IP'] = templateContext['default_context']['salt_master_management_address']
+          smc_apt['APTLY_DEPLOY_IP'] = templateContext['default_context']['aptly_server_deploy_address']
+          smc_apt['APTLY_DEPLOY_NETMASK'] = templateContext['default_context']['deploy_network_netmask']
+          smc_apt['APTLY_MINION_ID'] = "${aptlyServerHostname}.${clusterDomain}"
+
+          for (i in common.entries(smc_apt)) {
+            sh "sed -i \"s,export ${i[0]}=.*,export ${i[0]}=${i[1]},\" mirror_config.sh"
+          }
+
+          // create apt config-drive
+          sh "./create-config-drive --user-data mirror_config.sh --hostname ${aptlyServerHostname} ${aptlyServerHostname}.${clusterDomain}-config.iso"
+          sh("mv ${aptlyServerHostname}.${clusterDomain}-config.iso output-${clusterName}/")
+
+          // save apt iso to artifacts
+          archiveArtifacts artifacts: "output-${clusterName}/${aptlyServerHostname}.${clusterDomain}-config.iso"
+        }
+      }
+
+      stage ('Save changes reclass model') {
+        sh(returnStatus: true, script: "tar -zcf output-${clusterName}/${clusterName}.tar.gz -C ${modelEnv} .")
+        archiveArtifacts artifacts: "output-${clusterName}/${clusterName}.tar.gz"
+
+
+        if (EMAIL_ADDRESS != null && EMAIL_ADDRESS != "") {
+          emailext(to: EMAIL_ADDRESS,
+              attachmentsPattern: "output-${clusterName}/*",
+              body: "Mirantis Jenkins\n\nRequested reclass model ${clusterName} has been created and attached to this email.\nEnjoy!\n\nMirantis",
+              subject: "Your Salt model ${clusterName}")
+        }
+        dir("output-${clusterName}"){
+          deleteDir()
+        }
+      }
+
+    } catch (Throwable e) {
+      // If there was an error or exception thrown, the build failed
+      currentBuild.result = "FAILURE"
+      currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+      throw e
+    } finally {
+      stage ('Clean workspace directories') {
+        sh(returnStatus: true, script: "rm -rf ${templateEnv}")
+        sh(returnStatus: true, script: "rm -rf ${modelEnv}")
+        sh(returnStatus: true, script: "rm -rf ${pipelineEnv}")
+      }
+      // common.sendNotification(currentBuild.result,"",["slack"])
     }
+  }
 }
diff --git a/test-cookiecutter-reclass.groovy b/test-cookiecutter-reclass.groovy
index eb74b22..5fe02db 100644
--- a/test-cookiecutter-reclass.groovy
+++ b/test-cookiecutter-reclass.groovy
@@ -112,7 +112,7 @@
     common.infoMsg("testModel finished: SUCCESS")
   } else {
     error('testModel finished: FAILURE')
-    currentBuild.result = "FAILURE"
+    throw new RuntimeException('Test stage finished: FAILURE')
   }
 
 }
diff --git a/test-salt-model-node.groovy b/test-salt-model-node.groovy
index 9e72555..9b31168 100644
--- a/test-salt-model-node.groovy
+++ b/test-salt-model-node.groovy
@@ -91,8 +91,8 @@
             if (testResult) {
               common.infoMsg("Test finished: SUCCESS")
             } else {
-              error('Test finished: FAILURE')
-              currentBuild.result = "FAILURE"
+              error('Test node finished: FAILURE')
+              throw new RuntimeException('Test node stage finished: FAILURE')
             }
           }
         }