Merge "Add opencontrail.client state for collector nodes"
diff --git a/generate-cookiecutter-products.groovy b/generate-cookiecutter-products.groovy
index fb28837..a52ea9c 100644
--- a/generate-cookiecutter-products.groovy
+++ b/generate-cookiecutter-products.groovy
@@ -6,256 +6,249 @@
  *   EMAIL_ADDRESS                      Email to send a created tar file
  *   CREDENTIALS_ID                     Credentials id for git
  **/
+import static groovy.json.JsonOutput.toJson
+import static groovy.json.JsonOutput.prettyPrint
 
 common = new com.mirantis.mk.Common()
+common2 = new com.mirantis.mcp.Common()
 git = new com.mirantis.mk.Git()
 python = new com.mirantis.mk.Python()
 saltModelTesting = new com.mirantis.mk.SaltModelTesting()
 
-
 slaveNode = env.SLAVE_NODE ?: 'python&&docker'
 gerritCredentials = env.CREDENTIALS_ID ?: 'gerrit'
 
-timeout(time: 2, unit: 'HOURS') {
+timeout(time: 1, unit: 'HOURS') {
     node(slaveNode) {
-        sshagent(credentials: [gerritCredentials]) {
-            def templateEnv = "${env.WORKSPACE}/template"
-            def modelEnv = "${env.WORKSPACE}/model"
-            def testEnv = "${env.WORKSPACE}/test"
-            def pipelineEnv = "${env.WORKSPACE}/pipelines"
+        def templateEnv = "${env.WORKSPACE}/template"
+        def modelEnv = "${env.WORKSPACE}/model"
+        def testEnv = "${env.WORKSPACE}/test"
+        def pipelineEnv = "${env.WORKSPACE}/pipelines"
 
-            try {
-                def templateContext = readYaml text: COOKIECUTTER_TEMPLATE_CONTEXT
-                def mcpVersion = templateContext.default_context.mcp_version
-                def sharedReclassUrl = templateContext.default_context.shared_reclass_url
-                def clusterDomain = templateContext.default_context.cluster_domain
-                def clusterName = templateContext.default_context.cluster_name
-                def saltMaster = templateContext.default_context.salt_master_hostname
-                def cutterEnv = "${env.WORKSPACE}/cutter"
-                def jinjaEnv = "${env.WORKSPACE}/jinja"
-                def outputDestination = "${modelEnv}/classes/cluster/${clusterName}"
-                def systemEnv = "${modelEnv}/classes/system"
-                def targetBranch = "feature/${clusterName}"
-                def templateBaseDir = "${env.WORKSPACE}/template"
-                def templateDir = "${templateEnv}/template/dir"
-                def templateOutputDir = templateBaseDir
-                def user
-                def testResult = false
-                wrap([$class: 'BuildUser']) {
-                    user = env.BUILD_USER_ID
+        try {
+            def templateContext = readYaml text: env.COOKIECUTTER_TEMPLATE_CONTEXT
+            // TODO: switch to apt_mk_version in context['mcp_version']
+            // TODO: add checks for critical vars
+            def context = templateContext['default_context']
+            // Fall back to the mcp_version git tag if no branch is specified for cookiecutter-templates
+            if (!context.get('cookiecutter_template_branch', false)) {
+                context['cookiecutter_template_branch'] = context['mcp_version']
+                // The cookiecutter-templates repo has no nightly/testing/stable branches, so use master
+                if (["nightly", "testing", "stable"].contains(context['mcp_version'])) {
+                    common.warningMsg("Fetching cookiecutter-templates from master!")
+                    context['cookiecutter_template_branch'] = 'master'
                 }
-                currentBuild.description = clusterName
-                common.infoMsg("Using context:\n" + templateContext)
-
-
-                stage('Download Cookiecutter template') {
-                    sh(script: 'find . -mindepth 1 -delete > /dev/null || true')
-                    def cookiecutterTemplateUrl = templateContext.default_context.cookiecutter_template_url
-                    def cookiecutterTemplateBranch = templateContext.default_context.cookiecutter_template_branch
-                    // Use mcpVersion git tag if not specified branch for cookiecutter-templates
-                    if (cookiecutterTemplateBranch == '') {
-                        cookiecutterTemplateBranch = mcpVersion
-                        // Don't have nightly/testing/stable for cookiecutter-templates repo, therefore use master
-                        if (["nightly", "testing", "stable"].contains(mcpVersion)) {
-                            cookiecutterTemplateBranch = 'master'
-                        }
-                    }
-                    checkout([
-                        $class           : 'GitSCM',
-                        branches         : [[name: 'FETCH_HEAD'],],
-                        extensions       : [[$class: 'RelativeTargetDirectory', relativeTargetDir: templateEnv]],
-                        userRemoteConfigs: [[url: cookiecutterTemplateUrl, refspec: cookiecutterTemplateBranch, credentialsId: gerritCredentials],],
-                    ])
-                }
-                stage('Create empty reclass model') {
-                    dir(path: modelEnv) {
-                        sh "rm -rfv .git; git init"
-                        sh "git submodule add ${sharedReclassUrl} 'classes/system'"
-                    }
-
-                    def sharedReclassBranch = templateContext.default_context.shared_reclass_branch
-                    // Use mcpVersion git tag if not specified branch for reclass-system
-                    if (sharedReclassBranch == '') {
-                        sharedReclassBranch = mcpVersion
-                        // Don't have nightly/testing for reclass-system repo, therefore use master
-                        if (["nightly", "testing", "stable"].contains(mcpVersion)) {
-                            common.warningMsg("Fetching reclass-system from master!")
-                            sharedReclassBranch = 'master'
-                        }
-                    }
-                    checkout([
-                        $class           : 'GitSCM',
-                        branches         : [[name: 'FETCH_HEAD'],],
-                        extensions       : [[$class: 'RelativeTargetDirectory', relativeTargetDir: systemEnv]],
-                        userRemoteConfigs: [[url: sharedReclassUrl, refspec: sharedReclassBranch, credentialsId: gerritCredentials],],
-                    ])
-                    git.commitGitChanges(modelEnv, "Added new shared reclass submodule", "${user}@localhost", "${user}")
-                }
-
-                stage('Generate model') {
-                    python.setupCookiecutterVirtualenv(cutterEnv)
-                    python.generateModel(COOKIECUTTER_TEMPLATE_CONTEXT, 'default_context', saltMaster, cutterEnv, modelEnv, templateEnv, false)
-                    git.commitGitChanges(modelEnv, "Create model ${clusterName}", "${user}@localhost", "${user}")
-                }
-
-                stage("Test") {
-                    if (TEST_MODEL.toBoolean() && sharedReclassUrl != '') {
-                        distribRevision = mcpVersion
-                        if (['master'].contains(mcpVersion)) {
-                            distribRevision = 'nightly'
-                        }
-                        if (distribRevision.contains('/')) {
-                            distribRevision = distribRevision.split('/')[-1]
-                        }
-                        // Check if we are going to test bleeding-edge release, which doesn't have binary release yet
-                        if (!common.checkRemoteBinary([apt_mk_version: distribRevision]).linux_system_repo_url) {
-                            common.errorMsg("Binary release: ${distribRevision} not exist. Fallback to 'proposed'! ")
-                            distribRevision = 'proposed'
-                        }
-                        sh("cp -r ${modelEnv} ${testEnv}")
-                        def DockerCName = "${env.JOB_NAME.toLowerCase()}_${env.BUILD_TAG.toLowerCase()}"
-                        common.infoMsg("Attempt to run test against distribRevision: ${distribRevision}")
-                        try {
-                            def config = [
-                                'dockerHostname'     : "${saltMaster}.${clusterDomain}",
-                                'reclassEnv'         : testEnv,
-                                'distribRevision'    : distribRevision,
-                                'dockerContainerName': DockerCName,
-                                'testContext'        : 'salt-model-node'
-                            ]
-                            testResult = saltModelTesting.testNode(config)
-                            common.infoMsg("Test finished: SUCCESS")
-                        } catch (Exception ex) {
-                            common.warningMsg("Test finished: FAILED")
-                            testResult = false
-                        }
-                    } else {
-                        common.warningMsg("Test stage has been skipped!")
-                    }
-                }
-                stage("Generate config drives") {
-                    // apt package genisoimage is required for this stage
-
-                    // download create-config-drive
-                    // FIXME: that should be refactored, to use git clone - to be able download it from custom repo.
-                    def mcpCommonScriptsBranch = templateContext['default_context']['mcp_common_scripts_branch']
-                    if (mcpCommonScriptsBranch == '') {
-                        mcpCommonScriptsBranch = mcpVersion
-                        // Don't have n/t/s for mcp-common-scripts repo, therefore use master
-                        if (["nightly", "testing", "stable"].contains(mcpVersion)) {
-                            common.warningMsg("Fetching mcp-common-scripts from master!")
-                            mcpCommonScriptsBranch = 'master'
-                        }
-                    }
-                    def commonScriptsRepoUrl = templateContext['default_context']['mcp_common_scripts_repo'] ?: 'ssh://gerrit.mcp.mirantis.com:29418/mcp/mcp-common-scripts'
-                    checkout([
-                        $class           : 'GitSCM',
-                        branches         : [[name: 'FETCH_HEAD'],],
-                        extensions       : [[$class: 'RelativeTargetDirectory', relativeTargetDir: 'mcp-common-scripts']],
-                        userRemoteConfigs: [[url: commonScriptsRepoUrl, refspec: mcpCommonScriptsBranch, credentialsId: gerritCredentials],],
-                    ])
-
-                    sh 'cp mcp-common-scripts/config-drive/create_config_drive.sh create-config-drive && chmod +x create-config-drive'
-                    sh '[ -f mcp-common-scripts/config-drive/master_config.sh ] && cp mcp-common-scripts/config-drive/master_config.sh user_data || cp mcp-common-scripts/config-drive/master_config.yaml user_data'
-
-                    sh "git clone --mirror https://github.com/Mirantis/mk-pipelines.git ${pipelineEnv}/mk-pipelines"
-                    sh "git clone --mirror https://github.com/Mirantis/pipeline-library.git ${pipelineEnv}/pipeline-library"
-                    args = "--user-data user_data --hostname ${saltMaster} --model ${modelEnv} --mk-pipelines ${pipelineEnv}/mk-pipelines/ --pipeline-library ${pipelineEnv}/pipeline-library/ ${saltMaster}.${clusterDomain}-config.iso"
-
-                    // load data from model
-                    def smc = [:]
-                    smc['SALT_MASTER_MINION_ID'] = "${saltMaster}.${clusterDomain}"
-                    smc['SALT_MASTER_DEPLOY_IP'] = templateContext['default_context']['salt_master_management_address']
-                    smc['DEPLOY_NETWORK_GW'] = templateContext['default_context']['deploy_network_gateway']
-                    smc['DEPLOY_NETWORK_NETMASK'] = templateContext['default_context']['deploy_network_netmask']
-                    if (templateContext['default_context'].get('deploy_network_mtu')) {
-                        smc['DEPLOY_NETWORK_MTU'] = templateContext['default_context']['deploy_network_mtu']
-                    }
-                    smc['DNS_SERVERS'] = templateContext['default_context']['dns_server01']
-                    smc['MCP_VERSION'] = "${mcpVersion}"
-                    if (templateContext['default_context']['local_repositories'] == 'True') {
-                        def localRepoIP = templateContext['default_context']['local_repo_url']
-                        smc['MCP_SALT_REPO_KEY'] = "http://${localRepoIP}/public.gpg"
-                        smc['MCP_SALT_REPO_URL'] = "http://${localRepoIP}/ubuntu-xenial"
-                        smc['PIPELINES_FROM_ISO'] = 'false'
-                        smc['PIPELINE_REPO_URL'] = "http://${localRepoIP}:8088"
-                        smc['LOCAL_REPOS'] = 'true'
-                    }
-                    if (templateContext['default_context']['upstream_proxy_enabled'] == 'True') {
-                        if (templateContext['default_context']['upstream_proxy_auth_enabled'] == 'True') {
-                            smc['http_proxy'] = 'http://' + templateContext['default_context']['upstream_proxy_user'] + ':' + templateContext['default_context']['upstream_proxy_password'] + '@' + templateContext['default_context']['upstream_proxy_address'] + ':' + templateContext['default_context']['upstream_proxy_port']
-                            smc['https_proxy'] = 'http://' + templateContext['default_context']['upstream_proxy_user'] + ':' + templateContext['default_context']['upstream_proxy_password'] + '@' + templateContext['default_context']['upstream_proxy_address'] + ':' + templateContext['default_context']['upstream_proxy_port']
-                        } else {
-                            smc['http_proxy'] = 'http://' + templateContext['default_context']['upstream_proxy_address'] + ':' + templateContext['default_context']['upstream_proxy_port']
-                            smc['https_proxy'] = 'http://' + templateContext['default_context']['upstream_proxy_address'] + ':' + templateContext['default_context']['upstream_proxy_port']
-                        }
-                    }
-
-                    for (i in common.entries(smc)) {
-                        sh "sed -i 's,${i[0]}=.*,${i[0]}=${i[1]},' user_data"
-                    }
-
-                    // create cfg config-drive
-                    sh "./create-config-drive ${args}"
-                    sh("mkdir output-${clusterName} && mv ${saltMaster}.${clusterDomain}-config.iso output-${clusterName}/")
-
-                    // save cfg iso to artifacts
-                    archiveArtifacts artifacts: "output-${clusterName}/${saltMaster}.${clusterDomain}-config.iso"
-
-                    if (templateContext['default_context']['local_repositories'] == 'True') {
-                        def aptlyServerHostname = templateContext.default_context.aptly_server_hostname
-                        sh "[ -f mcp-common-scripts/config-drive/mirror_config.yaml ] && cp mcp-common-scripts/config-drive/mirror_config.yaml mirror_config || cp mcp-common-scripts/config-drive/mirror_config.sh mirror_config"
-
-                        def smc_apt = [:]
-                        smc_apt['SALT_MASTER_DEPLOY_IP'] = templateContext['default_context']['salt_master_management_address']
-                        smc_apt['APTLY_DEPLOY_IP'] = templateContext['default_context']['aptly_server_deploy_address']
-                        smc_apt['APTLY_DEPLOY_NETMASK'] = templateContext['default_context']['deploy_network_netmask']
-                        smc_apt['APTLY_MINION_ID'] = "${aptlyServerHostname}.${clusterDomain}"
-
-                        for (i in common.entries(smc_apt)) {
-                            sh "sed -i \"s,export ${i[0]}=.*,export ${i[0]}=${i[1]},\" mirror_config"
-                        }
-
-                        // create apt config-drive
-                        sh "./create-config-drive --user-data mirror_config --hostname ${aptlyServerHostname} ${aptlyServerHostname}.${clusterDomain}-config.iso"
-                        sh("mv ${aptlyServerHostname}.${clusterDomain}-config.iso output-${clusterName}/")
-
-                        // save apt iso to artifacts
-                        archiveArtifacts artifacts: "output-${clusterName}/${aptlyServerHostname}.${clusterDomain}-config.iso"
-                    }
-                }
-
-                stage('Save changes reclass model') {
-                    sh(returnStatus: true, script: "tar -czf output-${clusterName}/${clusterName}.tar.gz --exclude='*@tmp' -C ${modelEnv} .")
-                    archiveArtifacts artifacts: "output-${clusterName}/${clusterName}.tar.gz"
-
-                    if (EMAIL_ADDRESS != null && EMAIL_ADDRESS != "") {
-                        emailext(to: EMAIL_ADDRESS,
-                            attachmentsPattern: "output-${clusterName}/*",
-                            body: "Mirantis Jenkins\n\nRequested reclass model ${clusterName} has been created and attached to this email.\nEnjoy!\n\nMirantis",
-                            subject: "Your Salt model ${clusterName}")
-                    }
-                    dir("output-${clusterName}") {
-                        deleteDir()
-                    }
-                }
-
-                // Fail, but leave possibility to get failed artifacts
-                if (!testResult && TEST_MODEL.toBoolean()) {
-                    common.warningMsg('Test finished: FAILURE. Please check logs and\\or debug failed model manually!')
-                    error('Test stage finished: FAILURE')
-                }
-
-            } catch (Throwable e) {
-                currentBuild.result = "FAILURE"
-                currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
-                throw e
-            } finally {
-                stage('Clean workspace directories') {
-                    sh(script: 'find . -mindepth 1 -delete > /dev/null || true')
-                }
-                // common.sendNotification(currentBuild.result,"",["slack"])
             }
+            // Fall back to the mcp_version git tag if no branch is specified for reclass-system
+            if (!context.get('shared_reclass_branch', false)) {
+                context['shared_reclass_branch'] = context['mcp_version']
+                // The reclass-system repo has no nightly/testing branches, so use master
+                if (["nightly", "testing", "stable"].contains(context['mcp_version'])) {
+                    common.warningMsg("Fetching reclass-system from master!")
+                    context['shared_reclass_branch'] = 'master'
+                }
+            }
+            // Resolve the distribution revision used for the binary repo check and model testing
+            distribRevision = context['mcp_version']
+            if (['master'].contains(context['mcp_version'])) {
+                distribRevision = 'nightly'
+            }
+            if (distribRevision.contains('/')) {
+                distribRevision = distribRevision.split('/')[-1]
+            }
+            // Prepare workspace paths and build metadata
+            def cutterEnv = "${env.WORKSPACE}/cutter"
+            def systemEnv = "${modelEnv}/classes/system"
+            def testResult = false
+            def user
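+            // Resolve the user who triggered the build; used below as the git commit author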
+            wrap([$class: 'BuildUser']) {
+                user = env.BUILD_USER_ID
+            }
+            currentBuild.description = context['cluster_name']
+            common.infoMsg("Using context:\n" + context)
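+            // Dump the effective context as pretty-printed JSON for easier debugging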
+            print prettyPrint(toJson(context))
+            stage('Download Cookiecutter template') {
+                sh(script: 'find . -mindepth 1 -delete > /dev/null || true')
+                checkout([
+                    $class           : 'GitSCM',
+                    branches         : [[name: 'FETCH_HEAD'],],
+                    extensions       : [[$class: 'RelativeTargetDirectory', relativeTargetDir: templateEnv]],
+                    userRemoteConfigs: [[url: context['cookiecutter_template_url'], refspec: context['cookiecutter_template_branch'], credentialsId: gerritCredentials],],
+                ])
+            }
+            stage('Create empty reclass model') {
+                dir(path: modelEnv) {
+                    sh "rm -rfv .git; git init"
+                    sshagent(credentials: [gerritCredentials]) {
+                        sh "git submodule add ${context['shared_reclass_url']} 'classes/system'"
+                    }
+                }
+                checkout([
+                    $class           : 'GitSCM',
+                    branches         : [[name: 'FETCH_HEAD'],],
+                    extensions       : [[$class: 'RelativeTargetDirectory', relativeTargetDir: systemEnv]],
+                    userRemoteConfigs: [[url: context['shared_reclass_url'], refspec: context['shared_reclass_branch'], credentialsId: gerritCredentials],],
+                ])
+                git.commitGitChanges(modelEnv, "Added new shared reclass submodule", "${user}@localhost", "${user}")
+            }
+
+            stage('Generate model') {
+                python.setupCookiecutterVirtualenv(cutterEnv)
+                // FIXME refactor generateModel
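+                // Pass the effective context (re-serialized to YAML), since defaults may have been filled in above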
+                python.generateModel(common2.dumpYAML(['default_context': context]), 'default_context', context['salt_master_hostname'], cutterEnv, modelEnv, templateEnv, false)
+                git.commitGitChanges(modelEnv, "Create model ${context['cluster_name']}", "${user}@localhost", "${user}")
+            }
+
+            stage("Test") {
+                if (env.TEST_MODEL.toBoolean()) {
+                    // Check if we are testing a bleeding-edge release which has no binary release yet
+                    if (!common.checkRemoteBinary([apt_mk_version: distribRevision]).linux_system_repo_url) {
+                        common.errorMsg("Binary release: ${distribRevision} does not exist. Falling back to 'proposed'!")
+                        distribRevision = 'proposed'
+                    }
+                    sh("cp -r ${modelEnv} ${testEnv}")
+                    def DockerCName = "${env.JOB_NAME.toLowerCase()}_${env.BUILD_TAG.toLowerCase()}"
+                    common.infoMsg("Attempt to run test against distribRevision: ${distribRevision}")
+                    try {
+                        def config = [
+                            'dockerHostname'     : "${context['salt_master_hostname']}.${context['cluster_domain']}",
+                            'reclassEnv'         : testEnv,
+                            'distribRevision'    : distribRevision,
+                            'dockerContainerName': DockerCName,
+                            'testContext'        : 'salt-model-node'
+                        ]
+                        testResult = saltModelTesting.testNode(config)
+                        common.infoMsg("Test finished: SUCCESS")
+                    } catch (Exception ex) {
+                        common.warningMsg("Test finished: FAILED")
+                        testResult = false
+                    }
+                } else {
+                    common.warningMsg("Test stage has been skipped!")
+                }
+            }
+            stage("Generate config drives") {
+                // apt package genisoimage is required for this stage
+
+                // download create-config-drive
+                // FIXME: this should be refactored to use git clone, so the scripts can be fetched from a custom repo.
+                def mcpCommonScriptsBranch = context['mcp_common_scripts_branch']
+                if (mcpCommonScriptsBranch == '') {
+                    mcpCommonScriptsBranch = context['mcp_version']
+                    // The mcp-common-scripts repo has no nightly/testing/stable branches, so use master
+                    if (["nightly", "testing", "stable"].contains(context['mcp_version'])) {
+                        common.warningMsg("Fetching mcp-common-scripts from master!")
+                        mcpCommonScriptsBranch = 'master'
+                    }
+                }
+                def commonScriptsRepoUrl = context['mcp_common_scripts_repo'] ?: 'ssh://gerrit.mcp.mirantis.com:29418/mcp/mcp-common-scripts'
+                checkout([
+                    $class           : 'GitSCM',
+                    branches         : [[name: 'FETCH_HEAD'],],
+                    extensions       : [[$class: 'RelativeTargetDirectory', relativeTargetDir: 'mcp-common-scripts']],
+                    userRemoteConfigs: [[url: commonScriptsRepoUrl, refspec: mcpCommonScriptsBranch, credentialsId: gerritCredentials],],
+                ])
+
+                sh 'cp mcp-common-scripts/config-drive/create_config_drive.sh create-config-drive && chmod +x create-config-drive'
+                sh '[ -f mcp-common-scripts/config-drive/master_config.sh ] && cp mcp-common-scripts/config-drive/master_config.sh user_data || cp mcp-common-scripts/config-drive/master_config.yaml user_data'
+
+                sh "git clone --mirror https://github.com/Mirantis/mk-pipelines.git ${pipelineEnv}/mk-pipelines"
+                sh "git clone --mirror https://github.com/Mirantis/pipeline-library.git ${pipelineEnv}/pipeline-library"
+                args = "--user-data user_data --hostname ${context['salt_master_hostname']} --model ${modelEnv} --mk-pipelines ${pipelineEnv}/mk-pipelines/ --pipeline-library ${pipelineEnv}/pipeline-library/ ${context['salt_master_hostname']}.${context['cluster_domain']}-config.iso"
+
+                // load data from model
+                def smc = [:]
+                smc['SALT_MASTER_MINION_ID'] = "${context['salt_master_hostname']}.${context['cluster_domain']}"
+                smc['SALT_MASTER_DEPLOY_IP'] = context['salt_master_management_address']
+                smc['DEPLOY_NETWORK_GW'] = context['deploy_network_gateway']
+                smc['DEPLOY_NETWORK_NETMASK'] = context['deploy_network_netmask']
+                if (context.get('deploy_network_mtu')) {
+                    smc['DEPLOY_NETWORK_MTU'] = context['deploy_network_mtu']
+                }
+                smc['DNS_SERVERS'] = context['dns_server01']
+                smc['MCP_VERSION'] = "${context['mcp_version']}"
+                if (context['local_repositories'] == 'True') {
+                    def localRepoIP = context['local_repo_url']
+                    smc['MCP_SALT_REPO_KEY'] = "http://${localRepoIP}/public.gpg"
+                    smc['MCP_SALT_REPO_URL'] = "http://${localRepoIP}/ubuntu-xenial"
+                    smc['PIPELINES_FROM_ISO'] = 'false'
+                    smc['PIPELINE_REPO_URL'] = "http://${localRepoIP}:8088"
+                    smc['LOCAL_REPOS'] = 'true'
+                }
+                if (context['upstream_proxy_enabled'] == 'True') {
+                    if (context['upstream_proxy_auth_enabled'] == 'True') {
+                        smc['http_proxy'] = 'http://' + context['upstream_proxy_user'] + ':' + context['upstream_proxy_password'] + '@' + context['upstream_proxy_address'] + ':' + context['upstream_proxy_port']
+                        smc['https_proxy'] = 'http://' + context['upstream_proxy_user'] + ':' + context['upstream_proxy_password'] + '@' + context['upstream_proxy_address'] + ':' + context['upstream_proxy_port']
+                    } else {
+                        smc['http_proxy'] = 'http://' + context['upstream_proxy_address'] + ':' + context['upstream_proxy_port']
+                        smc['https_proxy'] = 'http://' + context['upstream_proxy_address'] + ':' + context['upstream_proxy_port']
+                    }
+                }
+
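+                // Substitute the collected values into user_data (lines of the form KEY=VALUE)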
+                for (i in common.entries(smc)) {
+                    sh "sed -i 's,${i[0]}=.*,${i[0]}=${i[1]},' user_data"
+                }
+
+                // create cfg config-drive
+                sh "./create-config-drive ${args}"
+                sh("mkdir output-${context['cluster_name']} && mv ${context['salt_master_hostname']}.${context['cluster_domain']}-config.iso output-${context['cluster_name']}/")
+
+                // save cfg iso to artifacts
+                archiveArtifacts artifacts: "output-${context['cluster_name']}/${context['salt_master_hostname']}.${context['cluster_domain']}-config.iso"
+
+                if (context['local_repositories'] == 'True') {
+                    def aptlyServerHostname = context.aptly_server_hostname
+                    sh "[ -f mcp-common-scripts/config-drive/mirror_config.yaml ] && cp mcp-common-scripts/config-drive/mirror_config.yaml mirror_config || cp mcp-common-scripts/config-drive/mirror_config.sh mirror_config"
+
+                    def smc_apt = [:]
+                    smc_apt['SALT_MASTER_DEPLOY_IP'] = context['salt_master_management_address']
+                    smc_apt['APTLY_DEPLOY_IP'] = context['aptly_server_deploy_address']
+                    smc_apt['APTLY_DEPLOY_NETMASK'] = context['deploy_network_netmask']
+                    smc_apt['APTLY_MINION_ID'] = "${aptlyServerHostname}.${context['cluster_domain']}"
+
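+                    // Substitute the Aptly-related values into mirror_config (lines of the form export KEY=VALUE)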
+                    for (i in common.entries(smc_apt)) {
+                        sh "sed -i \"s,export ${i[0]}=.*,export ${i[0]}=${i[1]},\" mirror_config"
+                    }
+
+                    // create apt config-drive
+                    sh "./create-config-drive --user-data mirror_config --hostname ${aptlyServerHostname} ${aptlyServerHostname}.${context['cluster_domain']}-config.iso"
+                    sh("mv ${aptlyServerHostname}.${context['cluster_domain']}-config.iso output-${context['cluster_name']}/")
+
+                    // save apt iso to artifacts
+                    archiveArtifacts artifacts: "output-${context['cluster_name']}/${aptlyServerHostname}.${context['cluster_domain']}-config.iso"
+                }
+            }
+
+            stage('Save reclass model changes') {
+                sh(returnStatus: true, script: "tar -czf output-${context['cluster_name']}/${context['cluster_name']}.tar.gz --exclude='*@tmp' -C ${modelEnv} .")
+                archiveArtifacts artifacts: "output-${context['cluster_name']}/${context['cluster_name']}.tar.gz"
+
+                if (EMAIL_ADDRESS != null && EMAIL_ADDRESS != "") {
+                    emailext(to: EMAIL_ADDRESS,
+                        attachmentsPattern: "output-${context['cluster_name']}/*",
+                        body: "Mirantis Jenkins\n\nRequested reclass model ${context['cluster_name']} has been created and attached to this email.\nEnjoy!\n\nMirantis",
+                        subject: "Your Salt model ${context['cluster_name']}")
+                }
+                dir("output-${context['cluster_name']}") {
+                    deleteDir()
+                }
+            }
+
+            // Fail, but leave possibility to get failed artifacts
+            if (!testResult && env.TEST_MODEL.toBoolean()) {
+                common.warningMsg('Test finished: FAILURE. Please check the logs and/or debug the failed model manually!')
+                error('Test stage finished: FAILURE')
+            }
+
+        } catch (Throwable e) {
+            currentBuild.result = "FAILURE"
+            currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+            throw e
+        } finally {
+            stage('Clean workspace directories') {
+                sh(script: 'find . -mindepth 1 -delete > /dev/null || true')
+            }
+            // common.sendNotification(currentBuild.result,"",["slack"])
         }
     }
 }
diff --git a/test-salt-formulas-env.groovy b/test-salt-formulas-env.groovy
index e2dbf83..608182e 100644
--- a/test-salt-formulas-env.groovy
+++ b/test-salt-formulas-env.groovy
@@ -18,6 +18,7 @@
 }
 
 def checkouted = false
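+// True when an OpenStack-specific Kitchen configuration (.kitchen.openstack.yml) is present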
+def openstackTest = false
 
 throttle(['test-formula']) {
   timeout(time: 1, unit: 'HOURS') {
@@ -37,8 +38,16 @@
         }
         stage("kitchen") {
           if (checkouted) {
-            if (fileExists(".kitchen.yml")) {
-              common.infoMsg(".kitchen.yml found, running kitchen tests")
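+            // Prefer the OpenStack Kitchen configuration when both .kitchen.yml and .kitchen.openstack.yml exist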
+            if (fileExists(".kitchen.yml") || fileExists(".kitchen.openstack.yml")) {
+              if (fileExists(".kitchen.openstack.yml")) {
+                common.infoMsg("OpenStack Kitchen test configuration found, running OpenStack Kitchen tests.")
+                if (fileExists(".kitchen.yml")) {
+                  common.infoMsg("Ignoring the Docker Kitchen test configuration file.")
+                }
+                openstackTest = true
+              } else {
+                common.infoMsg("Docker Kitchen test configuration found, running Docker Kitchen tests.")
+              }
               ruby.ensureRubyEnv()
               if (fileExists(".travis.yml")) {
                 common.infoMsg(".travis.yml found, running custom kitchen init")
@@ -46,11 +55,21 @@
                 def kitchenInit = kitchenConfigYML["install"]
                 def kitchenInstalled = false
                 if (kitchenInit && !kitchenInit.isEmpty()) {
-                  for (int i = 0; i < kitchenInit.size(); i++) {
-                    if (kitchenInit[i].trim().startsWith("test -e Gemfile")) { //found Gemfile config
-                      common.infoMsg("Custom Gemfile configuration found, using them")
-                      ruby.installKitchen(kitchenInit[i].trim())
-                      kitchenInstalled = true
+                  if (!openstackTest) {
+                    for (int i = 0; i < kitchenInit.size(); i++) {
+                      if (kitchenInit[i].trim().startsWith("test -e Gemfile")) { //found Gemfile config
+                        common.infoMsg("Custom Gemfile configuration found, using it")
+                        ruby.installKitchen(kitchenInit[i].trim())
+                        kitchenInstalled = true
+                      }
+                    }
+                  } else {
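+                    // For OpenStack-based tests, run the custom init commands (e.g. git clone) directly instead of installing Kitchen from a Gemfile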
+                    for (int i = 0; i < kitchenInit.size(); i++) {
+                      if (kitchenInit[i].trim().startsWith("git clone")) { // found custom clone step; TODO: reconsider the keyword match
+                        common.infoMsg("Custom kitchen init command found, running it")
+                        sh(kitchenInit[i].trim())
+                        kitchenInstalled = true
+                      }
                     }
                   }
                 }
diff --git a/test-salt-formulas-pipeline.groovy b/test-salt-formulas-pipeline.groovy
index 0caef9c..434654c 100644
--- a/test-salt-formulas-pipeline.groovy
+++ b/test-salt-formulas-pipeline.groovy
@@ -61,6 +61,7 @@
     [$class: 'StringParameterValue', name: 'SALT_VERSION', value: SALT_VERSION]
   ]
 }
+
 timeout(time: 2, unit: 'HOURS') {
   node(slaveNode) {
     try {
@@ -132,8 +133,15 @@
       }
     stage("kitchen") {
         if (checkouted) {
-          if (fileExists(".kitchen.yml")) {
-            common.infoMsg(".kitchen.yml found, running kitchen tests")
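+          // Prefer the OpenStack Kitchen configuration when both .kitchen.yml and .kitchen.openstack.yml exist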
+          if (fileExists(".kitchen.yml") || fileExists(".kitchen.openstack.yml")) {
+            if (fileExists(".kitchen.openstack.yml")) {
+              common.infoMsg("OpenStack Kitchen test configuration found, running OpenStack Kitchen tests.")
+              if (fileExists(".kitchen.yml")) {
+                common.infoMsg("Ignoring the Docker Kitchen test configuration file.")
+              }
+            } else {
+              common.infoMsg("Docker Kitchen test configuration found, running Docker Kitchen tests.")
+            }
             def kitchenEnvs = []
             def filteredEnvs = []
             if (fileExists(".travis.yml")) {