Merge "Enable checkResponse for rally and tempest execution, get rid of log files"
diff --git a/.gitreview b/.gitreview
index 3a1eac3..b0ed746 100644
--- a/.gitreview
+++ b/.gitreview
@@ -1,4 +1,4 @@
 [gerrit]
-host=gerrit.mcp.mirantis.net
+host=gerrit.mcp.mirantis.com
 port=29418
 project=mcp-ci/pipeline-library.git
diff --git a/src/com/mirantis/mcp/Common.groovy b/src/com/mirantis/mcp/Common.groovy
index d6a34c4..b501b04 100644
--- a/src/com/mirantis/mcp/Common.groovy
+++ b/src/com/mirantis/mcp/Common.groovy
@@ -8,8 +8,8 @@
  * fix groovy List.collect()
  **/
 @NonCPS
-def constructString(ArrayList options, String keyOption, String separator = " ") {
-    return options.collect { keyOption + it }.join(separator).replaceAll("\n", "")
+def constructString(ArrayList options, String keyOption, String separator = ' ') {
+    return options.collect { keyOption + it }.join(separator).replaceAll('\n', '')
 }
 
 /**
@@ -18,8 +18,8 @@
  * @param format Defaults to yyyyMMddHHmmss
  */
 def getDatetime(format = "yyyyMMddHHmmss") {
-    def now = new Date();
-    return now.format(format, TimeZone.getTimeZone('UTC'));
+    def now = new Date()
+    return now.format(format, TimeZone.getTimeZone('UTC'))
 }
 
 /**
@@ -30,7 +30,7 @@
   if (env) {
     sh "tox -v -e ${env}"
   } else {
-    sh "tox -v"
+    sh 'tox -v'
   }
 }
 
diff --git a/src/com/mirantis/mcp/MCPArtifactory.groovy b/src/com/mirantis/mcp/MCPArtifactory.groovy
index 025ac7d..0101eb8 100644
--- a/src/com/mirantis/mcp/MCPArtifactory.groovy
+++ b/src/com/mirantis/mcp/MCPArtifactory.groovy
@@ -32,35 +32,47 @@
 }
 
 /**
- * Get URL to artifact by properties
- * Returns String with URL to found artifact or null if nothing
+ * Get URL to artifact(s) by properties
+ * Returns String(s) with URL to found artifact(s), or null if nothing found
  *
  * @param artifactoryURL String, an URL to Artifactory
  * @param properties LinkedHashMap, a Hash of properties (key-value) which
  *        which should determine artifact in Artifactory
+ * @param onlyLastItem Boolean, return only last URL if true (by default),
+ *        else return list of all found artifact URLs
+ *
  */
-def uriByProperties(String artifactoryURL, LinkedHashMap properties) {
+def uriByProperties(String artifactoryURL, LinkedHashMap properties, Boolean onlyLastItem=true) {
     def key, value
     def properties_str = ''
     for (int i = 0; i < properties.size(); i++) {
         // avoid serialization errors
-        key = properties.entrySet().toArray()[i].key
-        value = properties.entrySet().toArray()[i].value
-        properties_str += "${key}=${value}&"
+        key = properties.entrySet().toArray()[i].key.trim()
+        value = properties.entrySet().toArray()[i].value.trim()
+        properties_str += /${key}=${value}&/
     }
     def search_url = "${artifactoryURL}/api/search/prop?${properties_str}"
 
-    def result = sh(script: "bash -c \"curl -X GET \'${search_url}\'\"",
+    def result = sh(script: /curl -X GET '${search_url}'/,
             returnStdout: true).trim()
     def content = new groovy.json.JsonSlurperClassic().parseText(result)
     def uri = content.get("results")
     if (uri) {
-        return uri.last().get("uri")
+        if (onlyLastItem) {
+            return uri.last().get("uri")
+        } else {
+            res = []
+            uri.each {it ->
+                res.add(it.get("uri"))
+            }
+            return res
+        }
     } else {
         return null
     }
 }
 
+
 /**
  * Set properties for artifact in Artifactory repo
  *
@@ -268,7 +280,7 @@
              passwordVariable: 'ARTIFACTORY_PASSWORD',
              usernameVariable: 'ARTIFACTORY_LOGIN']
     ]) {
-        sh "bash -c \"curl  -u ${ARTIFACTORY_LOGIN}:${ARTIFACTORY_PASSWORD} -H \"Content-Type:application/json\" -X POST -d @${queryFile} ${url}\""
+        sh "bash -c \"curl --fail -u ${ARTIFACTORY_LOGIN}:${ARTIFACTORY_PASSWORD} -H \"Content-Type:application/json\" -X POST -d @${queryFile} ${url}\""
     }
     sh "rm -v ${queryFile}"
 }
diff --git a/src/com/mirantis/mcp/Validate.groovy b/src/com/mirantis/mcp/Validate.groovy
index 44ab9f1..dab4096 100644
--- a/src/com/mirantis/mcp/Validate.groovy
+++ b/src/com/mirantis/mcp/Validate.groovy
@@ -234,76 +234,154 @@
     }
     cmd += "rally verify report --type json --to ${dest_folder}/report-tempest.json; " +
         "rally verify report --type html --to ${dest_folder}/report-tempest.html"
-    salt.cmdRun(master, target, "docker run -i --rm --net=host -e ${env_vars} " +
+    salt.cmdRun(master, target, "docker run -w /home/rally -i --rm --net=host -e ${env_vars} " +
         "-v ${results}:${dest_folder} --entrypoint /bin/bash ${dockerImageLink} " +
         "-c \"${cmd}\" > ${results}/${output_file}")
     addFiles(master, target, results, output_dir)
 }
 
 /**
+ * Make all-in-one scenario cmd for rally tests
+ *
+ * @param scenarios_path    Path to scenarios folder/file
+ * @param skip_scenarios    Comma-delimited list of scenarios names to skip
+ * @param bundle_file       Bundle name to create
+*/
+def bundle_up_scenarios(scenarios_path, skip_scenarios, bundle_file) {
+      def skip_names = ''
+      def skip_dirs = ''
+      def result = ''
+      if (skip_scenarios != ''){
+        for ( scen in skip_scenarios.split(',') ) {
+          if ( scen.contains('yaml')) {
+            skip_names += "! -name ${scen} "
+          }
+          else {
+            skip_dirs += "-path ${scenarios_path}/${scen} -prune -o "
+          }
+        }
+      }
+      result = "if [ -f ${scenarios_path} ]; then cp ${scenarios_path} ${bundle_file}; " +
+          "else " +
+          "find -L ${scenarios_path} " + skip_dirs +
+          " -name '*.yaml' " + skip_names +
+          "-exec cat {} >> ${bundle_file} \\; ; " +
+          "sed -i '/---/d' ${bundle_file}; fi; "
+
+      return result
+}
+
+/**
  * Execute rally tests
  *
  * @param target            Host to run tests
  * @param dockerImageLink   Docker image link
+ * @param platform          What do we have underneath (openstack/k8s)
  * @param output_dir        Directory for results
- * @param repository        Git repository with files for Rally
- * @param branch            Git branch which will be used during the checkout
+ * @param config_repo       Git repository with files for Rally
+ * @param config_branch     Git config repo branch which will be used during the checkout
+ * @param plugins_repo      Git repository with Rally plugins
+ * @param plugins_branch    Git plugins repo branch which will be used during the checkout
+ * @param scenarios         Directory inside repo with specific scenarios
+ * @param sl_scenarios      Directory inside repo with specific scenarios for stacklight
+ * @param tasks_args_file   Argument file that is used for throttling settings
  * @param ext_variables     The list of external variables
  * @param results           The reports directory
  */
-def runRallyTests(master, target, dockerImageLink, output_dir, repository, branch, scenarios, tasks_args_file, ext_variables = [], results = '/root/qa_results') {
+def runRallyTests(master, target, dockerImageLink, platform, output_dir, config_repo, config_branch, plugins_repo, plugins_branch, scenarios, sl_scenarios = '', tasks_args_file = '', ext_variables = [], results = '/root/qa_results', skip_list = '') {
     def salt = new com.mirantis.mk.Salt()
     def output_file = 'docker-rally.log'
     def dest_folder = '/home/rally/qa_results'
+    def env_vars = []
+    def rally_extra_args = ''
+    def cmd_rally_plugins =
+          "git clone -b ${plugins_branch ?: 'master'} ${plugins_repo} /tmp/plugins; " +
+          "sudo pip install --upgrade /tmp/plugins; "
+    def cmd_rally_init = ''
+    def cmd_rally_checkout = "git clone -b ${config_branch ?: 'master'} ${config_repo} test_config; "
+    def cmd_rally_start = ''
+    def cmd_rally_task_args = ''
+    def cmd_rally_stacklight = ''
+    def cmd_rally_report = ''
     salt.runSaltProcessStep(master, target, 'file.remove', ["${results}"])
     salt.runSaltProcessStep(master, target, 'file.mkdir', ["${results}", "mode=777"])
-    def _pillar = salt.getPillar(master, 'I@keystone:server', 'keystone:server')
-    def keystone = _pillar['return'][0].values()[0]
-    def env_vars = ( ['tempest_version=15.0.0',
-                      "OS_USERNAME=${keystone.admin_name}",
-                      "OS_PASSWORD=${keystone.admin_password}",
-                      "OS_TENANT_NAME=${keystone.admin_tenant}",
-                      "OS_AUTH_URL=http://${keystone.bind.private_address}:${keystone.bind.private_port}/v2.0",
-                      "OS_REGION_NAME=${keystone.region}",
-                      'OS_ENDPOINT_TYPE=admin'] + ext_variables ).join(' -e ')
-    def cmd0 = ''
-    def cmd = '/opt/devops-qa-tools/deployment/configure.sh; ' +
-        'rally task start combined_scenario.yaml ' +
-        '--task-args-file /opt/devops-qa-tools/rally-scenarios/task_arguments.yaml; '
-    if (repository != '' ) {
-        cmd = 'rally db create; ' +
-            'rally deployment create --fromenv --name=existing; ' +
-            'rally deployment config; '
-        if (scenarios == '') {
-          cmd += 'rally task start test_config/rally/scenario.yaml '
-        } else {
-          cmd += "rally task start scenarios.yaml "
-          cmd0 = "git clone -b ${branch ?: 'master'} ${repository} test_config; " +
-                 "if [ -f ${scenarios} ]; then cp ${scenarios} scenarios.yaml; " +
-                 "else " +
-                 "find -L ${scenarios} -name '*.yaml' -exec cat {} >> scenarios.yaml \\; ; " +
-                 "sed -i '/---/d' scenarios.yaml; fi; "
-        }
-        switch(tasks_args_file) {
-          case 'none':
-            cmd += '; '
-            break
-          case '':
-            cmd += '--task-args-file test_config/rally/task_arguments.yaml; '
-            break
-          default:
-            cmd += "--task-args-file ${tasks_args_file}; "
-          break
-        }
+    if (platform['type'] == 'openstack') {
+      def _pillar = salt.getPillar(master, 'I@keystone:server', 'keystone:server')
+      def keystone = _pillar['return'][0].values()[0]
+      env_vars = ( ['tempest_version=15.0.0',
+          "OS_USERNAME=${keystone.admin_name}",
+          "OS_PASSWORD=${keystone.admin_password}",
+          "OS_TENANT_NAME=${keystone.admin_tenant}",
+          "OS_AUTH_URL=http://${keystone.bind.private_address}:${keystone.bind.private_port}/v2.0",
+          "OS_REGION_NAME=${keystone.region}",
+          'OS_ENDPOINT_TYPE=admin'] + ext_variables ).join(' -e ')
+      cmd_rally_init = 'rally db create; ' +
+          'rally deployment create --fromenv --name=existing; ' +
+          'rally deployment config; '
+      if (platform['stacklight_enabled'] == true) {
+        cmd_rally_stacklight = bundle_up_scenarios(sl_scenarios, skip_list, "scenarios_${platform.type}_stacklight.yaml")
+        cmd_rally_stacklight += "rally $rally_extra_args task start scenarios_${platform.type}_stacklight.yaml " +
+            "--task-args-file test_config/job-params-stacklight.yaml; "
+      }
+    } else if (platform['type'] == 'k8s') {
+      rally_extra_args = "--debug --log-file ${dest_folder}/task.log"
+      def _pillar = salt.getPillar(master, 'I@kubernetes:master and *01*', 'kubernetes:master')
+      def kubernetes = _pillar['return'][0].values()[0]
+      env_vars = [
+          "KUBERNETES_HOST=${kubernetes.apiserver.vip_address}" +
+          ":${kubernetes.apiserver.insecure_port}",
+          "KUBERNETES_CERT_AUTH=${dest_folder}/k8s-ca.crt",
+          "KUBERNETES_CLIENT_KEY=${dest_folder}/k8s-client.key",
+          "KUBERNETES_CLIENT_CERT=${dest_folder}/k8s-client.crt"].join(' -e ')
+      def k8s_ca = salt.getReturnValues(salt.runSaltProcessStep(master,
+                        'I@kubernetes:master and *01*', 'cmd.run',
+                        ["cat /etc/kubernetes/ssl/ca-kubernetes.crt"]))
+      def k8s_client_key = salt.getReturnValues(salt.runSaltProcessStep(master,
+                        'I@kubernetes:master and *01*', 'cmd.run',
+                        ["cat /etc/kubernetes/ssl/kubelet-client.key"]))
+      def k8s_client_crt = salt.getReturnValues(salt.runSaltProcessStep(master,
+                        'I@kubernetes:master and *01*', 'cmd.run',
+                        ["cat /etc/kubernetes/ssl/kubelet-client.crt"]))
+      def tmp_dir = '/tmp/kube'
+      salt.runSaltProcessStep(master, target, 'file.mkdir', ["${tmp_dir}", "mode=777"])
+      writeFile file: "${tmp_dir}/k8s-ca.crt", text: k8s_ca
+      writeFile file: "${tmp_dir}/k8s-client.key", text: k8s_client_key
+      writeFile file: "${tmp_dir}/k8s-client.crt", text: k8s_client_crt
+      salt.cmdRun(master, target, "mv ${tmp_dir}/* ${results}/")
+      salt.runSaltProcessStep(master, target, 'file.rmdir', ["${tmp_dir}"])
+      cmd_rally_init = "rally db recreate; " +
+          "rally env create --name k8s --from-sysenv; " +
+          "rally env check k8s; "
+    } else {
+      throw new Exception("Platform ${platform} is not supported yet")
     }
-    cmd += "rally task export --type junit-xml --to ${dest_folder}/report-rally.xml; " +
+    cmd_rally_checkout += bundle_up_scenarios(scenarios, skip_list, "scenarios_${platform.type}.yaml")
+    cmd_rally_start = "rally $rally_extra_args task start scenarios_${platform.type}.yaml "
+    if (config_repo != '' ) {
+      switch(tasks_args_file) {
+        case 'none':
+          cmd_rally_task_args = '; '
+          break
+        case '':
+          cmd_rally_task_args = '--task-args-file test_config/job-params-light.yaml; '
+          break
+        default:
+          cmd_rally_task_args = "--task-args-file ${tasks_args_file}; "
+        break
+      }
+    }
+    cmd_rally_report= "rally task export --type junit-xml --to ${dest_folder}/report-rally.xml; " +
         "rally task report --out ${dest_folder}/report-rally.html"
-    full_cmd = cmd0 + cmd
+    full_cmd = 'set -xe; ' + cmd_rally_plugins +
+        cmd_rally_init + cmd_rally_checkout +
+        'set +e; ' + cmd_rally_start +
+        cmd_rally_task_args + cmd_rally_stacklight +
+        cmd_rally_report
     salt.runSaltProcessStep(master, target, 'file.touch', ["${results}/rally.db"])
     salt.cmdRun(master, target, "chmod 666 ${results}/rally.db")
-    salt.cmdRun(master, target, "docker run -i --rm --net=host -e ${env_vars} " +
+    salt.cmdRun(master, target, "docker run -w /home/rally -i --rm --net=host -e ${env_vars} " +
         "-v ${results}:${dest_folder} " +
-        "-v ${results}/rally.db:/home/rally/.rally/rally.db " +
+        "-v ${results}/rally.db:/home/rally/data/rally.db " +
         "--entrypoint /bin/bash ${dockerImageLink} " +
         "-c \"${full_cmd}\" > ${results}/${output_file}")
     addFiles(master, target, results, output_dir)
@@ -415,15 +493,21 @@
                        conf_script_path="", ext_variables = []) {
     def salt = new com.mirantis.mk.Salt()
     if (testing_tools_repo != "" ) {
-        salt.cmdRun(master, target, "docker exec cvp git clone ${testing_tools_repo} cvp-configuration")
-        configure_script = conf_script_path != "" ? conf_script_path : "cvp-configuration/configure.sh"
-    } else {
-        configure_script = conf_script_path != "" ? conf_script_path : "/opt/devops-qa-tools/deployment/configure.sh"
+        if (testing_tools_repo.contains('http://') || testing_tools_repo.contains('https://')) {
+            salt.cmdRun(master, target, "docker exec cvp git clone ${testing_tools_repo} cvp-configuration")
+            configure_script = conf_script_path != "" ? conf_script_path : "cvp-configuration/configure.sh"
+        }
+        else {
+            configure_script = testing_tools_repo
+        }
+        ext_variables.addAll("PROXY=${proxy}", "TEMPEST_REPO=${tempest_repo}",
+                             "TEMPEST_ENDPOINT_TYPE=${tempest_endpoint_type}",
+                             "tempest_version=${tempest_version}")
+        salt.cmdRun(master, target, "docker exec -e " + ext_variables.join(' -e ') + " cvp bash -c ${configure_script}")
     }
-    ext_variables.addAll("PROXY=${proxy}", "TEMPEST_REPO=${tempest_repo}",
-                         "TEMPEST_ENDPOINT_TYPE=${tempest_endpoint_type}",
-                         "tempest_version=${tempest_version}")
-    salt.cmdRun(master, target, "docker exec -e " + ext_variables.join(' -e ') + " cvp bash -c ${configure_script}")
+    else {
+        common.infoMsg("TOOLS_REPO is empty, no configuration is needed for container")
+    }
 }
 
 /**
@@ -606,10 +690,27 @@
 def prepareVenv(repo_url, proxy) {
     def python = new com.mirantis.mk.Python()
     repo_name = "${repo_url}".tokenize("/").last()
+    if (repo_url.tokenize().size() > 1){
+        if (repo_url.tokenize()[1] == '-b'){
+            repo_name = repo_url.tokenize()[0].tokenize("/").last()
+        }
+    }
+    path_venv = "${env.WORKSPACE}/venv"
+    path_req = "${env.WORKSPACE}/${repo_name}/requirements.txt"
     sh "rm -rf ${repo_name}"
-    withEnv(["HTTPS_PROXY=${proxy}", "HTTP_PROXY=${proxy}", "https_proxy=${proxy}", "http_proxy=${proxy}"]) {
+    // this is temporary W/A for offline deployments
+    // Jenkins slave image has /opt/pip-mirror/ folder
+    // where pip wheels for cvp projects are located
+    if (proxy != 'offline') {
+        withEnv(["HTTPS_PROXY=${proxy}", "HTTP_PROXY=${proxy}", "https_proxy=${proxy}", "http_proxy=${proxy}"]) {
+            sh "git clone ${repo_url}"
+            python.setupVirtualenv(path_venv, "python2", [], path_req, true)
+        }
+    }
+    else {
         sh "git clone ${repo_url}"
-        python.setupVirtualenv("${env.WORKSPACE}/venv", "python2", [], "${env.WORKSPACE}/${repo_name}/requirements.txt", true)
+        sh "virtualenv ${path_venv} --python python2"
+        python.runVirtualenvCommand(path_venv, "pip install --no-index --find-links=/opt/pip-mirror/ -r ${path_req}", true)
     }
 }
 
diff --git a/src/com/mirantis/mk/Aptly.groovy b/src/com/mirantis/mk/Aptly.groovy
index 94f085f..acad991 100644
--- a/src/com/mirantis/mk/Aptly.groovy
+++ b/src/com/mirantis/mk/Aptly.groovy
@@ -117,7 +117,7 @@
 
 }
 
-def publish(server, config='/etc/aptly-publisher.yaml', recreate=false, only_latest=true, force_overwrite=true, opts='-d --timeout 3600') {
+def publish(server, config='/etc/aptly/publisher.yaml', recreate=false, only_latest=true, force_overwrite=true, opts='-d --timeout 3600') {
     if (recreate == true) {
         opts = "${opts} --recreate"
     }
diff --git a/src/com/mirantis/mk/Common.groovy b/src/com/mirantis/mk/Common.groovy
index 9a82fbe..87b1696 100644
--- a/src/com/mirantis/mk/Common.groovy
+++ b/src/com/mirantis/mk/Common.groovy
@@ -5,6 +5,7 @@
 
 import com.cloudbees.groovy.cps.NonCPS
 import groovy.json.JsonSlurperClassic
+
 /**
  *
  * Common functions
@@ -14,9 +15,9 @@
 /**
  * Generate current timestamp
  *
- * @param format    Defaults to yyyyMMddHHmmss
+ * @param format Defaults to yyyyMMddHHmmss
  */
-def getDatetime(format="yyyyMMddHHmmss") {
+def getDatetime(format = "yyyyMMddHHmmss") {
     def now = new Date();
     return now.format(format, TimeZone.getTimeZone('UTC'));
 }
@@ -26,14 +27,14 @@
  * Currently implemented by calling pwd so it won't return relevant result in
  * dir context
  */
-def getWorkspace(includeBuildNum=false) {
+def getWorkspace(includeBuildNum = false) {
     def workspace = sh script: 'pwd', returnStdout: true
     workspace = workspace.trim()
-    if(includeBuildNum){
-       if(!workspace.endsWith("/")){
-          workspace += "/"
-       }
-       workspace += env.BUILD_NUMBER
+    if (includeBuildNum) {
+        if (!workspace.endsWith("/")) {
+            workspace += "/"
+        }
+        workspace += env.BUILD_NUMBER
     }
     return workspace
 }
@@ -43,7 +44,7 @@
  * Must be run from context of node
  */
 def getJenkinsUid() {
-    return sh (
+    return sh(
         script: 'id -u',
         returnStdout: true
     ).trim()
@@ -54,7 +55,7 @@
  * Must be run from context of node
  */
 def getJenkinsGid() {
-    return sh (
+    return sh(
         script: 'id -g',
         returnStdout: true
     ).trim()
@@ -64,7 +65,7 @@
  * Returns Jenkins user uid and gid in one list (in that order)
  * Must be run from context of node
  */
-def getJenkinsUserIds(){
+def getJenkinsUserIds() {
     return sh(script: "id -u && id -g", returnStdout: true).tokenize("\n")
 }
 
@@ -72,37 +73,37 @@
  *
  * Find credentials by ID
  *
- * @param credsId    Credentials ID
- * @param credsType  Credentials type (optional)
+ * @param credsId Credentials ID
+ * @param credsType Credentials type (optional)
  *
  */
 def getCredentialsById(String credsId, String credsType = 'any') {
     def credClasses = [ // ordered by class name
-        sshKey:     com.cloudbees.jenkins.plugins.sshcredentials.impl.BasicSSHUserPrivateKey.class,
-        cert:       com.cloudbees.plugins.credentials.common.CertificateCredentials.class,
-        password:   com.cloudbees.plugins.credentials.common.StandardUsernamePasswordCredentials.class,
-        any:        com.cloudbees.plugins.credentials.impl.BaseStandardCredentials.class,
-        dockerCert: org.jenkinsci.plugins.docker.commons.credentials.DockerServerCredentials.class,
-        file:       org.jenkinsci.plugins.plaincredentials.FileCredentials.class,
-        string:     org.jenkinsci.plugins.plaincredentials.StringCredentials.class,
+                        sshKey    : com.cloudbees.jenkins.plugins.sshcredentials.impl.BasicSSHUserPrivateKey.class,
+                        cert      : com.cloudbees.plugins.credentials.common.CertificateCredentials.class,
+                        password  : com.cloudbees.plugins.credentials.common.StandardUsernamePasswordCredentials.class,
+                        any       : com.cloudbees.plugins.credentials.impl.BaseStandardCredentials.class,
+                        dockerCert: org.jenkinsci.plugins.docker.commons.credentials.DockerServerCredentials.class,
+                        file      : org.jenkinsci.plugins.plaincredentials.FileCredentials.class,
+                        string    : org.jenkinsci.plugins.plaincredentials.StringCredentials.class,
     ]
     return com.cloudbees.plugins.credentials.CredentialsProvider.lookupCredentials(
         credClasses[credsType],
         jenkins.model.Jenkins.instance
-    ).findAll {cred -> cred.id == credsId}[0]
+    ).findAll { cred -> cred.id == credsId }[0]
 }
 
 /**
  * Get credentials from store
  *
- * @param id    Credentials name
+ * @param id Credentials name
  */
 def getCredentials(id, cred_type = "username_password") {
     warningMsg('You are using obsolete function. Please switch to use `getCredentialsById()`')
 
     type_map = [
         username_password: 'password',
-        key:               'sshKey',
+        key              : 'sshKey',
     ]
 
     return getCredentialsById(id, type_map[cred_type])
@@ -122,7 +123,7 @@
  * Print pretty-printed string representation of given item
  * @param item item to be pretty-printed (list, map, whatever)
  */
-def prettyPrint(item){
+def prettyPrint(item) {
     println prettify(item)
 }
 
@@ -131,7 +132,7 @@
  * @param item item to be pretty-printed (list, map, whatever)
  * @return pretty-printed string
  */
-def prettify(item){
+def prettify(item) {
     return groovy.json.JsonOutput.prettyPrint(toJson(item)).replace('\\n', System.getProperty('line.separator'))
 }
 
@@ -180,22 +181,15 @@
  * @param msg
  * @param color Colorful output or not
  */
-def debugMsg(msg, color = true){
+def debugMsg(msg, color = true) {
     // if debug property exists on env, debug is enabled
-    if(env.getEnvironment().containsKey('DEBUG') && env['DEBUG'] == "true"){
+    if (env.getEnvironment().containsKey('DEBUG') && env['DEBUG'] == "true") {
         printMsg("[DEBUG] ${msg}", "red")
     }
 }
 
-/**
- * Print message
- *
- * @param msg        Message to be printed
- * @param level      Level of message (default INFO)
- * @param color      Color to use for output or false (default)
- */
-def printMsg(msg, color = false) {
-    colors = [
+def getColorizedString(msg, color) {
+    def colorMap = [
         'red'   : '\u001B[31m',
         'black' : '\u001B[30m',
         'green' : '\u001B[32m',
@@ -206,11 +200,18 @@
         'white' : '\u001B[37m',
         'reset' : '\u001B[0m'
     ]
-    if (color != false) {
-        print "${colors[color]}${msg}${colors.reset}"
-    } else {
-        print "[${level}] ${msg}"
-    }
+
+    return "${colorMap[color]}${msg}${colorMap.reset}"
+}
+
+/**
+ * Print message
+ *
+ * @param msg Message to be printed
+ * @param color Color to use for output
+ */
+def printMsg(msg, color) {
+    print getColorizedString(msg, color)
 }
 
 /**
@@ -220,7 +221,7 @@
  * @param type Type of files to search (groovy.io.FileType.FILES)
  */
 @NonCPS
-def getFiles(path, type=groovy.io.FileType.FILES) {
+def getFiles(path, type = groovy.io.FileType.FILES) {
     files = []
     new File(path).eachFile(type) {
         files[] = it
@@ -236,7 +237,7 @@
  */
 @NonCPS
 def entries(m) {
-    m.collect {k, v -> [k, v]}
+    m.collect { k, v -> [k, v] }
 }
 
 /**
@@ -246,20 +247,20 @@
  */
 def serial(steps) {
     stepsArray = entries(steps)
-    for (i=0; i < stepsArray.size; i++) {
+    for (i = 0; i < stepsArray.size; i++) {
         def step = stepsArray[i]
         def dummySteps = [:]
         def stepKey
-        if(step[1] instanceof List || step[1] instanceof Map){
-            for(j=0;j < step[1].size(); j++){
-                if(step[1] instanceof List){
+        if (step[1] instanceof List || step[1] instanceof Map) {
+            for (j = 0; j < step[1].size(); j++) {
+                if (step[1] instanceof List) {
                     stepKey = j
-                }else if(step[1] instanceof Map){
+                } else if (step[1] instanceof Map) {
                     stepKey = step[1].keySet()[j]
                 }
-                dummySteps.put("step-${step[0]}-${stepKey}",step[1][stepKey])
+                dummySteps.put("step-${step[0]}-${stepKey}", step[1][stepKey])
             }
-        }else{
+        } else {
             dummySteps.put(step[0], step[1])
         }
         parallel dummySteps
@@ -271,18 +272,18 @@
  * @param inputList input list
  * @param partitionSize (partition size, optional, default 5)
  */
-def partitionList(inputList, partitionSize=5){
-  List<List<String>> partitions = new ArrayList<>();
-  for (int i=0; i<inputList.size(); i += partitionSize) {
-      partitions.add(new ArrayList<String>(inputList.subList(i, Math.min(i + partitionSize, inputList.size()))));
-  }
-  return partitions
+def partitionList(inputList, partitionSize = 5) {
+    List<List<String>> partitions = new ArrayList<>();
+    for (int i = 0; i < inputList.size(); i += partitionSize) {
+        partitions.add(new ArrayList<String>(inputList.subList(i, Math.min(i + partitionSize, inputList.size()))));
+    }
+    return partitions
 }
 
 /**
  * Get password credentials from store
  *
- * @param id    Credentials name
+ * @param id Credentials name
  */
 def getPasswordCredentials(id) {
     return getCredentialsById(id, 'password')
@@ -291,7 +292,7 @@
 /**
  * Get SSH credentials from store
  *
- * @param id    Credentials name
+ * @param id Credentials name
  */
 def getSshCredentials(id) {
     return getCredentialsById(id, 'sshKey')
@@ -303,28 +304,28 @@
  * @return boolean result
  */
 @NonCPS
-def jenkinsHasPlugin(pluginName){
-    return Jenkins.instance.pluginManager.plugins.collect{p -> p.shortName}.contains(pluginName)
+def jenkinsHasPlugin(pluginName) {
+    return Jenkins.instance.pluginManager.plugins.collect { p -> p.shortName }.contains(pluginName)
 }
 
 @NonCPS
 def _needNotification(notificatedTypes, buildStatus, jobName) {
-    if(notificatedTypes && notificatedTypes.contains("onchange")){
-        if(jobName){
+    if (notificatedTypes && notificatedTypes.contains("onchange")) {
+        if (jobName) {
             def job = Jenkins.instance.getItem(jobName)
             def numbuilds = job.builds.size()
-            if (numbuilds > 0){
+            if (numbuilds > 0) {
                 //actual build is first for some reasons, so last finished build is second
                 def lastBuild = job.builds[1]
-                if(lastBuild){
-                    if(lastBuild.result.toString().toLowerCase().equals(buildStatus)){
+                if (lastBuild) {
+                    if (lastBuild.result.toString().toLowerCase().equals(buildStatus)) {
                         println("Build status didn't changed since last build, not sending notifications")
                         return false;
                     }
                 }
             }
         }
-    }else if(!notificatedTypes.contains(buildStatus)){
+    } else if (!notificatedTypes.contains(buildStatus)) {
         return false;
     }
     return true;
@@ -343,7 +344,7 @@
  * @param mailFrom mail FROM param, if empty "jenkins" will be used, it's mandatory for sending email notifications
  * @param mailTo mail TO param, it's mandatory for sending email notifications, this option enable mail notification
  */
-def sendNotification(buildStatus, msgText="", enabledNotifications = [], notificatedTypes=["onchange"], jobName=null, buildNumber=null, buildUrl=null, mailFrom="jenkins", mailTo=null){
+def sendNotification(buildStatus, msgText = "", enabledNotifications = [], notificatedTypes = ["onchange"], jobName = null, buildNumber = null, buildUrl = null, mailFrom = "jenkins", mailTo = null) {
     // Default values
     def colorName = 'blue'
     def colorCode = '#0000FF'
@@ -354,40 +355,40 @@
     def subject = "${buildStatusParam}: Job '${jobNameParam} [${buildNumberParam}]'"
     def summary = "${subject} (${buildUrlParam})"
 
-    if(msgText != null && msgText != ""){
-        summary+="\n${msgText}"
+    if (msgText != null && msgText != "") {
+        summary += "\n${msgText}"
     }
-    if(buildStatusParam.toLowerCase().equals("success")){
+    if (buildStatusParam.toLowerCase().equals("success")) {
         colorCode = "#00FF00"
         colorName = "green"
-    }else if(buildStatusParam.toLowerCase().equals("unstable")){
+    } else if (buildStatusParam.toLowerCase().equals("unstable")) {
         colorCode = "#FFFF00"
         colorName = "yellow"
-    }else if(buildStatusParam.toLowerCase().equals("failure")){
+    } else if (buildStatusParam.toLowerCase().equals("failure")) {
         colorCode = "#FF0000"
         colorName = "red"
     }
-    if(_needNotification(notificatedTypes, buildStatusParam.toLowerCase(), jobNameParam)){
-        if(enabledNotifications.contains("slack") && jenkinsHasPlugin("slack")){
-            try{
+    if (_needNotification(notificatedTypes, buildStatusParam.toLowerCase(), jobNameParam)) {
+        if (enabledNotifications.contains("slack") && jenkinsHasPlugin("slack")) {
+            try {
                 slackSend color: colorCode, message: summary
-            }catch(Exception e){
+            } catch (Exception e) {
                 println("Calling slack plugin failed")
                 e.printStackTrace()
             }
         }
-        if(enabledNotifications.contains("hipchat") && jenkinsHasPlugin("hipchat")){
-            try{
+        if (enabledNotifications.contains("hipchat") && jenkinsHasPlugin("hipchat")) {
+            try {
                 hipchatSend color: colorName.toUpperCase(), message: summary
-            }catch(Exception e){
+            } catch (Exception e) {
                 println("Calling hipchat plugin failed")
                 e.printStackTrace()
             }
         }
-        if(enabledNotifications.contains("email") && mailTo != null && mailTo != "" && mailFrom != null && mailFrom != ""){
-            try{
+        if (enabledNotifications.contains("email") && mailTo != null && mailTo != "" && mailFrom != null && mailFrom != "") {
+            try {
                 mail body: summary, from: mailFrom, subject: subject, to: mailTo
-            }catch(Exception e){
+            } catch (Exception e) {
                 println("Sending mail plugin failed")
                 e.printStackTrace()
             }
@@ -402,16 +403,15 @@
  * @return index-th element
  */
 
-def cutOrDie(cmd, index)
-{
+def cutOrDie(cmd, index) {
     def common = new com.mirantis.mk.Common()
     def output
     try {
-      output = sh(script: cmd, returnStdout: true)
-      def result = output.tokenize(" ")[index]
-      return result;
+        output = sh(script: cmd, returnStdout: true)
+        def result = output.tokenize(" ")[index]
+        return result;
     } catch (Exception e) {
-      common.errorMsg("Failed to execute cmd: ${cmd}\n output: ${output}")
+        common.errorMsg("Failed to execute cmd: ${cmd}\n output: ${output}")
     }
 }
 
@@ -423,7 +423,7 @@
  */
 
 def checkContains(variable, keyword) {
-    if(env.getEnvironment().containsKey(variable)){
+    if (env.getEnvironment().containsKey(variable)) {
         return env[variable] && env[variable].toLowerCase().contains(keyword.toLowerCase())
     } else {
         return false
@@ -435,19 +435,22 @@
  * @param jsonString input JSON string
  * @return created hashmap
  */
-def parseJSON(jsonString){
-   def m = [:]
-   def lazyMap = new JsonSlurperClassic().parseText(jsonString)
-   m.putAll(lazyMap)
-   return m
+def parseJSON(jsonString) {
+    def m = [:]
+    def lazyMap = new JsonSlurperClassic().parseText(jsonString)
+    m.putAll(lazyMap)
+    return m
 }
 
 /**
  * Test pipeline input parameter existence and validity (not null and not empty string)
  * @param paramName input parameter name (usually uppercase)
- */
-def validInputParam(paramName){
-    return env.getEnvironment().containsKey(paramName) && env[paramName] != null && env[paramName] != ""
+  */
+def validInputParam(paramName) {
+    if (paramName instanceof java.lang.String) {
+        return env.getEnvironment().containsKey(paramName) && env[paramName] != null && env[paramName] != ""
+    }
+    return false
 }
 
 /**
@@ -460,7 +463,7 @@
 
 @NonCPS
 def countHashMapEquals(lm, param, eq) {
-    return lm.stream().filter{i -> i[param].equals(eq)}.collect(java.util.stream.Collectors.counting())
+    return lm.stream().filter { i -> i[param].equals(eq) }.collect(java.util.stream.Collectors.counting())
 }
 
 /**
@@ -476,7 +479,7 @@
     def stdout = sh(script: 'mktemp', returnStdout: true).trim()
 
     try {
-        def status = sh(script:"${cmd} 1>${stdout} 2>${stderr}", returnStatus: true)
+        def status = sh(script: "${cmd} 1>${stdout} 2>${stderr}", returnStatus: true)
         res['stderr'] = sh(script: "cat ${stderr}", returnStdout: true)
         res['stdout'] = sh(script: "cat ${stdout}", returnStdout: true)
         res['status'] = status
@@ -488,7 +491,6 @@
     return res
 }
 
-
 /**
  * Retry commands passed to body
  *
@@ -496,17 +498,16 @@
  * @param delay Delay between retries (in seconds)
  * @param body Commands to be in retry block
  * @return calling commands in body
- * @example retry(3,5){ function body }
- *          retry{ function body }
+ * @example retry(3, 5) { function body }; retry { function body }
  */
 
 def retry(int times = 5, int delay = 0, Closure body) {
     int retries = 0
     def exceptions = []
-    while(retries++ < times) {
+    while (retries++ < times) {
         try {
             return body.call()
-        } catch(e) {
+        } catch (e) {
             sleep(delay)
         }
     }
@@ -514,28 +515,413 @@
     throw new Exception("Failed after $times retries")
 }
 
-
 /**
  * Wait for user input with timeout
  *
  * @param timeoutInSeconds Timeout
  * @param options Options for input widget
  */
-def waitForInputThenPass(timeoutInSeconds, options=[message: 'Ready to go?']) {
-  def userInput = true
-  try {
-    timeout(time: timeoutInSeconds, unit: 'SECONDS') {
-      userInput = input options
+def waitForInputThenPass(timeoutInSeconds, options = [message: 'Ready to go?']) {
+    def userInput = true
+    try {
+        timeout(time: timeoutInSeconds, unit: 'SECONDS') {
+            userInput = input options
+        }
+    } catch (err) { // timeout reached or input false
+        def user = err.getCauses()[0].getUser()
+        if ('SYSTEM' == user.toString()) { // SYSTEM means timeout.
+            println("Timeout, proceeding")
+        } else {
+            userInput = false
+            println("Aborted by: [${user}]")
+            throw err
+        }
     }
-  } catch(err) { // timeout reached or input false
-    def user = err.getCauses()[0].getUser()
-    if('SYSTEM' == user.toString()) { // SYSTEM means timeout.
-      println("Timeout, proceeding")
+    return userInput
+}
+
+/**
+ * Function receives Map variable as input and sorts it
+ * by values ascending. Returns sorted Map
+ * @param _map Map variable
+ */
+@NonCPS
+def SortMapByValueAsc(_map) {
+    def sortedMap = _map.sort { it.value }
+    return sortedMap
+}
+
+/**
+ *  Compare 'old' and 'new' dir's recursively
+ * @param diffData =' Only in new/XXX/infra: secrets.yml
+ Files old/XXX/init.yml and new/XXX/init.yml differ
+ Only in old/XXX/infra: secrets11.yml '
+ *
+ * @return
+ *   - new:
+ - XXX/secrets.yml
+ - diff:
+ - XXX/init.yml
+ - removed:
+ - XXX/secrets11.yml
+
+ */
+def diffCheckMultidir(diffData) {
+    common = new com.mirantis.mk.Common()
+    // Some global constants. Don't change\move them!
+    keyNew = 'new'
+    keyRemoved = 'removed'
+    keyDiff = 'diff'
+    def output = [
+        new    : [],
+        removed: [],
+        diff   : [],
+    ]
+    String pathSep = '/'
+    diffData.each { line ->
+        def job_file = ''
+        def job_type = ''
+        if (line.startsWith('Files old/')) {
+            job_file = new File(line.replace('Files old/', '').tokenize()[0])
+            job_type = keyDiff
+        } else if (line.startsWith('Only in new/')) {
+            // get clean normalized filepath, under new/
+            job_file = new File(line.replace('Only in new/', '').replace(': ', pathSep)).toString()
+            job_type = keyNew
+        } else if (line.startsWith('Only in old/')) {
+            // get clean normalized filepath, under old/
+            job_file = new File(line.replace('Only in old/', '').replace(': ', pathSep)).toString()
+            job_type = keyRemoved
+        } else {
+            common.warningMsg("Not parsed diff line: ${line}!")
+        }
+        if (job_file != '') {
+            output[job_type].push(job_file)
+        }
+    }
+    return output
+}
+
+/**
+ * Compare 2 folder, file by file
+ * Structure should be:
+ * ${compRoot}/
+ └── diff - diff results will be save here
+ ├── new  - input folder with data
+ ├── old  - input folder with data
+ ├── pillar.diff - global diff will be saved here
+ * b_url - usual env.BUILD_URL, to be add into description
+ * grepOpts -   General grep cmdline; Could be used to pass some magic
+ *              regexp into after-diff listing file(pillar.diff)
+ *              Example: '-Ev infra/secrets.yml'
+ * return - html-based string
+ * TODO: allow to specify subdir for results?
+ **/
+
+def comparePillars(compRoot, b_url, grepOpts) {
+
+    // Some global constants. Don't change\move them!
+    keyNew = 'new'
+    keyRemoved = 'removed'
+    keyDiff = 'diff'
+    def diff_status = 0
+    // FIXME
+    httpWS = b_url + '/artifact/'
+    dir(compRoot) {
+        // If diff empty - exit 0
+        diff_status = sh(script: 'diff -q -r old/ new/  > pillar.diff',
+            returnStatus: true,
+        )
+    }
+    // Unfortunately, diff is not able to work with dir-based regexps
+    if (diff_status == 1 && grepOpts) {
+        dir(compRoot) {
+            grep_status = sh(script: """
+                cp -v pillar.diff pillar_orig.diff
+                grep ${grepOpts} pillar_orig.diff  > pillar.diff
+                """,
+                returnStatus: true
+            )
+            if (grep_status == 1) {
+                warningMsg("Grep regexp ${grepOpts} removed all diff!")
+                diff_status = 0
+            }
+        }
+    }
+    // Set job description
+    description = ''
+    if (diff_status == 1) {
+        // Analyse output file and prepare array with results
+        String data_ = readFile file: "${compRoot}/pillar.diff"
+        def diff_list = diffCheckMultidir(data_.split("\\r?\\n"))
+        infoMsg(diff_list)
+        dir(compRoot) {
+            if (diff_list[keyDiff].size() > 0) {
+                if (!fileExists('diff')) {
+                    sh('mkdir -p diff')
+                }
+                description += '<b>CHANGED</b><ul>'
+                infoMsg('Changed items:')
+                def stepsForParallel = [:]
+                stepsForParallel.failFast = true
+                diff_list[keyDiff].each {
+                    stepsForParallel.put("Differ for:${it}",
+                        {
+                            // We don't want to handle sub-dirs structure. So, simply make diff 'flat'
+                            def item_f = it.toString().replace('/', '_')
+                            description += "<li><a href=\"${httpWS}/diff/${item_f}/*view*/\">${it}</a></li>"
+                            // Generate diff file
+                            def diff_exit_code = sh([
+                                script      : "diff -U 50 old/${it} new/${it} > diff/${item_f}",
+                                returnStdout: false,
+                                returnStatus: true,
+                            ])
+                            // catch normal errors, diff should always return 1
+                            if (diff_exit_code != 1) {
+                                error 'Error with diff file generation'
+                            }
+                        })
+                }
+
+                parallel stepsForParallel
+            }
+            if (diff_list[keyNew].size() > 0) {
+                description += '<b>ADDED</b><ul>'
+                for (item in diff_list[keyNew]) {
+                    description += "<li><a href=\"${httpWS}/new/${item}/*view*/\">${item}</a></li>"
+                }
+            }
+            if (diff_list[keyRemoved].size() > 0) {
+                description += '<b>DELETED</b><ul>'
+                for (item in diff_list[keyRemoved]) {
+                    description += "<li><a href=\"${httpWS}/old/${item}/*view*/\">${item}</a></li>"
+                }
+            }
+
+        }
+    }
+
+    if (description != '') {
+        dir(compRoot) {
+            archiveArtifacts([
+                artifacts        : '**',
+                allowEmptyArchive: true,
+            ])
+        }
+        return description.toString()
     } else {
-      userInput = false
-      println("Aborted by: [${user}]")
-      throw err
+        return '<b>No job changes</b>'
     }
-  }
-  return userInput
+}
+
+/**
+ * Simple function, to get basename from string.
+ * line - path-string
+ * remove_ext - string, optional. Drop the file extension.
+ **/
+def GetBaseName(line, remove_ext) {
+    filename = line.toString().split('/').last()
+    if (remove_ext && filename.endsWith(remove_ext.toString())) {
+        filename = filename.take(filename.lastIndexOf(remove_ext.toString()))
+    }
+    return filename
+}
+
+/**
+ * Return colored string of specific stage in stageMap
+ *
+ * @param stageMap LinkedHashMap object.
+ * @param stageName The name of current stage we are going to execute.
+ * @param color Text color
+ * */
+def getColoredStageView(stageMap, stageName, color) {
+    def stage = stageMap[stageName]
+    def banner = []
+    def currentStageIndex = new ArrayList<String>(stageMap.keySet()).indexOf(stageName)
+    def numberOfStages = stageMap.keySet().size() - 1
+
+    banner.add(getColorizedString(
+        "=========== Stage ${currentStageIndex}/${numberOfStages}: ${stageName} ===========", color))
+    for (stage_item in stage.keySet()) {
+        banner.add(getColorizedString(
+            "${stage_item}: ${stage[stage_item]}", color))
+    }
+    banner.add('\n')
+
+    return banner
+}
+
+/**
+ * Print stageMap to console with specified color
+ *
+ * @param stageMap LinkedHashMap object with stages information.
+ * @param currentStage The name of current stage we are going to execute.
+ *
+ * */
+def printCurrentStage(stageMap, currentStage) {
+    print getColoredStageView(stageMap, currentStage, "cyan").join('\n')
+}
+
+/**
+ * Print stageMap to console with specified color
+ *
+ * @param stageMap LinkedHashMap object.
+ * @param baseColor Text color (default white)
+ * */
+def printStageMap(stageMap, baseColor = "white") {
+    def banner = []
+    def index = 0
+    for (stage_name in stageMap.keySet()) {
+        banner.addAll(getColoredStageView(stageMap, stage_name, baseColor))
+    }
+    print banner.join('\n')
+}
+
+/**
+ * Wrap provided code in stage, and do interactive retires if needed.
+ *
+ * @param stageMap LinkedHashMap object with stages information.
+ * @param currentStage The name of current stage we are going to execute.
+ * @param target Target host to execute stage on.
+ * @param interactive Boolean flag to specify if interaction with user is enabled.
+ * @param body Command to be in stage block.
+ * */
+def stageWrapper(stageMap, currentStage, target, interactive = true, Closure body) {
+    def common = new com.mirantis.mk.Common()
+    def banner = []
+
+    printCurrentStage(stageMap, currentStage)
+
+    stage(currentStage) {
+      if (interactive){
+        input message: getColorizedString("We are going to execute stage \'${currentStage}\' on the following target ${target}.\nPlease review stage information above.", "yellow")
+      }
+      try {
+        return body.call()
+        stageMap[currentStage]['Status'] = "SUCCESS"
+      } catch (Exception err) {
+        def msg = "Stage ${currentStage} failed with the following exception:\n${err}"
+        print getColorizedString(msg, "yellow")
+        common.errorMsg(err)
+        if (interactive) {
+          input message: getColorizedString("Please make sure problem is fixed to proceed with retry. Ready to proceed?", "yellow")
+          stageMap[currentStage]['Status'] = "RETRYING"
+          stageWrapper(stageMap, currentStage, target, interactive, body)
+        } else {
+          error(msg)
+        }
+      }
+    }
+}
+
+/**
+ *  Ugly transition solution for internal tests.
+ *  1) Check input => transform to static result, based on runtime and input
+ *  2) Check remote-binary repo for exact resource
+ */
+
+def checkRemoteBinary(LinkedHashMap config, List extraScmExtensions = []) {
+    def common = new com.mirantis.mk.Common()
+    res = [:]
+    res['MirrorRoot'] = config.get('globalMirrorRoot', env["BIN_MIRROR_ROOT"] ? env["BIN_MIRROR_ROOT"] : "http://mirror.mirantis.com/")
+    // Reclass-like formats. To make life easy!
+    res['apt_mk_version'] = config.get('apt_mk_version', env["BIN_APT_MK_VERSION"] ? env["BIN_APT_MK_VERSION"] : 'nightly')
+    res['linux_system_repo_url'] = config.get('linux_system_repo_url', env["BIN_linux_system_repo_url"] ? env["BIN_linux_system_repo_url"] : "${res['MirrorRoot']}/${res['apt_mk_version']}/")
+
+    if (config.get('verify', true)) {
+        MirrorRootStatus = sh(script: "wget  --auth-no-challenge --spider ${res['linux_system_repo_url']} 2>/dev/null", returnStatus: true)
+        if (MirrorRootStatus != 0) {
+            common.warningMsg("Resource: ${res['linux_system_repo_url']} not exist")
+            res['linux_system_repo_url'] = false
+        }
+    }
+    return res
+}
+
+/**
+ *  Workaround to update env properties, like GERRIT_* vars,
+ *  which should be passed from upstream job to downstream.
+ *  Will not fail entire job in case any issues.
+ *  @param envVar - EnvActionImpl env job
+ *  @param extraVars - Multiline YAML text with extra vars
+ */
+def mergeEnv(envVar, extraVars) {
+    def common = new com.mirantis.mk.Common()
+    try {
+        def extraParams = readYaml text: extraVars
+        for(String key in extraParams.keySet()) {
+            envVar[key] = extraParams[key]
+            common.warningMsg("Parameter ${key} is updated from EXTRA vars.")
+        }
+    } catch (Exception e) {
+        common.errorMsg("Can't update env parameteres, because: ${e.toString()}")
+    }
+}
+
+/**
+ * Wrapper around parallel pipeline function
+ * with ability to restrict number of parallel threads
+ * running simultaneously
+ *
+ * @param branches - Map with Closures to be executed
+ * @param maxParallelJob - Integer number of parallel threads allowed
+ *                         to run simultaneously
+ */
+def runParallel(branches, maxParallelJob = 10) {
+    def runningSteps = 0
+    branches.each { branchName, branchBody ->
+        if (branchBody instanceof Closure) {
+            branches[branchName] = {
+                while (!(runningSteps < maxParallelJob)) {
+                    continue
+                }
+                runningSteps += 1
+                branchBody.call()
+                runningSteps -= 1
+            }
+        }
+    }
+    if (branches) {
+        parallel branches
+    }
+}
+
+/**
+ * Ugly processing basic funcs with /etc/apt
+ * @param configYaml
+ * Example :
+ configYaml = '''
+ ---
+ distrib_revision: 'nightly'
+ aprConfD: |-
+    APT::Get::AllowUnauthenticated 'true';
+ repo:
+    mcp_saltstack:
+        source: "deb [arch=amd64] http://mirror.mirantis.com/SUB_DISTRIB_REVISION/saltstack-2017.7/xenial xenial main"
+        pinning: |-
+            Package: libsodium18
+            Pin: release o=SaltStack
+            Pin-Priority: 50
+ '''
+ *
+ */
+
+def debianExtraRepos(configYaml) {
+    def config = readYaml text: configYaml
+    def distribRevision = config.get('distrib_revision', 'nightly')
+    if (config.get('repo', false)) {
+        for (String repo in config['repo'].keySet()) {
+            source = config['repo'][repo]['source'].replace('SUB_DISTRIB_REVISION', distribRevision)
+            warningMsg("Write ${source} >  /etc/apt/sources.list.d/${repo}.list")
+            sh("echo '${source}' > /etc/apt/sources.list.d/${repo}.list")
+            // TODO implement pinning
+        }
+    }
+    if (config.get('aprConfD', false)) {
+        for (String pref in config['aprConfD'].tokenize('\n')) {
+            warningMsg("Adding ${pref} => /etc/apt/apt.conf.d/99setupAndTestNode")
+            sh("echo '${pref}' >> /etc/apt/apt.conf.d/99setupAndTestNode")
+        }
+        sh('cat /etc/apt/apt.conf.d/99setupAndTestNode')
+    }
 }
diff --git a/src/com/mirantis/mk/Debian.groovy b/src/com/mirantis/mk/Debian.groovy
index 4d7b1ee..d6c82db 100644
--- a/src/com/mirantis/mk/Debian.groovy
+++ b/src/com/mirantis/mk/Debian.groovy
@@ -251,3 +251,89 @@
         sh("export GNUPGHOME=${workspace}/.gnupg; dput -f \"ppa:${ppaRepo}\" *_source.changes")
     }
 }
+
+/**
+* Upgrade packages on given target.
+*
+* @param env    Salt Connection object or env  Salt command map
+* @param target Salt target to upgrade packages on.
+*/
+def osUpgrade(env, target){
+  def common = new com.mirantis.mk.Common()
+  def salt = new com.mirantis.mk.Salt()
+
+  common.infoMsg("Running upgrade on ${target}")
+
+  salt.runSaltProcessStep(env, target, 'pkg.refresh_db', [], null, true)
+  def cmd = 'export DEBIAN_FRONTEND=noninteractive; apt-get -y -q --allow-downgrades -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confold\" upgrade;'
+  salt.runSaltProcessStep(env, target, 'cmd.run', [cmd])
+}
+
+/**
+* Running dist-upgrade on given target.
+*
+* @param env    Salt Connection object or env  Salt command map
+* @param target Salt target to upgrade packages on.
+*/
+def osDistUpgrade(env, target){
+  def salt = new com.mirantis.mk.Salt()
+  def common = new com.mirantis.mk.Common()
+
+  common.infoMsg("Running dist-upgrade on ${target}")
+  salt.runSaltProcessStep(env, target, 'pkg.refresh_db', [], null, true)
+  def cmd = 'export DEBIAN_FRONTEND=noninteractive; apt-get -y -q --allow-downgrades -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confold\" dist-upgrade;'
+  salt.runSaltProcessStep(env, target, 'cmd.run', [cmd])
+}
+
+/**
+* Reboot specified target, and wait when minion is UP.
+*
+* @param env       Salt Connection object or env  Salt command map
+* @param target    Salt target to upgrade packages on.
+* @param timeout   Sleep timeout when doing retries.
+* @param attempts  Number of attempts to wait for.
+*/
+def osReboot(env, target, timeout=30, attempts=10){
+  def salt = new com.mirantis.mk.Salt()
+  def common = new com.mirantis.mk.Common()
+
+  salt.runSaltProcessStep(env, target, 'cmd.run', ["touch /tmp/rebooting"])
+  salt.runSaltProcessStep(env, target, 'system.reboot', [], null, true, 5)
+
+  common.retry(timeout, attempts){
+    if (salt.runSaltProcessStep(env, target, 'file.file_exists', ['/tmp/rebooting'], null, true, 5)['return'][0].values()[0].toBoolean()){
+      error("The system is still rebooting...")
+    }
+  }
+}
+
+/**
+* Upgrade OS on given node, wait when minion become reachable.
+*
+* @param env             Salt Connection object or env  Salt command map
+* @param target          Salt target to upgrade packages on.
+* @param mode            'upgrade' or 'dist-upgrade'
+* @param postponeReboot  Boolean flag to specify if reboot have to be postponed.
+* @param timeout   Sleep timeout when doing retries.
+* @param attempts  Number of attempts to wait for.
+*/
+def osUpgradeNode(env, target, mode, postponeReboot=false, timeout=30, attempts=10){
+    def common = new com.mirantis.mk.Common()
+    def salt = new com.mirantis.mk.Salt()
+
+    def rebootRequired = false
+    if (mode == 'dist-upgrade'){
+      osDistUpgrade(env, target)
+    } else if (mode == 'upgrade'){
+      osUpgrade(env, target)
+    }
+    rebootRequired = salt.runSaltProcessStep(env, target, 'file.file_exists', ['/var/run/reboot-required'], null, true, 5)['return'][0].values()[0].toBoolean()
+    if (rebootRequired) {
+      if (!postponeReboot){
+        common.infoMsg("Reboot is required after upgrade on ${target} Rebooting...")
+        osReboot(env, target, timeout, attempts)
+      } else {
+        common.infoMsg("Postponing reboot on node ${target}")
+      }
+    }
+}
diff --git a/src/com/mirantis/mk/Gerrit.groovy b/src/com/mirantis/mk/Gerrit.groovy
index 99c91b7..3761789 100644
--- a/src/com/mirantis/mk/Gerrit.groovy
+++ b/src/com/mirantis/mk/Gerrit.groovy
@@ -16,6 +16,8 @@
  *          - withMerge, merge master before build
  *          - withLocalBranch, prevent detached mode in repo
  *          - withWipeOut, wipe repository and force clone
+ *          - GerritTriggerBuildChooser - use magic GerritTriggerBuildChooser class from gerrit-trigger-plugin.
+ *            By default, enabled.
  *        Gerrit properties like GERRIT_SCHEMA can be passed in config as gerritSchema or will be obtained from env
  * @param extraScmExtensions list of extra scm extensions which will be used for checkout (optional)
  * @return boolean result
@@ -48,13 +50,13 @@
     def path = config.get('path', "")
     def depth = config.get('depth', 0)
     def timeout = config.get('timeout', 20)
+    def GerritTriggerBuildChooser = config.get('useGerritTriggerBuildChooser', true)
 
     def invalidParams = _getInvalidGerritParams(config)
     if (invalidParams.isEmpty()) {
         // default parameters
         def scmExtensions = [
             [$class: 'CleanCheckout'],
-            [$class: 'BuildChooserSetting', buildChooser: [$class: 'GerritTriggerBuildChooser']],
             [$class: 'CheckoutOption', timeout: timeout],
             [$class: 'CloneOption', depth: depth, noTags: false, reference: '', shallow: depth > 0, timeout: timeout]
         ]
@@ -74,9 +76,14 @@
             scmUserRemoteConfigs.put('credentialsId',credentials)
         }
 
+        // Useful if we only need to clone a branch, without any refspec magic
+        if (GerritTriggerBuildChooser) {
+            scmExtensions.add([$class: 'BuildChooserSetting', buildChooser: [$class: 'GerritTriggerBuildChooser']],)
+        }
+
         // if we need to "merge" code from patchset to GERRIT_BRANCH branch
         if (merge) {
-            scmExtensions.add([$class: 'PreBuildMerge', options: [fastForwardMode: 'FF', mergeRemote: 'gerrit', mergeStrategy: 'default', mergeTarget: gerritBranch]])
+            scmExtensions.add([$class: 'PreBuildMerge', options: [fastForwardMode: 'FF', mergeRemote: 'gerrit', mergeStrategy: 'DEFAULT', mergeTarget: gerritBranch]])
         }
         // we need wipe workspace before checkout
         if (wipe) {
@@ -246,4 +253,4 @@
     def missedParams = requiredParams - config.keySet()
     def badParams = config.subMap(requiredParams).findAll{it.value in [null, '']}.keySet()
     return badParams + missedParams
-}
\ No newline at end of file
+}
diff --git a/src/com/mirantis/mk/Git.groovy b/src/com/mirantis/mk/Git.groovy
index 9e3c460..d20c159 100644
--- a/src/com/mirantis/mk/Git.groovy
+++ b/src/com/mirantis/mk/Git.groovy
@@ -198,10 +198,10 @@
         }
     }
     if (followTags == true) {
-        ssh.agentSh "git push target --tags"
+        ssh.agentSh "git push -f target --tags"
 
         if (pushSourceTags == true) {
-            ssh.agentSh "git push origin --tags"
+            ssh.agentSh "git push -f origin --tags"
         }
     }
     sh "git remote rm target"
diff --git a/src/com/mirantis/mk/Http.groovy b/src/com/mirantis/mk/Http.groovy
index 987a998..b752b42 100644
--- a/src/com/mirantis/mk/Http.groovy
+++ b/src/com/mirantis/mk/Http.groovy
@@ -112,25 +112,25 @@
 }
 
 /**
- * Make generic call using Salt REST API and return parsed JSON
+ * Make generic call using REST API and return parsed JSON
  *
- * @param master   Salt connection object
- * @param uri   URI which will be appended to Salt server base URL
- * @param method    HTTP method to use (default GET)
- * @param data      JSON data to POST or PUT
- * @param headers   Map of additional request headers
+ * @param base    connection object, map with 'url' and optional 'authToken' keys
+ * @param uri     URI which will be appended to connection base URL
+ * @param method  HTTP method to use (default GET)
+ * @param data    JSON data to POST, PUT or PATCH
+ * @param headers Map of additional request headers
  */
-def restCall(master, uri, method = 'GET', data = null, headers = [:]) {
-    def connection = new URL("${master.url}${uri}").openConnection()
+def restCall(base, uri, method = 'GET', data = null, headers = [:]) {
+    def connection = new URL("${base.url}${uri}").openConnection()
     if (method != 'GET') {
         connection.setRequestMethod(method)
     }
 
     connection.setRequestProperty('User-Agent', 'jenkins-groovy')
     connection.setRequestProperty('Accept', 'application/json')
-    if (master.authToken) {
-        // XXX: removeme
-        connection.setRequestProperty('X-Auth-Token', master.authToken)
+    if (base.authToken) {
+        // XXX: removeme, explicitly use headers instead
+        connection.setRequestProperty('X-Auth-Token', base.authToken)
     }
 
     for (header in headers) {
@@ -163,34 +163,56 @@
 }
 
 /**
- * Make GET request using Salt REST API and return parsed JSON
+ * Make GET request using REST API and return parsed JSON
  *
- * @param master   Salt connection object
- * @param uri   URI which will be appended to Salt server base URL
+ * @param base  connection object, map with 'url' and optional 'authToken' keys
+ * @param uri   URI which will be appended to server base URL
  */
-def restGet(master, uri, data = null) {
-    return restCall(master, uri, 'GET', data)
+def restGet(base, uri, data = null, headers = [:]) {
+    return restCall(base, uri, 'GET', data, headers)
 }
 
 /**
- * Make POST request using Salt REST API and return parsed JSON
+ * Make POST request using REST API and return parsed JSON
  *
- * @param master   Salt connection object
- * @param uri   URI which will be appended to Docker server base URL
+ * @param base  connection object, map with 'url' and optional 'authToken' keys
+ * @param uri   URI which will be appended to server base URL
+ * @param data  JSON Data to POST
+ */
+def restPost(base, uri, data = null, headers = ['Accept': '*/*']) {
+    return restCall(base, uri, 'POST', data, headers)
+}
+
+/**
+ * Make PUT request using REST API and return parsed JSON
+ *
+ * @param base  connection object, map with 'url' and optional 'authToken' keys
+ * @param uri   URI which will be appended to server base URL
  * @param data  JSON Data to PUT
  */
-def restPost(master, uri, data = null) {
-    return restCall(master, uri, 'POST', data, ['Accept': '*/*'])
+def restPut(base, uri, data = null, headers = ['Accept': '*/*']) {
+    return restCall(base, uri, 'PUT', data, headers)
 }
 
 /**
- * Make DELETE request using Salt REST API and return parsed JSON
+ * Make PATCH request using REST API and return parsed JSON
  *
- * @param master   Salt connection object
- * @param uri   URI which will be appended to Salt server base URL
+ * @param base  connection object, map with 'url' and optional 'authToken' keys
+ * @param uri   URI which will be appended to server base URL
+ * @param data  JSON Data to PUT
  */
-def restDelete(master, uri, data = null) {
-    return restCall(master, uri, 'DELETE', data)
+def restPatch(base, uri, data = null, headers = ['Accept': '*/*']) {
+    return restCall(base, uri, 'PATCH', data, headers)
+}
+
+/**
+ * Make DELETE request using REST API and return parsed JSON
+ *
+ * @param base  connection object, map with 'url' and optional 'authToken' keys
+ * @param uri   URI which will be appended to server base URL
+ */
+def restDelete(base, uri, data = null, headers = [:]) {
+    return restCall(base, uri, 'DELETE', data, headers)
 }
 
 /**
diff --git a/src/com/mirantis/mk/JenkinsUtils.groovy b/src/com/mirantis/mk/JenkinsUtils.groovy
index b99ac2a..d092153 100644
--- a/src/com/mirantis/mk/JenkinsUtils.groovy
+++ b/src/com/mirantis/mk/JenkinsUtils.groovy
@@ -75,3 +75,33 @@
   }
   return result
 }
+
+/**
+ * Get Jenkins job object
+ * @param jobName job name
+ * @return job object that matches jobName
+ */
+def getJobByName(jobName){
+    for(item in Hudson.instance.items) {
+        if(item.name == jobName){
+            return item
+        }
+    }
+}
+
+/**
+ * Get Jenkins job parameters
+ * @param jobName job name
+ * @return HashMap with parameter names as keys and their values as values
+ */
+def getJobParameters(jobName){
+    def job = getJobByName(jobName)
+    def prop = job.getProperty(ParametersDefinitionProperty.class)
+    def params = new java.util.HashMap<String,String>()
+    if(prop != null) {
+        for(param in prop.getParameterDefinitions()) {
+            params.put(param.name, param.defaultValue)
+        }
+    }
+    return params
+}
diff --git a/src/com/mirantis/mk/Openscap.groovy b/src/com/mirantis/mk/Openscap.groovy
new file mode 100644
index 0000000..1841f16
--- /dev/null
+++ b/src/com/mirantis/mk/Openscap.groovy
@@ -0,0 +1,52 @@
+package com.mirantis.mk
+
+/**
+ * Run salt oscap.eval xccdf
+ *
+ * @param target            the target where the benchmark will be evaluated
+ * @param evaltype          what to evaluate (xccdf or oval)
+ * @param benchmark         the benchmark which will be evaluated by openscap
+ * @param resultsDir        the directory where artifacts will be moved
+ * @param profile           the XCCDF profile name
+ * @param xccdfVersion      XCCDF benchmark version (default 1.2)
+ * @param tailoringId       The id of your tailoring data (from the corresponding pillar)
+ */
+def openscapEval(master, target, evaltype, benchmark, resultsDir, profile = 'default', xccdfVersion = '1.2', tailoringId = 'None') {
+    def salt = new com.mirantis.mk.Salt()
+    def common = new com.mirantis.mk.Common()
+    salt.runSaltProcessStep(master, target, 'oscap.eval', [evaltype, benchmark, "results_dir=${resultsDir}", "profile=${profile}", "xccdf_version=${xccdfVersion}", "tailoring_id=${tailoringId}"])
+}
+
+/**
+ * Upload results to the security dashboard
+ *
+ * @param apiUrl        the security dashboard url
+ * @param results       the scan results payload to upload
+ * @param cloud_name    the cloud_name
+ * @param nodename      the scanned node name
+ */
+def uploadScanResultsToDashboard(apiUrl, results, cloud_name, nodename) {
+    def common = new com.mirantis.mk.Common()
+    def http = new com.mirantis.mk.Http()
+    def data = [:]
+
+    // Skip authorization as long as the worp dashboard has no authorization support
+
+    // Get cloud_id
+    data['name'] = cloud_name
+    def cloudId = common.parseJSON(http.sendHttpPostRequest(apiUrl+'/environment', data))['id']
+    // Get report_id
+    data['env_uuid'] = cloudId
+    def reportId = common.parseJSON(http.sendHttpPostRequest(apiUrl+'/reports/openscap/', data))['id']
+
+    // Create node
+    def nodes = []
+    nodes.add(nodename)
+    data['nodes'] = nodes
+    http.sendHttpPostRequest(apiUrl+'/environment/'+cloudId+'/nodes', data)
+
+    // Upload results
+    data['results'] = results
+    data['node'] = nodename
+    http.sendHttpPostRequest(apiUrl+'/reports/openscap/'+reportId, data)
+}
diff --git a/src/com/mirantis/mk/Openstack.groovy b/src/com/mirantis/mk/Openstack.groovy
index 85f014f..37fc73e 100644
--- a/src/com/mirantis/mk/Openstack.groovy
+++ b/src/com/mirantis/mk/Openstack.groovy
@@ -47,6 +47,13 @@
     def openstack_latest_packages = [
         //XXX: hack to fix https://bugs.launchpad.net/ubuntu/+source/python-pip/+bug/1635463
         'cliff==2.8',
+        // NOTE(vsaienko): cmd2 is a dependency of cliff; since we aren't using upper-constraints
+        // we have to pin cmd2 < 0.9.0 as later versions are not compatible with python2.
+        // TODO(vsaienko): use upper-constraints here, as in requirements we set only lowest library
+        //                 versions.
+        'cmd2<0.9.0;python_version=="2.7"',
+        'cmd2>=0.9.1;python_version=="3.4"',
+        'cmd2>=0.9.1;python_version=="3.5"',
         'python-openstackclient',
         'python-heatclient',
         'docutils'
@@ -373,6 +380,40 @@
 }
 
 /**
+ * Delete nova key pair
+ *
+ * @param env          Connection parameters for OpenStack API endpoint
+ * @param name         Name of the key pair to delete
+ * @param path         Optional path to the custom virtualenv
+ */
+def deleteKeyPair(env, name, path = null) {
+    def common = new com.mirantis.mk.Common()
+    common.infoMsg("Removing key pair ${name}")
+    def cmd = "openstack keypair delete ${name}"
+    runOpenstackCommand(cmd, env, path)
+}
+
+/**
+ * Get nova key pair
+ *
+ * @param env          Connection parameters for OpenStack API endpoint
+ * @param name         Name of the key pair to show
+ * @param path         Optional path to the custom virtualenv
+ */
+
+def getKeyPair(env, name, path = null) {
+    def common = new com.mirantis.mk.Common()
+    def cmd = "openstack keypair show ${name}"
+    def outputTable
+    try {
+        outputTable = runOpenstackCommand(cmd, env, path)
+    } catch (Exception e) {
+        common.infoMsg("Key pair ${name} not found")
+    }
+    return outputTable
+}
+
+/**
  * Stops all services that contain specific string (for example nova,heat, etc.)
  * @param env Salt Connection object or pepperEnv
  * @param probe single node on which to list service names
@@ -385,7 +426,7 @@
     def salt = new com.mirantis.mk.Salt()
     def common = new com.mirantis.mk.Common()
     for (s in services) {
-        def outputServicesStr = salt.getReturnValues(salt.cmdRun(env, "${probe}*", "service --status-all | grep ${s} | awk \'{print \$4}\'"))
+        def outputServicesStr = salt.getReturnValues(salt.cmdRun(env, probe, "service --status-all | grep ${s} | awk \'{print \$4}\'"))
         def servicesList = outputServicesStr.tokenize("\n").init()
         if (confirm) {
             if (servicesList) {
@@ -393,7 +434,7 @@
                     input message: "Click PROCEED to stop ${servicesList}. Otherwise click ABORT to skip stopping them."
                     for (name in servicesList) {
                         if (!name.contains('Salt command')) {
-                            salt.runSaltProcessStep(env, "${target}*", 'service.stop', ["${name}"])
+                            salt.runSaltProcessStep(env, target, 'service.stop', ["${name}"])
                         }
                     }
                 } catch (Exception er) {
@@ -404,7 +445,7 @@
             if (servicesList) {
                 for (name in servicesList) {
                     if (!name.contains('Salt command')) {
-                        salt.runSaltProcessStep(env, "${target}*", 'service.stop', ["${name}"])
+                        salt.runSaltProcessStep(env, target, 'service.stop', ["${name}"])
                     }
                 }
             }
@@ -413,6 +454,79 @@
 }
 
 /**
+ * Return the intersection of globally installed services and those that are
+ * defined on a specific target, according to their priorities.
+ *
+ * @param env     Salt Connection object or env
+ * @param target  The target node to get list of apps for.
+**/
+def getOpenStackUpgradeServices(env, target){
+    def salt = new com.mirantis.mk.Salt()
+    def common = new com.mirantis.mk.Common()
+
+    def global_apps = salt.getConfig(env, 'I@salt:master:enabled:true', 'orchestration.upgrade.applications')
+    def node_apps = salt.getPillar(env, target, '__reclass__:applications')['return'][0].values()[0]
+    def node_sorted_apps = []
+    if ( !global_apps['return'][0].values()[0].isEmpty() ) {
+        Map<String,Integer> _sorted_apps = [:]
+        for (k in global_apps['return'][0].values()[0].keySet()) {
+            if (k in node_apps) {
+              _sorted_apps[k] = global_apps['return'][0].values()[0][k].values()[0].toInteger()
+            }
+        }
+        node_sorted_apps = common.SortMapByValueAsc(_sorted_apps).keySet()
+        common.infoMsg("Applications are placed in following order:"+node_sorted_apps)
+    } else {
+        common.errorMsg("No applications found.")
+    }
+
+  return node_sorted_apps
+}
+
+
+/**
+ * Run specified upgrade phase for all services on given node.
+ *
+ * @param env     Salt Connection object or env
+ * @param target  The target node to run states on.
+ * @param phase   The phase name to run.
+**/
+def runOpenStackUpgradePhase(env, target, phase){
+    def salt = new com.mirantis.mk.Salt()
+    def common = new com.mirantis.mk.Common()
+
+    def services = getOpenStackUpgradeServices(env, target)
+    def st
+
+    for (service in services){
+        st = "${service}.upgrade.${phase}".trim()
+        common.infoMsg("Running ${phase} for service ${st} on ${target}")
+        salt.enforceState(env, target, st)
+    }
+}
+
+
+/**
+ * Run OpenStack states on specified node.
+ *
+ * @param env     Salt Connection object or env
+ * @param target  The target node to run states on.
+**/
+def applyOpenstackAppsStates(env, target){
+    def salt = new com.mirantis.mk.Salt()
+    def common = new com.mirantis.mk.Common()
+
+    def services = getOpenStackUpgradeServices(env, target)
+    def st
+
+    for (service in services){
+        st = "${service}".trim()
+        common.infoMsg("Running ${st} on ${target}")
+        salt.enforceState(env, target, st)
+    }
+}
+
+/**
  * Restores Galera database
  * @param env Salt Connection object or pepperEnv
  * @return output of salt commands
diff --git a/src/com/mirantis/mk/Orchestrate.groovy b/src/com/mirantis/mk/Orchestrate.groovy
index 68b6954..265cfa0 100644
--- a/src/com/mirantis/mk/Orchestrate.groovy
+++ b/src/com/mirantis/mk/Orchestrate.groovy
@@ -4,69 +4,80 @@
  *
 */
 
-def validateFoundationInfra(master) {
+/**
+ * Function runs Salt states to check infra
+ * @param master Salt Connection object or pepperEnv
+ * @param extra_tgt Extra target - adds ability to address commands using extra targeting to different clouds, e.g.: salt -C 'I@keystone:server and *ogrudev-deploy-heat-os-ha-ovs-82*' ...
+ */
+def validateFoundationInfra(master, extra_tgt = '') {
     def salt = new com.mirantis.mk.Salt()
 
-    salt.cmdRun(master, 'I@salt:master' ,'salt-key')
-    salt.runSaltProcessStep(master, 'I@salt:minion', 'test.version')
-    salt.cmdRun(master, 'I@salt:master' ,'reclass-salt --top')
-    salt.runSaltProcessStep(master, 'I@reclass:storage', 'reclass.inventory')
-    salt.runSaltProcessStep(master, 'I@salt:minion', 'state.show_top')
+    salt.cmdRun(master, "I@salt:master ${extra_tgt}" ,'salt-key')
+    salt.runSaltProcessStep(master, "I@salt:minion ${extra_tgt}", 'test.version')
+    salt.cmdRun(master, "I@salt:master ${extra_tgt}" ,'reclass-salt --top')
+    salt.runSaltProcessStep(master, "I@reclass:storage ${extra_tgt}", 'reclass.inventory')
+    salt.runSaltProcessStep(master, "I@salt:minion ${extra_tgt}", 'state.show_top')
 }
 
-def installFoundationInfra(master, staticMgmtNet=false) {
+def installFoundationInfra(master, staticMgmtNet=false, extra_tgt = '') {
     def salt = new com.mirantis.mk.Salt()
     def common = new com.mirantis.mk.Common()
 
     // NOTE(vsaienko) Apply reclass first, it may update cluster model
     // apply linux and salt.master salt.minion states afterwards to make sure
     // correct cluster model is used.
-    salt.enforceState(master, 'I@salt:master', ['reclass'])
+    salt.enforceState(master, "I@salt:master ${extra_tgt}", ['reclass'])
 
-    salt.enforceState(master, 'I@salt:master', ['linux.system'])
-    salt.enforceState(master, 'I@salt:master', ['salt.master'], true, false, null, false, 120, 2)
-    salt.fullRefresh(master, "*")
+    salt.enforceState(master, "I@salt:master ${extra_tgt}", ['linux.system'])
+    salt.enforceState(master, "I@salt:master ${extra_tgt}", ['salt.master'], true, false, null, false, 120, 2)
+    salt.fullRefresh(master, "* ${extra_tgt}")
 
-    salt.enforceState(master, 'I@salt:master', ['salt.minion'], true, false, null, false, 60, 2)
-    salt.enforceState(master, 'I@salt:master', ['salt.minion'])
-    salt.fullRefresh(master, "*")
-    salt.enforceState(master, '*', ['linux.network.proxy'], true, false, null, false, 60, 2)
+    salt.enforceState(master, "I@salt:master ${extra_tgt}", ['salt.minion'], true, false, null, false, 60, 2)
+    salt.enforceState(master, "I@salt:master ${extra_tgt}", ['salt.minion'])
+    salt.fullRefresh(master, "* ${extra_tgt}")
+    salt.enforceState(master, "* ${extra_tgt}", ['linux.network.proxy'], true, false, null, false, 60, 2)
     try {
-        salt.enforceState(master, '*', ['salt.minion.base'], true, false, null, false, 60, 2)
+        salt.enforceState(master, "* ${extra_tgt}", ['salt.minion.base'], true, false, null, false, 60, 2)
         sleep(5)
     } catch (Throwable e) {
         common.warningMsg('Salt state salt.minion.base is not present in the Salt-formula yet.')
     }
-    salt.enforceState(master, '*', ['linux.system'])
-    if (staticMgmtNet) {
-        salt.runSaltProcessStep(master, '*', 'cmd.shell', ["salt-call state.sls linux.network; salt-call service.restart salt-minion"], null, true, 60)
+    common.retry(2,5){
+        salt.enforceState(master, "* ${extra_tgt}", ['linux.system'])
     }
-    salt.enforceState(master, 'I@linux:network:interface', ['linux.network.interface'])
+    if (staticMgmtNet) {
+        salt.runSaltProcessStep(master, "* ${extra_tgt}", 'cmd.shell', ["salt-call state.sls linux.network; salt-call service.restart salt-minion"], null, true, 60)
+    }
+    common.retry(2,5){
+        salt.enforceState(master, "I@linux:network:interface ${extra_tgt}", ['linux.network.interface'])
+    }
     sleep(5)
-    salt.enforceState(master, 'I@linux:system', ['linux', 'openssh', 'ntp', 'rsyslog'])
-    salt.enforceState(master, '*', ['salt.minion'], true, false, null, false, 60, 2)
+    salt.enforceState(master, "I@linux:system ${extra_tgt}", ['linux', 'openssh', 'ntp', 'rsyslog'])
+    salt.enforceState(master, "* ${extra_tgt}", ['salt.minion'], true, false, null, false, 60, 2)
     sleep(5)
 
-    salt.fullRefresh(master, "*")
-    salt.runSaltProcessStep(master, '*', 'mine.update', [], null, true)
-    salt.enforceState(master, '*', ['linux.network.host'])
+    salt.fullRefresh(master, "* ${extra_tgt}")
+    salt.runSaltProcessStep(master, "* ${extra_tgt}", 'mine.update', [], null, true)
+    salt.enforceState(master, "* ${extra_tgt}", ['linux.network.host'])
 
     // Install and configure iptables
-    if (salt.testTarget(master, 'I@iptables:service')) {
-        salt.enforceState(master, 'I@iptables:service', 'iptables')
-    }
+    salt.enforceStateWithTest(master, "I@iptables:service ${extra_tgt}", 'iptables')
 
     // Install and configure logrotate
-    if (salt.testTarget(master, 'I@logrotate:server')) {
-        salt.enforceState(master, 'I@logrotate:server', 'logrotate')
-    }
+    salt.enforceStateWithTest(master, "I@logrotate:server ${extra_tgt}", 'logrotate')
+
+    // Install and configure auditd
+    salt.enforceStateWithTest(master, "I@auditd:service ${extra_tgt}", 'auditd')
+
+    // Install and configure openscap
+    salt.enforceStateWithTest(master, "I@openscap:service ${extra_tgt}", 'openscap')
 }
 
-def installFoundationInfraOnTarget(master, target, staticMgmtNet=false) {
+def installFoundationInfraOnTarget(master, target, staticMgmtNet=false, extra_tgt = '') {
     def salt = new com.mirantis.mk.Salt()
     def common = new com.mirantis.mk.Common()
 
-    salt.enforceState(master, 'I@salt:master', ['reclass'], true, false, null, false, 120, 2)
+    salt.enforceState(master, "I@salt:master ${extra_tgt}", ['reclass'], true, false, null, false, 120, 2)
     salt.fullRefresh(master, target)
     salt.enforceState(master, target, ['linux.network.proxy'], true, false, null, false, 60, 2)
     try {
@@ -75,7 +86,9 @@
     } catch (Throwable e) {
         common.warningMsg('Salt state salt.minion.base is not present in the Salt-formula yet.')
     }
-    salt.enforceState(master, target, ['linux.system'])
+    common.retry(2,5){
+        salt.enforceState(master, target, ['linux.system'])
+    }
     if (staticMgmtNet) {
         salt.runSaltProcessStep(master, target, 'cmd.shell', ["salt-call state.sls linux.network; salt-call service.restart salt-minion"], null, true, 60)
     }
@@ -91,40 +104,38 @@
     salt.enforceState(master, target, ['linux.network.host'])
 }
 
-def installInfraKvm(master) {
+def installInfraKvm(master, extra_tgt = '') {
     def common = new com.mirantis.mk.Common()
     def salt = new com.mirantis.mk.Salt()
-    salt.fullRefresh(master, 'I@linux:system')
-    def infra_conpund = 'I@salt:control'
+    def infra_compound = "I@salt:control ${extra_tgt}"
     def minions = []
     def wait_timeout = 10
     def retries = wait_timeout * 30
 
-    salt.enforceState(master, 'I@salt:control', ['salt.minion'], true, false, null, false, 60, 2)
-    salt.enforceState(master, 'I@salt:control', ['linux.system', 'linux.network', 'ntp', 'rsyslog'])
-    salt.enforceState(master, 'I@salt:control', 'libvirt')
-    salt.enforceState(master, 'I@salt:control', 'salt.control')
+    salt.fullRefresh(master, "I@linux:system ${extra_tgt}")
+    salt.enforceState(master, "I@salt:control ${extra_tgt}", ['salt.minion'], true, false, null, false, 60, 2)
+    salt.enforceState(master, "I@salt:control ${extra_tgt}", ['linux.system', 'linux.network', 'ntp', 'rsyslog'])
+    salt.enforceState(master, "I@salt:control ${extra_tgt}", 'libvirt')
+    salt.enforceState(master, "I@salt:control ${extra_tgt}", 'salt.control')
 
-    timeout(wait_timeout) {
-        common.infoMsg("Waiting for minions to come up...")
-        if (salt.testTarget(master, infra_conpund)) {
-            // Gathering minions
-            for ( infra_node in salt.getMinionsSorted(master, infra_conpund) ) {
-                def pillar = salt.getPillar(master, infra_node, 'salt:control:cluster')
-                if ( !pillar['return'].isEmpty() ) {
-                    for ( cluster in pillar['return'][0].values() ) {
-                        def engine = cluster.values()[0]['engine']
-                        def domain = cluster.values()[0]['domain']
-                        def node = cluster.values()[0]['node']
-                        if ( engine == "virt" ) {
-                            def nodes = node.values()
-                            if ( !nodes.isEmpty() ) {
-                                for ( vm in nodes ) {
-                                    if ( vm['name'] != null ) {
-                                        def vm_fqdn = vm['name'] + '.' + domain
-                                        if ( !minions.contains(vm_fqdn) ) {
-                                            minions.add(vm_fqdn)
-                                        }
+    common.infoMsg("Building minions list...")
+    if (salt.testTarget(master, infra_compound)) {
+        // Gathering minions
+        for ( infra_node in salt.getMinionsSorted(master, infra_compound) ) {
+            def pillar = salt.getPillar(master, infra_node, 'salt:control:cluster')
+            if ( !pillar['return'].isEmpty() ) {
+                for ( cluster in pillar['return'][0].values() ) {
+                    def engine = cluster.values()[0]['engine']
+                    def domain = cluster.values()[0]['domain']
+                    def node = cluster.values()[0]['node']
+                    if ( engine == "virt" ) {
+                        def nodes = node.values()
+                        if ( !nodes.isEmpty() ) {
+                            for ( vm in nodes ) {
+                                if ( vm['name'] != null ) {
+                                    def vm_fqdn = vm['name'] + '.' + domain
+                                    if ( !minions.contains(vm_fqdn) ) {
+                                        minions.add(vm_fqdn)
                                     }
                                 }
                             }
@@ -133,556 +144,558 @@
                 }
             }
         }
+    }
 
-        def minions_compound = minions.join(' or ')
-        common.infoMsg('Waiting for next minions to register: ' + minions_compound,)
-        salt.minionsPresentFromList(master, 'I@salt:master', minions, true, null, true, retries, 1)
-        common.infoMsg('Waiting for minions to respond')
-        salt.minionsReachable(master, 'I@salt:master', minions_compound )
+    def minions_compound = minions.join(' or ')
 
+    common.infoMsg("Waiting for next minions to register within ${wait_timeout} minutes: " + minions_compound)
+    timeout(time: wait_timeout, unit: 'MINUTES') {
+        salt.minionsPresentFromList(master, "I@salt:master ${extra_tgt}", minions, true, null, true, retries, 1)
+    }
+
+    common.infoMsg('Waiting for minions to respond')
+    timeout(time: wait_timeout, unit: 'MINUTES') {
+        salt.minionsReachable(master, "I@salt:master ${extra_tgt}", minions_compound)
     }
 
     common.infoMsg("All minions are up.")
-    salt.fullRefresh(master, '* and not kvm*')
+    salt.fullRefresh(master, "* and not kvm* ${extra_tgt}")
 
 }
 
-def installInfra(master) {
+def installInfra(master, extra_tgt = '') {
     def common = new com.mirantis.mk.Common()
     def salt = new com.mirantis.mk.Salt()
+    def first_target
 
     // Install glusterfs
-    if (salt.testTarget(master, 'I@glusterfs:server')) {
-        salt.enforceState(master, 'I@glusterfs:server', 'glusterfs.server.service')
+    if (salt.testTarget(master, "I@glusterfs:server ${extra_tgt}")) {
+        salt.enforceState(master, "I@glusterfs:server ${extra_tgt}", 'glusterfs.server.service')
 
-        salt.enforceState(master, 'I@glusterfs:server and *01*', 'glusterfs.server.setup', true, true, null, false, -1, 5)
+        salt.enforceState(master, "I@glusterfs:server:role:primary ${extra_tgt}", 'glusterfs.server.setup', true, true, null, false, -1, 5)
         sleep(10)
-        salt.cmdRun(master, 'I@glusterfs:server', "gluster peer status; gluster volume status")
+        salt.cmdRun(master, "I@glusterfs:server ${extra_tgt}", "gluster peer status; gluster volume status")
     }
 
     // Ensure glusterfs clusters is ready
-    if (salt.testTarget(master, 'I@glusterfs:client')) {
-        salt.enforceState(master, 'I@glusterfs:client', 'glusterfs.client')
-    }
+    salt.enforceStateWithTest(master, "I@glusterfs:client ${extra_tgt}", 'glusterfs.client', "", true, true, null, false, -1, 2)
 
     // Install galera
-    if (salt.testTarget(master, 'I@galera:master') || salt.testTarget(master, 'I@galera:slave')) {
-        salt.enforceState(master, 'I@galera:master', 'galera', true, true, null, false, -1, 2)
-        salt.enforceState(master, 'I@galera:slave', 'galera', true, true, null, false, -1, 2)
+    if (salt.testTarget(master, "I@galera:master ${extra_tgt}") || salt.testTarget(master, "I@galera:slave ${extra_tgt}")) {
+        salt.enforceState(master, "I@galera:master ${extra_tgt}", 'galera', true, true, null, false, -1, 2)
+        salt.enforceState(master, "I@galera:slave ${extra_tgt}", 'galera', true, true, null, false, -1, 2)
 
         // Check galera status
-        salt.runSaltProcessStep(master, 'I@galera:master', 'mysql.status')
-        salt.runSaltProcessStep(master, 'I@galera:slave', 'mysql.status')
+        salt.runSaltProcessStep(master, "I@galera:master ${extra_tgt}", 'mysql.status')
+        salt.runSaltProcessStep(master, "I@galera:slave ${extra_tgt}", 'mysql.status')
     // If galera is not enabled check if we need to install mysql:server
-    } else if (salt.testTarget(master, 'I@mysql:server')){
-        salt.enforceState(master, 'I@mysql:server', 'mysql.server')
-        if (salt.testTarget(master, 'I@mysql:client')){
-            salt.enforceState(master, 'I@mysql:client', 'mysql.client')
-        }
+    } else {
+    salt.enforceStateWithTest(master, "I@mysql:server ${extra_tgt}", 'mysql.server')
+    salt.enforceStateWithTest(master, "I@mysql:client ${extra_tgt}", 'mysql.client')
     }
-    installBackup(master, 'mysql')
+    installBackup(master, 'mysql', extra_tgt)
 
     // Install docker
-    if (salt.testTarget(master, 'I@docker:host')) {
-        salt.enforceState(master, 'I@docker:host', 'docker.host')
-        salt.cmdRun(master, 'I@docker:host', 'docker ps')
+    if (salt.testTarget(master, "I@docker:host ${extra_tgt}")) {
+        salt.enforceState(master, "I@docker:host ${extra_tgt}", 'docker.host', true, true, null, false, -1, 3)
+        salt.cmdRun(master, "I@docker:host and I@docker:host:enabled:true ${extra_tgt}", 'docker ps')
     }
 
     // Install keepalived
-    if (salt.testTarget(master, 'I@keepalived:cluster')) {
-        salt.enforceState(master, 'I@keepalived:cluster and *01*', 'keepalived')
-        salt.enforceState(master, 'I@keepalived:cluster', 'keepalived')
+    if (salt.testTarget(master, "I@keepalived:cluster ${extra_tgt}")) {
+        first_target = salt.getFirstMinion(master, "I@keepalived:cluster ${extra_tgt}")
+        salt.enforceState(master, "${first_target} ${extra_tgt}", 'keepalived')
+        salt.enforceState(master, "I@keepalived:cluster ${extra_tgt}", 'keepalived')
     }
 
     // Install rabbitmq
-    if (salt.testTarget(master, 'I@rabbitmq:server')) {
-        salt.enforceState(master, 'I@rabbitmq:server', 'rabbitmq', true, true, null, false, -1, 2)
+    if (salt.testTarget(master, "I@rabbitmq:server ${extra_tgt}")) {
+        salt.enforceState(master, "I@rabbitmq:server ${extra_tgt}", 'rabbitmq', true, true, null, false, -1, 2)
 
         // Check the rabbitmq status
         common.retry(3,5){
-             salt.cmdRun(master, 'I@rabbitmq:server', 'rabbitmqctl cluster_status')
+             salt.cmdRun(master, "I@rabbitmq:server ${extra_tgt}", 'rabbitmqctl cluster_status')
         }
     }
 
     // Install haproxy
-    if (salt.testTarget(master, 'I@haproxy:proxy')) {
-        salt.enforceState(master, 'I@haproxy:proxy', 'haproxy')
-        salt.runSaltProcessStep(master, 'I@haproxy:proxy', 'service.status', ['haproxy'])
-        salt.runSaltProcessStep(master, 'I@haproxy:proxy', 'service.restart', ['rsyslog'])
+    if (salt.testTarget(master, "I@haproxy:proxy ${extra_tgt}")) {
+        salt.enforceState(master, "I@haproxy:proxy ${extra_tgt}", 'haproxy')
+        salt.runSaltProcessStep(master, "I@haproxy:proxy ${extra_tgt}", 'service.status', ['haproxy'])
+        salt.runSaltProcessStep(master, "I@haproxy:proxy ${extra_tgt}", 'service.restart', ['rsyslog'])
     }
 
     // Install memcached
-    if (salt.testTarget(master, 'I@memcached:server')) {
-        salt.enforceState(master, 'I@memcached:server', 'memcached')
-    }
+    salt.enforceStateWithTest(master, "I@memcached:server ${extra_tgt}", 'memcached')
 
     // Install etcd
-    if (salt.testTarget(master, 'I@etcd:server')) {
-        salt.enforceState(master, 'I@etcd:server', 'etcd.server.service')
+    if (salt.testTarget(master, "I@etcd:server ${extra_tgt}")) {
+        salt.enforceState(master, "I@etcd:server ${extra_tgt}", 'etcd.server.service')
         common.retry(3,5){
-            salt.cmdRun(master, 'I@etcd:server', '. /var/lib/etcd/configenv && etcdctl cluster-health')
+            salt.cmdRun(master, "I@etcd:server ${extra_tgt}", '. /var/lib/etcd/configenv && etcdctl cluster-health')
         }
     }
 
     // Install redis
-    if (salt.testTarget(master, 'I@redis:server')) {
-        if (salt.testTarget(master, 'I@redis:cluster:role:master')) {
-            salt.enforceState(master, 'I@redis:cluster:role:master', 'redis')
-        }
-        salt.enforceState(master, 'I@redis:server', 'redis')
+    if (salt.testTarget(master, "I@redis:server ${extra_tgt}")) {
+        salt.enforceStateWithTest(master, "I@redis:cluster:role:master ${extra_tgt}", 'redis')
+        salt.enforceState(master, "I@redis:server ${extra_tgt}", 'redis')
     }
-    installBackup(master, 'common')
+
+    // Install DNS services
+    if (salt.testTarget(master, "I@bind:server ${extra_tgt}")) {
+        salt.enforceState(master, "I@bind:server ${extra_tgt}", 'bind.server')
+    }
+    if (salt.testTarget(master, "I@powerdns:server ${extra_tgt}")) {
+        salt.enforceState(master, "I@powerdns:server ${extra_tgt}", 'powerdns.server')
+    }
+
+    installBackup(master, 'common', extra_tgt)
 }
 
-def installOpenstackInfra(master) {
+def installOpenstackInfra(master, extra_tgt = '') {
     def common = new com.mirantis.mk.Common()
     common.warningMsg("You calling orchestrate.installOpenstackInfra(). This function is deprecated please use orchestrate.installInfra() directly")
-    installInfra(master)
+    installInfra(master, extra_tgt)
 }
 
 
-def installOpenstackControl(master) {
+def installOpenstackControl(master, extra_tgt = '') {
     def salt = new com.mirantis.mk.Salt()
     def common = new com.mirantis.mk.Common()
+    def first_target
 
     // Install horizon dashboard
-    if (salt.testTarget(master, 'I@horizon:server')) {
-        salt.enforceState(master, 'I@horizon:server', 'horizon')
-    }
+    salt.enforceStateWithTest(master, "I@horizon:server ${extra_tgt}", 'horizon')
     // Install sphinx server
-    if (salt.testTarget(master, 'I@sphinx:server')) {
-        salt.enforceState(master, 'I@sphinx:server', 'sphinx')
-    }
-    if (salt.testTarget(master, 'I@nginx:server')) {
-        salt.enforceState(master, 'I@nginx:server', 'salt.minion')
-        salt.enforceState(master, 'I@nginx:server', 'nginx')
-    }
+    salt.enforceStateWithTest(master, "I@sphinx:server ${extra_tgt}", 'sphinx')
+    salt.enforceStateWithTest(master, "I@nginx:server ${extra_tgt}", 'salt.minion')
+    salt.enforceStateWithTest(master, "I@nginx:server ${extra_tgt}", 'nginx')
 
     // setup keystone service
-    if (salt.testTarget(master, 'I@keystone:server')) {
-        salt.enforceState(master, 'I@keystone:server and *01*', 'keystone.server')
-        salt.enforceState(master, 'I@keystone:server', 'keystone.server')
+    if (salt.testTarget(master, "I@keystone:server ${extra_tgt}")) {
+        salt.enforceState(master, "I@keystone:server:role:primary ${extra_tgt}", 'keystone.server')
+        salt.enforceState(master, "I@keystone:server ${extra_tgt}", 'keystone.server')
         // populate keystone services/tenants/roles/users
 
         // keystone:client must be called locally
         //salt.runSaltProcessStep(master, 'I@keystone:client', 'cmd.run', ['salt-call state.sls keystone.client'], null, true)
-        salt.runSaltProcessStep(master, 'I@keystone:server', 'service.restart', ['apache2'])
+        salt.runSaltProcessStep(master, "I@keystone:server ${extra_tgt}", 'service.restart', ['apache2'])
         sleep(30)
     }
-    if (salt.testTarget(master, 'I@keystone:client')) {
-        salt.enforceState(master, 'I@keystone:client and *01*', 'keystone.client')
-        salt.enforceState(master, 'I@keystone:client', 'keystone.client')
+    if (salt.testTarget(master, "I@keystone:client ${extra_tgt}")) {
+        first_target = salt.getFirstMinion(master, "I@keystone:client ${extra_tgt}")
+        salt.enforceState(master, "${first_target} ${extra_tgt}", 'keystone.client')
+        salt.enforceState(master, "I@keystone:client ${extra_tgt}", 'keystone.client')
     }
-    if (salt.testTarget(master, 'I@keystone:server')) {
+    if (salt.testTarget(master, "I@keystone:server ${extra_tgt}")) {
         common.retry(3,5){
-            salt.cmdRun(master, 'I@keystone:server', '. /root/keystonercv3; openstack service list')
+            salt.cmdRun(master, "I@keystone:server ${extra_tgt}", '. /root/keystonercv3; openstack service list')
         }
     }
 
     // Install glance
-    if (salt.testTarget(master, 'I@glance:server')) {
-        //runSaltProcessStep(master, 'I@glance:server', 'state.sls', ['glance.server'], 1)
-        salt.enforceState(master, 'I@glance:server and *01*', 'glance.server')
-       salt.enforceState(master, 'I@glance:server', 'glance.server')
-    }
+    salt.enforceStateWithTest(master, "I@glance:server:role:primary ${extra_tgt}", 'glance.server', "I@glance:server ${extra_tgt}")
+    salt.enforceStateWithTest(master, "I@glance:server ${extra_tgt}", 'glance.server')
 
     // Check glance service
-    if (salt.testTarget(master, 'I@glance:server')){
+    if (salt.testTarget(master, "I@glance:server ${extra_tgt}")) {
         common.retry(3,5){
-            salt.cmdRun(master, 'I@keystone:server','. /root/keystonercv3; glance image-list')
+            salt.cmdRun(master, "I@keystone:server ${extra_tgt}", '. /root/keystonercv3; glance image-list')
         }
     }
 
     // Create glance resources
-    if (salt.testTarget(master, 'I@glance:client')) {
-        salt.enforceState(master, 'I@glance:client', 'glance.client')
-    }
+    salt.enforceStateWithTest(master, "I@glance:client ${extra_tgt}", 'glance.client')
 
     // Install and check nova service
-    if (salt.testTarget(master, 'I@nova:controller')) {
-        // run on first node first
-        salt.enforceState(master, 'I@nova:controller and *01*', 'nova.controller')
-        salt.enforceState(master, 'I@nova:controller', 'nova.controller')
-        if (salt.testTarget(master, 'I@keystone:server')) {
-           common.retry(3,5){
-               salt.cmdRun(master, 'I@keystone:server', '. /root/keystonercv3; nova service-list')
-           }
+    // run on first node first
+    salt.enforceStateWithTest(master, "I@nova:controller:role:primary ${extra_tgt}", 'nova.controller', "I@nova:controller ${extra_tgt}")
+    salt.enforceStateWithTest(master, "I@nova:controller ${extra_tgt}", 'nova.controller')
+    if (salt.testTarget(master, "I@keystone:server and I@nova:controller ${extra_tgt}")) {
+        common.retry(3,5){
+            salt.cmdRun(master, "I@keystone:server ${extra_tgt}", '. /root/keystonercv3; nova service-list')
         }
     }
 
+
     // Create nova resources
-    if (salt.testTarget(master, 'I@nova:client')) {
-        salt.enforceState(master, 'I@nova:client', 'nova.client')
-    }
+    salt.enforceStateWithTest(master, "I@nova:client ${extra_tgt}", 'nova.client')
 
     // Install and check cinder service
-    if (salt.testTarget(master, 'I@cinder:controller')) {
-        // run on first node first
-        salt.enforceState(master, 'I@cinder:controller and *01*', 'cinder')
-        salt.enforceState(master, 'I@cinder:controller', 'cinder')
-        if (salt.testTarget(master, 'I@keystone:server')) {
-            common.retry(3,5){
-                salt.cmdRun(master, 'I@keystone:server', '. /root/keystonercv3; cinder list')
-            }
+    // run on first node first
+    salt.enforceStateWithTest(master, "I@cinder:controller:role:primary ${extra_tgt}", 'cinder', "I@cinder:controller ${extra_tgt}")
+    salt.enforceStateWithTest(master, "I@cinder:controller ${extra_tgt}", 'cinder')
+    if (salt.testTarget(master, "I@keystone:server and I@cinder:controller ${extra_tgt}")) {
+        common.retry(3,5){
+            salt.cmdRun(master, "I@keystone:server ${extra_tgt}", '. /root/keystonercv3; cinder list')
         }
     }
 
     // Install neutron service
-    if (salt.testTarget(master, 'I@neutron:server')) {
-        // run on first node first
-        salt.enforceState(master, 'I@neutron:server and *01*', 'neutron.server')
-        salt.enforceState(master, 'I@neutron:server', 'neutron.server')
-        if (salt.testTarget(master, 'I@keystone:server')) {
-            common.retry(3,5){
-                salt.cmdRun(master, 'I@keystone:server','. /root/keystonercv3; neutron agent-list')
-            }
+    // run on first node first
+    salt.enforceStateWithTest(master, "I@neutron:server:role:primary ${extra_tgt}", 'neutron.server', "I@neutron:server ${extra_tgt}")
+    salt.enforceStateWithTest(master, "I@neutron:server ${extra_tgt}", 'neutron.server')
+    if (salt.testTarget(master, "I@keystone:server and I@neutron:server ${extra_tgt}")) {
+        common.retry(3,5){
+            salt.cmdRun(master, "I@keystone:server ${extra_tgt}",'. /root/keystonercv3; neutron agent-list')
         }
     }
 
     // Install heat service
-    if (salt.testTarget(master, 'I@heat:server')) {
-        // run on first node first
-        salt.enforceState(master, 'I@heat:server and *01*', 'heat')
-        salt.enforceState(master, 'I@heat:server', 'heat')
-        if (salt.testTarget(master, 'I@keystone:server')) {
-            common.retry(3,5){
-                salt.cmdRun(master, 'I@keystone:server', '. /root/keystonercv3; heat resource-type-list')
-            }
+    salt.enforceStateWithTest(master, "I@heat:server:role:primary ${extra_tgt}", 'heat', "I@heat:server ${extra_tgt}")
+    salt.enforceStateWithTest(master, "I@heat:server ${extra_tgt}", 'heat')
+    if (salt.testTarget(master, "I@keystone:server and I@heat:server ${extra_tgt}")) {
+        common.retry(3,5){
+            salt.cmdRun(master, "I@keystone:server ${extra_tgt}", '. /root/keystonercv3; openstack orchestration resource type list')
         }
     }
 
     // Restart nova api
-    if (salt.testTarget(master, 'I@nova:controller')) {
-        salt.runSaltProcessStep(master, 'I@nova:controller', 'service.restart', ['nova-api'])
+    if (salt.testTarget(master, "I@nova:controller ${extra_tgt}")) {
+        salt.runSaltProcessStep(master, "I@nova:controller ${extra_tgt}", 'service.restart', ['nova-api'])
     }
 
     // Install ironic service
-    if (salt.testTarget(master, 'I@ironic:api')) {
-        salt.enforceState(master, 'I@ironic:api and *01*', 'ironic.api')
-        salt.enforceState(master, 'I@ironic:api', 'ironic.api')
-    }
+    salt.enforceStateWithTest(master, "I@ironic:api:role:primary ${extra_tgt}", 'ironic.api', "I@ironic:api ${extra_tgt}")
+    salt.enforceStateWithTest(master, "I@ironic:api ${extra_tgt}", 'ironic.api')
 
     // Install manila service
-    if (salt.testTarget(master, 'I@manila:api')) {
-        salt.enforceState(master, 'I@manila:api and *01*', 'manila.api')
-        salt.enforceState(master, 'I@manila:api', 'manila.api')
-    }
-    if (salt.testTarget(master, 'I@manila:scheduler')) {
-        salt.enforceState(master, 'I@manila:scheduler', 'manila.scheduler')
-    }
+    salt.enforceStateWithTest(master, "I@manila:api:role:primary ${extra_tgt}", 'manila.api', "I@manila:api ${extra_tgt}")
+    salt.enforceStateWithTest(master, "I@manila:api ${extra_tgt}", 'manila.api')
+    salt.enforceStateWithTest(master, "I@manila:scheduler ${extra_tgt}", 'manila.scheduler')
 
     // Install designate services
-    if (salt.testTarget(master, 'I@designate:server:enabled')) {
-        if (salt.testTarget(master, 'I@designate:server:backend:bind9')) {
-            salt.enforceState(master, 'I@bind:server', 'bind.server')
-        }
-        if (salt.testTarget(master, 'I@designate:server:backend:pdns4')) {
-            salt.enforceState(master, 'I@powerdns:server', 'powerdns.server')
-        }
-        salt.enforceState(master, 'I@designate:server and *01*', 'designate.server')
-        salt.enforceState(master, 'I@designate:server', 'designate')
+    if (salt.testTarget(master, "I@designate:server:enabled ${extra_tgt}")) {
+        salt.enforceState(master, "I@designate:server:role:primary ${extra_tgt}", 'designate.server')
+        salt.enforceState(master, "I@designate:server ${extra_tgt}", 'designate')
     }
 
     // Install octavia api service
-    if (salt.testTarget(master, 'I@octavia:api')) {
-        salt.enforceState(master, 'I@octavia:api and *01*', 'octavia')
-        salt.enforceState(master, 'I@octavia:api', 'octavia')
-    }
+    salt.enforceStateWithTest(master, "I@octavia:api:role:primary ${extra_tgt}", 'octavia.api', "I@octavia:api ${extra_tgt}")
+    salt.enforceStateWithTest(master, "I@octavia:api ${extra_tgt}", 'octavia.api')
 
     // Install DogTag server service
-    if (salt.testTarget(master, 'I@dogtag:server')) {
-        salt.enforceState(master, 'I@dogtag:server and *01*', 'dogtag.server')
-        salt.enforceState(master, 'I@dogtag:server', 'dogtag.server')
-    }
+    salt.enforceStateWithTest(master, "I@dogtag:server:role:master ${extra_tgt}", 'dogtag.server', "I@dogtag:server ${extra_tgt}")
+    salt.enforceStateWithTest(master, "I@dogtag:server ${extra_tgt}", 'dogtag.server')
 
     // Install barbican server service
-    if (salt.testTarget(master, 'I@barbican:server')) {
-        salt.enforceState(master, 'I@barbican:server and *01*', 'barbican.server')
-        salt.enforceState(master, 'I@barbican:server', 'barbican.server')
-    }
+    salt.enforceStateWithTest(master, "I@barbican:server:role:primary ${extra_tgt}", 'barbican.server', "I@barbican:server ${extra_tgt}")
+    salt.enforceStateWithTest(master, "I@barbican:server ${extra_tgt}", 'barbican.server')
+
     // Install barbican client
-    if (salt.testTarget(master, 'I@barbican:client')) {
-        salt.enforceState(master, 'I@barbican:client', 'barbican.client')
-    }
+    salt.enforceStateWithTest(master, "I@barbican:client ${extra_tgt}", 'barbican.client')
 
     // Install gnocchi server
-    if (salt.testTarget(master, 'I@gnocchi:server')) {
-        salt.enforceState(master, 'I@gnocchi:server and *01*', 'gnocchi.server')
-        salt.enforceState(master, 'I@gnocchi:server', 'gnocchi.server')
+    salt.enforceStateWithTest(master, "I@gnocchi:server:role:primary ${extra_tgt}", 'gnocchi.server', "I@gnocchi:server ${extra_tgt}")
+    salt.enforceStateWithTest(master, "I@gnocchi:server ${extra_tgt}", 'gnocchi.server')
+
+    // Apply gnocchi client state to create gnocchi archive policies, due to possible
+    // races, apply on the first node initially
+    if (salt.testTarget(master, "I@gnocchi:client ${extra_tgt}")) {
+        first_target = salt.getFirstMinion(master, "I@gnocchi:client ${extra_tgt}")
+        salt.enforceState(master, "${first_target} ${extra_tgt}", 'gnocchi.client')
+        salt.enforceState(master, "I@gnocchi:client ${extra_tgt}", 'gnocchi.client')
     }
 
     // Install gnocchi statsd
-    if (salt.testTarget(master, 'I@gnocchi:statsd')) {
-        salt.enforceState(master, 'I@gnocchi:statsd and *01*', 'gnocchi.statsd')
-        salt.enforceState(master, 'I@gnocchi:statsd', 'gnocchi.statsd')
+    if (salt.testTarget(master, "I@gnocchi:statsd ${extra_tgt}")) {
+        first_target = salt.getFirstMinion(master, "I@gnocchi:statsd ${extra_tgt}")
+        salt.enforceState(master, "${first_target} ${extra_tgt}", 'gnocchi.statsd')
+        salt.enforceState(master, "I@gnocchi:statsd ${extra_tgt}", 'gnocchi.statsd')
     }
 
     // Install panko server
-    if (salt.testTarget(master, 'I@panko:server')) {
-        salt.enforceState(master, 'I@panko:server and *01*', 'panko')
-        salt.enforceState(master, 'I@panko:server', 'panko')
+    if (salt.testTarget(master, "I@panko:server ${extra_tgt}")) {
+        first_target = salt.getFirstMinion(master, "I@panko:server ${extra_tgt}")
+        salt.enforceState(master, "${first_target} ${extra_tgt}", 'panko')
+        salt.enforceState(master, "I@panko:server ${extra_tgt}", 'panko')
     }
 
     // Install ceilometer server
-    if (salt.testTarget(master, 'I@ceilometer:server')) {
-        salt.enforceState(master, 'I@ceilometer:server and *01*', 'ceilometer')
-        salt.enforceState(master, 'I@ceilometer:server', 'ceilometer')
-    }
+    salt.enforceStateWithTest(master, "I@ceilometer:server:role:primary ${extra_tgt}", 'ceilometer', "I@ceilometer:server ${extra_tgt}")
+    salt.enforceStateWithTest(master, "I@ceilometer:server ${extra_tgt}", 'ceilometer')
 
     // Install aodh server
-    if (salt.testTarget(master, 'I@aodh:server')) {
-        salt.enforceState(master, 'I@aodh:server and *01*', 'aodh')
-        salt.enforceState(master, 'I@aodh:server', 'aodh')
+    if (salt.testTarget(master, "I@aodh:server ${extra_tgt}")) {
+        first_target = salt.getFirstMinion(master, "I@aodh:server ${extra_tgt}")
+        salt.enforceState(master, "${first_target} ${extra_tgt}", 'aodh')
+        salt.enforceState(master, "I@aodh:server ${extra_tgt}", 'aodh')
     }
 }
 
 
-def installIronicConductor(master){
+def installIronicConductor(master, extra_tgt = ''){
     def salt = new com.mirantis.mk.Salt()
 
-    if (salt.testTarget(master, 'I@ironic:conductor')) {
-        salt.enforceState(master, 'I@ironic:conductor', 'ironic.conductor')
-        salt.enforceState(master, 'I@ironic:conductor', 'apache')
-    }
-    if (salt.testTarget(master, 'I@tftpd_hpa:server')) {
-        salt.enforceState(master, 'I@tftpd_hpa:server', 'tftpd_hpa')
+    salt.enforceStateWithTest(master, "I@ironic:conductor ${extra_tgt}", 'ironic.conductor')
+    salt.enforceStateWithTest(master, "I@ironic:conductor ${extra_tgt}", 'apache')
+    salt.enforceStateWithTest(master, "I@tftpd_hpa:server ${extra_tgt}", 'tftpd_hpa')
+
+    if (salt.testTarget(master, "I@nova:compute ${extra_tgt}")) {
+        salt.runSaltProcessStep(master, "I@nova:compute ${extra_tgt}", 'service.restart', ['nova-compute'])
     }
 
-    if (salt.testTarget(master, 'I@nova:compute')) {
-        salt.runSaltProcessStep(master, 'I@nova:compute', 'service.restart', ['nova-compute'])
-    }
-
-    if (salt.testTarget(master, 'I@baremetal_simulator:enabled')) {
-        salt.enforceState(master, 'I@baremetal_simulator:enabled', 'baremetal_simulator')
-    }
-    if (salt.testTarget(master, 'I@ironic:client')) {
-        salt.enforceState(master, 'I@ironic:client', 'ironic.client')
-    }
+    salt.enforceStateWithTest(master, "I@baremetal_simulator:enabled ${extra_tgt}", 'baremetal_simulator')
+    salt.enforceStateWithTest(master, "I@ironic:client ${extra_tgt}", 'ironic.client')
 }
 
-def installManilaShare(master){
+def installManilaShare(master, extra_tgt = ''){
     def salt = new com.mirantis.mk.Salt()
 
-    if (salt.testTarget(master, 'I@manila:share')) {
-        salt.enforceState(master, 'I@manila:share', 'manila.share')
-    }
-    if (salt.testTarget(master, 'I@manila:data')) {
-        salt.enforceState(master, 'I@manila:data', 'manila.data')
-    }
-
-    if (salt.testTarget(master, 'I@manila:client')) {
-        salt.enforceState(master, 'I@manila:client', 'manila.client')
-    }
+    salt.enforceStateWithTest(master, "I@manila:share ${extra_tgt}", 'manila.share')
+    salt.enforceStateWithTest(master, "I@manila:data ${extra_tgt}", 'manila.data')
+    salt.enforceStateWithTest(master, "I@manila:client ${extra_tgt}", 'manila.client')
 }
 
 
-def installOpenstackNetwork(master, physical = "false") {
+def installOpenstackNetwork(master, extra_tgt = '') {
     def salt = new com.mirantis.mk.Salt()
     //run full neutron state on neutron.gateway - this will install
     //neutron agents in addition to neutron server. Once neutron agents
     //are up neutron resources can be created without hitting the situation when neutron resources are created
     //prior to neutron agents which results in creating ports in non-usable state
-    if (salt.testTarget(master, 'I@neutron:gateway')) {
-            salt.enforceState(master, 'I@neutron:gateway', 'neutron')
-    }
+    salt.enforceStateWithTest(master, "I@neutron:gateway ${extra_tgt}", 'neutron')
 
     // Create neutron resources - this step was moved here to ensure that
     //neutron resources are created after neutron agens are up. In this case neutron ports will be in
     //usable state. More information: https://bugs.launchpad.net/neutron/+bug/1399249
-    if (salt.testTarget(master, 'I@neutron:client')) {
-        salt.enforceState(master, 'I@neutron:client', 'neutron.client')
-    }
+    salt.enforceStateWithTest(master, "I@neutron:client ${extra_tgt}", 'neutron.client')
 
-    salt.enforceHighstate(master, 'I@neutron:gateway')
+    salt.enforceHighstate(master, "I@neutron:gateway ${extra_tgt}")
 
     // install octavia manager services
-    if (salt.testTarget(master, 'I@octavia:manager')) {
-        salt.runSaltProcessStep(master, 'I@salt:master', 'mine.update', ['*'])
-        salt.enforceState(master, 'I@octavia:manager', 'octavia')
-        salt.enforceState(master, 'I@octavia:manager', 'salt.minion.ca')
-        salt.enforceState(master, 'I@octavia:manager', 'salt.minion.cert')
+    if (salt.testTarget(master, "I@octavia:manager ${extra_tgt}")) {
+        salt.runSaltProcessStep(master, "I@salt:master ${extra_tgt}", 'mine.update', ['*'])
+        salt.enforceState(master, "I@octavia:manager ${extra_tgt}", 'octavia.manager')
+        salt.enforceState(master, "I@octavia:manager ${extra_tgt}", 'salt.minion.ca')
+        salt.enforceState(master, "I@octavia:manager ${extra_tgt}", 'salt.minion.cert')
     }
 }
 
 
-def installOpenstackCompute(master) {
+def installOpenstackCompute(master, extra_tgt = '') {
     def salt = new com.mirantis.mk.Salt()
+    def common = new com.mirantis.mk.Common()
     // Configure compute nodes
-    def compute_compound = 'I@nova:compute'
+    def compute_compound = "I@nova:compute ${extra_tgt}"
     if (salt.testTarget(master, compute_compound)) {
         // In case if infrastructure nodes are used as nova computes too
-        def gluster_compound = 'I@glusterfs:server'
-        def salt_ca_compound = 'I@salt:minion:ca:salt_master_ca'
+        def gluster_compound = "I@glusterfs:server ${extra_tgt}"
+        def salt_ca_compound = "I@salt:minion:ca:salt_master_ca ${extra_tgt}"
         // Enforce highstate asynchronous only on compute nodes which are not glusterfs and not salt ca servers
         def hightstateTarget = "${compute_compound} and not ${gluster_compound} and not ${salt_ca_compound}"
         if (salt.testTarget(master, hightstateTarget)) {
             retry(2) {
-                salt.enforceHighstateWithExclude(master, hightstateTarget, 'opencontrail.client')
+                salt.enforceHighstate(master, hightstateTarget)
             }
+        } else {
+            common.infoMsg("No minions matching highstate target found for target ${hightstateTarget}")
         }
         // Iterate through salt ca servers and check if they have compute role
         // TODO: switch to batch once salt 2017.7+ would be used
+        common.infoMsg("Checking whether ${salt_ca_compound} minions have ${compute_compound} compound")
         for ( target in salt.getMinionsSorted(master, salt_ca_compound) ) {
             for ( cmp_target in salt.getMinionsSorted(master, compute_compound) ) {
                 if ( target == cmp_target ) {
                     // Enforce highstate one by one on salt ca servers which are compute nodes
                     retry(2) {
-                        salt.enforceHighstateWithExclude(master, target, 'opencontrail.client')
+                        salt.enforceHighstate(master, target)
                     }
                 }
             }
         }
         // Iterate through glusterfs servers and check if they have compute role
         // TODO: switch to batch once salt 2017.7+ would be used
+        common.infoMsg("Checking whether ${gluster_compound} minions have ${compute_compound} compound")
         for ( target in salt.getMinionsSorted(master, gluster_compound) ) {
             for ( cmp_target in salt.getMinionsSorted(master, compute_compound) ) {
                 if ( target == cmp_target ) {
                     // Enforce highstate one by one on glusterfs servers which are compute nodes
                     retry(2) {
-                        salt.enforceHighstateWithExclude(master, target, 'opencontrail.client')
+                        salt.enforceHighstate(master, target)
                     }
                 }
             }
         }
     }
+
+    // Run nova:controller to map cmp with cells
+    salt.enforceState(master, "I@nova:controller:role:primary ${extra_tgt}", 'nova.controller', "I@nova:controller ${extra_tgt}")
 }
 
 
-def installContrailNetwork(master) {
+def installContrailNetwork(master, extra_tgt = '') {
     def common = new com.mirantis.mk.Common()
     def salt = new com.mirantis.mk.Salt()
-
+    def first_target
 
     // Install opencontrail database services
-    salt.enforceState(master, 'I@opencontrail:database and *01*', 'opencontrail.database')
-    salt.enforceState(master, 'I@opencontrail:database', 'opencontrail.database')
+    first_target = salt.getFirstMinion(master, "I@opencontrail:database ${extra_tgt}")
+    salt.enforceState(master, "${first_target} ${extra_tgt}", 'opencontrail.database')
+    salt.enforceState(master, "I@opencontrail:database ${extra_tgt}", 'opencontrail.database')
 
     // Install opencontrail control services
-    salt.enforceStateWithExclude(master, "I@opencontrail:control and *01*", "opencontrail", "opencontrail.client")
-    salt.enforceStateWithExclude(master, "I@opencontrail:control", "opencontrail", "opencontrail.client")
-    salt.enforceStateWithExclude(master, "I@opencontrail:collector and *01*", "opencontrail", "opencontrail.client")
+    first_target = salt.getFirstMinion(master, "I@opencontrail:control ${extra_tgt}")
+    salt.enforceStateWithExclude(master, "${first_target} ${extra_tgt}", "opencontrail", "opencontrail.client")
+    salt.enforceStateWithExclude(master, "I@opencontrail:control ${extra_tgt}", "opencontrail", "opencontrail.client")
+    first_target = salt.getFirstMinion(master, "I@opencontrail:collector ${extra_tgt}")
+    salt.enforceStateWithExclude(master, "${first_target} ${extra_tgt}", "opencontrail", "opencontrail.client")
+    salt.enforceStateWithExclude(master, "I@opencontrail:collector ${extra_tgt}", "opencontrail", "opencontrail.client")
 
-    if (salt.testTarget(master, 'I@docker:client and I@opencontrail:control')) {
-        salt.enforceState(master, 'I@opencontrail:control or I@opencontrail:collector', 'docker.client')
-    }
-    installBackup(master, 'contrail')
+    salt.enforceStateWithTest(master, "( I@opencontrail:control or I@opencontrail:collector ) ${extra_tgt}", 'docker.client', "I@docker:client and I@opencontrail:control ${extra_tgt}")
+    installBackup(master, 'contrail', extra_tgt)
 }
 
 
-def installContrailCompute(master) {
+def installContrailCompute(master, extra_tgt = '') {
     def salt = new com.mirantis.mk.Salt()
     def common = new com.mirantis.mk.Common()
     // Configure compute nodes
     // Provision opencontrail control services
-    salt.enforceState(master, 'I@opencontrail:database:id:1', 'opencontrail.client')
+    salt.enforceState(master, "I@opencontrail:database:id:1 ${extra_tgt}", 'opencontrail.client')
     // Provision opencontrail virtual routers
 
     // Generate script /usr/lib/contrail/if-vhost0 for up vhost0
-    if (salt.testTarget(master, 'I@opencontrail:compute')) {
-        salt.enforceStateWithExclude(master, "I@opencontrail:compute", "opencontrail", "opencontrail.client")
+    if (salt.testTarget(master, "I@opencontrail:compute ${extra_tgt}")) {
+        salt.enforceStateWithExclude(master, "I@opencontrail:compute ${extra_tgt}", "opencontrail", "opencontrail.client")
     }
 
-    if (salt.testTarget(master, 'I@nova:compute')) {
-        salt.cmdRun(master, 'I@nova:compute', 'exec 0>&-; exec 1>&-; exec 2>&-; nohup bash -c "ip link | grep vhost && echo no_reboot || sleep 5 && reboot & "', false)
+    if (salt.testTarget(master, "I@nova:compute ${extra_tgt}")) {
+        salt.cmdRun(master, "I@nova:compute ${extra_tgt}", 'exec 0>&-; exec 1>&-; exec 2>&-; nohup bash -c "ip link | grep vhost && echo no_reboot || sleep 5 && reboot & "', false)
     }
 
     sleep(300)
-    if (salt.testTarget(master, 'I@opencontrail:compute')) {
-        salt.enforceState(master, 'I@opencontrail:compute', 'opencontrail.client')
-        salt.enforceState(master, 'I@opencontrail:compute', 'opencontrail')
-    }
+    salt.enforceStateWithTest(master, "I@opencontrail:compute ${extra_tgt}", 'opencontrail.client')
+    salt.enforceStateWithTest(master, "I@opencontrail:compute ${extra_tgt}", 'opencontrail')
 }
 
 
-def installKubernetesInfra(master) {
+def installKubernetesInfra(master, extra_tgt = '') {
     def common = new com.mirantis.mk.Common()
     common.warningMsg("You calling orchestrate.installKubernetesInfra(). This function is deprecated please use orchestrate.installInfra() directly")
-    installInfra(master)
+    installInfra(master, extra_tgt)
 }
 
 
-def installKubernetesControl(master) {
+def installKubernetesControl(master, extra_tgt = '') {
     def salt = new com.mirantis.mk.Salt()
+    def first_target
+    salt.fullRefresh(master, "* ${extra_tgt}")
+
+    // Bootstrap all nodes
+    salt.enforceState(master, "I@kubernetes:master ${extra_tgt}", 'linux')
+    salt.enforceState(master, "I@kubernetes:master ${extra_tgt}", 'salt.minion')
+    salt.enforceState(master, "I@kubernetes:master ${extra_tgt}", ['openssh', 'ntp'])
+
+    // Create and distribute SSL certificates for services using salt state
+    salt.enforceState(master, "I@kubernetes:master ${extra_tgt}", 'salt.minion.cert')
+
+    // Install docker
+    salt.enforceState(master, "I@docker:host ${extra_tgt}", 'docker.host')
+
+    // If network engine is not opencontrail, run addons state for kubernetes
+    if (!salt.getPillar(master, "I@kubernetes:master ${extra_tgt}", 'kubernetes:master:network:opencontrail:enabled')) {
+        salt.enforceState(master, "I@kubernetes:master ${extra_tgt}", 'kubernetes.master.kube-addons')
+    }
 
     // Install Kubernetes pool and Calico
-    salt.enforceState(master, 'I@kubernetes:master', 'kubernetes.master.kube-addons')
-    salt.enforceState(master, 'I@kubernetes:pool', 'kubernetes.pool')
+    salt.enforceState(master, "I@kubernetes:master ${extra_tgt}", 'kubernetes.pool')
 
-    if (salt.testTarget(master, 'I@etcd:server:setup')) {
+    if (salt.testTarget(master, "I@etcd:server:setup ${extra_tgt}")) {
         // Setup etcd server
-        salt.enforceState(master, 'I@kubernetes:master and *01*', 'etcd.server.setup')
+        first_target = salt.getFirstMinion(master, "I@kubernetes:master ${extra_tgt}")
+        salt.enforceState(master, "${first_target} ${extra_tgt}", 'etcd.server.setup')
     }
 
     // Run k8s master at *01* to simplify namespaces creation
-    salt.enforceStateWithExclude(master, 'I@kubernetes:master and *01*', "kubernetes.master", "kubernetes.master.setup")
+    first_target = salt.getFirstMinion(master, "I@kubernetes:master ${extra_tgt}")
 
-    // Run k8s without master.setup
-    salt.enforceStateWithExclude(master, 'I@kubernetes:master', "kubernetes", "kubernetes.master.setup")
+    // If network engine is opencontrail, run master state for kubernetes without kube-addons
+    // The kube-addons state will be called later only in case of opencontrail
+    if (salt.getPillar(master, "I@kubernetes:master ${extra_tgt}", 'kubernetes:master:network:opencontrail:enabled')) {
+        // Run k8s on first node without master.setup and master.kube-addons
+        salt.enforceStateWithExclude(master, "${first_target} ${extra_tgt}", "kubernetes.master", "kubernetes.master.setup,kubernetes.master.kube-addons")
+        // Run k8s without master.setup and master.kube-addons
+        salt.enforceStateWithExclude(master, "I@kubernetes:master ${extra_tgt}", "kubernetes", "kubernetes.master.setup,kubernetes.master.kube-addons")
+    } else {
+        // Run k8s on first node without master.setup and master.kube-addons
+        salt.enforceStateWithExclude(master, "${first_target} ${extra_tgt}", "kubernetes.master", "kubernetes.master.setup")
+        // Run k8s without master.setup
+        salt.enforceStateWithExclude(master, "I@kubernetes:master ${extra_tgt}", "kubernetes", "kubernetes.master.setup")
+    }
 
     // Run k8s master setup
-    salt.enforceState(master, 'I@kubernetes:master and *01*', 'kubernetes.master.setup')
+    salt.enforceState(master, "I@kubernetes:master ${extra_tgt}", 'kubernetes.master.setup')
 
     // Restart kubelet
-    salt.runSaltProcessStep(master, 'I@kubernetes:pool', 'service.restart', ['kubelet'])
+    salt.runSaltProcessStep(master, "I@kubernetes:master ${extra_tgt}", 'service.restart', ['kubelet'])
 }
 
 
-def installKubernetesCompute(master) {
+def installKubernetesCompute(master, extra_tgt = '') {
     def salt = new com.mirantis.mk.Salt()
     salt.fullRefresh(master, "*")
 
     // Bootstrap all nodes
-    salt.enforceState(master, 'I@kubernetes:pool', 'linux')
-    salt.enforceState(master, 'I@kubernetes:pool', 'salt.minion')
-    salt.enforceState(master, 'I@kubernetes:pool', ['openssh', 'ntp'])
+    salt.enforceState(master, "I@kubernetes:pool and not I@kubernetes:master ${extra_tgt}", 'linux')
+    salt.enforceState(master, "I@kubernetes:pool and not I@kubernetes:master ${extra_tgt}", 'salt.minion')
+    salt.enforceState(master, "I@kubernetes:pool and not I@kubernetes:master ${extra_tgt}", ['openssh', 'ntp'])
 
     // Create and distribute SSL certificates for services using salt state
-    salt.enforceState(master, 'I@kubernetes:pool', 'salt.minion.cert')
+    salt.enforceState(master, "I@kubernetes:pool and not I@kubernetes:master ${extra_tgt}", 'salt.minion.cert')
 
     // Install docker
-    salt.enforceState(master, 'I@docker:host', 'docker.host')
+    salt.enforceState(master, "I@docker:host ${extra_tgt}", 'docker.host')
 
     // Install Kubernetes and Calico
-    salt.enforceState(master, 'I@kubernetes:pool', 'kubernetes.pool')
+    salt.enforceState(master, "I@kubernetes:pool and not I@kubernetes:master ${extra_tgt}", 'kubernetes.pool')
 
     // Install Tiller and all configured releases
-    if (salt.testTarget(master, 'I@helm:client')) {
-        salt.enforceState(master, 'I@helm:client', 'helm')
-    }
+    salt.enforceStateWithTest(master, "I@helm:client ${extra_tgt}", 'helm')
+    salt.runSaltProcessStep(master, "I@kubernetes:pool and not I@kubernetes:master ${extra_tgt}", 'service.restart', ['kubelet'])
 }
 
 
-def installDockerSwarm(master) {
+def installDockerSwarm(master, extra_tgt = '') {
     def salt = new com.mirantis.mk.Salt()
 
     //Install and Configure Docker
-    salt.enforceState(master, 'I@docker:swarm', 'docker.host')
-    salt.enforceState(master, 'I@docker:swarm:role:master', 'docker.swarm')
-    salt.enforceState(master, 'I@docker:swarm', 'salt.minion.grains')
-    salt.runSaltProcessStep(master, 'I@docker:swarm', 'mine.update')
-    salt.runSaltProcessStep(master, 'I@docker:swarm', 'saltutil.refresh_modules')
-    sleep(5)
-    salt.enforceState(master, 'I@docker:swarm:role:master', 'docker.swarm')
-    salt.enforceState(master, 'I@docker:swarm:role:manager', 'docker.swarm')
-    sleep(10)
-    salt.cmdRun(master, 'I@docker:swarm:role:master', 'docker node ls')
+    if (salt.testTarget(master, "I@docker:swarm ${extra_tgt}")) {
+        salt.enforceState(master, "I@docker:swarm ${extra_tgt}", 'docker.host')
+        salt.enforceState(master, "I@docker:swarm:role:master ${extra_tgt}", 'docker.swarm')
+        salt.enforceState(master, "I@docker:swarm ${extra_tgt}", 'salt.minion.grains')
+        salt.runSaltProcessStep(master, "I@docker:swarm ${extra_tgt}", 'mine.update')
+        salt.runSaltProcessStep(master, "I@docker:swarm ${extra_tgt}", 'saltutil.refresh_modules')
+        sleep(5)
+        salt.enforceState(master, "I@docker:swarm:role:master ${extra_tgt}", 'docker.swarm')
+        salt.enforceStateWithTest(master, "I@docker:swarm:role:manager ${extra_tgt}", 'docker.swarm')
+        sleep(10)
+        salt.cmdRun(master, "I@docker:swarm:role:master ${extra_tgt}", 'docker node ls')
+    }
 }
 
+// Setup addons for kubernetes - For OpenContrail network engine
+// Use after compute nodes are ready, because K8s addons like DNS should be placed on cmp nodes
+def setupKubeAddonForContrail(master, extra_tgt = '') {
+    def salt = new com.mirantis.mk.Salt()
 
-def installCicd(master) {
+    if (salt.getPillar(master, "I@kubernetes:master ${extra_tgt}", 'kubernetes:master:network:opencontrail:enabled')){
+        // Setup addons for Kubernetes only in case OpenContrail is used as network engine
+        salt.enforceState(master, "I@kubernetes:master ${extra_tgt}", 'kubernetes.master.kube-addons')
+    }
+}
+
+def installCicd(master, extra_tgt = '') {
     def salt = new com.mirantis.mk.Salt()
     def common = new com.mirantis.mk.Common()
-    def gerrit_compound = 'I@gerrit:client and ci*'
-    def jenkins_compound = 'I@jenkins:client and ci*'
+    def gerrit_compound = "I@gerrit:client and ci* ${extra_tgt}"
+    def jenkins_compound = "I@jenkins:client and ci* ${extra_tgt}"
 
     salt.fullRefresh(master, gerrit_compound)
     salt.fullRefresh(master, jenkins_compound)
 
-    if (salt.testTarget(master, 'I@aptly:publisher')) {
-        salt.enforceState(master, 'I@aptly:publisher', 'aptly.publisher',true, null, false, -1, 2)
-    }
-
-    salt.enforceState(master, 'I@docker:swarm:role:master and I@jenkins:client', 'docker.client', true, true, null, false, -1, 2)
+    salt.enforceState(master, "I@docker:swarm:role:master and I@jenkins:client ${extra_tgt}", 'docker.client', true, true, null, false, -1, 2)
 
     // API timeout in minutes
     def wait_timeout = 10
@@ -721,8 +734,8 @@
 
     timeout(wait_timeout) {
       common.infoMsg('Waiting for Gerrit to come up..')
-      def check_gerrit_cmd = 'while [ `curl -sI -m 3 -o /dev/null -w' + " '" + '%{http_code}' + "' " + gerrit_master_url + '/` -ne 200 ]; do sleep 0.5; done'
-      salt.cmdRun(master, gerrit_compound, 'timeout ' + (wait_timeout*60+3) + ' /bin/sh -c ' + '"' + check_gerrit_cmd + '"')
+      def check_gerrit_cmd = 'while true; do curl -sI -m 3 -o /dev/null -w' + " '" + '%{http_code}' + "' " + gerrit_master_url + '/ | grep 200 && break || sleep 1; done'
+      salt.cmdRun(master, gerrit_compound, 'timeout ' + (wait_timeout*60+3) + ' /bin/sh -c -- ' + '"' + check_gerrit_cmd + '"')
     }
 
     // Jenkins
@@ -731,36 +744,28 @@
 
     timeout(wait_timeout) {
       common.infoMsg('Waiting for Jenkins to come up..')
-      def check_jenkins_cmd = 'while [ `curl -sI -m 3 -o /dev/null -w' + " '" + '%{http_code}' + "' " + jenkins_master_url + '/whoAmI/` -ne 200 ]; do sleep 0.5; done'
-      salt.cmdRun(master, jenkins_compound, 'timeout ' + (wait_timeout*60+3) + ' /bin/sh -c ' + '"' + check_jenkins_cmd + '"')
+      def check_jenkins_cmd = 'while true; do curl -sI -m 3 -o /dev/null -w' + " '" + '%{http_code}' + "' " + jenkins_master_url + '/whoAmI/ | grep 200 && break || sleep 1; done'
+      salt.cmdRun(master, jenkins_compound, 'timeout ' + (wait_timeout*60+3) + ' /bin/sh -c -- ' + '"' + check_jenkins_cmd + '"')
     }
 
-    if (salt.testTarget(master, 'I@aptly:server')) {
-        salt.enforceState(master, 'I@aptly:server', 'aptly', true, true, null, false, -1, 2)
-    }
+    salt.enforceStateWithTest(master, "I@openldap:client ${extra_tgt}", 'openldap', "", true, true, null, false, -1, 2)
 
-    if (salt.testTarget(master, 'I@openldap:client')) {
-        salt.enforceState(master, 'I@openldap:client', 'openldap', true, true, null, false, -1, 2)
-    }
-
-    if (salt.testTarget(master, 'I@python:environment')) {
-        salt.enforceState(master, 'I@python:environment', 'python')
-    }
+    salt.enforceStateWithTest(master, "I@python:environment ${extra_tgt}", 'python')
 
     withEnv(['ASK_ON_ERROR=false']){
         retry(2){
             try{
-                salt.enforceState(master, 'I@gerrit:client', 'gerrit')
+                salt.enforceState(master, "I@gerrit:client ${extra_tgt}", 'gerrit')
             }catch(e){
-                salt.fullRefresh(master, 'I@gerrit:client')
+                salt.fullRefresh(master, "I@gerrit:client ${extra_tgt}")
                 throw e //rethrow for retry handler
             }
         }
         retry(2){
             try{
-                salt.enforceState(master, 'I@jenkins:client', 'jenkins')
+                salt.enforceState(master, "I@jenkins:client ${extra_tgt}", 'jenkins')
             }catch(e){
-                salt.fullRefresh(master, 'I@jenkins:client')
+                salt.fullRefresh(master, "I@jenkins:client ${extra_tgt}")
                 throw e //rethrow for retry handler
             }
         }
@@ -768,101 +773,137 @@
 }
 
 
-def installStacklight(master) {
+def installStacklight(master, extra_tgt = '') {
     def common = new com.mirantis.mk.Common()
     def salt = new com.mirantis.mk.Salt()
+    def retries_wait = 20
+    def retries = 15
+    def first_target
 
     // Install core services for K8S environments:
     // HAProxy, Nginx and lusterFS clients
     // In case of OpenStack, those are already installed
     if (common.checkContains('STACK_INSTALL', 'k8s')) {
-        salt.enforceState(master, 'I@haproxy:proxy', 'haproxy')
-        salt.runSaltProcessStep(master, 'I@haproxy:proxy', 'service.status', ['haproxy'])
+        salt.enforceState(master, "I@haproxy:proxy ${extra_tgt}", 'haproxy')
+        salt.runSaltProcessStep(master, "I@haproxy:proxy ${extra_tgt}", 'service.status', ['haproxy'])
 
-        if (salt.testTarget(master, 'I@nginx:server')) {
-            salt.enforceState(master, 'I@nginx:server', 'nginx')
-        }
+        salt.enforceStateWithTest(master, "I@nginx:server ${extra_tgt}", 'nginx')
 
-        if (salt.testTarget(master, 'I@glusterfs:client')) {
-            salt.enforceState(master, 'I@glusterfs:client', 'glusterfs.client')
-        }
+        salt.enforceStateWithTest(master, "I@glusterfs:client ${extra_tgt}", 'glusterfs.client', "", true, true, null, false, -1, 2)
     }
 
-    // Launch containers
-    salt.enforceState(master, 'I@docker:swarm:role:master and I@prometheus:server', 'docker.client')
-    salt.runSaltProcessStep(master, 'I@docker:swarm and I@prometheus:server', 'dockerng.ps')
+    // Install MongoDB for Alerta
+    if (salt.testTarget(master, "I@mongodb:server ${extra_tgt}")) {
+        salt.enforceState(master, "I@mongodb:server ${extra_tgt}", 'mongodb.server')
+
+        // Initialize mongodb replica set
+        common.retry(5,20){
+             salt.enforceState(master, "I@mongodb:server ${extra_tgt}", 'mongodb.cluster')
+        }
+    }
 
     //Install Telegraf
-    salt.enforceState(master, 'I@telegraf:agent or I@telegraf:remote_agent', 'telegraf')
+    salt.enforceState(master, "( I@telegraf:agent or I@telegraf:remote_agent ) ${extra_tgt}", 'telegraf')
 
     // Install Prometheus exporters
-    if (salt.testTarget(master, 'I@prometheus:exporters')) {
-        salt.enforceState(master, 'I@prometheus:exporters', 'prometheus')
-    }
+    salt.enforceStateWithTest(master, "I@prometheus:exporters ${extra_tgt}", 'prometheus')
 
     //Install Elasticsearch and Kibana
-    salt.enforceState(master, '*01* and  I@elasticsearch:server', 'elasticsearch.server')
-    salt.enforceState(master, 'I@elasticsearch:server', 'elasticsearch.server')
-    salt.enforceState(master, '*01* and I@kibana:server', 'kibana.server')
-    salt.enforceState(master, 'I@kibana:server', 'kibana.server')
-    salt.enforceState(master, 'I@elasticsearch:client', 'elasticsearch.client')
-    salt.enforceState(master, 'I@kibana:client', 'kibana.client')
+    if (salt.testTarget(master, "I@elasticsearch:server:enabled:true ${extra_tgt}")) {
+        first_target = salt.getFirstMinion(master, "I@elasticsearch:server:enabled:true ${extra_tgt}")
+        salt.enforceState(master, "${first_target} ${extra_tgt}", 'elasticsearch.server')
+    }
+    salt.enforceStateWithTest(master, "I@elasticsearch:server:enabled:true ${extra_tgt}", 'elasticsearch.server')
+    if (salt.testTarget(master, "I@kibana:server:enabled:true ${extra_tgt}")) {
+        first_target = salt.getFirstMinion(master, "I@kibana:server:enabled:true ${extra_tgt}")
+        salt.enforceState(master, "${first_target} ${extra_tgt}", 'kibana.server')
+    }
+    salt.enforceStateWithTest(master, "I@kibana:server:enabled:true ${extra_tgt}", 'kibana.server')
 
-    //Install InfluxDB
-    if (salt.testTarget(master, 'I@influxdb:server')) {
-        salt.enforceState(master, '*01* and I@influxdb:server', 'influxdb')
-        salt.enforceState(master, 'I@influxdb:server', 'influxdb')
+    // Check ES cluster health status
+    def pillar = salt.getPillar(master, "I@elasticsearch:client ${extra_tgt}", 'elasticsearch:client:server:host')
+    def elasticsearch_vip
+    if(!pillar['return'].isEmpty()) {
+        elasticsearch_vip = pillar['return'][0].values()[0]
+    } else {
+        common.errorMsg('[ERROR] Elasticsearch VIP address could not be retrieved')
+    }
+    pillar = salt.getPillar(master, "I@elasticsearch:client ${extra_tgt}", 'elasticsearch:client:server:port')
+    def elasticsearch_port
+    if(!pillar['return'].isEmpty()) {
+        elasticsearch_port = pillar['return'][0].values()[0]
+    } else {
+        common.errorMsg('[ERROR] Elasticsearch VIP port could not be retrieved')
+    }
+    common.retry(retries,retries_wait) {
+        common.infoMsg('Waiting for Elasticsearch to become green..')
+        salt.cmdRun(master, "I@elasticsearch:client ${extra_tgt}", "curl -sf ${elasticsearch_vip}:${elasticsearch_port}/_cat/health | awk '{print \$4}' | grep green")
     }
 
-    //Install Prometheus LTS
-    if (salt.testTarget(master, 'I@prometheus:relay')) {
-        salt.enforceState(master, 'I@prometheus:relay', 'prometheus')
+    common.retry(retries,retries_wait) {
+        salt.enforceState(master, "I@elasticsearch:client ${extra_tgt}", 'elasticsearch.client')
+    }
+
+    common.retry(retries,retries_wait) {
+        salt.enforceState(master, "I@kibana:client ${extra_tgt}", 'kibana.client')
+    }
+
+    //Install InfluxDB
+    if (salt.testTarget(master, "I@influxdb:server ${extra_tgt}")) {
+        first_target = salt.getFirstMinion(master, "I@influxdb:server ${extra_tgt}")
+        salt.enforceState(master, "${first_target} ${extra_tgt}", 'influxdb')
+        salt.enforceState(master, "I@influxdb:server ${extra_tgt}", 'influxdb')
     }
 
     // Install service for the log collection
-    if (salt.testTarget(master, 'I@fluentd:agent')) {
-        salt.enforceState(master, 'I@fluentd:agent', 'fluentd')
+    if (salt.testTarget(master, "I@fluentd:agent ${extra_tgt}")) {
+        salt.enforceState(master, "I@fluentd:agent ${extra_tgt}", 'fluentd')
     } else {
-        salt.enforceState(master, 'I@heka:log_collector', 'heka.log_collector')
+        salt.enforceState(master, "I@heka:log_collector ${extra_tgt}", 'heka.log_collector')
     }
 
     // Install heka ceilometer collector
-    if (salt.testTarget(master, 'I@heka:ceilometer_collector:enabled')) {
-        salt.enforceState(master, 'I@heka:ceilometer_collector:enabled', 'heka.ceilometer_collector')
-        salt.runSaltProcessStep(master, 'I@heka:ceilometer_collector:enabled', 'service.restart', ['ceilometer_collector'], null, true)
+    if (salt.testTarget(master, "I@heka:ceilometer_collector:enabled ${extra_tgt}")) {
+        salt.enforceState(master, "I@heka:ceilometer_collector:enabled ${extra_tgt}", 'heka.ceilometer_collector')
+        salt.runSaltProcessStep(master, "I@heka:ceilometer_collector:enabled ${extra_tgt}", 'service.restart', ['ceilometer_collector'], null, true)
     }
 
     // Install galera
     if (common.checkContains('STACK_INSTALL', 'k8s')) {
-        salt.enforceState(master, 'I@galera:master', 'galera', true, true, null, false, -1, 2)
-        salt.enforceState(master, 'I@galera:slave', 'galera', true, true, null, false, -1, 2)
+        salt.enforceState(master, "I@galera:master ${extra_tgt}", 'galera', true, true, null, false, -1, 2)
+        salt.enforceState(master, "I@galera:slave ${extra_tgt}", 'galera', true, true, null, false, -1, 2)
 
         // Check galera status
-        salt.runSaltProcessStep(master, 'I@galera:master', 'mysql.status')
-        salt.runSaltProcessStep(master, 'I@galera:slave', 'mysql.status')
+        salt.runSaltProcessStep(master, "I@galera:master ${extra_tgt}", 'mysql.status')
+        salt.runSaltProcessStep(master, "I@galera:slave ${extra_tgt}", 'mysql.status')
     }
 
     //Collect Grains
-    salt.enforceState(master, 'I@salt:minion', 'salt.minion.grains')
-    salt.runSaltProcessStep(master, 'I@salt:minion', 'saltutil.refresh_modules')
-    salt.runSaltProcessStep(master, 'I@salt:minion', 'mine.update')
+    salt.enforceState(master, "I@salt:minion ${extra_tgt}", 'salt.minion.grains')
+    salt.runSaltProcessStep(master, "I@salt:minion ${extra_tgt}", 'saltutil.refresh_modules')
+    salt.runSaltProcessStep(master, "I@salt:minion ${extra_tgt}", 'mine.update')
     sleep(5)
 
     // Configure Prometheus in Docker Swarm
-    salt.enforceState(master, 'I@docker:swarm and I@prometheus:server', 'prometheus')
+    salt.enforceState(master, "I@docker:swarm and I@prometheus:server ${extra_tgt}", 'prometheus')
 
     //Configure Remote Collector in Docker Swarm for Openstack deployments
     if (!common.checkContains('STACK_INSTALL', 'k8s')) {
-        salt.enforceState(master, 'I@docker:swarm and I@prometheus:server', 'heka.remote_collector', true, false)
+        salt.enforceState(master, "I@docker:swarm and I@prometheus:server ${extra_tgt}", 'heka.remote_collector', true, false)
     }
 
+    // Launch containers
+    salt.enforceState(master, "I@docker:swarm:role:master and I@prometheus:server ${extra_tgt}", 'docker.client')
+    salt.runSaltProcessStep(master, "I@docker:swarm and I@prometheus:server ${extra_tgt}", 'dockerng.ps')
+
+    //Install Prometheus LTS
+    salt.enforceStateWithTest(master, "I@prometheus:relay ${extra_tgt}", 'prometheus')
+
     // Install sphinx server
-    if (salt.testTarget(master, 'I@sphinx:server')) {
-        salt.enforceState(master, 'I@sphinx:server', 'sphinx')
-    }
+    salt.enforceStateWithTest(master, "I@sphinx:server ${extra_tgt}", 'sphinx')
 
     //Configure Grafana
-    def pillar = salt.getPillar(master, 'ctl01*', '_param:stacklight_monitor_address')
+    pillar = salt.getPillar(master, "ctl01* ${extra_tgt}", '_param:stacklight_monitor_address')
     common.prettyPrint(pillar)
 
     def stacklight_vip
@@ -874,117 +915,117 @@
 
     common.infoMsg("Waiting for service on http://${stacklight_vip}:15013/ to start")
     sleep(120)
-    salt.enforceState(master, 'I@grafana:client', 'grafana.client')
+    salt.enforceState(master, "I@grafana:client ${extra_tgt}", 'grafana.client')
 }
 
-def installStacklightv1Control(master) {
+def installStacklightv1Control(master, extra_tgt = '') {
     def salt = new com.mirantis.mk.Salt()
 
     // infra install
     // Install the StackLight backends
-    salt.enforceState(master, '*01* and  I@elasticsearch:server', 'elasticsearch.server')
-    salt.enforceState(master, 'I@elasticsearch:server', 'elasticsearch.server')
+    salt.enforceState(master, "*01* and I@elasticsearch:server ${extra_tgt}", 'elasticsearch.server')
+    salt.enforceState(master, "I@elasticsearch:server ${extra_tgt}", 'elasticsearch.server')
 
-    salt.enforceState(master, '*01* and I@influxdb:server', 'influxdb')
-    salt.enforceState(master, 'I@influxdb:server', 'influxdb')
+    salt.enforceState(master, "*01* and I@influxdb:server ${extra_tgt}", 'influxdb')
+    salt.enforceState(master, "I@influxdb:server ${extra_tgt}", 'influxdb')
 
-    salt.enforceState(master, '*01* and I@kibana:server', 'kibana.server')
-    salt.enforceState(master, 'I@kibana:server', 'kibana.server')
+    salt.enforceState(master, "*01* and I@kibana:server ${extra_tgt}", 'kibana.server')
+    salt.enforceState(master, "I@kibana:server ${extra_tgt}", 'kibana.server')
 
-    salt.enforceState(master, '*01* and I@grafana:server','grafana.server')
-    salt.enforceState(master, 'I@grafana:server','grafana.server')
+    salt.enforceState(master, "*01* and I@grafana:server ${extra_tgt}",'grafana.server')
+    salt.enforceState(master, "I@grafana:server ${extra_tgt}",'grafana.server')
 
-    def alarming_service_pillar = salt.getPillar(master, 'mon*01*', '_param:alarming_service')
+    def alarming_service_pillar = salt.getPillar(master, "mon*01* ${extra_tgt}", '_param:alarming_service')
     def alarming_service = alarming_service_pillar['return'][0].values()[0]
 
     switch (alarming_service) {
         case 'sensu':
             // Update Sensu
-            salt.enforceState(master, 'I@sensu:server and I@rabbitmq:server', 'rabbitmq')
-            salt.enforceState(master, 'I@redis:cluster:role:master', 'redis')
-            salt.enforceState(master, 'I@redis:server', 'redis')
-            salt.enforceState(master, 'I@sensu:server', 'sensu')
+            salt.enforceState(master, "I@sensu:server and I@rabbitmq:server ${extra_tgt}", 'rabbitmq')
+            salt.enforceState(master, "I@redis:cluster:role:master ${extra_tgt}", 'redis')
+            salt.enforceState(master, "I@redis:server ${extra_tgt}", 'redis')
+            salt.enforceState(master, "I@sensu:server ${extra_tgt}", 'sensu')
         default:
             // Update Nagios
-            salt.enforceState(master, 'I@nagios:server', 'nagios.server')
+            salt.enforceState(master, "I@nagios:server ${extra_tgt}", 'nagios.server')
             // Stop the Nagios service because the package starts it by default and it will
             // started later only on the node holding the VIP address
-            salt.runSaltProcessStep(master, 'I@nagios:server', 'service.stop', ['nagios3'], null, true)
+            salt.runSaltProcessStep(master, "I@nagios:server ${extra_tgt}", 'service.stop', ['nagios3'], null, true)
     }
 
-    salt.enforceState(master, 'I@elasticsearch:client', 'elasticsearch.client.service')
-    salt.enforceState(master, 'I@kibana:client', 'kibana.client')
+    salt.enforceState(master, "I@elasticsearch:client ${extra_tgt}", 'elasticsearch.client.service')
+    salt.enforceState(master, "I@kibana:client ${extra_tgt}", 'kibana.client')
 
     sleep(10)
 }
 
-def installStacklightv1Client(master) {
+def installStacklightv1Client(master, extra_tgt = '') {
     def salt = new com.mirantis.mk.Salt()
     def common = new com.mirantis.mk.Common()
 
-    salt.cmdRun(master, 'I@elasticsearch:client', 'salt-call state.sls elasticsearch.client')
-    // salt.enforceState(master, 'I@elasticsearch:client', 'elasticsearch.client', true)
-    salt.cmdRun(master, 'I@kibana:client', 'salt-call state.sls kibana.client')
-    // salt.enforceState(master, 'I@kibana:client', 'kibana.client', true)
+    salt.cmdRun(master, "I@elasticsearch:client ${extra_tgt}", 'salt-call state.sls elasticsearch.client')
+    // salt.enforceState(master, "I@elasticsearch:client", 'elasticsearch.client', true)
+    salt.cmdRun(master, "I@kibana:client ${extra_tgt}", 'salt-call state.sls kibana.client')
+    // salt.enforceState(master, "I@kibana:client", 'kibana.client', true)
 
     // Install collectd, heka and sensu services on the nodes, this will also
     // generate the metadata that goes into the grains and eventually into Salt Mine
-    salt.enforceState(master, '*', 'collectd')
-    salt.enforceState(master, '*', 'salt.minion')
-    salt.enforceState(master, '*', 'heka')
+    salt.enforceState(master, "* ${extra_tgt}", 'collectd')
+    salt.enforceState(master, "* ${extra_tgt}", 'salt.minion')
+    salt.enforceState(master, "* ${extra_tgt}", 'heka')
 
     // Gather the Grafana metadata as grains
-    salt.enforceState(master, 'I@grafana:collector', 'grafana.collector', true)
+    salt.enforceState(master, "I@grafana:collector ${extra_tgt}", 'grafana.collector', true)
 
     // Update Salt Mine
-    salt.enforceState(master, '*', 'salt.minion.grains')
-    salt.runSaltProcessStep(master, '*', 'saltutil.refresh_modules')
-    salt.runSaltProcessStep(master, '*', 'mine.update')
+    salt.enforceState(master, "* ${extra_tgt}", 'salt.minion.grains')
+    salt.runSaltProcessStep(master, "* ${extra_tgt}", 'saltutil.refresh_modules')
+    salt.runSaltProcessStep(master, "* ${extra_tgt}", 'mine.update')
 
     sleep(5)
 
     // Update Heka
-    salt.enforceState(master, 'I@heka:aggregator:enabled:True or I@heka:remote_collector:enabled:True', 'heka')
+    salt.enforceState(master, "( I@heka:aggregator:enabled:True or I@heka:remote_collector:enabled:True ) ${extra_tgt}", 'heka')
 
     // Update collectd
-    salt.enforceState(master, 'I@collectd:remote_client:enabled:True', 'collectd')
+    salt.enforceState(master, "I@collectd:remote_client:enabled:True ${extra_tgt}", 'collectd')
 
-    def alarming_service_pillar = salt.getPillar(master, 'mon*01*', '_param:alarming_service')
+    def alarming_service_pillar = salt.getPillar(master, "mon*01* ${extra_tgt}", '_param:alarming_service')
     def alarming_service = alarming_service_pillar['return'][0].values()[0]
 
     switch (alarming_service) {
         case 'sensu':
             // Update Sensu
             // TODO for stacklight team, should be fixed in model
-            salt.enforceState(master, 'I@sensu:client', 'sensu')
+            salt.enforceState(master, "I@sensu:client ${extra_tgt}", 'sensu')
         default:
             break
             // Default is nagios, and was enforced in installStacklightControl()
     }
 
-    salt.cmdRun(master, 'I@grafana:client and *01*', 'salt-call state.sls grafana.client')
-    // salt.enforceState(master, 'I@grafana:client and *01*', 'grafana.client', true)
+    salt.cmdRun(master, "I@grafana:client and *01* ${extra_tgt}", 'salt-call state.sls grafana.client')
+    // salt.enforceState(master, "I@grafana:client and *01*", 'grafana.client', true)
 
     // Finalize the configuration of Grafana (add the dashboards...)
-    salt.enforceState(master, 'I@grafana:client and *01*', 'grafana.client')
-    salt.enforceState(master, 'I@grafana:client and *02*', 'grafana.client')
-    salt.enforceState(master, 'I@grafana:client and *03*', 'grafana.client')
-    // nw salt -C 'I@grafana:client' --async service.restart salt-minion; sleep 10
+    salt.enforceState(master, "I@grafana:client and *01* ${extra_tgt}", 'grafana.client')
+    salt.enforceState(master, "I@grafana:client and *02* ${extra_tgt}", 'grafana.client')
+    salt.enforceState(master, "I@grafana:client and *03* ${extra_tgt}", 'grafana.client')
+    // nw salt -C "I@grafana:client" --async service.restart salt-minion; sleep 10
 
     // Get the StackLight monitoring VIP addres
     //vip=$(salt-call pillar.data _param:stacklight_monitor_address --out key|grep _param: |awk '{print $2}')
     //vip=${vip:=172.16.10.253}
-    def pillar = salt.getPillar(master, 'ctl01*', '_param:stacklight_monitor_address')
+    def pillar = salt.getPillar(master, "ctl01* ${extra_tgt}", '_param:stacklight_monitor_address')
     common.prettyPrint(pillar)
     def stacklight_vip = pillar['return'][0].values()[0]
 
     if (stacklight_vip) {
         // (re)Start manually the services that are bound to the monitoring VIP
         common.infoMsg("restart services on node with IP: ${stacklight_vip}")
-        salt.runSaltProcessStep(master, "G@ipv4:${stacklight_vip}", 'service.restart', ['remote_collectd'])
-        salt.runSaltProcessStep(master, "G@ipv4:${stacklight_vip}", 'service.restart', ['remote_collector'])
-        salt.runSaltProcessStep(master, "G@ipv4:${stacklight_vip}", 'service.restart', ['aggregator'])
-        salt.runSaltProcessStep(master, "G@ipv4:${stacklight_vip}", 'service.restart', ['nagios3'])
+        salt.runSaltProcessStep(master, "G@ipv4:${stacklight_vip} ${extra_tgt}", 'service.restart', ['remote_collectd'])
+        salt.runSaltProcessStep(master, "G@ipv4:${stacklight_vip} ${extra_tgt}", 'service.restart', ['remote_collector'])
+        salt.runSaltProcessStep(master, "G@ipv4:${stacklight_vip} ${extra_tgt}", 'service.restart', ['aggregator'])
+        salt.runSaltProcessStep(master, "G@ipv4:${stacklight_vip} ${extra_tgt}", 'service.restart', ['nagios3'])
     } else {
         throw new Exception("Missing stacklight_vip")
     }
@@ -994,69 +1035,59 @@
 // backups
 //
 
-def installBackup(master, component='common') {
+def installBackup(master, component='common', extra_tgt = '') {
     def salt = new com.mirantis.mk.Salt()
     if (component == 'common') {
         // Install Backupninja
-        if (salt.testTarget(master, 'I@backupninja:client')) {
-            salt.enforceState(master, 'I@backupninja:client', 'salt.minion.grains')
-            salt.runSaltProcessStep(master, 'I@backupninja:client', 'saltutil.sync_grains')
-            salt.runSaltProcessStep(master, 'I@backupninja:client', 'mine.flush')
-            salt.runSaltProcessStep(master, 'I@backupninja:client', 'mine.update')
-            salt.enforceState(master, 'I@backupninja:client', 'backupninja')
+        if (salt.testTarget(master, "I@backupninja:client ${extra_tgt}")) {
+            salt.enforceState(master, "I@backupninja:client ${extra_tgt}", 'salt.minion.grains')
+            salt.runSaltProcessStep(master, "I@backupninja:client ${extra_tgt}", 'saltutil.sync_grains')
+            salt.runSaltProcessStep(master, "I@backupninja:client ${extra_tgt}", 'mine.flush')
+            salt.runSaltProcessStep(master, "I@backupninja:client ${extra_tgt}", 'mine.update')
+            salt.enforceState(master, "I@backupninja:client ${extra_tgt}", 'backupninja')
         }
-        if (salt.testTarget(master, 'I@backupninja:server')) {
-            salt.enforceState(master, 'I@backupninja:server', 'salt.minion.grains')
-            salt.enforceState(master, 'I@backupninja:server', 'backupninja')
-        }
+        salt.enforceStateWithTest(master, "I@backupninja:server ${extra_tgt}", 'salt.minion.grains')
+        salt.enforceStateWithTest(master, "I@backupninja:server ${extra_tgt}", 'backupninja')
     } else if (component == 'mysql') {
         // Install Xtrabackup
-        if (salt.testTarget(master, 'I@xtrabackup:client')) {
-            salt.enforceState(master, 'I@xtrabackup:client', 'salt.minion.grains')
-            salt.runSaltProcessStep(master, 'I@xtrabackup:client', 'saltutil.sync_grains')
-            salt.runSaltProcessStep(master, 'I@xtrabackup:client', 'mine.flush')
-            salt.runSaltProcessStep(master, 'I@xtrabackup:client', 'mine.update')
-            salt.enforceState(master, 'I@xtrabackup:client', 'xtrabackup')
+        if (salt.testTarget(master, "I@xtrabackup:client ${extra_tgt}")) {
+            salt.enforceState(master, "I@xtrabackup:client ${extra_tgt}", 'salt.minion.grains')
+            salt.runSaltProcessStep(master, "I@xtrabackup:client ${extra_tgt}", 'saltutil.sync_grains')
+            salt.runSaltProcessStep(master, "I@xtrabackup:client ${extra_tgt}", 'mine.flush')
+            salt.runSaltProcessStep(master, "I@xtrabackup:client ${extra_tgt}", 'mine.update')
+            salt.enforceState(master, "I@xtrabackup:client ${extra_tgt}", 'xtrabackup')
         }
-        if (salt.testTarget(master, 'I@xtrabackup:server')) {
-            salt.enforceState(master, 'I@xtrabackup:server', 'xtrabackup')
-        }
+        salt.enforceStateWithTest(master, "I@xtrabackup:server ${extra_tgt}", 'xtrabackup')
     } else if (component == 'contrail') {
 
         // Install Cassandra backup
-        if (salt.testTarget(master, 'I@cassandra:backup:client')) {
-            salt.enforceState(master, 'I@cassandra:backup:client', 'salt.minion.grains')
-            salt.runSaltProcessStep(master, 'I@cassandra:backup:client', 'saltutil.sync_grains')
-            salt.runSaltProcessStep(master, 'I@cassandra:backup:client', 'mine.flush')
-            salt.runSaltProcessStep(master, 'I@cassandra:backup:client', 'mine.update')
-            salt.enforceState(master, 'I@cassandra:backup:client', 'cassandra.backup')
+        if (salt.testTarget(master, "I@cassandra:backup:client ${extra_tgt}")) {
+            salt.enforceState(master, "I@cassandra:backup:client ${extra_tgt}", 'salt.minion.grains')
+            salt.runSaltProcessStep(master, "I@cassandra:backup:client ${extra_tgt}", 'saltutil.sync_grains')
+            salt.runSaltProcessStep(master, "I@cassandra:backup:client ${extra_tgt}", 'mine.flush')
+            salt.runSaltProcessStep(master, "I@cassandra:backup:client ${extra_tgt}", 'mine.update')
+            salt.enforceState(master, "I@cassandra:backup:client ${extra_tgt}", 'cassandra.backup')
         }
-        if (salt.testTarget(master, 'I@cassandra:backup:server')) {
-            salt.enforceState(master, 'I@cassandra:backup:server', 'cassandra.backup')
-        }
+        salt.enforceStateWithTest(master, "I@cassandra:backup:server ${extra_tgt}", 'cassandra.backup')
         // Install Zookeeper backup
-        if (salt.testTarget(master, 'I@zookeeper:backup:client')) {
-            salt.enforceState(master, 'I@zookeeper:backup:client', 'salt.minion.grains')
-            salt.runSaltProcessStep(master, 'I@zookeeper:backup:client', 'saltutil.sync_grains')
-            salt.runSaltProcessStep(master, 'I@zookeeper:backup:client', 'mine.flush')
-            salt.runSaltProcessStep(master, 'I@zookeeper:backup:client', 'mine.update')
-            salt.enforceState(master, 'I@zookeeper:backup:client', 'zookeeper.backup')
+        if (salt.testTarget(master, "I@zookeeper:backup:client ${extra_tgt}")) {
+            salt.enforceState(master, "I@zookeeper:backup:client ${extra_tgt}", 'salt.minion.grains')
+            salt.runSaltProcessStep(master, "I@zookeeper:backup:client ${extra_tgt}", 'saltutil.sync_grains')
+            salt.runSaltProcessStep(master, "I@zookeeper:backup:client ${extra_tgt}", 'mine.flush')
+            salt.runSaltProcessStep(master, "I@zookeeper:backup:client ${extra_tgt}", 'mine.update')
+            salt.enforceState(master, "I@zookeeper:backup:client ${extra_tgt}", 'zookeeper.backup')
         }
-        if (salt.testTarget(master, 'I@zookeeper:backup:server')) {
-            salt.enforceState(master, 'I@zookeeper:backup:server', 'zookeeper.backup')
-        }
+        salt.enforceStateWithTest(master, "I@zookeeper:backup:server ${extra_tgt}", 'zookeeper.backup')
     } else if (component == 'ceph') {
         // Install Ceph backup
-        if (salt.testTarget(master, 'I@ceph:backup:client')) {
-            salt.enforceState(master, 'I@ceph:backup:client', 'salt.minion.grains')
-            salt.runSaltProcessStep(master, 'I@ceph:backup:client', 'saltutil.sync_grains')
-            salt.runSaltProcessStep(master, 'I@ceph:backup:client', 'mine.flush')
-            salt.runSaltProcessStep(master, 'I@ceph:backup:client', 'mine.update')
-            salt.enforceState(master, 'I@ceph:backup:client', 'ceph.backup')
+        if (salt.testTarget(master, "I@ceph:backup:client ${extra_tgt}")) {
+            salt.enforceState(master, "I@ceph:backup:client ${extra_tgt}", 'salt.minion.grains')
+            salt.runSaltProcessStep(master, "I@ceph:backup:client ${extra_tgt}", 'saltutil.sync_grains')
+            salt.runSaltProcessStep(master, "I@ceph:backup:client ${extra_tgt}", 'mine.flush')
+            salt.runSaltProcessStep(master, "I@ceph:backup:client ${extra_tgt}", 'mine.update')
+            salt.enforceState(master, "I@ceph:backup:client ${extra_tgt}", 'ceph.backup')
         }
-        if (salt.testTarget(master, 'I@ceph:backup:server')) {
-            salt.enforceState(master, 'I@ceph:backup:server', 'ceph.backup')
-        }
+        salt.enforceStateWithTest(master, "I@ceph:backup:server ${extra_tgt}", 'ceph.backup')
     }
 
 }
@@ -1065,99 +1096,103 @@
 // Ceph
 //
 
-def installCephMon(master, target='I@ceph:mon') {
+def installCephMon(master, target="I@ceph:mon", extra_tgt = '') {
     def salt = new com.mirantis.mk.Salt()
 
-    salt.enforceState(master, 'I@ceph:common', 'salt.minion.grains')
+    salt.enforceState(master, "I@ceph:common ${extra_tgt}", 'salt.minion.grains')
 
     // generate keyrings
-    if (salt.testTarget(master, 'I@ceph:mon:keyring:mon or I@ceph:common:keyring:admin')) {
-        salt.enforceState(master, 'I@ceph:mon:keyring:mon or I@ceph:common:keyring:admin', 'ceph.mon')
-        salt.runSaltProcessStep(master, 'I@ceph:mon', 'saltutil.sync_grains')
-        salt.runSaltProcessStep(master, 'I@ceph:mon:keyring:mon or I@ceph:common:keyring:admin', 'mine.update')
+    if (salt.testTarget(master, "( I@ceph:mon:keyring:mon or I@ceph:common:keyring:admin ) ${extra_tgt}")) {
+        salt.enforceState(master, "( I@ceph:mon:keyring:mon or I@ceph:common:keyring:admin ) ${extra_tgt}", 'ceph.mon')
+        salt.runSaltProcessStep(master, "I@ceph:mon ${extra_tgt}", 'saltutil.sync_grains')
+        salt.runSaltProcessStep(master, "( I@ceph:mon:keyring:mon or I@ceph:common:keyring:admin ) ${extra_tgt}", 'mine.update')
         sleep(5)
     }
     // install Ceph Mons
     salt.enforceState(master, target, 'ceph.mon')
-    if (salt.testTarget(master, 'I@ceph:mgr')) {
-        salt.enforceState(master, 'I@ceph:mgr', 'ceph.mgr')
-    }
+    salt.enforceStateWithTest(master, "I@ceph:mgr ${extra_tgt}", 'ceph.mgr')
 }
 
-def installCephOsd(master, target='I@ceph:osd', setup=true) {
+def installCephOsd(master, target="I@ceph:osd", setup=true, extra_tgt = '') {
     def salt = new com.mirantis.mk.Salt()
 
     // install Ceph OSDs
     salt.enforceState(master, target, 'ceph.osd')
-    salt.runSaltProcessStep(master, 'I@ceph:osd', 'saltutil.sync_grains')
+    salt.runSaltProcessStep(master, "I@ceph:osd ${extra_tgt}", 'saltutil.sync_grains')
     salt.enforceState(master, target, 'ceph.osd.custom')
-    salt.runSaltProcessStep(master, 'I@ceph:osd', 'saltutil.sync_grains')
-    salt.runSaltProcessStep(master, 'I@ceph:osd', 'mine.update')
+    salt.runSaltProcessStep(master, "I@ceph:osd ${extra_tgt}", 'saltutil.sync_grains')
+    salt.runSaltProcessStep(master, "I@ceph:osd ${extra_tgt}", 'mine.update')
     installBackup(master, 'ceph')
 
     // setup pools, keyrings and maybe crush
-    if (salt.testTarget(master, 'I@ceph:setup') && setup) {
+    if (salt.testTarget(master, "I@ceph:setup ${extra_tgt}") && setup) {
         sleep(5)
-        salt.enforceState(master, 'I@ceph:setup', 'ceph.setup')
+        salt.enforceState(master, "I@ceph:setup ${extra_tgt}", 'ceph.setup')
     }
 }
 
-def installCephClient(master) {
+def installCephClient(master, extra_tgt = '') {
     def salt = new com.mirantis.mk.Salt()
 
     // install Ceph Radosgw
-    if (salt.testTarget(master, 'I@ceph:radosgw')) {
-        salt.runSaltProcessStep(master, 'I@ceph:radosgw', 'saltutil.sync_grains')
-        salt.enforceState(master, 'I@ceph:radosgw', 'ceph.radosgw')
+    if (salt.testTarget(master, "I@ceph:radosgw ${extra_tgt} and I@node_role.openstack-control")) {
+        salt.runSaltProcessStep(master, "I@ceph:radosgw ${extra_tgt}", 'saltutil.sync_grains')
+        salt.enforceState(master, "I@ceph:radosgw ${extra_tgt}", 'ceph.radosgw')
     }
-    // setup Keystone service and endpoints for swift or / and S3
-    if (salt.testTarget(master, 'I@keystone:client')) {
-        salt.enforceState(master, 'I@keystone:client', 'keystone.client')
+
+    // setup keyring for Openstack services
+    salt.enforceStateWithTest(master, "I@ceph:common and I@glance:server ${extra_tgt}", ['ceph.common', 'ceph.setup.keyring'])
+
+    salt.enforceStateWithTest(master, "I@ceph:common and I@cinder:controller ${extra_tgt}", ['ceph.common', 'ceph.setup.keyring'])
+
+    if (salt.testTarget(master, "I@ceph:common and I@nova:compute ${extra_tgt}")) {
+        salt.enforceState(master, "I@ceph:common and I@nova:compute ${extra_tgt}", ['ceph.common', 'ceph.setup.keyring'])
+        salt.runSaltProcessStep(master, "I@ceph:common and I@nova:compute ${extra_tgt}", 'saltutil.sync_grains')
     }
+
+    salt.enforceStateWithTest(master, "I@ceph:common and I@gnocchi:server ${extra_tgt}", ['ceph.common', 'ceph.setup.keyring'])
 }
 
-def connectCeph(master) {
+def connectCeph(master, extra_tgt = '') {
     def salt = new com.mirantis.mk.Salt()
 
+    // setup Keystone service and endpoints for swift or / and S3
+    salt.enforceStateWithTest(master, "I@keystone:client ${extra_tgt}", 'keystone.client')
+
     // connect Ceph to the env
-    if (salt.testTarget(master, 'I@ceph:common and I@glance:server')) {
-        salt.enforceState(master, 'I@ceph:common and I@glance:server', ['ceph.common', 'ceph.setup.keyring', 'glance'])
-        salt.runSaltProcessStep(master, 'I@ceph:common and I@glance:server', 'service.restart', ['glance-api'])
+    if (salt.testTarget(master, "I@ceph:common and I@glance:server ${extra_tgt}")) {
+        salt.enforceState(master, "I@ceph:common and I@glance:server ${extra_tgt}", ['glance'])
+        salt.runSaltProcessStep(master, "I@ceph:common and I@glance:server ${extra_tgt}", 'service.restart', ['glance-api'])
     }
-    if (salt.testTarget(master, 'I@ceph:common and I@cinder:controller')) {
-        salt.enforceState(master, 'I@ceph:common and I@cinder:controller', ['ceph.common', 'ceph.setup.keyring', 'cinder'])
-        salt.runSaltProcessStep(master, 'I@ceph:common and I@cinder:controller', 'service.restart', ['cinder-volume'])
+    if (salt.testTarget(master, "I@ceph:common and I@cinder:controller ${extra_tgt}")) {
+        salt.enforceState(master, "I@ceph:common and I@cinder:controller ${extra_tgt}", ['cinder'])
+        salt.runSaltProcessStep(master, "I@ceph:common and I@cinder:controller ${extra_tgt}", 'service.restart', ['cinder-volume'])
     }
-    if (salt.testTarget(master, 'I@ceph:common and I@nova:compute')) {
-        salt.enforceState(master, 'I@ceph:common and I@nova:compute', ['ceph.common', 'ceph.setup.keyring'])
-        salt.runSaltProcessStep(master, 'I@ceph:common and I@nova:compute', 'saltutil.sync_grains')
-        salt.enforceState(master, 'I@ceph:common and I@nova:compute', ['nova'])
-        salt.runSaltProcessStep(master, 'I@ceph:common and I@nova:compute', 'service.restart', ['nova-compute'])
+    if (salt.testTarget(master, "I@ceph:common and I@nova:compute ${extra_tgt}")) {
+        salt.enforceState(master, "I@ceph:common and I@nova:compute ${extra_tgt}", ['nova'])
+        salt.runSaltProcessStep(master, "I@ceph:common and I@nova:compute ${extra_tgt}", 'service.restart', ['nova-compute'])
+    }
+    if (salt.testTarget(master, "I@ceph:common and I@gnocchi:server ${extra_tgt}")) {
+        salt.enforceState(master, "I@ceph:common and I@gnocchi:server:role:primary ${extra_tgt}", 'gnocchi.server')
+        salt.enforceState(master, "I@ceph:common and I@gnocchi:server ${extra_tgt}", 'gnocchi.server')
     }
 }
 
-def installOssInfra(master) {
+def installOssInfra(master, extra_tgt = '') {
   def common = new com.mirantis.mk.Common()
   def salt = new com.mirantis.mk.Salt()
 
-  if (!common.checkContains('STACK_INSTALL', 'k8s') || !common.checkContains('STACK_INSTALL', 'openstack')) {
-    def orchestrate = new com.mirantis.mk.Orchestrate()
-    orchestrate.installInfra(master)
-  }
-
-  if (salt.testTarget(master, 'I@devops_portal:config')) {
-    salt.enforceState(master, 'I@devops_portal:config', 'devops_portal.config')
-    salt.enforceState(master, 'I@rundeck:client', ['linux.system.user', 'openssh'])
-    salt.enforceState(master, 'I@rundeck:server', 'rundeck.server')
-  }
+  salt.enforceStateWithTest(master, "I@devops_portal:config ${extra_tgt}", 'devops_portal.config', )
+  salt.enforceStateWithTest(master, "I@rundeck:client ${extra_tgt}", ['linux.system.user', 'openssh'], "I@devops_portal:config ${extra_tgt}")
+  salt.enforceStateWithTest(master, "I@rundeck:server ${extra_tgt}", 'rundeck.server', "I@devops_portal:config ${extra_tgt}")
 }
 
-def installOss(master) {
+def installOss(master, extra_tgt = '') {
   def common = new com.mirantis.mk.Common()
   def salt = new com.mirantis.mk.Salt()
 
   //Get oss VIP address
-  def pillar = salt.getPillar(master, 'cfg01*', '_param:stacklight_monitor_address')
+  def pillar = salt.getPillar(master, "cfg01* ${extra_tgt}", '_param:stacklight_monitor_address')
   common.prettyPrint(pillar)
 
   def oss_vip
@@ -1170,28 +1205,28 @@
   // Postgres client - initialize OSS services databases
   timeout(120){
     common.infoMsg("Waiting for postgresql database to come up..")
-    salt.cmdRun(master, 'I@postgresql:client', 'while true; do if docker service logs postgresql_postgresql-db 2>&1 | grep "ready to accept"; then break; else sleep 5; fi; done')
+    salt.cmdRun(master, "I@postgresql:client ${extra_tgt}", 'while true; do if docker service logs postgresql_postgresql-db 2>&1 | grep "ready to accept"; then break; else sleep 5; fi; done')
   }
   // XXX: first run usually fails on some inserts, but we need to create databases at first
-  salt.enforceState(master, 'I@postgresql:client', 'postgresql.client', true, false)
+  salt.enforceState(master, "I@postgresql:client ${extra_tgt}", 'postgresql.client', true, false)
 
   // Setup postgres database with integration between
   // Pushkin notification service and Security Monkey security audit service
   timeout(10) {
     common.infoMsg("Waiting for Pushkin to come up..")
-    salt.cmdRun(master, 'I@postgresql:client', "while true; do curl -sf ${oss_vip}:8887/apps >/dev/null && break; done")
+    salt.cmdRun(master, "I@postgresql:client ${extra_tgt}", "while true; do curl -sf ${oss_vip}:8887/apps >/dev/null && break; done")
   }
-  salt.enforceState(master, 'I@postgresql:client', 'postgresql.client')
+  salt.enforceState(master, "I@postgresql:client ${extra_tgt}", 'postgresql.client')
 
   // Rundeck
   timeout(10) {
     common.infoMsg("Waiting for Rundeck to come up..")
-    salt.cmdRun(master, 'I@rundeck:client', "while true; do curl -sf ${oss_vip}:4440 >/dev/null && break; done")
+    salt.cmdRun(master, "I@rundeck:client ${extra_tgt}", "while true; do curl -sf ${oss_vip}:4440 >/dev/null && break; done")
   }
-  salt.enforceState(master, 'I@rundeck:client', 'rundeck.client')
+  salt.enforceState(master, "I@rundeck:client ${extra_tgt}", 'rundeck.client')
 
   // Elasticsearch
-  pillar = salt.getPillar(master, 'I@elasticsearch:client', 'elasticsearch:client:server:host')
+  pillar = salt.getPillar(master, "I@elasticsearch:client ${extra_tgt}", 'elasticsearch:client:server:host')
   def elasticsearch_vip
   if(!pillar['return'].isEmpty()) {
     elasticsearch_vip = pillar['return'][0].values()[0]
@@ -1201,7 +1236,35 @@
 
   timeout(10) {
     common.infoMsg('Waiting for Elasticsearch to come up..')
-    salt.cmdRun(master, 'I@elasticsearch:client', "while true; do curl -sf ${elasticsearch_vip}:9200 >/dev/null && break; done")
+    salt.cmdRun(master, "I@elasticsearch:client ${extra_tgt}", "while true; do curl -sf ${elasticsearch_vip}:9200 >/dev/null && break; done")
   }
-  salt.enforceState(master, 'I@elasticsearch:client', 'elasticsearch.client')
+  salt.enforceState(master, "I@elasticsearch:client ${extra_tgt}", 'elasticsearch.client')
 }
+
+/**
+ * Function receives connection string, target and configuration yaml pattern
+ * and retrieves config from salt minion according to pattern. After that it
+ * sorts applications according to priorities and runs orchestration states
+ * @param master Salt Connection object or pepperEnv
+ * @param tgt Target
+ * @param conf Configuration pattern
+ */
+def OrchestrateApplications(master, tgt, conf) {
+    def salt = new com.mirantis.mk.Salt()
+    def common = new com.mirantis.mk.Common()
+    def _orch = salt.getConfig(master, tgt, conf)
+    if ( !_orch['return'][0].values()[0].isEmpty() ) {
+      Map<String,Integer> _orch_app = [:]
+      for (k in _orch['return'][0].values()[0].keySet()) {
+        _orch_app[k] = _orch['return'][0].values()[0][k].values()[0].toInteger()
+      }
+      def _orch_app_sorted = common.SortMapByValueAsc(_orch_app)
+      common.infoMsg("Applications will be deployed in following order:"+_orch_app_sorted.keySet())
+      for (app in _orch_app_sorted.keySet()) {
+        salt.orchestrateSystem(master, ['expression': tgt, 'type': 'compound'], "${app}.orchestrate.deploy")
+      }
+    }
+    else {
+      common.infoMsg("No applications found for orchestration")
+    }
+}
\ No newline at end of file
diff --git a/src/com/mirantis/mk/Salt.groovy b/src/com/mirantis/mk/Salt.groovy
index b587f1b..ca209be 100644
--- a/src/com/mirantis/mk/Salt.groovy
+++ b/src/com/mirantis/mk/Salt.groovy
@@ -64,9 +64,13 @@
         'client': client,
         'expr_form': target.type,
     ]
-    if(batch != null && ( (batch instanceof Integer && batch > 0) || (batch instanceof String && batch.contains("%")))){
-        data['client']= "local_batch"
-        data['batch'] = batch
+
+    if(batch != null){
+        batch = batch.toString()
+        if( (batch.isInteger() && batch.toInteger() > 0) || (batch.contains("%"))){
+            data['client']= "local_batch"
+            data['batch'] = batch
+        }
     }
 
     if (args) {
@@ -128,6 +132,16 @@
     }
 }
 
+/**
+ * Return config items for given saltId and target
+ * @param saltId Salt Connection object or pepperEnv (the command will be sent using the selected method)
+ * @param target Get grain target
+ * @param config grain name (optional)
+ * @return output of salt command
+ */
+def getConfig(saltId, target, config) {
+    return runSaltCommand(saltId, 'local', ['expression': target, 'type': 'compound'], 'config.get', null, [config.replace('.', ':')], '--out=json')
+}
 
 /**
  * Enforces state on given saltId and target
@@ -150,6 +164,39 @@
     return enforceState(saltId, target, state, output, failOnError, batch, optional, read_timeout, retries, queue, saltArgs)
 }
 
+/**
+ * Allows to test the given target for reachability and if reachable enforces the state
+ * @param saltId Salt Connection object or pepperEnv (the command will be sent using the selected method)
+ * @param target State enforcing target
+ * @param state Salt state
+ * @param testTargetMatcher Salt compound matcher to be tested (default is empty string). If empty string, param `target` will be used for tests
+ * @param output print output (optional, default true)
+ * @param failOnError throw exception on salt state result:false (optional, default true)
+ * @param batch salt batch parameter integer or string with percents (optional, default null - disable batch)
+ * @param optional Optional flag (if true pipeline will continue even if no minions for target found)
+ * @param read_timeout http session read timeout (optional, default -1 - disabled)
+ * @param retries Retry count for salt state. (optional, default -1 - no retries)
+ * @param queue salt queue parameter for state.sls calls (optional, default true) - CANNOT BE USED WITH BATCH
+ * @param saltArgs additional salt args eq. ["runas=aptly"]
+ * @return output of salt command
+ */
+def enforceStateWithTest(saltId, target, state, testTargetMatcher = "", output = true, failOnError = true, batch = null, optional = false, read_timeout=-1, retries=-1, queue=true, saltArgs=[]) {
+    def common = new com.mirantis.mk.Common()
+    if (!testTargetMatcher) {
+        testTargetMatcher = target
+    }
+    if (testTarget(saltId, testTargetMatcher)) {
+        return enforceState(saltId, target, state, output, failOnError, batch, false, read_timeout, retries, queue, saltArgs)
+    } else {
+        if (!optional) {
+                common.infoMsg("No Minions matched the target matcher: ${testTargetMatcher}, and 'optional' param was set to false. - This may signify missing pillar definition!!")
+//              throw new Exception("No Minions matched the target matcher: ${testTargetMatcher}.") TODO: Change the infoMsg to Error once the methods are changed to Use named params and optional param will be set globally
+            } else {
+                common.infoMsg("No Minions matched the target matcher: ${testTargetMatcher}, but 'optional' param was set to true - Pipeline continues. ")
+            }
+    }
+}
+
 /* Enforces state on given saltId and target
  * @param saltId Salt Connection object or pepperEnv (the command will be sent using the selected method)
  * @param target State enforcing target
@@ -162,9 +209,10 @@
  * @param retries Retry count for salt state. (optional, default -1 - no retries)
  * @param queue salt queue parameter for state.sls calls (optional, default true) - CANNOT BE USED WITH BATCH
  * @param saltArgs additional salt args eq. ["runas=aptly", exclude="opencontrail.database"]
+ * @param minionRestartWaitTimeout specifies timeout that we should wait after minion restart.
  * @return output of salt command
  */
-def enforceState(saltId, target, state, output = true, failOnError = true, batch = null, optional = false, read_timeout=-1, retries=-1, queue=true, saltArgs = []) {
+def enforceState(saltId, target, state, output = true, failOnError = true, batch = null, optional = false, read_timeout=-1, retries=-1, queue=true, saltArgs = [], minionRestartWaitTimeout=10) {
     def common = new com.mirantis.mk.Common()
     // add state to salt args
     if (state instanceof String) {
@@ -196,7 +244,7 @@
             out = runSaltCommand(saltId, 'local', ['expression': target, 'type': 'compound'], 'state.sls', batch, saltArgs.reverse(), kwargs, -1, read_timeout)
             checkResult(out, failOnError, output)
         }
-        waitForMinion(out)
+        waitForMinion(out, minionRestartWaitTimeout)
         return out
     } else {
         common.infoMsg("No Minions matched the target given, but 'optional' param was set to true - Pipeline continues. ")
@@ -233,12 +281,20 @@
                 def node = out["return"][i];
                 for(int j=0;j<node.size();j++){
                     def nodeKey = node.keySet()[j]
-                    if (!node[nodeKey].contains("Salt command execution success")) {
-                        throw new Exception("Execution of cmd ${originalCmd} failed. Server returns: ${node[nodeKey]}")
+                    if (node[nodeKey] instanceof String) {
+                        if (!node[nodeKey].contains("Salt command execution success")) {
+                            throw new Exception("Execution of cmd ${originalCmd} failed. Server returns: ${node[nodeKey]}")
+                        }
+                    } else if (node[nodeKey] instanceof Boolean) {
+                        if (!node[nodeKey]) {
+                            throw new Exception("Execution of cmd ${originalCmd} failed. Server returns: ${node[nodeKey]}")
+                        }
+                    } else {
+                        throw new Exception("Execution of cmd ${originalCmd} failed. Server returns unexpected data type: ${node[nodeKey]}")
                     }
                 }
             }
-        }else{
+        } else {
             throw new Exception("Salt Api response doesn't have return param!")
         }
     }
@@ -261,7 +317,7 @@
  * @param answers how many minions should return (optional, default 1)
  * @return output of salt command
  */
-def minionPresent(saltId, target, minion_name, waitUntilPresent = true, batch=null, output = true, maxRetries = 200, answers = 1) {
+def minionPresent(saltId, target, minion_name, waitUntilPresent = true, batch=null, output = true, maxRetries = 180, answers = 1) {
     minion_name = minion_name.replace("*", "")
     def common = new com.mirantis.mk.Common()
     common.infoMsg("Looking for minion: " + minion_name)
@@ -269,27 +325,35 @@
     if (waitUntilPresent){
         def count = 0
         while(count < maxRetries) {
+            try {
+                def out = runSaltCommand(saltId, 'local', ['expression': target, 'type': 'compound'], 'cmd.shell', batch, [cmd], null, 5)
+                if (output) {
+                    printSaltCommandResult(out)
+                }
+                def valueMap = out["return"][0]
+                def result = valueMap.get(valueMap.keySet()[0])
+                def resultsArray = result.tokenize("\n")
+                def size = resultsArray.size()
+                if (size >= answers) {
+                    return out
+                }
+                count++
+                sleep(time: 1000, unit: 'MILLISECONDS')
+                common.infoMsg("Waiting for ${cmd} on ${target} to be in correct state")
+            } catch (Exception er) {
+                common.infoMsg('[WARNING]: runSaltCommand command read timeout within 5 seconds. You have very slow or broken environment')
+            }
+        }
+    } else {
+        try {
             def out = runSaltCommand(saltId, 'local', ['expression': target, 'type': 'compound'], 'cmd.shell', batch, [cmd], null, 5)
             if (output) {
                 printSaltCommandResult(out)
             }
-            def valueMap = out["return"][0]
-            def result = valueMap.get(valueMap.keySet()[0])
-            def resultsArray = result.tokenize("\n")
-            def size = resultsArray.size()
-            if (size >= answers) {
-                return out
-            }
-            count++
-            sleep(time: 500, unit: 'MILLISECONDS')
-            common.infoMsg("Waiting for ${cmd} on ${target} to be in correct state")
+            return out
+        } catch (Exception er) {
+            common.infoMsg('[WARNING]: runSaltCommand command read timeout within 5 seconds. You have very slow or broken environment')
         }
-    } else {
-        def out = runSaltCommand(saltId, 'local', ['expression': target, 'type': 'compound'], 'cmd.shell', batch, [cmd], null, 5)
-        if (output) {
-            printSaltCommandResult(out)
-        }
-        return out
     }
     // otherwise throw exception
     common.errorMsg("Status of command ${cmd} on ${target} failed, please check it.")
@@ -498,7 +562,7 @@
  * @return output of salt command
  */
 def enforceHighstate(saltId, target, output = false, failOnError = true, batch = null, saltArgs = []) {
-    def out = runSaltCommand(saltId, 'local', ['expression': target, 'type': 'compound'], 'state.highstate', batch)
+    def out = runSaltCommand(saltId, 'local', ['expression': target, 'type': 'compound'], 'state.highstate', batch, saltArgs)
     def common = new com.mirantis.mk.Common()
 
     common.infoMsg("Running state highstate on ${target}")
@@ -536,7 +600,7 @@
  */
 def getFirstMinion(saltId, target) {
     def minionsSorted = getMinionsSorted(saltId, target)
-    return minionsSorted[0].split("\\.")[0]
+    return minionsSorted[0]
 }
 
 /**
@@ -654,10 +718,65 @@
  * @param saltId Salt Connection object or pepperEnv (the command will be sent using the selected method)
  * @param target Orchestration target
  * @param orchestrate Salt orchestrate params
+ * @param kwargs Salt orchestrate params
  * @return output of salt command
  */
-def orchestrateSystem(saltId, target, orchestrate) {
-    return runSaltCommand(saltId, 'runner', target, 'state.orchestrate', [orchestrate])
+def orchestrateSystem(saltId, target, orchestrate=[], kwargs = null) {
+    //Since the runSaltCommand uses "arg" (singular) for "runner" client this won`t work correctly on old salt 2016
+    //cause this version of salt used "args" (plural) for "runner" client, see following link for reference:
+    //https://github.com/saltstack/salt/pull/32938
+    def common = new com.mirantis.mk.Common()
+    def result = runSaltCommand(saltId, 'runner', target, 'state.orchestrate', true, orchestrate, kwargs, 7200, 7200)
+        if(result != null){
+            if(result['return']){
+                def retcode = result['return'][0].get('retcode')
+                if (retcode != 0) {
+                    throw new Exception("Orchestration state failed while running: "+orchestrate)
+                }else{
+                    common.infoMsg("Orchestrate state "+orchestrate+" succeeded")
+                }
+            }else{
+                common.errorMsg("Salt result has no return attribute! Result: ${result}")
+            }
+        }else{
+            common.errorMsg("Cannot check salt result, given result is null")
+        }
+}
+
+/**
+ * Run salt pre or post orchestrate tasks
+ *
+ * @param  saltId       Salt Connection object or pepperEnv (the command will be sent using the selected method)
+ * @param  pillar_tree  Reclass pillar that has orchestrate pillar for desired stage
+ * @param  extra_tgt    Extra targets for compound
+ *
+ * @return              output of salt command
+ */
+def orchestratePrePost(saltId, pillar_tree, extra_tgt = '') {
+
+    def common = new com.mirantis.mk.Common()
+    def salt = new com.mirantis.mk.Salt()
+    def compound = 'I@' + pillar_tree + " " + extra_tgt
+
+    common.infoMsg("Refreshing pillars")
+    runSaltProcessStep(saltId, '*', 'saltutil.refresh_pillar', [], null, true)
+
+    common.infoMsg("Looking for orchestrate pillars")
+    if (salt.testTarget(saltId, compound)) {
+        for ( node in salt.getMinionsSorted(saltId, compound) ) {
+            def pillar = salt.getPillar(saltId, node, pillar_tree)
+            if ( !pillar['return'].isEmpty() ) {
+                for ( orch_id in pillar['return'][0].values() ) {
+                    def orchestrator = orch_id.values()['orchestrator']
+                    def orch_enabled = orch_id.values()['enabled']
+                    if ( orch_enabled ) {
+                        common.infoMsg("Orchestrating: ${orchestrator}")
+                        salt.printSaltCommandResult(salt.orchestrateSystem(saltId, ['expression': node], [orchestrator]))
+                    }
+                }
+            }
+        }
+    }
 }
 
 /**
@@ -724,6 +843,8 @@
                             def resKey;
                             if(node instanceof Map){
                                 resKey = node.keySet()[k]
+                                if (resKey == "retcode")
+                                    continue
                             }else if(node instanceof List){
                                 resKey = k
                             }
@@ -801,7 +922,7 @@
 *
 * @param result    Parsed response of Salt API
 */
-def waitForMinion(result) {
+def waitForMinion(result, minionRestartWaitTimeout=10) {
     def common = new com.mirantis.mk.Common()
     //In order to prevent multiple sleeps use bool variable to catch restart for any minion.
     def isMinionRestarted = false
@@ -843,8 +964,8 @@
         }
     }
     if (isMinionRestarted){
-        common.infoMsg("Salt minion service restart detected. Sleep 10 seconds to wait minion restart")
-        sleep(10)
+      common.infoMsg("Salt minion service restart detected. Sleep ${minionRestartWaitTimeout} seconds to wait minion restart")
+        sleep(minionRestartWaitTimeout)
     }
 }
 
@@ -883,8 +1004,8 @@
  * @param file      File path to read (/etc/hosts for example)
  */
 
-def getFileContent(saltId, target, file) {
-    result = cmdRun(saltId, target, "cat ${file}")
+def getFileContent(saltId, target, file, checkResponse = true, batch=null, output = true, saltArgs = []) {
+    result = cmdRun(saltId, target, "cat ${file}", checkResponse, batch, output, saltArgs)
     return result['return'][0].values()[0].replaceAll('Salt command execution success','')
 }
 
@@ -894,9 +1015,10 @@
  * @param saltId         Salt Connection object or pepperEnv (the command will be sent using the selected method)
  * @param salt_overrides YAML formatted string containing key: value, one per line
  * @param reclass_dir    Directory where Reclass git repo is located
+ * @param extra_tgt      Extra targets for compound
  */
 
-def setSaltOverrides(saltId, salt_overrides, reclass_dir="/srv/salt/reclass") {
+def setSaltOverrides(saltId, salt_overrides, reclass_dir="/srv/salt/reclass", extra_tgt = '') {
     def common = new com.mirantis.mk.Common()
     def salt_overrides_map = readYaml text: salt_overrides
     for (entry in common.entries(salt_overrides_map)) {
@@ -904,9 +1026,9 @@
          def value = entry[1]
 
          common.debugMsg("Set salt override ${key}=${value}")
-         runSaltProcessStep(saltId, 'I@salt:master', 'reclass.cluster_meta_set', [key, value], false)
+         runSaltProcessStep(saltId, "I@salt:master ${extra_tgt}", 'reclass.cluster_meta_set', [key, value], false)
     }
-    runSaltProcessStep(saltId, 'I@salt:master', 'cmd.run', ["git -C ${reclass_dir} update-index --skip-worktree classes/cluster/overrides.yml"])
+    runSaltProcessStep(saltId, "I@salt:master ${extra_tgt}", 'cmd.run', ["git -C ${reclass_dir} update-index --skip-worktree classes/cluster/overrides.yml"])
 }
 
 /**
diff --git a/src/com/mirantis/mk/SaltModelTesting.groovy b/src/com/mirantis/mk/SaltModelTesting.groovy
index f0921c4..2d1a888 100644
--- a/src/com/mirantis/mk/SaltModelTesting.groovy
+++ b/src/com/mirantis/mk/SaltModelTesting.groovy
@@ -1,83 +1,535 @@
 package com.mirantis.mk
 
 /**
- * setup and test salt-master
- *
- * @param masterName          salt master's name
- * @param clusterName         model cluster name
- * @param extraFormulas       extraFormulas to install
- * @param formulasSource      formulas source (git or pkg)
- * @param reclassVersion      Version of used reclass (branch, tag, ...) (optional, default master)
- * @param testDir             directory of model
- * @param formulasSource      Salt formulas source type (optional, default pkg)
- * @param formulasRevision    APT revision for formulas (optional default stable)
- * @param ignoreClassNotfound Ignore missing classes for reclass model
- * @param dockerMaxCpus       max cpus passed to docker (default 0, disabled)
- * @param legacyTestingMode   do you want to enable legacy testing mode (iterating through the nodes directory definitions instead of reading cluster models)
- * @param aptRepoUrl          package repository with salt formulas
- * @param aptRepoGPG          GPG key for apt repository with formulas
+ * Setup Docker to run some tests. Returns true/false based on
+ * whether the tests were successful or not.
+ * @param config - LinkedHashMap with configuration params:
+ *   dockerHostname - (required) Hostname to use for Docker container.
+ *   formulasRevision - (optional) Revision of packages to use (default proposed).
+ *   runCommands - (optional) Dict with closure structure of body required tests. For example:
+ *     [ '001_Test': { sh("./run-some-test") }, '002_Test': { sh("./run-another-test") } ]
+ *     Before execution runCommands will be sorted by key names. Alphabetical order is preferred.
+ *   runFinally - (optional) Dict with closure structure of required commands, which should be
+ *     executed regardless of the test results. Same format as for runCommands
+ *   updateRepo - (optional) Whether to run common repo update step.
+ *   dockerContainerName - (optional) Docker container name.
+ *   dockerImageName - (optional) Docker image name
+ *   dockerMaxCpus - (optional) Number of CPUS to use in Docker.
+ *   dockerExtraOpts - (optional) Array of Docker extra opts for container
+ *   envOpts - (optional) Array of variables that should be passed as ENV vars to Docker container.
+ * Return true | false
  */
 
-def setupAndTestNode(masterName, clusterName, extraFormulas, testDir, formulasSource = 'pkg', formulasRevision = 'stable', reclassVersion = "master", dockerMaxCpus = 0, ignoreClassNotfound = false, legacyTestingMode = false, aptRepoUrl='', aptRepoGPG='') {
-  // timeout for test execution (40min)
-  def testTimeout = 40 * 60
-  def saltOpts = "--retcode-passthrough --force-color"
-  def common = new com.mirantis.mk.Common()
-  def workspace = common.getWorkspace()
-  def img = docker.image("mirantis/salt:saltstack-ubuntu-xenial-salt-2017.7")
-  img.pull()
+def setupDockerAndTest(LinkedHashMap config) {
+    def common = new com.mirantis.mk.Common()
+    def TestMarkerResult = false
+    // setup options
+    def defaultContainerName = 'test-' + UUID.randomUUID().toString()
+    def dockerHostname = config.get('dockerHostname', defaultContainerName)
+    def formulasRevision = config.get('formulasRevision', 'proposed')
+    def runCommands = config.get('runCommands', [:])
+    def runFinally = config.get('runFinally', [:])
+    def baseRepoPreConfig = config.get('baseRepoPreConfig', true)
+    def dockerContainerName = config.get('dockerContainerName', defaultContainerName)
+    def dockerImageName = config.get('image', "mirantis/salt:saltstack-ubuntu-xenial-salt-2017.7")
+    def dockerMaxCpus = config.get('dockerMaxCpus', 4)
+    def dockerExtraOpts = config.get('dockerExtraOpts', [])
+    def envOpts = config.get('envOpts', [])
+    envOpts.add("DISTRIB_REVISION=${formulasRevision}")
+    def dockerBaseOpts = [
+        '-u root:root',
+        "--hostname=${dockerHostname}",
+        '--ulimit nofile=4096:8192',
+        "--name=${dockerContainerName}",
+        "--cpus=${dockerMaxCpus}"
+    ]
 
-  if (!extraFormulas || extraFormulas == "") {
-    extraFormulas = "linux"
-  }
+    def dockerOptsFinal = (dockerBaseOpts + dockerExtraOpts).join(' ')
+    def defaultExtraReposYaml = '''
+---
+distrib_revision: 'nightly'
+aprConfD: |-
+  APT::Get::AllowUnauthenticated 'true';
+  APT::Get::Install-Suggests 'false';
+  APT::Get::Install-Recommends 'false';
+repo:
+  mcp_saltstack:
+    source: "deb [arch=amd64] http://mirror.mirantis.com/SUB_DISTRIB_REVISION/saltstack-2017.7/xenial xenial main"
+    pinning: |-
+        Package: libsodium18
+        Pin: release o=SaltStack
+        Pin-Priority: 50
 
-  def dockerMaxCpusOption = ""
-  if (dockerMaxCpus > 0) {
-    dockerMaxCpusOption = "--cpus=${dockerMaxCpus}"
-  }
+        Package: *
+        Pin: release o=SaltStack
+        Pin-Priority: 1100
+  mcp_extra:
+    source: "deb [arch=amd64] http://mirror.mirantis.com/SUB_DISTRIB_REVISION/extra/xenial xenial main"
+  ubuntu:
+    source: "deb [arch=amd64] http://mirror.mirantis.com/SUB_DISTRIB_REVISION/ubuntu xenial main restricted universe"
+  ubuntu-upd:
+    source: "deb [arch=amd64] http://mirror.mirantis.com/SUB_DISTRIB_REVISION/ubuntu xenial-updates main restricted universe"
+  ubuntu-sec:
+    source: "deb [arch=amd64] http://mirror.mirantis.com/SUB_DISTRIB_REVISION/ubuntu xenial-security main restricted universe"
+'''
+    def img = docker.image(dockerImageName)
+    def extraReposYaml = config.get('extraReposYaml', defaultExtraReposYaml)
 
-  img.inside("-u root:root --hostname=${masterName} --ulimit nofile=4096:8192 ${dockerMaxCpusOption}") {
-    withEnv(["FORMULAS_SOURCE=${formulasSource}", "EXTRA_FORMULAS=${extraFormulas}", "DISTRIB_REVISION=${formulasRevision}",
-            "DEBUG=1", "MASTER_HOSTNAME=${masterName}", "CLUSTER_NAME=${clusterName}", "MINION_ID=${masterName}",
-            "RECLASS_VERSION=${reclassVersion}", "RECLASS_IGNORE_CLASS_NOTFOUND=${ignoreClassNotfound}", "APT_REPOSITORY=${aptRepoUrl}",
-            "APT_REPOSITORY_GPG=${aptRepoGPG}"]){
-        sh("git clone https://github.com/salt-formulas/salt-formulas-scripts /srv/salt/scripts")
-        sh("""rsync -ah ${testDir}/* /srv/salt/reclass && echo '127.0.1.2  salt' >> /etc/hosts
-              cd /srv/salt && find . -type f \\( -name '*.yml' -or -name '*.sh' \\) -exec sed -i 's/apt-mk.mirantis.com/apt.mirantis.net:8085/g' {} \\;
-              cd /srv/salt && find . -type f \\( -name '*.yml' -or -name '*.sh' \\) -exec sed -i 's/apt.mirantis.com/apt.mirantis.net:8085/g' {} \\;""")
-        sh("""for s in \$(python -c \"import site; print(' '.join(site.getsitepackages()))\"); do
-                  sudo -H pip install --install-option=\"--prefix=\" --upgrade --force-reinstall -I \
-                    -t \"\$s\" git+https://github.com/salt-formulas/reclass.git@${reclassVersion};
-                done""")
-        sh("""timeout ${testTimeout} bash -c 'source /srv/salt/scripts/bootstrap.sh; cd /srv/salt/scripts && source_local_envs && configure_salt_master && configure_salt_minion && install_salt_formula_pkg'
-              bash -c 'source /srv/salt/scripts/bootstrap.sh; cd /srv/salt/scripts && saltservice_restart'""")
-        sh("timeout ${testTimeout} bash -c 'source /srv/salt/scripts/bootstrap.sh; cd /srv/salt/scripts && source_local_envs && saltmaster_init'")
+    img.pull()
 
-        if (!legacyTestingMode.toBoolean()) {
-           sh("bash -c 'source /srv/salt/scripts/bootstrap.sh; cd /srv/salt/scripts && verify_salt_minions'")
+    try {
+        img.inside(dockerOptsFinal) {
+            withEnv(envOpts) {
+                try {
+                    // Currently, we don't have any other point to install
+                    // runtime dependencies for tests.
+                    if (baseRepoPreConfig) {
+                        // Warning! Possible point of an 'allow-downgrades' issue
+                        // Probably, need to add such flag into apt.prefs
+                        sh("""#!/bin/bash -xe
+                            echo "Installing extra-deb dependencies inside docker:"
+                            echo > /etc/apt/sources.list
+                            rm -vf /etc/apt/sources.list.d/* || true
+                        """)
+                        common.debianExtraRepos(extraReposYaml)
+                        sh('''#!/bin/bash -xe
+                            apt-get update
+                            apt-get install -y python-netaddr reclass
+                        ''')
+
+                    }
+                    runCommands.sort().each { command, body ->
+                        common.warningMsg("Running command: ${command}")
+                        // doCall is the closure implementation in groovy, allow to pass arguments to closure
+                        body.call()
+                    }
+                    // If we have not failed by this point, the test has passed.
+                    TestMarkerResult = true
+                }
+                finally {
+                    runFinally.sort().each { command, body ->
+                        common.warningMsg("Running ${command} command.")
+                        // doCall is the closure implementation in groovy, allow to pass arguments to closure
+                        body.call()
+                    }
+                }
+            }
         }
     }
+    catch (Exception er) {
+        common.warningMsg("IgnoreMe:Something wrong with img.Message:\n" + er.toString())
+    }
+
+    try {
+        common.warningMsg("IgnoreMe:Force cleanup slave.Ignore docker-daemon errors")
+        timeout(time: 10, unit: 'SECONDS') {
+            sh(script: "set -x; docker kill ${dockerContainerName} || true", returnStdout: true)
+        }
+        timeout(time: 10, unit: 'SECONDS') {
+            sh(script: "set -x; docker rm --force ${dockerContainerName} || true", returnStdout: true)
+        }
+    }
+    catch (Exception er) {
+        common.warningMsg("IgnoreMe:Timeout to delete test docker container with force!Message:\n" + er.toString())
+    }
+
+    if (TestMarkerResult) {
+        common.infoMsg("Test finished: SUCCESS")
+    } else {
+        common.warningMsg("Test finished: FAILURE")
+    }
+    return TestMarkerResult
+}
+
+/**
+ * Wrapper around setupDockerAndTest, to run checks against new Reclass version
+ * that current model is compatible with new Reclass.
+ *
+ * @param config - LinkedHashMap with configuration params:
+ *   dockerHostname - (required) Hostname to use for Docker container.
+ *   distribRevision - (optional) Revision of packages to use (default proposed).
+ *   extraRepo - (optional) Extra repo to use to install new Reclass version. Has
+ *     high priority on distribRevision
+ *   targetNodes - (required) List nodes to check pillar data.
+ */
+def compareReclassVersions(config) {
+    def common = new com.mirantis.mk.Common()
+    def salt = new com.mirantis.mk.Salt()
+    common.infoMsg("Going to test new reclass for CFG node")
+    def distribRevision = config.get('distribRevision', 'proposed')
+    def venv = config.get('venv')
+    def extraRepo = config.get('extraRepo', '')
+    def extraRepoKey = config.get('extraRepoKey', '')
+    def targetNodes = config.get('targetNodes')
+    sh "rm -rf ${env.WORKSPACE}/old ${env.WORKSPACE}/new"
+    sh "mkdir -p ${env.WORKSPACE}/old ${env.WORKSPACE}/new"
+    def configRun = [
+        'formulasRevision': distribRevision,
+        'dockerExtraOpts' : [
+            "-v /srv/salt/reclass:/srv/salt/reclass:ro",
+            "-v /etc/salt:/etc/salt:ro",
+            "-v /usr/share/salt-formulas/:/usr/share/salt-formulas/:ro"
+        ],
+        'envOpts'         : [
+            "WORKSPACE=${env.WORKSPACE}",
+            "NODES_LIST=${targetNodes.join(' ')}"
+        ],
+        'runCommands'     : [
+            '001_Update_Reclass_package'    : {
+                sh('apt-get update && apt-get install -y reclass')
+            },
+            '002_Test_Reclass_Compatibility': {
+                sh('''
+                reclass-salt -b /srv/salt/reclass -t > ${WORKSPACE}/new/inventory || exit 1
+                for node in $NODES_LIST; do
+                    reclass-salt -b /srv/salt/reclass -p $node > ${WORKSPACE}/new/$node || exit 1
+                done
+              ''')
+            }
+        ]
+    ]
+    if (extraRepo) {
+        // FIXME
+        configRun['runCommands']['0001_Additional_Extra_Repo_Passed'] = {
+            sh("""
+                echo "${extraRepo}" > /etc/apt/sources.list.d/mcp_extra.list
+                [ "${extraRepoKey}" ] && wget -O - ${extraRepoKey} | apt-key add -
+            """)
+        }
+    }
+    if (setupDockerAndTest(configRun)) {
+        common.infoMsg("New reclass version is compatible with current model: SUCCESS")
+        def inventoryOld = salt.cmdRun(venv, "I@salt:master", "reclass-salt -b /srv/salt/reclass -t", true, null, true).get("return")[0].values()[0]
+        // [0..-31] to exclude 'echo Salt command execution success' from output
+        writeFile(file: "${env.WORKSPACE}/old/inventory", text: inventoryOld[0..-31])
+        for (String node in targetNodes) {
+            def nodeOut = salt.cmdRun(venv, "I@salt:master", "reclass-salt -b /srv/salt/reclass -p ${node}", true, null, true).get("return")[0].values()[0]
+            writeFile(file: "${env.WORKSPACE}/old/${node}", text: nodeOut[0..-31])
+        }
+        def reclassDiff = common.comparePillars(env.WORKSPACE, env.BUILD_URL, '')
+        currentBuild.description = reclassDiff
+        if (reclassDiff != '<b>No job changes</b>') {
+            throw new RuntimeException("Pillars with new reclass version has been changed: FAILED")
+        } else {
+            common.infoMsg("Pillars not changed with new reclass version: SUCCESS")
+        }
+    } else {
+        throw new RuntimeException("New reclass version is not compatible with current model: FAILED")
+    }
+}
+
+/**
+ * Wrapper over setupDockerAndTest, to test CC model.
+ *
+ * @param config - dict with params:
+ *   dockerHostname - (required) salt master's name
+ *   clusterName - (optional) model cluster name
+ *   extraFormulas - (optional) extraFormulas to install. DEPRECATED
+ *   formulasSource - (optional) formulas source (git or pkg, default pkg)
+ *   reclassVersion - (optional) Version of used reclass (branch, tag, ...) (optional, default master)
+ *   reclassEnv - (require) directory of model
+ *   ignoreClassNotfound - (optional) Ignore missing classes for reclass model (default false)
+ *   aptRepoUrl - (optional) package repository with salt formulas
+ *   aptRepoGPG - (optional) GPG key for apt repository with formulas
+ *   testContext - (optional) Description of test
+ Return: true | exception
+ */
+
+def testNode(LinkedHashMap config) {
+    def common = new com.mirantis.mk.Common()
+    def result = ''
+    def dockerHostname = config.get('dockerHostname')
+    def reclassEnv = config.get('reclassEnv')
+    def clusterName = config.get('clusterName', "")
+    def formulasSource = config.get('formulasSource', 'pkg')
+    def extraFormulas = config.get('extraFormulas', 'linux')
+    def reclassVersion = config.get('reclassVersion', 'master')
+    def ignoreClassNotfound = config.get('ignoreClassNotfound', false)
+    def aptRepoUrl = config.get('aptRepoUrl', "")
+    def aptRepoGPG = config.get('aptRepoGPG', "")
+    def testContext = config.get('testContext', 'test')
+    config['envOpts'] = [
+        "RECLASS_ENV=${reclassEnv}", "SALT_STOPSTART_WAIT=5",
+        "MASTER_HOSTNAME=${dockerHostname}", "CLUSTER_NAME=${clusterName}",
+        "MINION_ID=${dockerHostname}", "FORMULAS_SOURCE=${formulasSource}",
+        "EXTRA_FORMULAS=${extraFormulas}", "RECLASS_VERSION=${reclassVersion}",
+        "RECLASS_IGNORE_CLASS_NOTFOUND=${ignoreClassNotfound}", "DEBUG=1",
+        "APT_REPOSITORY=${aptRepoUrl}", "APT_REPOSITORY_GPG=${aptRepoGPG}",
+        "EXTRA_FORMULAS_PKG_ALL=true"
+    ]
+
+    config['runCommands'] = [
+        '001_Clone_salt_formulas_scripts': {
+            sh(script: 'git clone https://github.com/salt-formulas/salt-formulas-scripts /srv/salt/scripts', returnStdout: true)
+        },
+
+        '002_Prepare_something'          : {
+            sh('''rsync -ah ${RECLASS_ENV}/* /srv/salt/reclass && echo '127.0.1.2  salt' >> /etc/hosts
+              cd /srv/salt && find . -type f \\( -name '*.yml' -or -name '*.sh' \\) -exec sed -i 's/apt-mk.mirantis.com/apt.mirantis.net:8085/g' {} \\;
+              cd /srv/salt && find . -type f \\( -name '*.yml' -or -name '*.sh' \\) -exec sed -i 's/apt.mirantis.com/apt.mirantis.net:8085/g' {} \\;
+            ''')
+        },
+
+        '004_Run_tests'                  : {
+            def testTimeout = 40 * 60
+            timeout(time: testTimeout, unit: 'SECONDS') {
+                sh('''#!/bin/bash
+                source /srv/salt/scripts/bootstrap.sh
+                cd /srv/salt/scripts
+                source_local_envs
+                configure_salt_master
+                configure_salt_minion
+                install_salt_formula_pkg
+                source /srv/salt/scripts/bootstrap.sh
+                cd /srv/salt/scripts
+                saltservice_restart''')
+
+                sh('''#!/bin/bash
+                source /srv/salt/scripts/bootstrap.sh
+                cd /srv/salt/scripts
+                source_local_envs
+                saltmaster_init''')
+
+                sh('''#!/bin/bash
+                source /srv/salt/scripts/bootstrap.sh
+                cd /srv/salt/scripts
+                verify_salt_minions''')
+            }
+        }
+    ]
+    config['runFinally'] = [
+        '001_Archive_artefacts': {
+            sh(script: "cd /tmp; tar -czf ${env.WORKSPACE}/nodesinfo.tar.gz *reclass*", returnStatus: true)
+            archiveArtifacts artifacts: "nodesinfo.tar.gz"
+        }
+    ]
+    testResult = setupDockerAndTest(config)
+    if (testResult) {
+        common.infoMsg("Node test for context: ${testContext} model: ${reclassEnv} finished: SUCCESS")
+    } else {
+        throw new RuntimeException("Node test for context: ${testContext} model: ${reclassEnv} finished: FAILURE")
+    }
+    return testResult
+}
+
+/**
+ * setup and test salt-master
+ *
+ * @param masterName salt master's name
+ * @param clusterName model cluster name
+ * @param extraFormulas extraFormulas to install. DEPRECATED
+ * @param formulasSource formulas source (git or pkg)
+ * @param reclassVersion Version of used reclass (branch, tag, ...) (optional, default master)
+ * @param testDir directory of model
+ * @param formulasSource Salt formulas source type (optional, default pkg)
+ * @param formulasRevision APT revision for formulas (optional default stable)
+ * @param ignoreClassNotfound Ignore missing classes for reclass model
+ * @param dockerMaxCpus max cpus passed to docker (default 0, disabled)
+ * @param legacyTestingMode do you want to enable legacy testing mode (iterating through the nodes directory definitions instead of reading cluster models)
+ * @param aptRepoUrl package repository with salt formulas
+ * @param aptRepoGPG GPG key for apt repository with formulas
+ * Return                     true | false
+ */
+
+def setupAndTestNode(masterName, clusterName, extraFormulas = '*', testDir, formulasSource = 'pkg',
+                     formulasRevision = 'stable', reclassVersion = "master", dockerMaxCpus = 0,
+                     ignoreClassNotfound = false, legacyTestingMode = false, aptRepoUrl = '', aptRepoGPG = '', dockerContainerName = false) {
+    def common = new com.mirantis.mk.Common()
+    // TODO
+    common.errorMsg('You are using deprecated function!Please migrate to "setupDockerAndTest".' +
+        'It would be removed after 2018.q4 release!Pushing forced 60s sleep..')
+    sh('sleep 60')
+    // timeout for test execution (40min)
+    def testTimeout = 40 * 60
+    def TestMarkerResult = false
+    def saltOpts = "--retcode-passthrough --force-color"
+    def workspace = common.getWorkspace()
+    def img = docker.image("mirantis/salt:saltstack-ubuntu-xenial-salt-2017.7")
+    img.pull()
+
+    if (formulasSource == 'pkg') {
+        if (extraFormulas) {
+            common.warningMsg("You have passed deprecated variable:extraFormulas=${extraFormulas}. " +
+                "\n It would be ignored, and all formulas would be installed anyway")
+        }
+    }
+    if (!dockerContainerName) {
+        dockerContainerName = 'setupAndTestNode' + UUID.randomUUID().toString()
+    }
+    def dockerMaxCpusOpt = "--cpus=4"
+    if (dockerMaxCpus > 0) {
+        dockerMaxCpusOpt = "--cpus=${dockerMaxCpus}"
+    }
+    try {
+        img.inside("-u root:root --hostname=${masterName} --ulimit nofile=4096:8192 ${dockerMaxCpusOpt} --name=${dockerContainerName}") {
+            withEnv(["FORMULAS_SOURCE=${formulasSource}", "EXTRA_FORMULAS=${extraFormulas}", "EXTRA_FORMULAS_PKG_ALL=true",
+                     "DISTRIB_REVISION=${formulasRevision}",
+                     "DEBUG=1", "MASTER_HOSTNAME=${masterName}",
+                     "CLUSTER_NAME=${clusterName}", "MINION_ID=${masterName}",
+                     "RECLASS_VERSION=${reclassVersion}", "RECLASS_IGNORE_CLASS_NOTFOUND=${ignoreClassNotfound}",
+                     "APT_REPOSITORY=${aptRepoUrl}", "SALT_STOPSTART_WAIT=5",
+                     "APT_REPOSITORY_GPG=${aptRepoGPG}"]) {
+                try {
+                    // Currently, we don't have any other point to install
+                    // runtime dependencies for tests.
+                    sh("""#!/bin/bash -xe
+            echo "Installing extra-deb dependencies inside docker:"
+            echo "APT::Get::AllowUnauthenticated 'true';"  > /etc/apt/apt.conf.d/99setupAndTestNode
+            echo "APT::Get::Install-Suggests 'false';"  >> /etc/apt/apt.conf.d/99setupAndTestNode
+            echo "APT::Get::Install-Recommends 'false';"  >> /etc/apt/apt.conf.d/99setupAndTestNode
+            rm -vf /etc/apt/sources.list.d/* || true
+            echo 'deb [arch=amd64] http://mirror.mirantis.com/$DISTRIB_REVISION/ubuntu xenial main restricted universe' > /etc/apt/sources.list
+            echo 'deb [arch=amd64] http://mirror.mirantis.com/$DISTRIB_REVISION/ubuntu xenial-updates main restricted universe' >> /etc/apt/sources.list
+            apt-get update
+            apt-get install -y python-netaddr
+            """)
+                    sh(script: "git clone https://github.com/salt-formulas/salt-formulas-scripts /srv/salt/scripts", returnStdout: true)
+                    sh("""rsync -ah ${testDir}/* /srv/salt/reclass && echo '127.0.1.2  salt' >> /etc/hosts
+            cd /srv/salt && find . -type f \\( -name '*.yml' -or -name '*.sh' \\) -exec sed -i 's/apt-mk.mirantis.com/apt.mirantis.net:8085/g' {} \\;
+            cd /srv/salt && find . -type f \\( -name '*.yml' -or -name '*.sh' \\) -exec sed -i 's/apt.mirantis.com/apt.mirantis.net:8085/g' {} \\;
+            """)
+                    // FIXME: should be changed to use reclass from mcp_extra_nigtly?
+                    sh("""for s in \$(python -c \"import site; print(' '.join(site.getsitepackages()))\"); do
+            sudo -H pip install --install-option=\"--prefix=\" --upgrade --force-reinstall -I \
+            -t \"\$s\" git+https://github.com/salt-formulas/reclass.git@${reclassVersion};
+            done""")
+                    timeout(time: testTimeout, unit: 'SECONDS') {
+                        sh('''#!/bin/bash
+              source /srv/salt/scripts/bootstrap.sh
+              cd /srv/salt/scripts
+              source_local_envs
+              configure_salt_master
+              configure_salt_minion
+              install_salt_formula_pkg
+              source /srv/salt/scripts/bootstrap.sh
+              cd /srv/salt/scripts
+              saltservice_restart''')
+                        sh('''#!/bin/bash
+              source /srv/salt/scripts/bootstrap.sh
+              cd /srv/salt/scripts
+              source_local_envs
+              saltmaster_init''')
+
+                        if (!legacyTestingMode.toBoolean()) {
+                            sh('''#!/bin/bash
+                source /srv/salt/scripts/bootstrap.sh
+                cd /srv/salt/scripts
+                verify_salt_minions
+                ''')
+                        }
+                    }
+                    // If we have not failed by this point, the test has passed.
+                    TestMarkerResult = true
+                }
+
+                finally {
+                    // Collect rendered per-node data. This info can simply be used
+                    // for diff processing. Data was generated via reclass.cli --nodeinfo
+                    // during verify_salt_minions.
+                    sh(script: "cd /tmp; tar -czf ${env.WORKSPACE}/nodesinfo.tar.gz *reclass*", returnStatus: true)
+                    archiveArtifacts artifacts: "nodesinfo.tar.gz"
+                }
+            }
+        }
+    }
+    catch (Exception er) {
+        common.warningMsg("IgnoreMe:Something wrong with img.Message:\n" + er.toString())
+    }
 
     if (legacyTestingMode.toBoolean()) {
-      common.infoMsg("Running legacy mode test for master hostname ${masterName}")
-      def nodes = sh script: "find /srv/salt/reclass/nodes -name '*.yml' | grep -v 'cfg*.yml'", returnStdout: true
-      for (minion in nodes.tokenize()) {
-        def basename = sh script: "set +x;basename ${minion} .yml", returnStdout: true
-        if (!basename.trim().contains(masterName)) {
-          testMinion(basename.trim())
+        common.infoMsg("Running legacy mode test for master hostname ${masterName}")
+        def nodes = sh(script: "find /srv/salt/reclass/nodes -name '*.yml' | grep -v 'cfg*.yml'", returnStdout: true)
+        for (minion in nodes.tokenize()) {
+            def basename = sh(script: "set +x;basename ${minion} .yml", returnStdout: true)
+            if (!basename.trim().contains(masterName)) {
+                testMinion(basename.trim())
+            }
         }
-      }
     }
-  }
+
+    try {
+        common.warningMsg("IgnoreMe:Force cleanup slave.Ignore docker-daemon errors")
+        timeout(time: 10, unit: 'SECONDS') {
+            sh(script: "set -x; docker kill ${dockerContainerName} || true", returnStdout: true)
+        }
+        timeout(time: 10, unit: 'SECONDS') {
+            sh(script: "set -x; docker rm --force ${dockerContainerName} || true", returnStdout: true)
+        }
+    }
+    catch (Exception er) {
+        common.warningMsg("IgnoreMe:Timeout to delete test docker container with force!Message:\n" + er.toString())
+    }
+
+    if (TestMarkerResult) {
+        common.infoMsg("Test finished: SUCCESS")
+    } else {
+        common.warningMsg("Test finished: FAILURE")
+    }
+    return TestMarkerResult
+
 }
 
 /**
  * Test salt-minion
  *
- * @param minion          salt minion
+ * @param minion salt minion
  */
 
-def testMinion(minionName)
-{
-  sh("bash -c 'source /srv/salt/scripts/bootstrap.sh; cd /srv/salt/scripts && verify_salt_minion ${minionName}'")
+def testMinion(minionName) {
+    sh(script: "bash -c 'source /srv/salt/scripts/bootstrap.sh; cd /srv/salt/scripts && verify_salt_minion ${minionName}'", returnStdout: true)
+}
+
+/**
+ * Wrapper over setupAndTestNode, to test exactly one CC model.
+ Whole workspace and model - should be pre-rendered and passed via MODELS_TARGZ
+ Flow: grab all data, and pass to setupAndTestNode function
+ the model will be directly mirrored to `model/{cfg.testReclassEnv}/* /srv/salt/reclass/*`
+ *
+ * @param cfg - dict with params:
+ MODELS_TARGZ       http link to arch with (models|contexts|global_reclass)
+ modelFile
+ DockerCName        directly passed to setupAndTestNode
+ EXTRA_FORMULAS     directly passed to setupAndTestNode
+ DISTRIB_REVISION   directly passed to setupAndTestNode
+ reclassVersion     directly passed to setupAndTestNode
+
+ Return: true | exception
+ */
+
+def testCCModel(cfg) {
+    def common = new com.mirantis.mk.Common()
+    common.errorMsg('You are using deprecated function!Please migrate to "testNode".' +
+        'It would be removed after 2018.q4 release!Pushing forced 60s sleep..')
+    sh('sleep 60')
+    sh(script: 'find . -mindepth 1 -delete || true', returnStatus: true)
+    sh(script: "wget --progress=dot:mega --auth-no-challenge -O models.tar.gz ${cfg.MODELS_TARGZ}")
+    // unpack data
+    sh(script: "tar -xzf models.tar.gz ")
+    common.infoMsg("Going to test exactly one context: ${cfg.modelFile}\n, with params: ${cfg}")
+    content = readFile(file: cfg.modelFile)
+    templateContext = readYaml text: content
+    clusterName = templateContext.default_context.cluster_name
+    clusterDomain = templateContext.default_context.cluster_domain
+
+    def testResult = false
+    testResult = setupAndTestNode(
+        "cfg01.${clusterDomain}",
+        clusterName,
+        '',
+        cfg.testReclassEnv, // Sync into image exactly one env
+        'pkg',
+        cfg.DISTRIB_REVISION,
+        cfg.reclassVersion,
+        0,
+        false,
+        false,
+        '',
+        '',
+        cfg.DockerCName)
+    if (testResult) {
+        common.infoMsg("testCCModel for context: ${cfg.modelFile} model: ${cfg.testReclassEnv} finished: SUCCESS")
+    } else {
+        throw new RuntimeException("testCCModel for context: ${cfg.modelFile} model: ${cfg.testReclassEnv} finished: FAILURE")
+    }
+    return testResult
 }