Merge "Add postgresql client usage with timeouts"
diff --git a/abort-long-running-jobs.groovy b/abort-long-running-jobs.groovy
index f4b4d36..0a5fa2a 100644
--- a/abort-long-running-jobs.groovy
+++ b/abort-long-running-jobs.groovy
@@ -4,38 +4,14 @@
  *  MAX_DURATION_IN_HOURS - max permitted job duration in hours
  */
 common = new com.mirantis.mk.Common()
+jenkinsUtils = new com.mirantis.mk.JenkinsUtils()
 
 node{
   stage("Kill long running jobs"){
-    def jobKilled = false
     for (int i=0; i < Jenkins.instance.items.size(); i++) {
-      killStuckBuilds(3600 * Integer.parseInt(MAX_DURATION_IN_HOURS), Jenkins.instance.items[i])
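+      // the killStuckBuilds/getRunningBuilds helpers below were moved into the shared library (com.mirantis.mk.JenkinsUtils)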
+      if (!jenkinsUtils.killStuckBuilds(3600 * Integer.parseInt(MAX_DURATION_IN_HOURS), Jenkins.instance.items[i])) {
+        common.errorMsg("Kill failed!")
+      }
     }
   }
-}
-
-@NonCPS
-def getRunningBuilds(job){
-  return job.builds.findAll{build -> build.isBuilding()}
-}
-
-@NonCPS
-def killStuckBuilds(maxSeconds, job){
-    def result = false
-    def runningBuilds = getRunningBuilds(job)
-      def jobName = job.name
-      for(int j=0; j < runningBuilds.size(); j++){
-        int durationInSeconds = (System.currentTimeMillis() - runningBuilds[j].getTimeInMillis())/1000.0
-        if(durationInSeconds > maxSeconds){
-          def buildId = runningBuilds[j].id
-          common.infoMsg("Aborting ${jobName}-${buildId} which is running for ${durationInSeconds}s")
-          try{
-            runningBuilds[j].finish(hudson.model.Result.ABORTED, new java.io.IOException("Aborting build by long running jobs killer"));
-          }catch(e){
-            common.errorMsg("Error occured during aborting build: Exception: ${e}")
-          }
-          result = true
-        }
-      }
-      return result
 }
\ No newline at end of file
diff --git a/cicd-lab-pipeline.groovy b/cicd-lab-pipeline.groovy
index d67c01d..fe99ecb 100644
--- a/cicd-lab-pipeline.groovy
+++ b/cicd-lab-pipeline.groovy
@@ -163,6 +163,12 @@
             }
 
             stage("Deploy Docker services") {
+                // /etc/aptly-publisher.yaml must be present before
+                // services are deployed
+                // XXX: for an unknown reason, refresh_pillar must be
+                // executed here first
+                salt.runSaltProcessStep(saltMaster, 'I@aptly:publisher', 'saltutil.refresh_pillar', [], null, true)
+                salt.enforceState(saltMaster, 'I@aptly:publisher', 'aptly.publisher', true)
                 retry(3) {
                     sleep(5)
                     salt.enforceState(saltMaster, 'I@docker:swarm:role:master', 'docker.client')
@@ -318,7 +324,7 @@
             // Cleanup
             if (HEAT_STACK_DELETE.toBoolean() == true) {
                 stage('Trigger cleanup job') {
-                    build job: 'deploy-heat-cleanup', parameters: [[$class: 'StringParameterValue', name: 'HEAT_STACK_NAME', value: HEAT_STACK_NAME]]
+                    build job: 'deploy-stack-cleanup', parameters: [[$class: 'StringParameterValue', name: 'STACK_NAME', value: HEAT_STACK_NAME]]
                 }
             }
         }
diff --git a/cleanup-pipeline.groovy b/cleanup-pipeline.groovy
index 8b04990..282b041 100644
--- a/cleanup-pipeline.groovy
+++ b/cleanup-pipeline.groovy
@@ -2,41 +2,77 @@
  *
  * Delete heat stack pipeline
  *
- * Expected parameters:
- *   OPENSTACK_API_URL          OpenStack API address
- *   OPENSTACK_API_CREDENTIALS  Credentials to the OpenStack API
- *   OPENSTACK_API_PROJECT      OpenStack project to connect to
- *   OPENSTACK_API_CLIENT       Versions of OpenStack python clients
- *   OPENSTACK_API_VERSION      Version of the OpenStack API (2/3)
- *   HEAT_STACK_NAME            Heat stack name
+ * General
+ *  STACK_NAME                 Heat stack name
+ *  STACK_TYPE                 Type of the stack (heat, aws)
  *
+ * Heat parameters:
+ *  OPENSTACK_API_URL            OpenStack API address
+ *  OPENSTACK_API_CREDENTIALS    Credentials to the OpenStack API
+ *  OPENSTACK_API_PROJECT        OpenStack project to connect to
+ *  OPENSTACK_API_PROJECT_DOMAIN Domain for OpenStack project
+ *  OPENSTACK_API_PROJECT_ID     ID for OpenStack project
+ *  OPENSTACK_API_USER_DOMAIN    Domain for OpenStack user
+ *  OPENSTACK_API_CLIENT         Versions of OpenStack python clients
+ *  OPENSTACK_API_VERSION        Version of the OpenStack API (2/3)
+ *
+ * AWS parameters:
+ *  AWS_API_CREDENTIALS        Credentials ID for the AWS EC2 API
+ *  AWS_DEFAULT_REGION         EC2 region
  */
+
 common = new com.mirantis.mk.Common()
 git = new com.mirantis.mk.Git()
 openstack = new com.mirantis.mk.Openstack()
+aws = new com.mirantis.mk.Aws()
 salt = new com.mirantis.mk.Salt()
 
 node {
 
-    // connection objects
-    def openstackCloud
+    def venv_path = "${env.WORKSPACE}/venv"
+    def env_vars
 
-    // value defaults
-    def openstackVersion = OPENSTACK_API_CLIENT ? OPENSTACK_API_CLIENT : 'liberty'
-    def openstackEnv = "${env.WORKSPACE}/venv"
-
-    stage('Install OpenStack env') {
-        openstack.setupOpenstackVirtualenv(openstackEnv, openstackVersion)
+    // default STACK_TYPE is heat
+    if (!env.getEnvironment().containsKey("STACK_TYPE") || STACK_TYPE == '') {
+        STACK_TYPE = 'heat'
     }
 
-    stage('Connect to OpenStack cloud') {
-        openstackCloud = openstack.createOpenstackEnv(OPENSTACK_API_URL, OPENSTACK_API_CREDENTIALS, OPENSTACK_API_PROJECT)
-        openstack.getKeystoneToken(openstackCloud, openstackEnv)
+    stage('Install environment') {
+        if (STACK_TYPE == 'heat') {
+
+            def openstackVersion = OPENSTACK_API_CLIENT ? OPENSTACK_API_CLIENT : 'liberty'
+            openstack.setupOpenstackVirtualenv(venv_path, openstackVersion)
+
+        } else if (STACK_TYPE == 'aws') {
+
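+            // export AWS credentials and region as shell env vars for the aws CLI calls below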
+            env_vars = aws.getEnvVars(AWS_API_CREDENTIALS, AWS_DEFAULT_REGION)
+            aws.setupVirtualEnv(venv_path)
+
+        } else {
+            throw new Exception('Stack type is not supported')
+        }
+
     }
 
-    stage('Delete Heat stack') {
-        common.infoMsg("Deleting Heat Stack " + HEAT_STACK_NAME)
-        openstack.deleteHeatStack(openstackCloud, HEAT_STACK_NAME, openstackEnv)
+    stage('Delete stack') {
+        if (STACK_TYPE == 'heat') {
+            def openstackCloud = openstack.createOpenstackEnv(
+                OPENSTACK_API_URL, OPENSTACK_API_CREDENTIALS,
+                OPENSTACK_API_PROJECT, OPENSTACK_API_PROJECT_DOMAIN,
+                OPENSTACK_API_PROJECT_ID, OPENSTACK_API_USER_DOMAIN,
+                OPENSTACK_API_VERSION)
+            openstack.getKeystoneToken(openstackCloud, venv_path)
+
+            common.infoMsg("Deleting Heat Stack " + STACK_NAME)
+            openstack.deleteHeatStack(openstackCloud, STACK_NAME, venv_path)
+        } else if (STACK_TYPE == 'aws') {
+
+            aws.deleteStack(venv_path, env_vars, STACK_NAME)
+
+        } else {
+            throw new Exception('Stack type is not supported')
+        }
+
     }
 
 }
diff --git a/cloud-deploy-pipeline.groovy b/cloud-deploy-pipeline.groovy
index b34db68..c04e5d2 100644
--- a/cloud-deploy-pipeline.groovy
+++ b/cloud-deploy-pipeline.groovy
@@ -17,6 +17,8 @@
  *   STACK_TEST                 Run tests (bool)
  *   STACK_CLEANUP_JOB          Name of job for deleting stack
  *
+ *   STACK_COMPUTE_COUNT        Number of compute nodes to launch
+ *
  *   AWS_STACK_REGION           CloudFormation AWS region
  *   AWS_API_CREDENTIALS        AWS Access key ID with  AWS secret access key
  *   AWS_SSH_KEY                AWS key pair name (used for SSH access)
@@ -50,6 +52,13 @@
 _MAX_PERMITTED_STACKS = 2
 overwriteFile = "/srv/salt/reclass/classes/cluster/override.yml"
 
+def saltMaster
+
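+// NOTE: venv_path and env_vars are intentionally global (no 'def') so later stages can reuse them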
+if (STACK_TYPE == 'aws') {
+    venv_path = 'aws_venv'
+    env_vars = aws.getEnvVars(AWS_API_CREDENTIALS, AWS_STACK_REGION)
+}
+
 timestamps {
     node {
         try {
@@ -121,7 +130,6 @@
                     SALT_MASTER_URL = "http://${saltMasterHost}:6969"
                 } else if (STACK_TYPE == 'aws') {
 
-                    def venv_path = 'aws_venv'
 
                     if (STACK_REUSE.toBoolean() == true && STACK_NAME == '') {
                         error("If you want to reuse existing stack you need to provide it's name")
@@ -144,9 +152,6 @@
                     // set description
                     currentBuild.description = STACK_NAME
 
-                    // prepare configuration
-                    def env_vars = aws.getEnvVars(AWS_API_CREDENTIALS, AWS_STACK_REGION)
-
                     if (STACK_REUSE.toBoolean() == false) {
                         // get templates
                         git.checkoutGitRepository('template', STACK_TEMPLATE_URL, STACK_TEMPLATE_BRANCH, STACK_TEMPLATE_CREDENTIALS)
@@ -155,8 +160,12 @@
                         aws.setupVirtualEnv(venv_path)
 
                         // start stack
-                        def stack_params = ["ParameterKey=KeyName,ParameterValue=" + AWS_SSH_KEY]
-                        aws.createStack(venv_path, env_vars, STACK_TEMPLATE, STACK_NAME, stack_params)
+                        def stack_params = [
+                            "ParameterKey=KeyName,ParameterValue=" + AWS_SSH_KEY,
+                            "ParameterKey=CmpNodeCount,ParameterValue=" + STACK_COMPUTE_COUNT
+                        ]
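+                        // the template name is resolved to a file under cfn/ in the checked-out template repo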
+                        def template_file = 'cfn/' + STACK_TEMPLATE + '.yml'
+                        aws.createStack(venv_path, env_vars, template_file, STACK_NAME, stack_params)
                     }
 
                     // wait for stack to be ready
@@ -170,14 +179,8 @@
                 } else {
                     throw new Exception("STACK_TYPE ${STACK_TYPE} is not supported")
                 }
-            }
 
-            //
-            // Connect to Salt master
-            //
-
-            def saltMaster
-            stage('Connect to Salt API') {
+                // Connect to Salt master
                 saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
             }
 
@@ -201,22 +204,41 @@
             // install k8s
             if (common.checkContains('STACK_INSTALL', 'k8s')) {
                 stage('Install Kubernetes infra') {
+                    // configure kubernetes_control_address - save loadbalancer
+                    def kubernetes_control_address = aws.getOutputs(venv_path, env_vars, STACK_NAME, 'ControlLoadBalancer')
+                    print(kubernetes_control_address)
+                    salt.runSaltProcessStep(saltMaster, 'I@salt:master', 'reclass.cluster_meta_set', ['kubernetes_control_address', kubernetes_control_address], null, true)
+
                     orchestrate.installKubernetesInfra(saltMaster)
                 }
 
                 stage('Install Kubernetes control') {
 
-                    // Overwrite Kubernetes vars if specified
-                    if (env.getEnvironment().containsKey("KUBERNETES_HYPERKUBE_IMAGE")) {
-                        salt.runSaltProcessStep(saltMaster, 'I@salt:master', 'file.append', overwriteFile, "    kubernetes_hyperkube_image: ${KUBERNETES_HYPERKUBE_IMAGE}")
-                    }
-
                     orchestrate.installKubernetesControl(saltMaster)
+
                 }
 
+                stage('Scale Kubernetes computes') {
+                    if (STACK_COMPUTE_COUNT.toInteger() > 0) {
+                        if (STACK_TYPE == 'aws') {
+
+                            // get stack info
+                            def scaling_group = aws.getOutputs(venv_path, env_vars, STACK_NAME, 'ComputesScalingGroup')
+
+                            //update autoscaling group
+                            aws.updateAutoscalingGroup(venv_path, env_vars, scaling_group, ["--desired-capacity " + STACK_COMPUTE_COUNT])
+
+                            // wait for computes to boot up
+                            aws.waitForAutoscalingInstances(venv_path, env_vars, scaling_group)
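+                            // extra settle time so the new instances' salt minions can register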
+                            sleep(60)
+                        }
+
+                        orchestrate.installKubernetesCompute(saltMaster)
+                    }
+                }
 
                 if (common.checkContains('STACK_INSTALL', 'contrail')) {
-                    state('Install Contrail for Kubernetes') {
+                    stage('Install Contrail for Kubernetes') {
                         orchestrate.installContrailNetwork(saltMaster)
                         orchestrate.installContrailCompute(saltMaster)
                     }
@@ -337,21 +359,24 @@
             // Clean
             //
 
-            if (STACK_TYPE == 'heat') {
+            if (STACK_NAME && STACK_NAME != '') {
                 // send notification
                 common.sendNotification(currentBuild.result, STACK_NAME, ["slack"])
+            }
 
-                if (STACK_DELETE.toBoolean() == true) {
-                    common.errorMsg('Heat job cleanup triggered')
-                    stage('Trigger cleanup job') {
-                        build job: 'deploy-heat-cleanup', parameters: [[$class: 'StringParameterValue', name: 'STACK_NAME', value: STACK_NAME]]
-                    }
-                } else {
-                    if (currentBuild.result == 'FAILURE') {
-                        common.errorMsg("Deploy job FAILED and was not deleted. Please fix the problem and delete stack on you own.")
-                        if (SALT_MASTER_URL) {
-                            common.errorMsg("Salt master URL: ${SALT_MASTER_URL}")
-                        }
+            if (STACK_DELETE.toBoolean() == true) {
+                stage('Trigger cleanup job') {
+                    common.errorMsg('Stack cleanup job triggered')
+                    build(job: STACK_CLEANUP_JOB, parameters: [
+                        [$class: 'StringParameterValue', name: 'STACK_NAME', value: STACK_NAME],
+                        [$class: 'StringParameterValue', name: 'STACK_TYPE', value: STACK_TYPE]
+                    ])
+                }
+            } else {
+                if (currentBuild.result == 'FAILURE') {
+                    common.errorMsg("Deploy job FAILED and was not deleted. Please fix the problem and delete stack on you own.")
+                    if (SALT_MASTER_URL) {
+                        common.errorMsg("Salt master URL: ${SALT_MASTER_URL}")
                     }
                 }
             }
diff --git a/docker-build-to-jfrog.groovy b/docker-build-to-jfrog.groovy
index 83fe66b..7f91a54 100644
--- a/docker-build-to-jfrog.groovy
+++ b/docker-build-to-jfrog.groovy
@@ -17,7 +17,6 @@
   def buildInfo = Artifactory.newBuildInfo()
 
   def projectNamespace = "mirantis/${PROJECT_NAMESPACE}"
-  def projectModule = "${GERRIT_PROJECT}"
 
   def dockerRepository = DOCKER_REGISTRY
   def docker_dev_repo = "docker-dev-local"
@@ -42,7 +41,7 @@
       ).trim().take(12)
       imageTagsList << "${GERRIT_CHANGE_NUMBER}_${GERRIT_PATCHSET_NUMBER}"
       for (imageTag in imageTagsList) {
-        sh "docker tag ${containerId} ${dockerRepository}/${projectNamespace}/${projectModule}:${imageTag}"
+        sh "docker tag ${containerId} ${dockerRepository}/${projectNamespace}/${IMAGE_NAME}:${imageTag}"
       }
     }
     stage("publish image"){
@@ -50,7 +49,7 @@
         for (imageTag in imageTagsList) {
           artifactory.uploadImageToArtifactory(artifactoryServer,
                                                dockerRepository,
-                                               "${projectNamespace}/${projectModule}",
+                                               "${projectNamespace}/${IMAGE_NAME}",
                                                imageTag,
                                                docker_dev_repo,
                                                buildInfo)
@@ -72,7 +71,7 @@
           artifactory.promoteDockerArtifact(artifactoryServer.getUrl(),
                                             docker_dev_repo,
                                             docker_prod_repo,
-                                            "${projectNamespace}/${projectModule}",
+                                            "${projectNamespace}/${IMAGE_NAME}",
                                             buildProperties.get('com.mirantis.targetTag').join(','),
                                             'latest')
         } else {
diff --git a/generate-cookiecutter-products.groovy b/generate-cookiecutter-products.groovy
index e64abda..b41bb5f 100644
--- a/generate-cookiecutter-products.groovy
+++ b/generate-cookiecutter-products.groovy
@@ -72,13 +72,13 @@
                 def nodeString = """classes:
 - cluster.${clusterName}.infra.config
 parameters:
-    _param:
-        linux_system_codename: xenial
-        reclass_data_revision: master
-    linux:
-        system:
-            name: cfg01
-            domain: ${clusterDomain}
+  _param:
+    linux_system_codename: xenial
+    reclass_data_revision: master
+  linux:
+    system:
+      name: cfg01
+      domain: ${clusterDomain}
 """
                 sh "mkdir -p ${modelEnv}/nodes/"
                 writeFile(file: nodeFile, text: nodeString)
diff --git a/lab-pipeline.groovy b/lab-pipeline.groovy
index 1158a74..255335d 100644
--- a/lab-pipeline.groovy
+++ b/lab-pipeline.groovy
@@ -17,14 +17,17 @@
  *
  * Expected parameters:
  * required for STACK_TYPE=heat
- *   HEAT_STACK_ENVIRONMENT     Heat stack environmental parameters
- *   HEAT_STACK_ZONE            Heat stack availability zone
- *   HEAT_STACK_PUBLIC_NET      Heat stack floating IP pool
- *   OPENSTACK_API_URL          OpenStack API address
- *   OPENSTACK_API_CREDENTIALS  Credentials to the OpenStack API
- *   OPENSTACK_API_PROJECT      OpenStack project to connect to
- *   OPENSTACK_API_CLIENT       Versions of OpenStack python clients
- *   OPENSTACK_API_VERSION      Version of the OpenStack API (2/3)
+ *   HEAT_STACK_ENVIRONMENT       Heat stack environmental parameters
+ *   HEAT_STACK_ZONE              Heat stack availability zone
+ *   HEAT_STACK_PUBLIC_NET        Heat stack floating IP pool
+ *   OPENSTACK_API_URL            OpenStack API address
+ *   OPENSTACK_API_CREDENTIALS    Credentials to the OpenStack API
+ *   OPENSTACK_API_PROJECT        OpenStack project to connect to
+ *   OPENSTACK_API_PROJECT_DOMAIN Domain for OpenStack project
+ *   OPENSTACK_API_PROJECT_ID     ID for OpenStack project
+ *   OPENSTACK_API_USER_DOMAIN    Domain for OpenStack user
+ *   OPENSTACK_API_CLIENT         Versions of OpenStack python clients
+ *   OPENSTACK_API_VERSION        Version of the OpenStack API (2/3)
  *
  *   SALT_MASTER_CREDENTIALS    Credentials to the Salt API
  *
@@ -41,8 +44,9 @@
  *   CALICO_CNI_IMAGE            Docker repository and tag for calico CNI image
  *   CALICO_NODE_IMAGE           Docker repository and tag for calico node image
  *   CALICOCTL_IMAGE             Docker repository and tag for calicoctl image
+ *   MTU                         MTU for Calico
  *   NETCHECKER_AGENT_IMAGE      Docker repository and tag for netchecker agent image
- *   NETCHECKER_SERVER_IMAGE      Docker repository and tag for netchecker server image
+ *   NETCHECKER_SERVER_IMAGE     Docker repository and tag for netchecker server image
  *
  */
 
@@ -53,11 +57,7 @@
 salt = new com.mirantis.mk.Salt()
 test = new com.mirantis.mk.Test()
 
-
-
-
 _MAX_PERMITTED_STACKS = 2
-overwriteFile = "/srv/salt/reclass/classes/cluster/overwrite.yml"
 
 timestamps {
     node {
@@ -108,13 +108,17 @@
 
                     // create openstack env
                     openstack.setupOpenstackVirtualenv(openstackEnv, openstackVersion)
-                    openstackCloud = openstack.createOpenstackEnv(OPENSTACK_API_URL, OPENSTACK_API_CREDENTIALS, OPENSTACK_API_PROJECT)
+                    openstackCloud = openstack.createOpenstackEnv(
+                        OPENSTACK_API_URL, OPENSTACK_API_CREDENTIALS,
+                        OPENSTACK_API_PROJECT, OPENSTACK_API_PROJECT_DOMAIN,
+                        OPENSTACK_API_PROJECT_ID, OPENSTACK_API_USER_DOMAIN,
+                        OPENSTACK_API_VERSION)
                     openstack.getKeystoneToken(openstackCloud, openstackEnv)
                     //
                     // Verify possibility of create stack for given user and stack type
                     //
                     wrap([$class: 'BuildUser']) {
-                        if (env.BUILD_USER_ID && !env.BUILD_USER_ID.equals("jenkins") && !STACK_REUSE.toBoolean()) {
+                        if (env.BUILD_USER_ID && !env.BUILD_USER_ID.equals("jenkins") && !env.BUILD_USER_ID.equals("mceloud") && !STACK_REUSE.toBoolean()) {
                             def existingStacks = openstack.getStacksForNameContains(openstackCloud, "${env.BUILD_USER_ID}-${JOB_NAME}", openstackEnv)
                             if(existingStacks.size() >= _MAX_PERMITTED_STACKS){
                                 STACK_DELETE = "false"
@@ -170,6 +174,16 @@
 
             // install k8s
             if (common.checkContains('STACK_INSTALL', 'k8s')) {
+
+                if (common.checkContains('STACK_INSTALL', 'contrail')) {
+                    stage('Install Contrail for Kubernetes') {
+                        orchestrate.installContrailNetwork(saltMaster)
+                        orchestrate.installContrailCompute(saltMaster)
+                    }
+                }
+
                 stage('Install Kubernetes infra') {
                     orchestrate.installKubernetesInfra(saltMaster)
                 }
@@ -177,39 +191,35 @@
                 stage('Install Kubernetes control') {
 
                     // Overwrite Kubernetes vars if specified
-                    if (env.getEnvironment().containsKey("KUBERNETES_HYPERKUBE_IMAGE")) {
-                        salt.runSaltProcessStep(saltMaster, 'I@salt:master', 'file.append', overwriteFile, "    kubernetes_hyperkube_image: ${KUBERNETES_HYPERKUBE_IMAGE}")
+                    if (env.getEnvironment().containsKey('KUBERNETES_HYPERKUBE_IMAGE')) {
+                        salt.runSaltProcessStep(saltMaster, 'I@salt:master', 'reclass.cluster_meta_set', ['kubernetes_hyperkube_image', KUBERNETES_HYPERKUBE_IMAGE], null, true)
+                    }
+                    if (env.getEnvironment().containsKey('MTU')) {
+                        salt.runSaltProcessStep(saltMaster, 'I@salt:master', 'reclass.cluster_meta_set', ['kubernetes_mtu', MTU], null, true)
                     }
                     // Overwrite Calico vars if specified
-                    if (env.getEnvironment().containsKey("CALICO_CNI_IMAGE")) {
-                      salt.runSaltProcessStep(saltmaster, 'I@salt:master', 'file.append', overwriteFile, "    kubernetes_calico_cni_image: ${CALICO_CNI_IMAGE}")
+                    if (env.getEnvironment().containsKey('CALICO_CNI_IMAGE')) {
+                        salt.runSaltProcessStep(saltMaster, 'I@salt:master', 'reclass.cluster_meta_set', ['kubernetes_calico_cni_image', CALICO_CNI_IMAGE], null, true)
                     }
-                    if (env.getEnvironment().containsKey("CALICO_NODE_IMAGE")) {
-                      salt.runSaltProcessStep(saltmaster, 'I@salt:master', 'file.append', overwriteFile, "    kubernetes_calico_node_image: ${CALICO_NODE_IMAGE}")
+                    if (env.getEnvironment().containsKey('CALICO_NODE_IMAGE')) {
+                        salt.runSaltProcessStep(saltMaster, 'I@salt:master', 'reclass.cluster_meta_set', ['kubernetes_calico_image', CALICO_NODE_IMAGE], null, true)
                     }
-                    if (env.getEnvironment().containsKey("CALICOCTL_IMAGE")) {
-                      salt.runSaltProcessStep(saltmaster, 'I@salt:master', 'file.append', overwriteFile, "    kubernetes_calicoctl_image: ${CALICOCTL_IMAGE}")
+                    if (env.getEnvironment().containsKey('CALICOCTL_IMAGE')) {
+                        salt.runSaltProcessStep(saltMaster, 'I@salt:master', 'reclass.cluster_meta_set', ['kubernetes_calicoctl_image', CALICOCTL_IMAGE], null, true)
                     }
 
                     // Overwrite netchecker vars if specified
-                    if (env.getEnvironment().containsKey("NETCHECKER_AGENT_IMAGE")) {
-                      salt.runSaltProcessStep(saltmaster, 'I@salt:master', 'file.append', overwriteFile, "    kubernetes_netchecker_agent_image: ${NETCHECKER_AGENT_IMAGE}")
+                    if (env.getEnvironment().containsKey('NETCHECKER_AGENT_IMAGE')) {
+                        salt.runSaltProcessStep(saltMaster, 'I@salt:master', 'reclass.cluster_meta_set', ['kubernetes_netchecker_agent_image', NETCHECKER_AGENT_IMAGE], null, true)
                     }
-                    if (env.getEnvironment().containsKey("NETCHECKER_SERVER_IMAGE")) {
-                      salt.runSaltProcessStep(saltmaster, 'I@salt:master', 'file.append', overwriteFile, "    kubernetes_netchecker_server_image: ${NETCHECKER_SERVER_IMAGE}")
+                    if (env.getEnvironment().containsKey('NETCHECKER_SERVER_IMAGE')) {
+                        salt.runSaltProcessStep(saltMaster, 'I@salt:master', 'reclass.cluster_meta_set', ['kubernetes_netchecker_server_image', NETCHECKER_SERVER_IMAGE], null, true)
                     }
 
-
                     orchestrate.installKubernetesControl(saltMaster)
                 }
 
 
-                if (common.checkContains('STACK_INSTALL', 'contrail')) {
-                    state('Install Contrail for Kubernetes') {
-                        orchestrate.installContrailNetwork(saltMaster)
-                        orchestrate.installContrailCompute(saltMaster)
-                    }
-                }
             }
 
             // install openstack
@@ -334,7 +344,7 @@
                 if (STACK_DELETE.toBoolean() == true) {
                     common.errorMsg('Heat job cleanup triggered')
                     stage('Trigger cleanup job') {
-                        build job: STACK_CLEANUP_JOB, parameters: [[$class: 'StringParameterValue', name: 'HEAT_STACK_NAME', value: STACK_NAME]]
+                        build job: STACK_CLEANUP_JOB, parameters: [[$class: 'StringParameterValue', name: 'STACK_NAME', value: STACK_NAME]]
                     }
                 } else {
                     if (currentBuild.result == 'FAILURE') {
diff --git a/mk-k8s-simple-deploy-pipeline.groovy b/mk-k8s-simple-deploy-pipeline.groovy
index 4aae816..b86e6da 100644
--- a/mk-k8s-simple-deploy-pipeline.groovy
+++ b/mk-k8s-simple-deploy-pipeline.groovy
@@ -55,7 +55,7 @@
 
     stage('Connect to OpenStack cloud') {
         openstackCloud = openstack.createOpenstackEnv(OPENSTACK_API_URL, OPENSTACK_API_CREDENTIALS, OPENSTACK_API_PROJECT,
-        OPENSTACK_API_PROJECT_DOMAIN_ID, OPENSTACK_API_USER_DOMAIN_ID)
+        "", OPENSTACK_API_PROJECT_DOMAIN_ID, OPENSTACK_API_USER_DOMAIN_ID, OPENSTACK_API_VERSION)
         openstack.getKeystoneToken(openstackCloud, openstackEnv)
     }
 
diff --git a/openstack-compute-install.groovy b/openstack-compute-install.groovy
new file mode 100644
index 0000000..d6b08c3
--- /dev/null
+++ b/openstack-compute-install.groovy
@@ -0,0 +1,74 @@
+/**
+ * Deploy OpenStack compute nodes
+ *
+ * Expected parameters:
+ *   SALT_MASTER_CREDENTIALS    Credentials to the Salt API.
+ *   SALT_MASTER_URL            Full Salt API address [https://10.10.10.1:8000].
+ *   TARGET_SERVERS             Salt compound target to match nodes to be deployed [*, G@osfamily:debian].
+ *
+**/
+
+def common = new com.mirantis.mk.Common()
+def salt = new com.mirantis.mk.Salt()
+
+def saltMaster
+def targetAll = ['expression': TARGET_SERVERS, 'type': 'compound']
+def minions
+def result
+def command
+def commandKwargs
+
+
+node() {
+    try {
+
+        stage('Connect to Salt master') {
+            saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+        }
+
+        stage('List target servers') {
+            minions = salt.getMinions(saltMaster, targetAll)
+
+            if (minions.isEmpty()) {
+                throw new Exception("No minion was targeted")
+            }
+
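+            // join minion ids into a single Salt compound expression matching them all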
+            targetLiveAll = minions.join(' or ')
+            common.infoMsg("Found nodes: ${targetLiveAll}")
+            common.infoMsg("Selected nodes: ${targetLiveAll}")
+        }
+
+        stage("Setup network for compute") {
+            common.infoMsg("Now all network configuration will be enforced, which caused reboot of nodes: ${targetLiveAll}")
+            try {
+                salt.cmdRun(saltMaster, targetLiveAll, 'salt-call state.sls linux.system.user,openssh,linux.network;reboot')
+            } catch(e) {
+                common.infoMsg("no respond from nodes due reboot")
+            }
+            common.infoMsg("Now pipeline is waiting until node reconnect to salt master")
+            timeout(800) {
+                retry(666) {
+                    try {
+                        salt.runSaltCommand(saltMaster, 'local', ['expression': targetLiveAll, 'type': 'compound'], 'test.ping')
+                    } catch(e) {
+                        common.infoMsg("Still waiting for node to come up")
+                        sleep(10)
+                    }
+                }
+            }
+        }
+
+        stage("Deploy Compute") {
+            common.infoMsg("Lets run rest of the states to finish deployment")
+            salt.enforceState(saltMaster, targetLiveAll, 'linux,openssh,ntp,salt', true)
+            retry(2) {
+                salt.runSaltCommand(saltMaster, 'local', ['expression': targetLiveAll, 'type': 'compound'], 'state.apply')
+            }
+        }
+
+    } catch (Throwable e) {
+        // If there was an error or exception thrown, the build failed
+        currentBuild.result = "FAILURE"
+        throw e
+    }
+}
diff --git a/openstack-compute-upgrade.groovy b/openstack-compute-upgrade.groovy
new file mode 100644
index 0000000..cde839c
--- /dev/null
+++ b/openstack-compute-upgrade.groovy
@@ -0,0 +1,112 @@
+/**
+ * Update packages on given nodes
+ *
+ * Expected parameters:
+ *   SALT_MASTER_CREDENTIALS    Credentials to the Salt API.
+ *   SALT_MASTER_URL            Full Salt API address [https://10.10.10.1:8000].
+ *   TARGET_SERVERS             Salt compound target to match nodes to be updated [*, G@osfamily:debian].
+ *   TARGET_PACKAGES            Space delimited list of packages to be updated [package1=version package2=version], empty string means updating all packages to the latest version.
+ *   TARGET_SUBSET_TEST         Number of nodes to list package updates on, empty string means all targeted nodes.
+ *   TARGET_SUBSET_LIVE         Number of selected nodes to live apply the selected package update on.
+ *   TARGET_BATCH_LIVE          Batch size for the complete live package update on all nodes, empty string means apply to all targeted nodes.
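+ *
+ * Example (hypothetical values): TARGET_SERVERS='cmp*', TARGET_SUBSET_TEST='1',
+ *   TARGET_SUBSET_LIVE='1', TARGET_BATCH_LIVE='10', TARGET_PACKAGES='' (upgrade everything).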
+ *
+**/
+
+def common = new com.mirantis.mk.Common()
+def salt = new com.mirantis.mk.Salt()
+
+def saltMaster
+def targetAll = ['expression': TARGET_SERVERS, 'type': 'compound']
+def targetTestSubset
+def targetLiveSubset
+def targetLiveAll
+def minions
+def result
+def packages
+def command
+def commandKwargs
+
+node() {
+    try {
+
+        stage('Connect to Salt master') {
+            saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+        }
+
+        stage('List target servers') {
+            minions = salt.getMinions(saltMaster, targetAll)
+
+            if (minions.isEmpty()) {
+                throw new Exception("No minion was targeted")
+            }
+
+            if (TARGET_SUBSET_TEST != "") {
+                targetTestSubset = minions.subList(0, Integer.valueOf(TARGET_SUBSET_TEST)).join(' or ')
+            } else {
+                targetTestSubset = minions.join(' or ')
+            }
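+            // NOTE: assumes TARGET_SUBSET_LIVE is not larger than the number of matched minions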
+            targetLiveSubset = minions.subList(0, Integer.valueOf(TARGET_SUBSET_LIVE)).join(' or ')
+
+            targetLiveAll = minions.join(' or ')
+            common.infoMsg("Found nodes: ${targetLiveAll}")
+            common.infoMsg("Selected test nodes: ${targetTestSubset}")
+            common.infoMsg("Selected sample nodes: ${targetLiveSubset}")
+        }
+
+        stage("Add new repos on sample") {
+            salt.enforceState(saltMaster, targetTestSubset, 'linux.system')
+        }
+
+        stage("List package upgrades") {
+            salt.runSaltProcessStep(saltMaster, targetTestSubset, 'pkg.list_upgrades', [], null, true)
+        }
+
+        stage('Confirm live package upgrades on sample') {
+            if(TARGET_PACKAGES==""){
+                timeout(time: 2, unit: 'HOURS') {
+                    def userInput = input(
+                     id: 'userInput', message: 'Insert package names for update', parameters: [
+                     [$class: 'TextParameterDefinition', defaultValue: '', description: 'Package names (or *)', name: 'packages']
+                    ])
+                    if (userInput != "" && userInput != "*") {
+                        TARGET_PACKAGES = userInput
+                    }
+                }
+            } else {
+                timeout(time: 2, unit: 'HOURS') {
+                   input message: "Approve live package upgrades on ${targetLiveSubset} nodes?"
+                }
+            }
+        }
+
+        if (TARGET_PACKAGES != "") {
+            command = "pkg.install"
+            packages = TARGET_PACKAGES.tokenize(' ')
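+            // 'only_upgrade' lets pkg.install refresh the listed packages without installing new ones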
+            commandKwargs = ['only_upgrade': 'true']
+        } else {
+            command = "pkg.upgrade"
+            packages = null
+        }
+
+        stage('Apply package upgrades on sample') {
+            out = salt.runSaltCommand(saltMaster, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, packages, commandKwargs)
+            salt.printSaltCommandResult(out)
+        }
+
+        stage('Confirm package upgrades on all nodes') {
+            timeout(time: 2, unit: 'HOURS') {
+               input message: "Approve live package upgrades on ${targetLiveAll} nodes?"
+            }
+        }
+
+        stage('Apply package upgrades on all nodes') {
+            out = salt.runSaltCommand(saltMaster, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, packages, commandKwargs)
+            salt.printSaltCommandResult(out)
+        }
+
+    } catch (Throwable e) {
+        // If there was an error or exception thrown, the build failed
+        currentBuild.result = "FAILURE"
+        throw e
+    }
+}
diff --git a/openstack-control-upgrade.groovy b/openstack-control-upgrade.groovy
index 7dd2ea1..8e841b9 100644
--- a/openstack-control-upgrade.groovy
+++ b/openstack-control-upgrade.groovy
@@ -81,7 +81,7 @@
                 // salt 'kvm02*' state.sls salt.control
                 salt.enforceState(saltMaster, "${upgNodeProvider}", 'salt.control')
 
-                sleep(60)
+                sleep(70)
 
                 // salt '*' saltutil.refresh_pillar
                 salt.runSaltProcessStep(saltMaster, 'upg*', 'saltutil.refresh_pillar', [], null, true)
@@ -115,16 +115,24 @@
                 // salt -C 'I@backupninja:client' state.sls backupninja
                 salt.enforceState(saltMaster, 'I@backupninja:client', 'backupninja')
                 salt.runSaltProcessStep(saltMaster, 'I@backupninja:client', 'ssh.rm_known_host', ["root", "${backupninja_backup_host}"], null, true)
-                salt.cmdRun(saltMaster, 'I@backupninja:client', "arp -d ${backupninja_backup_host}")
+                try {
+                    salt.cmdRun(saltMaster, 'I@backupninja:client', "arp -d ${backupninja_backup_host}")
+                } catch (Exception e) {
+                    common.warningMsg('The ARP entry does not exist; continuing.')
+                }
                 salt.runSaltProcessStep(saltMaster, 'I@backupninja:client', 'ssh.set_known_host', ["root", "${backupninja_backup_host}"], null, true)
                 salt.cmdRun(saltMaster, 'I@backupninja:client', 'backupninja -n --run /etc/backup.d/101.mysql')
                 salt.cmdRun(saltMaster, 'I@backupninja:client', 'backupninja -n --run /etc/backup.d/200.backup.rsync > /tmp/backupninjalog')
 
+                salt.enforceState(saltMaster, 'I@xtrabackup:server', 'xtrabackup')
+                salt.enforceState(saltMaster, 'I@xtrabackup:client', 'openssh.client')
+                salt.cmdRun(saltMaster, 'I@xtrabackup:client', "su root -c 'salt-call state.sls xtrabackup'")
+                salt.cmdRun(saltMaster, 'I@xtrabackup:client', "su root -c '/usr/local/bin/innobackupex-runner.sh'")
 
                 def databases = salt.cmdRun(saltMaster, 'I@mysql:client','salt-call mysql.db_list | grep upgrade | awk \'/-/ {print \$2}\'')
                 if(databases && databases != ""){
                     def databasesList = databases['return'][0].values()[0].trim().tokenize("\n")
-                    for( i = 0; i < databasesList.size(); i++){ 
+                    for( i = 0; i < databasesList.size(); i++){
                         if(databasesList[i].toLowerCase().contains('upgrade')){
                             salt.runSaltProcessStep(saltMaster, 'I@mysql:client', 'mysql.db_remove', ["${databasesList[i]}"], null, true)
                             common.warningMsg("removing database ${databasesList[i]}")
@@ -138,6 +146,7 @@
 
                 try {
                     salt.enforceState(saltMaster, 'upg*', 'keystone.server')
+                    salt.runSaltProcessStep(saltMaster, 'upg*', 'service.restart', ['apache2'], null, true)
                 } catch (Exception e) {
                     common.warningMsg('Restarting Apache2')
                     salt.runSaltProcessStep(saltMaster, 'upg*', 'service.restart', ['apache2'], null, true)
@@ -268,9 +277,7 @@
                 salt.runSaltProcessStep(saltMaster, "${ctl02NodeProvider}", 'virt.undefine', ["ctl02.${domain}"], null, true)
                 salt.runSaltProcessStep(saltMaster, "${ctl03NodeProvider}", 'virt.undefine', ["ctl03.${domain}"], null, true)
 
-
-                salt.cmdRun(saltMaster, 'I@backupninja:client', 'backupninja -n --run /etc/backup.d/101.mysql')
-                salt.cmdRun(saltMaster, 'I@backupninja:client', 'backupninja -n --run /etc/backup.d/200.backup.rsync > /tmp/backupninjalog')
+                salt.cmdRun(saltMaster, 'I@xtrabackup:client', "su root -c '/usr/local/bin/innobackupex-runner.sh'")
 
                 try {
                     salt.cmdRun(saltMaster, 'I@salt:master', "salt-key -d ctl01.${domain},ctl02.${domain},ctl03.${domain},prx01.${domain},prx02.${domain} -y")
@@ -281,7 +288,7 @@
                 // salt 'kvm*' state.sls salt.control
                 salt.enforceState(saltMaster, 'I@salt:control', 'salt.control')
 
-                sleep(60)
+                sleep(70)
 
                 // salt '*' saltutil.refresh_pillar
                 salt.runSaltProcessStep(saltMaster, '*', 'saltutil.refresh_pillar', [], null, true)
@@ -305,6 +312,7 @@
                 try {
                     try {
                         salt.enforceState(saltMaster, 'ctl*', ['memcached', 'keystone.server'])
+                        salt.runSaltProcessStep(saltMaster, 'ctl*', 'service.restart', ['apache2'], null, true)
                     } catch (Exception e) {
                         common.warningMsg('Restarting Apache2 and enforcing keystone.server state again')
                         salt.runSaltProcessStep(saltMaster, 'ctl*', 'service.restart', ['apache2'], null, true)
@@ -357,27 +365,62 @@
                 } catch (Exception e) {
                     errorOccured = true
                     common.warningMsg('Some states that require syncdb failed. Restoring production databases')
-                    databases = salt.cmdRun(saltMaster, 'I@mysql:client','salt-call mysql.db_list | grep -v \'upgrade\' | grep -v \'schema\' | awk \'/-/ {print \$2}\'')
-                    if(databases && databases != ""){
-                        databasesList = databases['return'][0].values()[0].trim().tokenize("\n")
-                        for( i = 0; i < databasesList.size(); i++){ 
-                            if(!databasesList[i].toLowerCase().contains('upgrade') && !databasesList[i].toLowerCase().contains('command execution')){
-                                salt.runSaltProcessStep(saltMaster, 'I@mysql:client', 'mysql.db_remove', ["${databasesList[i]}"], null, true)
-                                common.warningMsg("removing database ${databasesList[i]}")
-                                salt.runSaltProcessStep(saltMaster, 'I@mysql:client', 'file.remove', ["/root/mysql/flags/${databasesList[i]}-installed"], null, true)
-                            }
-                        }
-                        salt.enforceState(saltMaster, 'I@mysql:client', 'mysql.client')
-                    }else{
-                        common.errorMsg("No none _upgrade databases were returned. You have to restore production databases before running the real control upgrade again. This is because database schema for some services already happened. To do that delete the production databases, remove none upgrade database files from /root/mysql/flags/ and run salt 'I@mysql:client' state.sls mysql.client on the salt-master node")
+
+                    // database restore section
+                    try {
+                        salt.runSaltProcessStep(saltMaster, 'I@galera:slave', 'service.stop', ['mysql'], null, true)
+                    } catch (Exception er) {
+                        common.warningMsg('Mysql service already stopped')
                     }
+                    try {
+                        salt.runSaltProcessStep(saltMaster, 'I@galera:master', 'service.stop', ['mysql'], null, true)
+                    } catch (Exception er) {
+                        common.warningMsg('Mysql service already stopped')
+                    }
+                    try {
+                        salt.cmdRun(saltMaster, 'I@galera:slave', "rm /var/lib/mysql/ib_logfile*")
+                    } catch (Exception er) {
+                        common.warningMsg('Files are not present')
+                    }
+                    try {
+                        salt.cmdRun(saltMaster, 'I@galera:master', "mkdir /root/mysql/mysql.bak")
+                    } catch (Exception er) {
+                        common.warningMsg('Directory already exists')
+                    }
+                    try {
+                        salt.cmdRun(saltMaster, 'I@galera:master', "rm -rf /root/mysql/mysql.bak/*")
+                    } catch (Exception er) {
+                        common.warningMsg('Directory already empty')
+                    }
+                    try {
+                        salt.cmdRun(saltMaster, 'I@galera:master', "mv /var/lib/mysql/* /root/mysql/mysql.bak")
+                    } catch (Exception er) {
+                        common.warningMsg('Files were already moved')
+                    }
+                    try {
+                        salt.runSaltProcessStep(saltMaster, 'I@galera:master', 'file.remove', ["/var/lib/mysql/.galera_bootstrap"], null, true)
+                    } catch (Exception er) {
+                        common.warningMsg('File is not present')
+                    }
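+                    // point wsrep_cluster_address at an empty gcomm:// so the master bootstraps a new Galera cluster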
+                    salt.cmdRun(saltMaster, 'I@galera:master', "sed -i '/gcomm/c\\wsrep_cluster_address=\"gcomm://\"' /etc/mysql/my.cnf")
+                    _pillar = salt.getPillar(saltMaster, "I@galera:master", 'xtrabackup:client:backup_dir')
+                    backup_dir = _pillar['return'][0].values()[0]
+                    if (backup_dir == null || backup_dir.isEmpty()) { backup_dir = '/var/backups/mysql/xtrabackup' }
+                    print(backup_dir)
+                    salt.runSaltProcessStep(saltMaster, 'I@galera:master', 'file.remove', ["${backup_dir}/dbrestored"], null, true)
+                    salt.cmdRun(saltMaster, 'I@xtrabackup:client', "su root -c 'salt-call state.sls xtrabackup'")
+                    salt.runSaltProcessStep(saltMaster, 'I@galera:master', 'service.start', ['mysql'], null, true)
+                    sleep(5)
+                    salt.runSaltProcessStep(saltMaster, 'I@galera:slave', 'service.start', ['mysql'], null, true)
+                    //
+
                     common.errorMsg("Stage Real control upgrade failed")
                 }
                 if(!errorOccured){
                     // salt 'cmp*' cmd.run 'service nova-compute restart'
                     salt.runSaltProcessStep(saltMaster, 'cmp*', 'service.restart', ['nova-compute'], null, true)
 
-                    // salt 'prx*' state.sls linux,openssh,salt.minion,ntp,rsyslog - TODO: proč? už to jednou projelo
+                    // salt 'prx*' state.sls linux,openssh,salt.minion,ntp,rsyslog
                     // salt 'ctl*' state.sls keepalived
                     // salt 'prx*' state.sls keepalived
                     salt.enforceState(saltMaster, 'prx*', 'keepalived')
@@ -447,20 +490,43 @@
                     common.warningMsg('does not match any accepted, unaccepted or rejected keys. They were probably already removed. We should continue to run')
                 }
 
-                databases = salt.cmdRun(saltMaster, 'I@mysql:client','salt-call mysql.db_list | grep -v \'upgrade\' | grep -v \'schema\' | awk \'/-/ {print \$2}\'')
-                if(databases && databases != ""){
-                    databasesList = databases['return'][0].values()[0].trim().tokenize("\n")
-                    for( i = 0; i < databasesList.size(); i++){ 
-                        if(!databasesList[i].toLowerCase().contains('upgrade') && !databasesList[i].toLowerCase().contains('command execution')){
-                            salt.runSaltProcessStep(saltMaster, 'I@mysql:client', 'mysql.db_remove', ["${databasesList[i]}"], null, true)
-                            common.warningMsg("removing database ${databasesList[i]}")
-                            salt.runSaltProcessStep(saltMaster, 'I@mysql:client', 'file.remove', ["/root/mysql/flags/${databasesList[i]}-installed"], null, true)
-                        }
-                    }
-                    salt.enforceState(saltMaster, 'I@mysql:client', 'mysql.client')
-                }else{
-                    common.errorMsg("No none _upgrade databases were returned")
+                // database restore section
+                try {
+                    salt.runSaltProcessStep(saltMaster, 'I@galera:slave', 'service.stop', ['mysql'], null, true)
+                } catch (Exception e) {
+                    common.warningMsg('Mysql service already stopped')
                 }
+                try {
+                    salt.runSaltProcessStep(saltMaster, 'I@galera:master', 'service.stop', ['mysql'], null, true)
+                } catch (Exception e) {
+                    common.warningMsg('Mysql service already stopped')
+                }
+                try {
+                    salt.cmdRun(saltMaster, 'I@galera:slave', "rm /var/lib/mysql/ib_logfile*")
+                } catch (Exception e) {
+                    common.warningMsg('Files are not present')
+                }
+                try {
+                    salt.cmdRun(saltMaster, 'I@galera:master', "rm -rf /var/lib/mysql/*")
+                } catch (Exception e) {
+                    common.warningMsg('Directory already empty')
+                }
+                try {
+                    salt.runSaltProcessStep(saltMaster, 'I@galera:master', 'file.remove', ["/var/lib/mysql/.galera_bootstrap"], null, true)
+                } catch (Exception e) {
+                    common.warningMsg('File is not present')
+                }
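+                // empty gcomm:// makes the master bootstrap a fresh Galera cluster on start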
+                salt.cmdRun(saltMaster, 'I@galera:master', "sed -i '/gcomm/c\\wsrep_cluster_address=\"gcomm://\"' /etc/mysql/my.cnf")
+                _pillar = salt.getPillar(saltMaster, "I@galera:master", 'xtrabackup:client:backup_dir')
+                backup_dir = _pillar['return'][0].values()[0]
+                if (backup_dir == null || backup_dir.isEmpty()) { backup_dir = '/var/backups/mysql/xtrabackup' }
+                print(backup_dir)
+                salt.runSaltProcessStep(saltMaster, 'I@galera:master', 'file.remove', ["${backup_dir}/dbrestored"], null, true)
+                salt.cmdRun(saltMaster, 'I@xtrabackup:client', "su root -c 'salt-call state.sls xtrabackup'")
+                salt.runSaltProcessStep(saltMaster, 'I@galera:master', 'service.start', ['mysql'], null, true)
+                sleep(5)
+                salt.runSaltProcessStep(saltMaster, 'I@galera:slave', 'service.start', ['mysql'], null, true)
+                //
 
                 salt.runSaltProcessStep(saltMaster, "${prx01NodeProvider}", 'virt.start', ["prx01.${domain}"], null, true)
                 salt.runSaltProcessStep(saltMaster, "${prx02NodeProvider}", 'virt.start', ["prx02.${domain}"], null, true)
@@ -471,7 +537,7 @@
                 // salt 'cmp*' cmd.run 'service nova-compute restart'
                 salt.runSaltProcessStep(saltMaster, 'cmp*', 'service.restart', ['nova-compute'], null, true)
 
-                sleep(60)
+                sleep(70)
 
                 salt.cmdRun(saltMaster, 'ctl01*', '. /root/keystonerc; nova service-list; glance image-list; nova flavor-list; nova hypervisor-list; nova list; neutron net-list; cinder list; heat service-list')
             }
diff --git a/test-cookiecutter-reclass.groovy b/test-cookiecutter-reclass.groovy
index 0d8d319..693c4b0 100644
--- a/test-cookiecutter-reclass.groovy
+++ b/test-cookiecutter-reclass.groovy
@@ -1,10 +1,11 @@
 common = new com.mirantis.mk.Common()
+gerrit = new com.mirantis.mk.Gerrit()
 git = new com.mirantis.mk.Git()
 python = new com.mirantis.mk.Python()
 saltModelTesting = new com.mirantis.mk.SaltModelTesting()
 
-def generateSaltMaster(modelEnv) {
-    def nodeFile = "${modelEnv}/nodes/cfg01.${clusterDomain}.yml"
+def generateSaltMaster(modEnv, clusterDomain, clusterName) {
+    def nodeFile = "${modEnv}/nodes/cfg01.${clusterDomain}.yml"
     def nodeString = """classes:
 - cluster.${clusterName}.infra.config
 parameters:
@@ -16,57 +17,71 @@
             name: cfg01
             domain: ${clusterDomain}
 """
-    sh "mkdir -p ${modelEnv}/nodes/"
+    sh "mkdir -p ${modEnv}/nodes/"
+    println "Create file ${nodeFile}"
     writeFile(file: nodeFile, text: nodeString)
 }
 
-def generate(contextFile) {
-    def templateEnv = "${env.WORKSPACE}/template"
-    def baseName = sh(script: "basename ${contextFile} .yml", returnStdout: true)
-    def modelEnv = "${env.WORKSPACE}/model-${baseName}"
-    def cookiecutterTemplateContext = readFile(file: "${env.WORKSPACE}/contexts/contextFile")
-    def templateContext = readYaml text: cookiecutterTemplateContext
+def generateModel(modelFile, cutterEnv) {
+    def templateEnv = "${env.WORKSPACE}"
+    def modelEnv = "${env.WORKSPACE}/model"
+    def basename = sh(script: "basename ${modelFile} .yml", returnStdout: true).trim()
+    def generatedModel = "${modelEnv}/${basename}"
+    def testEnv = "${env.WORKSPACE}/test"
+    def content = readFile(file: "${templateEnv}/contexts/${modelFile}")
+    def templateContext = readYaml text: content
     def clusterDomain = templateContext.default_context.cluster_domain
     def clusterName = templateContext.default_context.cluster_name
-    def cutterEnv = "${env.WORKSPACE}/cutter"
-    def jinjaEnv = "${env.WORKSPACE}/jinja"
-    def outputDestination = "${modelEnv}/classes/cluster/${clusterName}"
+    def outputDestination = "${generatedModel}/classes/cluster/${clusterName}"
     def targetBranch = "feature/${clusterName}"
-    def templateBaseDir = "${env.WORKSPACE}/template"
-    def templateDir = "${templateEnv}/template/dir"
+    def templateBaseDir = "${env.WORKSPACE}"
+    def templateDir = "${templateEnv}/dir"
     def templateOutputDir = templateBaseDir
-    sh("rm -rf ${templateBaseDir} || true")
+    sh "rm -rf ${generatedModel} || true"
 
     def productList = ["infra", "cicd", "opencontrail", "kubernetes", "openstack", "stacklight"]
     for (product in productList) {
-        def stagename = (product == "infra") ? "Generate base infrastructure" : "Generate product ${product}"
-        println stagename
         if (product == "infra" || (templateContext.default_context["${product}_enabled"]
             && templateContext.default_context["${product}_enabled"].toBoolean())) {
             templateDir = "${templateEnv}/cluster_product/${product}"
-            templateOutputDir = "${env.WORKSPACE}/template/output/${product}"
+            templateOutputDir = "${env.WORKSPACE}/output/${product}"
+            sh "rm -rf ${templateOutputDir} || true"
             sh "mkdir -p ${templateOutputDir}"
             sh "mkdir -p ${outputDestination}"
-            python.setupCookiecutterVirtualenv(cutterEnv)
-            python.buildCookiecutterTemplate(templateDir, cookiecutterTemplateContext, templateOutputDir, cutterEnv, templateBaseDir)
+            python.buildCookiecutterTemplate(templateDir, content, templateOutputDir, cutterEnv, templateBaseDir)
             sh "mv -v ${templateOutputDir}/${clusterName}/* ${outputDestination}"
         }
     }
-    generateSaltMaster(modelEnv)
+    generateSaltMaster(generatedModel, clusterDomain, clusterName)
 }
 
-def testModel(contextFile) {
-    def baseName = sh(script: "basename ${contextFile} .yml", returnStdout: true)
-    def modelEnv = "${env.WORKSPACE}/model-${baseName}"
-    git.checkoutGitRepository("${modelEnv}/classes/system", RECLASS_MODEL_URL, RECLASS_MODEL_BRANCH, RECLASS_MODEL_CREDENTIALS)
-    saltModelTesting.setupAndTestNode("cfg01.${clusterDomain}", "", modelEnv)
+def testModel(modelFile, testEnv) {
+    def templateEnv = "${env.WORKSPACE}"
+    def content = readFile(file: "${templateEnv}/contexts/${modelFile}.yml")
+    def templateContext = readYaml text: content
+    def clusterDomain = templateContext.default_context.cluster_domain
+    git.checkoutGitRepository("${testEnv}/classes/system", RECLASS_MODEL_URL, RECLASS_MODEL_BRANCH, CREDENTIALS_ID)
+    saltModelTesting.setupAndTestNode("cfg01.${clusterDomain}", EXTRA_FORMULAS, testEnv)
+}
+
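+// GERRIT_REFSPEC is only injected when the job is triggered from Gerrit; otherwise fall back to a plain checkout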
+def gerritRef
+try {
+  gerritRef = GERRIT_REFSPEC
+} catch (MissingPropertyException e) {
+  gerritRef = null
 }
 
 timestamps {
     node("python&&docker") {
-        def templateEnv = "${env.WORKSPACE}/template"
+        def templateEnv = "${env.WORKSPACE}"
+        def cutterEnv = "${env.WORKSPACE}/cutter"
+        def jinjaEnv = "${env.WORKSPACE}/jinja"
 
         try {
+            stage("Cleanup") {
+                sh("rm -rf * || true")
+            }
+
             stage ('Download Cookiecutter template') {
                 if (gerritRef) {
                     def gerritChange = gerrit.getGerritChange(GERRIT_NAME, GERRIT_HOST, GERRIT_CHANGE_NUMBER, CREDENTIALS_ID)
@@ -79,29 +94,40 @@
                         common.successMsg("Change ${GERRIT_CHANGE_NUMBER} is already merged, no need to gate them")
                     }
                 } else {
-                    gerrit.gerritPatchsetCheckout(COOKIECUTTER_TEMPLATE_URL, COOKIECUTTER_TEMPLATE_BRANCH, "HEAD", CREDENTIALS_ID)
+                    git.checkoutGitRepository(templateEnv, COOKIECUTTER_TEMPLATE_URL, COOKIECUTTER_TEMPLATE_BRANCH, CREDENTIALS_ID)
                 }
             }
 
+            stage("Setup") {
+                python.setupCookiecutterVirtualenv(cutterEnv)
+            }
+
             def contextFiles
-            dir("contexts") {
+            dir("${templateEnv}/contexts") {
                 contextFiles = findFiles(glob: "*.yml")
             }
 
-            for (contextFile in contextFiles) {
-                generate(contextFile)
+            def contextFileList = []
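+            // copy the FileWrapper results into a plain list so they survive CPS serialization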
+            for (int i = 0; i < contextFiles.size(); i++) {
+                contextFileList << contextFiles[i]
+            }
+
+            stage("generate-model") {
+                for (contextFile in contextFileList) {
+                    generateModel(contextFile, cutterEnv)
+                }
             }
 
             stage("test-nodes") {
-                def partitions = common.partitionList(contextFiles, 3)
+                def partitions = common.partitionList(contextFileList, PARALLEL_NODE_GROUP_SIZE.toInteger())
                 def buildSteps = [:]
                 for (int i = 0; i < partitions.size(); i++) {
                     def partition = partitions[i]
                     buildSteps.put("partition-${i}", new HashMap<String,org.jenkinsci.plugins.workflow.cps.CpsClosure2>())
                     for(int k = 0; k < partition.size; k++){
                         def basename = sh(script: "basename ${partition[k]} .yml", returnStdout: true).trim()
-                        def modelEnv = "${env.WORKSPACE}/model-${baseName}"
-                        buildSteps.get("partition-${i}").put(basename, { saltModelTesting.setupAndTestNode(basename, "", modelEnv) })
+                        def testEnv = "${env.WORKSPACE}/model/${basename}"
+                        buildSteps.get("partition-${i}").put(basename, { testModel(basename, testEnv) })
                     }
                 }
                 common.serial(buildSteps)
@@ -112,7 +138,7 @@
              throw e
         } finally {
             stage ('Clean workspace directories') {
-                sh(returnStatus: true, script: "rm -rfv *")
+                sh(returnStatus: true, script: "rm -rfv * > /dev/null || true")
             }
             common.sendNotification(currentBuild.result,"",["slack"])
         }
diff --git a/test-nodejs-pipeline.groovy b/test-nodejs-pipeline.groovy
index d659b7e..b1024cc 100644
--- a/test-nodejs-pipeline.groovy
+++ b/test-nodejs-pipeline.groovy
@@ -9,12 +9,12 @@
 gerrit = new com.mirantis.mk.Gerrit()
 common = new com.mirantis.mk.Common()
 
-def executeCmd(containerId, cmd) {
+def executeCmd(containerName, cmd) {
     stage(cmd) {
-        assert containerId != null
+        assert containerName != null
         common.infoMsg("Starting command: ${cmd}")
         def output = sh(
-            script: "docker exec ${containerId} ${cmd}",
+            script: "docker exec ${containerName} ${cmd}",
             returnStdout: true,
         )
         common.infoMsg(output)
@@ -39,8 +39,8 @@
 }
 def checkouted = false
 
-node("docker") {
-    def containerId
+node("vm") {
+    def containerName
     def uniqId
     try {
         stage('Checkout source code') {
@@ -60,7 +60,7 @@
         stage('Generate config file for devops portal') {
             writeFile (
                 file: "${workspace}/test_config.json",
-                text: '${JSON_CONFIG}'
+                text: "${JSON_CONFIG}"
             )
        }
        stage('Start container') {
@@ -72,14 +72,16 @@
                 uniqId = defaultGitRef.tokenize('/').takeRight(2).join('') + timeStamp
             }
             sh("docker-compose -f ${COMPOSE_PATH} -p ${uniqId} up -d")
-            containerId = "${uniqId}_devopsportal_1"
-            common.successMsg("Container with id ${containerId} started.")
-            sh("docker cp ${workspace}/. ${containerId}:/opt/workspace/")
+            containerName = "${uniqId}_devopsportal_1"
+            common.successMsg("Container with id ${containerName} started.")
+            sh("docker cp ${workspace}/. ${containerName}:/opt/workspace/")
         }
-        executeCmd(containerId, "npm install")
+        executeCmd(containerName, "npm install")
         def cmds = COMMANDS.tokenize('\n')
         for (int i = 0; i < cmds.size(); i++) {
-           executeCmd(containerId, cmds[i])
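+           // timeout() defaults to minutes, so each command is capped at 5 minutes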
+           timeout(5) {
+               executeCmd(containerName, cmds[i])
+           }
         }
     } catch (err) {
         currentBuild.result = 'FAILURE'
@@ -88,14 +90,14 @@
     } finally {
         common.sendNotification(currentBuild.result, "" ,["slack"])
         stage('Cleanup') {
-            if (containerId != null) {
+            if (containerName != null) {
                 dockerCleanupCommands = ['stop', 'rm -f']
                 for (int i = 0; i < dockerCleanupCommands.size(); i++) {
                     sh("docker-compose -f ${COMPOSE_PATH} -p ${uniqId} ${dockerCleanupCommands[i]} || true")
                 }
                 sh("docker network rm ${uniqId}_default || true")
                 sh("rm -f ${workspace}/test_config.json || true")
-                common.infoMsg("Container with id ${containerId} was removed.")
+                common.infoMsg("Container with id ${containerName} was removed.")
             }
         }
     }
diff --git a/test-salt-formulas-pipeline.groovy b/test-salt-formulas-pipeline.groovy
index 6baa25e..bfa38e6 100644
--- a/test-salt-formulas-pipeline.groovy
+++ b/test-salt-formulas-pipeline.groovy
@@ -25,9 +25,9 @@
     defaultGitUrl = null
 }
 
-def checkouted = false;
+def checkouted = false
 
-node("python&&docker") {
+node("python") {
   try{
     stage("stop old tests"){
       if (gerritRef) {
@@ -41,69 +41,85 @@
     stage("checkout") {
       if (gerritRef) {
         // job is triggered by Gerrit
-        checkouted = gerrit.gerritPatchsetCheckout ([
-          credentialsId : CREDENTIALS_ID
-        ])
+        def gerritChange = gerrit.getGerritChange(GERRIT_NAME, GERRIT_HOST, GERRIT_CHANGE_NUMBER, CREDENTIALS_ID)
+        // skip tests when the commit message contains WIP
+        if(gerritChange.commitMessage.contains("WIP")){
+          common.successMsg("Commit message contains WIP, skipping tests") // do nothing
+        }else{
+          // test if the change isn't already merged
+          def merged = gerritChange.status == "MERGED"
+          if(!merged){
+            checkouted = gerrit.gerritPatchsetCheckout ([
+              credentialsId : CREDENTIALS_ID
+            ])
+          } else{
+            common.successMsg("Change ${GERRIT_CHANGE_NUMBER} is already merged, no need to test them")
+          }
+        }
       } else if(defaultGitRef && defaultGitUrl) {
           checkouted = gerrit.gerritPatchsetCheckout(defaultGitUrl, defaultGitRef, "HEAD", CREDENTIALS_ID)
-      }
-      if(!checkouted){
+      } else {
         throw new Exception("Cannot checkout gerrit patchset, GERRIT_REFSPEC and DEFAULT_GIT_REF is null")
       }
     }
     stage("test") {
       if(checkouted){
-        wrap([$class: 'AnsiColorBuildWrapper']) {
-          sh("make clean")
-          sh("[ $SALT_VERSION != 'latest' ] || export SALT_VERSION=''; make test")
+          wrap([$class: 'AnsiColorBuildWrapper']) {
+            sh("make clean")
+            sh("[ $SALT_VERSION != 'latest' ] || export SALT_VERSION=''; make test")
         }
       }
     }
     stage("kitchen") {
-      if (fileExists(".kitchen.yml")) {
-        common.infoMsg(".kitchen.yml found, running kitchen tests")
-        ruby.ensureRubyEnv()
-        def kitchenEnvs = []
-        if(fileExists(".travis.yml")){
-          common.infoMsg(".travis.yml found, running custom kitchen init")
-          def kitchenConfigYML = readYaml(file: ".travis.yml")
-          kitchenEnvs=kitchenConfigYML["env"]
-          def kitchenInit = kitchenConfigYML["install"]
-          def kitchenInstalled = false
-          if(kitchenInit && !kitchenInit.isEmpty()){
-            for(int i=0;i<kitchenInit.size();i++){
-              if(kitchenInit[i].trim().startsWith("test -e Gemfile")){ //found Gemfile config
-                common.infoMsg("Custom Gemfile configuration found, using them")
-                ruby.installKitchen(kitchenInit[i].trim())
-                kitchenInstalled = true
+      if(checkouted){
+        if (fileExists(".kitchen.yml")) {
+          common.infoMsg(".kitchen.yml found, running kitchen tests")
+          ruby.ensureRubyEnv()
+          def kitchenEnvs = []
+          if(fileExists(".travis.yml")){
+            common.infoMsg(".travis.yml found, running custom kitchen init")
+            def kitchenConfigYML = readYaml(file: ".travis.yml")
+            kitchenEnvs=kitchenConfigYML["env"]
+            def kitchenInit = kitchenConfigYML["install"]
+            def kitchenInstalled = false
+            if(kitchenInit && !kitchenInit.isEmpty()){
+              for(int i=0;i<kitchenInit.size();i++){
+                if(kitchenInit[i].trim().startsWith("test -e Gemfile")){ //found Gemfile config
+                  common.infoMsg("Custom Gemfile configuration found, using them")
+                  ruby.installKitchen(kitchenInit[i].trim())
+                  kitchenInstalled = true
+                }
               }
             }
-          }
-          if(!kitchenInstalled){
-            ruby.installKitchen()
-          }
-        }else{
-          common.infoMsg(".travis.yml not found, running default kitchen init")
-          ruby.installKitchen()
-        }
-        wrap([$class: 'AnsiColorBuildWrapper']) {
-          common.infoMsg("Running kitchen testing, parallel mode: " + KITCHEN_TESTS_PARALLEL.toBoolean())
-          if(!kitchenEnvs.isEmpty()){
-            for(int i=0;i<kitchenEnvs.size();i++){
-              common.infoMsg("Found multiple environment, kitchen running with env: " + kitchenEnvs[i])
-              ruby.runKitchenTests(kitchenEnvs[i], KITCHEN_TESTS_PARALLEL.toBoolean())
+            if(!kitchenInstalled){
+              ruby.installKitchen()
             }
           }else{
-            ruby.runKitchenTests("", KITCHEN_TESTS_PARALLEL.toBoolean())
+            common.infoMsg(".travis.yml not found, running default kitchen init")
+            ruby.installKitchen()
           }
+          common.infoMsg("Running kitchen testing, parallel mode: " + KITCHEN_TESTS_PARALLEL.toBoolean())
+          wrap([$class: 'AnsiColorBuildWrapper']) {
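+            // when .travis.yml defines env entries, run kitchen once without
+            // a custom env and then once per entry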
+            if(kitchenEnvs && !kitchenEnvs.isEmpty()){
+              common.infoMsg("Found multiple environment, first running kitchen without custom env")
+              ruby.runKitchenTests("", KITCHEN_TESTS_PARALLEL.toBoolean())
+              for(int i=0;i<kitchenEnvs.size();i++){
+                common.infoMsg("Found multiple environment, kitchen running with env: " + kitchenEnvs[i])
+                ruby.runKitchenTests(kitchenEnvs[i], KITCHEN_TESTS_PARALLEL.toBoolean())
+              }
+            }else{
+              ruby.runKitchenTests("", KITCHEN_TESTS_PARALLEL.toBoolean())
+            }
+          }
+        } else {
+          common.infoMsg(".kitchen.yml not found")
         }
-      } else {
-        common.infoMsg(".kitchen.yml not found")
       }
     }
   } catch (Throwable e) {
      // If there was an error or exception thrown, the build failed
      currentBuild.result = "FAILURE"
+     ruby.runKitchenCommand("destroy")
      throw e
   } finally {
      if(currentBuild.result == "FAILURE" && fileExists(".kitchen/logs/kitchen.log")){
diff --git a/test-salt-models-pipeline.groovy b/test-salt-models-pipeline.groovy
index 3ed8f61..c6f1a71 100644
--- a/test-salt-models-pipeline.groovy
+++ b/test-salt-models-pipeline.groovy
@@ -29,8 +29,7 @@
     defaultGitUrl = null
 }
 def checkouted = false
-def merged = false
-node("python&&docker") {
+node("python") {
   try{
     stage("stop old tests"){
       if (gerritRef) {
@@ -46,18 +45,25 @@
         // job is triggered by Gerrit
         // test if change aren't already merged
         def gerritChange = gerrit.getGerritChange(GERRIT_NAME, GERRIT_HOST, GERRIT_CHANGE_NUMBER, CREDENTIALS_ID)
-        merged = gerritChange.status == "MERGED"
-        if(!merged){
-          checkouted = gerrit.gerritPatchsetCheckout ([
-            credentialsId : CREDENTIALS_ID
-          ])
-        } else{
-          common.successMsg("Change ${GERRIT_CHANGE_NUMBER} is already merged, no need to gate them")
+        // skip tests when the commit message contains WIP
+        if (gerritChange.commitMessage.contains("WIP")) {
+          common.successMsg("Commit message contains WIP, skipping tests") // do nothing
+        } else {
+          def merged = gerritChange.status == "MERGED"
+          if(!merged){
+            checkouted = gerrit.gerritPatchsetCheckout ([
+              credentialsId : CREDENTIALS_ID
+            ])
+          } else{
+            common.successMsg("Change ${GERRIT_CHANGE_NUMBER} is already merged, no need to test them")
+          }
         }
       } else if(defaultGitRef && defaultGitUrl) {
           checkouted = gerrit.gerritPatchsetCheckout(defaultGitUrl, defaultGitRef, "HEAD", CREDENTIALS_ID)
+      } else {
+        throw new Exception("Cannot checkout gerrit patchset, GERRIT_REFSPEC and DEFAULT_GIT_REF are null")
       }
-      if(checkouted){
+      if(checkouted) {
         if (fileExists('classes/system')) {
           ssh.prepareSshAgentKey(CREDENTIALS_ID)
           dir('classes/system') {
@@ -66,43 +72,27 @@
           }
           ssh.agentSh("git submodule init; git submodule sync; git submodule update --recursive")
         }
-      }else if(!merged){
-        throw new Exception("Cannot checkout gerrit patchset, GERRIT_REFSPEC and DEFAULT_GIT_REF is null")
       }
     }
 
     stage("test-nodes") {
-      def workspace = common.getWorkspace()
-      def nodes = sh(script: "find ./nodes -type f -name 'cfg*.yml'", returnStdout: true).tokenize()
-      def buildSteps = [:]
-      if(nodes.size() > 1){
-          if(nodes.size() <= 3 && PARALLEL_NODE_GROUP_SIZE.toInteger() != 1) {
-            common.infoMsg("Found <=3  cfg nodes, running parallel test")
-             for(int i=0; i < nodes.size();i++){
-               def basename = sh(script: "basename ${partition[k]} .yml", returnStdout: true).trim()
-               buildSteps.put("node-${basename}", { saltModelTesting.setupAndTestNode(basename, EXTRA_FORMULAS, workspace) })
-             }
-             parallel buildSteps
-          }else{
-            common.infoMsg("Found more than 3 cfg nodes or debug enabled, running parallel group test with ${PARALLEL_NODE_GROUP_SIZE} nodes")
-            def partitions = common.partitionList(nodes, PARALLEL_NODE_GROUP_SIZE.toInteger())
-            for (int i=0; i < partitions.size();i++) {
-              def partition = partitions[i]
-              buildSteps.put("partition-${i}", new HashMap<String,org.jenkinsci.plugins.workflow.cps.CpsClosure2>())
-              for(int k=0; k < partition.size;k++){
-                  def basename = sh(script: "basename ${partition[k]} .yml", returnStdout: true).trim()
-                  buildSteps.get("partition-${i}").put(basename, { saltModelTesting.setupAndTestNode(basename, EXTRA_FORMULAS, workspace) })
-              }
-            }
-            common.serial(buildSteps)
+      if(checkouted) {
+        def workspace = common.getWorkspace()
+        def nodes = sh(script: "find ./nodes -type f -name 'cfg*.yml'", returnStdout: true).tokenize()
+        def buildSteps = [:]
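+        // cap the partition size at the node count so partitionList never
+        // receives a chunk size larger than the list itself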
+        def partitionSize = (nodes.size() <= PARALLEL_NODE_GROUP_SIZE.toInteger()) ? nodes.size() : PARALLEL_NODE_GROUP_SIZE.toInteger()
+        def partitions = common.partitionList(nodes, partitionSize)
+        for (int i =0; i < partitions.size();i++) {
+          def partition = partitions[i]
+          buildSteps.put("partition-${i}", new HashMap<String,org.jenkinsci.plugins.workflow.cps.CpsClosure2>())
+          for(int k=0; k < partition.size;k++){
+              def basename = sh(script: "basename ${partition[k]} .yml", returnStdout: true).trim()
+              buildSteps.get("partition-${i}").put(basename, { saltModelTesting.setupAndTestNode(basename, EXTRA_FORMULAS, workspace) })
           }
-      }else{
-          common.infoMsg("Found one cfg node, running single test")
-          def basename = sh(script: "basename ${nodes[0]} .yml", returnStdout: true).trim()
-          saltModelTesting.setupAndTestNode(basename, EXTRA_FORMULAS, workspace)
+        }
+        common.serial(buildSteps)
       }
     }
-
   } catch (Throwable e) {
      // If there was an error or exception thrown, the build failed
      currentBuild.result = "FAILURE"
@@ -118,7 +108,7 @@
   def jenkinsUtils = new com.mirantis.mk.JenkinsUtils()
   def triggeredBuilds= gerrit.getGerritTriggeredBuilds(jenkinsUtils.getJobRunningBuilds(jobName), gerritChangeNumber, excludePatchsetNumber)
   def buildNums =[]
-  for(int i=0;i<triggeredBuilds.size();i++){
+  for (int i=0; i<triggeredBuilds.size(); i++) {
       buildNums.add(triggeredBuilds[i].number)
   }
   return buildNums
diff --git a/update-package.groovy b/update-package.groovy
index 1dd6e02..c6d008d 100644
--- a/update-package.groovy
+++ b/update-package.groovy
@@ -6,9 +6,9 @@
  *   SALT_MASTER_URL            Full Salt API address [https://10.10.10.1:8000].
  *   TARGET_SERVERS             Salt compound target to match nodes to be updated [*, G@osfamily:debian].
 *   TARGET_PACKAGES            Space delimited list of packages to be updated [package1=version package2=version], empty string means updating all packages to the latest version.
- *   TARGET_SIZE_TEST           Number of nodes to list package updates, empty string means all targetted nodes.
- *   TARGET_SIZE_SAMPLE         Number of selected noded to live apply selected package update.
- *   TARGET_SIZE_BATCH          Batch size for the complete live package update on all nodes, empty string means apply to all targetted nodes.
+ *   TARGET_SUBSET_TEST         Number of nodes to list package updates, empty string means all targeted nodes.
+ *   TARGET_SUBSET_LIVE         Number of selected nodes on which to live apply the selected package update.
+ *   TARGET_BATCH_LIVE          Batch size for the complete live package update on all nodes, empty string means apply to all targeted nodes.
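+ *
+ *   Example: TARGET_SUBSET_TEST=3, TARGET_SUBSET_LIVE=1, TARGET_BATCH_LIVE=10
+ *   would list candidate updates on 3 nodes, live apply them on 1 node
+ *   first, then roll out to the remaining nodes in batches of 10.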
  *
 **/
 
diff --git a/xtrabackup-restore-mysql-db.groovy b/xtrabackup-restore-mysql-db.groovy
new file mode 100644
index 0000000..6c8b379
--- /dev/null
+++ b/xtrabackup-restore-mysql-db.groovy
@@ -0,0 +1,83 @@
+/**
+ * Restore MySQL database from an Xtrabackup backup on Galera nodes
+ *
+ * Expected parameters:
+ *   SALT_MASTER_CREDENTIALS    Credentials to the Salt API.
+ *   SALT_MASTER_URL            Full Salt API address [http://10.10.10.1:8000].
+ *
+**/
+
+def common = new com.mirantis.mk.Common()
+def salt = new com.mirantis.mk.Salt()
+
+
+def saltMaster
+
+timestamps {
+    node() {
+
+        stage('Connect to Salt API') {
+            saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+        }
+
+        stage('Start restore') {
+            // actual restore
+
+            stage('Ask for manual confirmation') {
+                input message: "Are you sure you have the correct backups ready? Do you really want to continue and restore the MySQL database?"
+            }
+            // database restore section
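+            // Sequence: stop mysql on all Galera nodes, move the master's
+            // datadir aside, bootstrap the master from the xtrabackup
+            // backup, then restart the slaves so they rejoin the cluster.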
+            try {
+                salt.runSaltProcessStep(saltMaster, 'I@galera:slave', 'service.stop', ['mysql'], null, true)
+            } catch (Exception er) {
+                common.warningMsg('Mysql service already stopped')
+            }
+            try {
+                salt.runSaltProcessStep(saltMaster, 'I@galera:master', 'service.stop', ['mysql'], null, true)
+            } catch (Exception er) {
+                common.warningMsg('Mysql service already stopped')
+            }
+            try {
+                salt.cmdRun(saltMaster, 'I@galera:slave', "rm /var/lib/mysql/ib_logfile*")
+            } catch (Exception er) {
+                common.warningMsg('Files are not present')
+            }
+            try {
+                salt.cmdRun(saltMaster, 'I@galera:master', "mkdir /root/mysql/mysql.bak")
+            } catch (Exception er) {
+                common.warningMsg('Directory already exists')
+            }
+            try {
+                salt.cmdRun(saltMaster, 'I@galera:master', "rm -rf /root/mysql/mysql.bak/*")
+            } catch (Exception er) {
+                common.warningMsg('Directory already empty')
+            }
+            try {
+                salt.cmdRun(saltMaster, 'I@galera:master', "mv /var/lib/mysql/* /root/mysql/mysql.bak")
+            } catch (Exception er) {
+                common.warningMsg('Files were already moved')
+            }
+            try {
+                salt.runSaltProcessStep(saltMaster, 'I@galera:master', 'file.remove', ["/var/lib/mysql/.galera_bootstrap"], null, true)
+            } catch (Exception er) {
+                common.warningMsg('File is not present')
+            }
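+            // an empty gcomm:// address makes the master bootstrap a new
+            // Galera cluster on its next start instead of joining an existing one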
+            salt.cmdRun(saltMaster, 'I@galera:master', "sed -i '/gcomm/c\\wsrep_cluster_address=\"gcomm://\"' /etc/mysql/my.cnf")
+            _pillar = salt.getPillar(saltMaster, "I@galera:master", 'xtrabackup:client:backup_dir')
+            backup_dir = _pillar['return'][0].values()[0]
+            if(backup_dir == null || backup_dir.isEmpty()) { backup_dir='/var/backups/mysql/xtrabackup' }
+            common.infoMsg(backup_dir)
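+            // removing the dbrestored flag (presumably left by a previous
+            // restore) makes the xtrabackup state perform the restore again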
+            salt.runSaltProcessStep(saltMaster, 'I@galera:master', 'file.remove', ["${backup_dir}/dbrestored"], null, true)
+            salt.cmdRun(saltMaster, 'I@xtrabackup:client', "su root -c 'salt-call state.sls xtrabackup'")
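+            // start the bootstrapped master first, then the slaves; the
+            // sleeps give wsrep time to sync before the cluster size check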
+            salt.runSaltProcessStep(saltMaster, 'I@galera:master', 'service.start', ['mysql'], null, true)
+            sleep(5)
+            salt.runSaltProcessStep(saltMaster, 'I@galera:slave', 'service.start', ['mysql'], null, true)
+            sleep(15)
+            salt.cmdRun(saltMaster, 'I@galera:master', "su root -c 'salt-call mysql.status | grep -A1 wsrep_cluster_size'")
+
+        }
+    }
+}
+
+
+