Remove all usage of the timestamps wrapper

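The timestamps step (from the Timestamper plugin) wrapped every
pipeline body and added one extra level of nesting; with it gone,
each pipeline now starts directly with its node block. The change is
a plain unwrap and re-indent, roughly following this pattern (sketch
only, not copied verbatim from any of the affected files):

    // before
    timestamps {
        node {
            // stages ...
        }
    }

    // after
    node {
        // stages ...
    }
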
Change-Id: I475e3d572b53b0886aad1e8a4f032323c2fef619
diff --git a/cicd-lab-pipeline.groovy b/cicd-lab-pipeline.groovy
index 37643cf..3f45233 100644
--- a/cicd-lab-pipeline.groovy
+++ b/cicd-lab-pipeline.groovy
@@ -32,317 +32,316 @@
 salt = new com.mirantis.mk.Salt()
 orchestrate = new com.mirantis.mk.Orchestrate()
 _MAX_PERMITTED_STACKS = 2
-timestamps {
-    node {
+
+node {
+    try {
+        // connection objects
+        def openstackCloud
+        def saltMaster
+
+        // value defaults
+        def openstackVersion = OPENSTACK_API_CLIENT ? OPENSTACK_API_CLIENT : 'liberty'
+        def openstackEnv = "${env.WORKSPACE}/venv"
+
         try {
-            // connection objects
-            def openstackCloud
-            def saltMaster
+            sshPubKey = SSH_PUBLIC_KEY
+        } catch (MissingPropertyException e) {
+            sshPubKey = false
+        }
 
-            // value defaults
-            def openstackVersion = OPENSTACK_API_CLIENT ? OPENSTACK_API_CLIENT : 'liberty'
-            def openstackEnv = "${env.WORKSPACE}/venv"
+        if (HEAT_STACK_REUSE.toBoolean() == true && HEAT_STACK_NAME == '') {
+            error("If you want to reuse existing stack you need to provide it's name")
+        }
 
-            try {
-                sshPubKey = SSH_PUBLIC_KEY
-            } catch (MissingPropertyException e) {
-                sshPubKey = false
-            }
-
-            if (HEAT_STACK_REUSE.toBoolean() == true && HEAT_STACK_NAME == '') {
-                error("If you want to reuse existing stack you need to provide it's name")
-            }
-
-            if (HEAT_STACK_REUSE.toBoolean() == false) {
-                // Don't allow to set custom heat stack name
-                wrap([$class: 'BuildUser']) {
-                    if (env.BUILD_USER_ID) {
-                        HEAT_STACK_NAME = "${env.BUILD_USER_ID}-${JOB_NAME}-${BUILD_NUMBER}"
-                    } else {
-                        HEAT_STACK_NAME = "jenkins-${JOB_NAME}-${BUILD_NUMBER}"
-                    }
-                    currentBuild.description = HEAT_STACK_NAME
-                }
-            }
-
-            //
-            // Bootstrap
-            //
-
-            stage ('Download Heat templates') {
-                git.checkoutGitRepository('template', HEAT_TEMPLATE_URL, HEAT_TEMPLATE_BRANCH, HEAT_TEMPLATE_CREDENTIALS)
-            }
-
-            stage('Install OpenStack CLI') {
-                openstack.setupOpenstackVirtualenv(openstackEnv, openstackVersion)
-            }
-
-            stage('Connect to OpenStack cloud') {
-                openstackCloud = openstack.createOpenstackEnv(
-                    OPENSTACK_API_URL, OPENSTACK_API_CREDENTIALS,
-                    OPENSTACK_API_PROJECT, OPENSTACK_API_PROJECT_DOMAIN,
-                    OPENSTACK_API_PROJECT_ID, OPENSTACK_API_USER_DOMAIN,
-                    OPENSTACK_API_VERSION)
-                openstack.getKeystoneToken(openstackCloud, openstackEnv)
-                wrap([$class: 'BuildUser']) {
-                    if (env.BUILD_USER_ID && !env.BUILD_USER_ID.equals("jenkins") && !HEAT_STACK_REUSE.toBoolean()) {
-                        def existingStacks = openstack.getStacksForNameContains(openstackCloud, "${env.BUILD_USER_ID}-${JOB_NAME}", openstackEnv)
-                        if(existingStacks.size() >= _MAX_PERMITTED_STACKS){
-                            HEAT_STACK_DELETE = "false"
-                            throw new Exception("You cannot create new stack, you already have ${_MAX_PERMITTED_STACKS} stacks of this type (${JOB_NAME}). \nStack names: ${existingStacks}")
-                        }
-                    }
-                }
-            }
-
-            if (HEAT_STACK_REUSE.toBoolean() == false) {
-                stage('Launch new Heat stack') {
-                    envParams = [
-                        'instance_zone': HEAT_STACK_ZONE,
-                        'public_net': HEAT_STACK_PUBLIC_NET
-                    ]
-                    openstack.createHeatStack(openstackCloud, HEAT_STACK_NAME, HEAT_STACK_TEMPLATE, envParams, HEAT_STACK_ENVIRONMENT, openstackEnv)
-                }
-            }
-
-            stage('Connect to Salt master') {
-                def saltMasterPort
-                try {
-                    saltMasterPort = SALT_MASTER_PORT
-                } catch (MissingPropertyException e) {
-                    saltMasterPort = 6969
-                }
-                saltMasterHost = openstack.getHeatStackOutputParam(openstackCloud, HEAT_STACK_NAME, 'salt_master_ip', openstackEnv)
-                currentBuild.description = "${HEAT_STACK_NAME}: ${saltMasterHost}"
-                saltMasterUrl = "http://${saltMasterHost}:${saltMasterPort}"
-                saltMaster = salt.connection(saltMasterUrl, SALT_MASTER_CREDENTIALS)
-            }
-
-            //
-            // Install
-            //
-
-            stage('Install core infra') {
-                // salt.master, reclass
-                // refresh_pillar
-                // sync_all
-                // linux,openssh,salt.minion.ntp
-
-                orchestrate.installFoundationInfra(saltMaster)
-                orchestrate.validateFoundationInfra(saltMaster)
-            }
-
-            stage("Deploy GlusterFS") {
-                salt.enforceState(saltMaster, 'I@glusterfs:server', 'glusterfs.server.service', true)
-                retry(2) {
-                    salt.enforceState(saltMaster, 'ci01*', 'glusterfs.server.setup', true)
-                }
-                sleep(5)
-                salt.enforceState(saltMaster, 'I@glusterfs:client', 'glusterfs.client', true)
-
-                timeout(5) {
-                    println "Waiting for GlusterFS volumes to get mounted.."
-                    salt.cmdRun(saltMaster, 'I@glusterfs:client', 'while true; do systemctl -a|grep "GlusterFS File System"|grep -v mounted >/dev/null || break; done')
-                }
-                print common.prettyPrint(salt.cmdRun(saltMaster, 'I@glusterfs:client', 'mount|grep fuse.glusterfs || echo "Command failed"'))
-            }
-
-            stage("Deploy GlusterFS") {
-                salt.enforceState(saltMaster, 'I@haproxy:proxy', 'haproxy,keepalived')
-            }
-
-            stage("Setup Docker Swarm") {
-                salt.enforceState(saltMaster, 'I@docker:host', 'docker.host', true)
-                salt.enforceState(saltMaster, 'I@docker:swarm:role:master', 'docker.swarm', true)
-                salt.enforceState(saltMaster, 'I@docker:swarm:role:master', 'salt', true)
-                salt.runSaltProcessStep(saltMaster, 'I@docker:swarm:role:master', 'mine.flush')
-                salt.runSaltProcessStep(saltMaster, 'I@docker:swarm:role:master', 'mine.update')
-                salt.enforceState(saltMaster, 'I@docker:swarm', 'docker.swarm', true)
-                print common.prettyPrint(salt.cmdRun(saltMaster, 'I@docker:swarm:role:master', 'docker node ls'))
-            }
-
-            stage("Configure OSS services") {
-                salt.enforceState(saltMaster, 'I@devops_portal:config', 'devops_portal.config')
-                salt.enforceState(saltMaster, 'I@rundeck:server', 'rundeck.server')
-            }
-
-            stage("Deploy Docker services") {
-                // We need /etc/aptly-publisher.yaml to be present before
-                // services are deployed
-                // XXX: for some weird unknown reason, refresh_pillar is
-                // required to execute here
-                salt.runSaltProcessStep(saltMaster, 'I@aptly:publisher', 'saltutil.refresh_pillar', [], null, true)
-                salt.enforceState(saltMaster, 'I@aptly:publisher', 'aptly.publisher', true)
-                retry(3) {
-                    sleep(5)
-                    salt.enforceState(saltMaster, 'I@docker:swarm:role:master', 'docker.client')
-                }
-                // XXX: Workaround to have `/var/lib/jenkins` on all
-                // nodes where are jenkins_slave services are created.
-                salt.runSaltProcessStep(saltMaster, 'I@docker:swarm', 'cmd.run', ['mkdir -p /var/lib/jenkins'])
-            }
-
-            stage("Configure CI/CD services") {
-                salt.syncAll(saltMaster, '*')
-
-                // Aptly
-                timeout(10) {
-                    println "Waiting for Aptly to come up.."
-                    retry(2) {
-                        // XXX: retry to workaround magical VALUE_TRIMMED
-                        // response from salt master + to give slow cloud some
-                        // more time to settle down
-                        salt.cmdRun(saltMaster, 'I@aptly:server', 'while true; do curl -sf http://172.16.10.254:8084/api/version >/dev/null && break; done')
-                    }
-                }
-                salt.enforceState(saltMaster, 'I@aptly:server', 'aptly', true)
-
-                // OpenLDAP
-                timeout(10) {
-                    println "Waiting for OpenLDAP to come up.."
-                    salt.cmdRun(saltMaster, 'I@openldap:client', 'while true; do curl -sf ldap://172.16.10.254 >/dev/null && break; done')
-                }
-                salt.enforceState(saltMaster, 'I@openldap:client', 'openldap', true)
-
-                // Gerrit
-                timeout(10) {
-                    println "Waiting for Gerrit to come up.."
-                    salt.cmdRun(saltMaster, 'I@gerrit:client', 'while true; do curl -sf 172.16.10.254:8080 >/dev/null && break; done')
-                }
-                salt.enforceState(saltMaster, 'I@gerrit:client', 'gerrit', true)
-
-                // Jenkins
-                timeout(10) {
-                    println "Waiting for Jenkins to come up.."
-                    salt.cmdRun(saltMaster, 'I@jenkins:client', 'while true; do curl -sf 172.16.10.254:8081 >/dev/null && break; done')
-                }
-                retry(2) {
-                    // XXX: needs retry as first run installs python-jenkins
-                    // thus make jenkins modules available for second run
-                    salt.enforceState(saltMaster, 'I@jenkins:client', 'jenkins', true)
-                }
-
-                // Postgres client - initialize OSS services databases
-                timeout(300){
-                    println "Waiting for postgresql database to come up.."
-                    salt.cmdRun(saltMaster, 'I@postgresql:client', 'while true; do if docker service logs postgresql_db | grep "ready to accept"; then break; else sleep 5; fi; done')
-                }
-                salt.enforceState(saltMaster, 'I@postgresql:client', 'postgresql.client', true, false)
-
-                // Setup postgres database with integration between
-                // Pushkin notification service and Security Monkey security audit service
-                timeout(10) {
-                    println "Waiting for Pushkin to come up.."
-                    salt.cmdRun(saltMaster, 'I@postgresql:client', 'while true; do curl -sf 172.16.10.254:8887/apps >/dev/null && break; done')
-                }
-                salt.enforceState(saltMaster, 'I@postgresql:client', 'postgresql.client', true)
-
-                // Rundeck
-                timeout(10) {
-                    println "Waiting for Rundeck to come up.."
-                    salt.cmdRun(saltMaster, 'I@rundeck:client', 'while true; do curl -sf 172.16.10.254:4440 >/dev/null && break; done')
-                }
-                salt.enforceState(saltMaster, 'I@rundeck:client', 'rundeck.client', true)
-
-                // Elasticsearch
-                timeout(10) {
-                    println 'Waiting for Elasticsearch to come up..'
-                    salt.cmdRun(saltMaster, 'I@elasticsearch:client', 'while true; do curl -sf 172.16.10.254:9200 >/dev/null && break; done')
-                }
-                salt.enforceState(saltMaster, 'I@elasticsearch:client', 'elasticsearch.client', true)
-            }
-
-            stage("Finalize") {
-                //
-                // Deploy user's ssh key
-                //
-                def adminUser
-                def authorizedKeysFile
-                def adminUserCmdOut = salt.cmdRun(saltMaster, 'I@salt:master', "[ ! -d /home/ubuntu ] || echo 'ubuntu user exists'")
-                if (adminUserCmdOut =~ /ubuntu user exists/) {
-                    adminUser = "ubuntu"
-                    authorizedKeysFile = "/home/ubuntu/.ssh/authorized_keys"
+        if (HEAT_STACK_REUSE.toBoolean() == false) {
+            // Don't allow setting a custom heat stack name
+            wrap([$class: 'BuildUser']) {
+                if (env.BUILD_USER_ID) {
+                    HEAT_STACK_NAME = "${env.BUILD_USER_ID}-${JOB_NAME}-${BUILD_NUMBER}"
                 } else {
-                    adminUser = "root"
-                    authorizedKeysFile = "/root/.ssh/authorized_keys"
+                    HEAT_STACK_NAME = "jenkins-${JOB_NAME}-${BUILD_NUMBER}"
                 }
+                currentBuild.description = HEAT_STACK_NAME
+            }
+        }
 
-                if (sshPubKey) {
-                    println "Deploying provided ssh key at ${authorizedKeysFile}"
-                    salt.cmdRun(saltMaster, '*', "echo '${sshPubKey}' | tee -a ${authorizedKeysFile}")
+        //
+        // Bootstrap
+        //
+
+        stage ('Download Heat templates') {
+            git.checkoutGitRepository('template', HEAT_TEMPLATE_URL, HEAT_TEMPLATE_BRANCH, HEAT_TEMPLATE_CREDENTIALS)
+        }
+
+        stage('Install OpenStack CLI') {
+            openstack.setupOpenstackVirtualenv(openstackEnv, openstackVersion)
+        }
+
+        stage('Connect to OpenStack cloud') {
+            openstackCloud = openstack.createOpenstackEnv(
+                OPENSTACK_API_URL, OPENSTACK_API_CREDENTIALS,
+                OPENSTACK_API_PROJECT, OPENSTACK_API_PROJECT_DOMAIN,
+                OPENSTACK_API_PROJECT_ID, OPENSTACK_API_USER_DOMAIN,
+                OPENSTACK_API_VERSION)
+            openstack.getKeystoneToken(openstackCloud, openstackEnv)
+            wrap([$class: 'BuildUser']) {
+                if (env.BUILD_USER_ID && !env.BUILD_USER_ID.equals("jenkins") && !HEAT_STACK_REUSE.toBoolean()) {
+                    def existingStacks = openstack.getStacksForNameContains(openstackCloud, "${env.BUILD_USER_ID}-${JOB_NAME}", openstackEnv)
+                    if(existingStacks.size() >= _MAX_PERMITTED_STACKS){
+                        HEAT_STACK_DELETE = "false"
+                        throw new Exception("You cannot create new stack, you already have ${_MAX_PERMITTED_STACKS} stacks of this type (${JOB_NAME}). \nStack names: ${existingStacks}")
+                    }
                 }
+            }
+        }
 
-                //
-                // Generate docs
-                //
+        if (HEAT_STACK_REUSE.toBoolean() == false) {
+            stage('Launch new Heat stack') {
+                envParams = [
+                    'instance_zone': HEAT_STACK_ZONE,
+                    'public_net': HEAT_STACK_PUBLIC_NET
+                ]
+                openstack.createHeatStack(openstackCloud, HEAT_STACK_NAME, HEAT_STACK_TEMPLATE, envParams, HEAT_STACK_ENVIRONMENT, openstackEnv)
+            }
+        }
+
+        stage('Connect to Salt master') {
+            def saltMasterPort
+            try {
+                saltMasterPort = SALT_MASTER_PORT
+            } catch (MissingPropertyException e) {
+                saltMasterPort = 6969
+            }
+            saltMasterHost = openstack.getHeatStackOutputParam(openstackCloud, HEAT_STACK_NAME, 'salt_master_ip', openstackEnv)
+            currentBuild.description = "${HEAT_STACK_NAME}: ${saltMasterHost}"
+            saltMasterUrl = "http://${saltMasterHost}:${saltMasterPort}"
+            saltMaster = salt.connection(saltMasterUrl, SALT_MASTER_CREDENTIALS)
+        }
+
+        //
+        // Install
+        //
+
+        stage('Install core infra') {
+            // salt.master, reclass
+            // refresh_pillar
+            // sync_all
+            // linux,openssh,salt.minion.ntp
+
+            orchestrate.installFoundationInfra(saltMaster)
+            orchestrate.validateFoundationInfra(saltMaster)
+        }
+
+        stage("Deploy GlusterFS") {
+            salt.enforceState(saltMaster, 'I@glusterfs:server', 'glusterfs.server.service', true)
+            retry(2) {
+                salt.enforceState(saltMaster, 'ci01*', 'glusterfs.server.setup', true)
+            }
+            sleep(5)
+            salt.enforceState(saltMaster, 'I@glusterfs:client', 'glusterfs.client', true)
+
+            timeout(5) {
+                println "Waiting for GlusterFS volumes to get mounted.."
+                salt.cmdRun(saltMaster, 'I@glusterfs:client', 'while true; do systemctl -a|grep "GlusterFS File System"|grep -v mounted >/dev/null || break; done')
+            }
+            print common.prettyPrint(salt.cmdRun(saltMaster, 'I@glusterfs:client', 'mount|grep fuse.glusterfs || echo "Command failed"'))
+        }
+
+        stage("Deploy GlusterFS") {
+            salt.enforceState(saltMaster, 'I@haproxy:proxy', 'haproxy,keepalived')
+        }
+
+        stage("Setup Docker Swarm") {
+            salt.enforceState(saltMaster, 'I@docker:host', 'docker.host', true)
+            salt.enforceState(saltMaster, 'I@docker:swarm:role:master', 'docker.swarm', true)
+            salt.enforceState(saltMaster, 'I@docker:swarm:role:master', 'salt', true)
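+            // refresh the salt mine on the swarm master so its exported data is current before the remaining swarm nodes are configured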
+            salt.runSaltProcessStep(saltMaster, 'I@docker:swarm:role:master', 'mine.flush')
+            salt.runSaltProcessStep(saltMaster, 'I@docker:swarm:role:master', 'mine.update')
+            salt.enforceState(saltMaster, 'I@docker:swarm', 'docker.swarm', true)
+            print common.prettyPrint(salt.cmdRun(saltMaster, 'I@docker:swarm:role:master', 'docker node ls'))
+        }
+
+        stage("Configure OSS services") {
+            salt.enforceState(saltMaster, 'I@devops_portal:config', 'devops_portal.config')
+            salt.enforceState(saltMaster, 'I@rundeck:server', 'rundeck.server')
+        }
+
+        stage("Deploy Docker services") {
+            // We need /etc/aptly-publisher.yaml to be present before
+            // services are deployed
+            // XXX: for some unknown reason, refresh_pillar must be
+            // executed here
+            salt.runSaltProcessStep(saltMaster, 'I@aptly:publisher', 'saltutil.refresh_pillar', [], null, true)
+            salt.enforceState(saltMaster, 'I@aptly:publisher', 'aptly.publisher', true)
+            retry(3) {
+                sleep(5)
+                salt.enforceState(saltMaster, 'I@docker:swarm:role:master', 'docker.client')
+            }
+            // XXX: Workaround to have `/var/lib/jenkins` on all
+            // nodes where jenkins_slave services are created.
+            salt.runSaltProcessStep(saltMaster, 'I@docker:swarm', 'cmd.run', ['mkdir -p /var/lib/jenkins'])
+        }
+
+        stage("Configure CI/CD services") {
+            salt.syncAll(saltMaster, '*')
+
+            // Aptly
+            timeout(10) {
+                println "Waiting for Aptly to come up.."
+                retry(2) {
+                    // XXX: retry to work around the magical VALUE_TRIMMED
+                    // response from the salt master and to give a slow cloud
+                    // some more time to settle down
+                    salt.cmdRun(saltMaster, 'I@aptly:server', 'while true; do curl -sf http://172.16.10.254:8084/api/version >/dev/null && break; done')
+                }
+            }
+            salt.enforceState(saltMaster, 'I@aptly:server', 'aptly', true)
+
+            // OpenLDAP
+            timeout(10) {
+                println "Waiting for OpenLDAP to come up.."
+                salt.cmdRun(saltMaster, 'I@openldap:client', 'while true; do curl -sf ldap://172.16.10.254 >/dev/null && break; done')
+            }
+            salt.enforceState(saltMaster, 'I@openldap:client', 'openldap', true)
+
+            // Gerrit
+            timeout(10) {
+                println "Waiting for Gerrit to come up.."
+                salt.cmdRun(saltMaster, 'I@gerrit:client', 'while true; do curl -sf 172.16.10.254:8080 >/dev/null && break; done')
+            }
+            salt.enforceState(saltMaster, 'I@gerrit:client', 'gerrit', true)
+
+            // Jenkins
+            timeout(10) {
+                println "Waiting for Jenkins to come up.."
+                salt.cmdRun(saltMaster, 'I@jenkins:client', 'while true; do curl -sf 172.16.10.254:8081 >/dev/null && break; done')
+            }
+            retry(2) {
+                // XXX: needs retry as the first run installs python-jenkins,
+                // thus making jenkins modules available for the second run
+                salt.enforceState(saltMaster, 'I@jenkins:client', 'jenkins', true)
+            }
+
+            // Postgres client - initialize OSS services databases
+            timeout(300){
+                println "Waiting for postgresql database to come up.."
+                salt.cmdRun(saltMaster, 'I@postgresql:client', 'while true; do if docker service logs postgresql_db | grep "ready to accept"; then break; else sleep 5; fi; done')
+            }
+            salt.enforceState(saltMaster, 'I@postgresql:client', 'postgresql.client', true, false)
+
+            // Set up the postgres database with integration between the
+            // Pushkin notification service and the Security Monkey security audit service
+            timeout(10) {
+                println "Waiting for Pushkin to come up.."
+                salt.cmdRun(saltMaster, 'I@postgresql:client', 'while true; do curl -sf 172.16.10.254:8887/apps >/dev/null && break; done')
+            }
+            salt.enforceState(saltMaster, 'I@postgresql:client', 'postgresql.client', true)
+
+            // Rundeck
+            timeout(10) {
+                println "Waiting for Rundeck to come up.."
+                salt.cmdRun(saltMaster, 'I@rundeck:client', 'while true; do curl -sf 172.16.10.254:4440 >/dev/null && break; done')
+            }
+            salt.enforceState(saltMaster, 'I@rundeck:client', 'rundeck.client', true)
+
+            // Elasticsearch
+            timeout(10) {
+                println 'Waiting for Elasticsearch to come up..'
+                salt.cmdRun(saltMaster, 'I@elasticsearch:client', 'while true; do curl -sf 172.16.10.254:9200 >/dev/null && break; done')
+            }
+            salt.enforceState(saltMaster, 'I@elasticsearch:client', 'elasticsearch.client', true)
+        }
+
+        stage("Finalize") {
+            //
+            // Deploy user's ssh key
+            //
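+            // pick the admin user: ubuntu if /home/ubuntu exists on the salt master, root otherwise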
+            def adminUser
+            def authorizedKeysFile
+            def adminUserCmdOut = salt.cmdRun(saltMaster, 'I@salt:master', "[ ! -d /home/ubuntu ] || echo 'ubuntu user exists'")
+            if (adminUserCmdOut =~ /ubuntu user exists/) {
+                adminUser = "ubuntu"
+                authorizedKeysFile = "/home/ubuntu/.ssh/authorized_keys"
+            } else {
+                adminUser = "root"
+                authorizedKeysFile = "/root/.ssh/authorized_keys"
+            }
+
+            if (sshPubKey) {
+                println "Deploying provided ssh key at ${authorizedKeysFile}"
+                salt.cmdRun(saltMaster, '*', "echo '${sshPubKey}' | tee -a ${authorizedKeysFile}")
+            }
+
+            //
+            // Generate docs
+            //
+            try {
                 try {
-                    try {
-                        // Run sphinx state to install sphinx-build needed in
-                        // upcomming orchestrate
-                        salt.enforceState(saltMaster, 'I@sphinx:server', 'sphinx')
-                    } catch (Throwable e) {
-                        true
-                    }
-                    retry(3) {
-                        // TODO: fix salt.orchestrateSystem
-                        // print salt.orchestrateSystem(saltMaster, ['expression': '*', 'type': 'compound'], 'sphinx.orch.generate_doc')
-                        def out = salt.cmdRun(saltMaster, 'I@salt:master', 'salt-run state.orchestrate sphinx.orch.generate_doc || echo "Command execution failed"')
-                        print common.prettyPrint(out)
-                        if (out =~ /Command execution failed/) {
-                            throw new Exception("Command execution failed")
-                        }
-                    }
+                    // Run the sphinx state to install sphinx-build, which is
+                    // needed by the upcoming orchestration
+                    salt.enforceState(saltMaster, 'I@sphinx:server', 'sphinx')
                 } catch (Throwable e) {
-                    // We don't want sphinx docs to ruin whole build, so possible
-                    // errors are just ignored here
                     true
                 }
-                salt.enforceState(saltMaster, 'I@nginx:server', 'nginx')
-
-                def failedSvc = salt.cmdRun(saltMaster, '*', """systemctl --failed | grep -E 'loaded[ \t]+failed' && echo 'Command execution failed' || true""")
-                print common.prettyPrint(failedSvc)
-                if (failedSvc =~ /Command execution failed/) {
-                    common.errorMsg("Some services are not running. Environment may not be fully functional!")
+                retry(3) {
+                    // TODO: fix salt.orchestrateSystem
+                    // print salt.orchestrateSystem(saltMaster, ['expression': '*', 'type': 'compound'], 'sphinx.orch.generate_doc')
+                    def out = salt.cmdRun(saltMaster, 'I@salt:master', 'salt-run state.orchestrate sphinx.orch.generate_doc || echo "Command execution failed"')
+                    print common.prettyPrint(out)
+                    if (out =~ /Command execution failed/) {
+                        throw new Exception("Command execution failed")
+                    }
                 }
-
-                common.successMsg("""
-    ============================================================
-    Your CI/CD lab has been deployed and you can enjoy it:
-    Use sshuttle to connect to your private subnet:
-
-        sshuttle -r ${adminUser}@${saltMasterHost} 172.16.10.0/24
-
-    And visit services running at 172.16.10.254 (vip address):
-
-        9600    HAProxy statistics
-        8080    Gerrit
-        8081    Jenkins
-        8089    LDAP administration
-        4440    Rundeck
-        8084    DevOps Portal
-        8091    Docker swarm visualizer
-        8090    Reclass-generated documentation
-
-    If you provided SSH_PUBLIC_KEY, you can use it to login,
-    otherwise you need to get private key connected to this
-    heat template.
-
-    DON'T FORGET TO TERMINATE YOUR STACK WHEN YOU DON'T NEED IT!
-    ============================================================""")
+            } catch (Throwable e) {
+                // We don't want sphinx docs to ruin the whole build, so possible
+                // errors are just ignored here
+                true
             }
-        } catch (Throwable e) {
-            // If there was an error or exception thrown, the build failed
-            currentBuild.result = "FAILURE"
-            throw e
-        } finally {
-            // Cleanup
-            if (HEAT_STACK_DELETE.toBoolean() == true) {
-                stage('Trigger cleanup job') {
-                    build(job: 'deploy-stack-cleanup', parameters: [
-                        [$class: 'StringParameterValue', name: 'STACK_NAME', value: HEAT_STACK_NAME],
-                        [$class: 'StringParameterValue', name: 'OPENSTACK_API_PROJECT', value: OPENSTACK_API_PROJECT],
-                    ])
-                }
+            salt.enforceState(saltMaster, 'I@nginx:server', 'nginx')
+
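+            // check every node for systemd units in the failed state and warn if any are found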
+            def failedSvc = salt.cmdRun(saltMaster, '*', """systemctl --failed | grep -E 'loaded[ \t]+failed' && echo 'Command execution failed' || true""")
+            print common.prettyPrint(failedSvc)
+            if (failedSvc =~ /Command execution failed/) {
+                common.errorMsg("Some services are not running. Environment may not be fully functional!")
+            }
+
+            common.successMsg("""
+============================================================
+Your CI/CD lab has been deployed and you can enjoy it:
+Use sshuttle to connect to your private subnet:
+
+    sshuttle -r ${adminUser}@${saltMasterHost} 172.16.10.0/24
+
+And visit services running at 172.16.10.254 (vip address):
+
+    9600    HAProxy statistics
+    8080    Gerrit
+    8081    Jenkins
+    8089    LDAP administration
+    4440    Rundeck
+    8084    DevOps Portal
+    8091    Docker swarm visualizer
+    8090    Reclass-generated documentation
+
+If you provided SSH_PUBLIC_KEY, you can use it to log in;
+otherwise you need the private key associated with this
+heat template.
+
+DON'T FORGET TO TERMINATE YOUR STACK WHEN YOU DON'T NEED IT!
+============================================================""")
+        }
+    } catch (Throwable e) {
+        // If there was an error or exception thrown, the build failed
+        currentBuild.result = "FAILURE"
+        throw e
+    } finally {
+        // Cleanup
+        if (HEAT_STACK_DELETE.toBoolean() == true) {
+            stage('Trigger cleanup job') {
+                build(job: 'deploy-stack-cleanup', parameters: [
+                    [$class: 'StringParameterValue', name: 'STACK_NAME', value: HEAT_STACK_NAME],
+                    [$class: 'StringParameterValue', name: 'OPENSTACK_API_PROJECT', value: OPENSTACK_API_PROJECT],
+                ])
             }
         }
     }
diff --git a/generate-cookiecutter-products.groovy b/generate-cookiecutter-products.groovy
index 2ec5e14..2583fb1 100644
--- a/generate-cookiecutter-products.groovy
+++ b/generate-cookiecutter-products.groovy
@@ -17,84 +17,84 @@
 saltModelTesting = new com.mirantis.mk.SaltModelTesting()
 ssh = new com.mirantis.mk.Ssh()
 
-timestamps {
-    node("python&&docker") {
-        def templateEnv = "${env.WORKSPACE}/template"
-        def modelEnv = "${env.WORKSPACE}/model"
-        def testEnv = "${env.WORKSPACE}/test"
 
-        try {
-            def templateContext = readYaml text: COOKIECUTTER_TEMPLATE_CONTEXT
-            def clusterDomain = templateContext.default_context.cluster_domain
-            def clusterName = templateContext.default_context.cluster_name
-            def cutterEnv = "${env.WORKSPACE}/cutter"
-            def jinjaEnv = "${env.WORKSPACE}/jinja"
-            def outputDestination = "${modelEnv}/classes/cluster/${clusterName}"
-            def targetBranch = "feature/${clusterName}"
-            def templateBaseDir = "${env.WORKSPACE}/template"
-            def templateDir = "${templateEnv}/template/dir"
-            def templateOutputDir = templateBaseDir
-            def user
-            wrap([$class: 'BuildUser']) {
-                user = env.BUILD_USER_ID
-            }
+node("python&&docker") {
+    def templateEnv = "${env.WORKSPACE}/template"
+    def modelEnv = "${env.WORKSPACE}/model"
+    def testEnv = "${env.WORKSPACE}/test"
 
-            currentBuild.description = clusterName
-            print("Using context:\n" + COOKIECUTTER_TEMPLATE_CONTEXT)
+    try {
+        def templateContext = readYaml text: COOKIECUTTER_TEMPLATE_CONTEXT
+        def clusterDomain = templateContext.default_context.cluster_domain
+        def clusterName = templateContext.default_context.cluster_name
+        def cutterEnv = "${env.WORKSPACE}/cutter"
+        def jinjaEnv = "${env.WORKSPACE}/jinja"
+        def outputDestination = "${modelEnv}/classes/cluster/${clusterName}"
+        def targetBranch = "feature/${clusterName}"
+        def templateBaseDir = "${env.WORKSPACE}/template"
+        def templateDir = "${templateEnv}/template/dir"
+        def templateOutputDir = templateBaseDir
+        def user
+        wrap([$class: 'BuildUser']) {
+            user = env.BUILD_USER_ID
+        }
 
-            stage ('Download Cookiecutter template') {
-                git.checkoutGitRepository(templateEnv, COOKIECUTTER_TEMPLATE_URL, COOKIECUTTER_TEMPLATE_BRANCH, COOKIECUTTER_TEMPLATE_CREDENTIALS)
-            }
+        currentBuild.description = clusterName
+        print("Using context:\n" + COOKIECUTTER_TEMPLATE_CONTEXT)
 
-            stage ('Create empty reclass model') {
-                dir(path: modelEnv) {
-                    sh "rm -rfv .git"
-                    sh "git init"
+        stage ('Download Cookiecutter template') {
+            git.checkoutGitRepository(templateEnv, COOKIECUTTER_TEMPLATE_URL, COOKIECUTTER_TEMPLATE_BRANCH, COOKIECUTTER_TEMPLATE_CREDENTIALS)
+        }
 
-                    if (SHARED_RECLASS_URL != '') {
-                        ssh.agentSh "git submodule add \"${SHARED_RECLASS_URL}\" \"classes/system\""
-                        git.commitGitChanges(modelEnv, "Added new shared reclass submodule", "${user}@localhost", "${user}")
-                    }
+        stage ('Create empty reclass model') {
+            dir(path: modelEnv) {
+                sh "rm -rfv .git"
+                sh "git init"
+
+                if (SHARED_RECLASS_URL != '') {
+                    ssh.agentSh "git submodule add \"${SHARED_RECLASS_URL}\" \"classes/system\""
+                    git.commitGitChanges(modelEnv, "Added new shared reclass submodule", "${user}@localhost", "${user}")
                 }
             }
+        }
 
-            def productList = ["infra", "cicd", "opencontrail", "kubernetes", "openstack", "stacklight"]
-            for (product in productList) {
+        def productList = ["infra", "cicd", "opencontrail", "kubernetes", "openstack", "stacklight"]
+        for (product in productList) {
 
-                // get templateOutputDir and productDir
-                if (product.startsWith("stacklight")) {
-                    templateOutputDir = "${env.WORKSPACE}/output/stacklight"
-                    try {
-                        productDir = "stacklight" + templateContext.default_context['stacklight_version']
-                    } catch (Throwable e) {
-                        productDir = "stacklight1"
-                    }
-                } else {
-                    templateOutputDir = "${env.WORKSPACE}/output/${product}"
-                    productDir = product
+            // get templateOutputDir and productDir
+            if (product.startsWith("stacklight")) {
+                templateOutputDir = "${env.WORKSPACE}/output/stacklight"
+                try {
+                    productDir = "stacklight" + templateContext.default_context['stacklight_version']
+                } catch (Throwable e) {
+                    productDir = "stacklight1"
                 }
-
-                if (product == "infra" || (templateContext.default_context["${product}_enabled"]
-                    && templateContext.default_context["${product}_enabled"].toBoolean())) {
-
-                    templateDir = "${templateEnv}/cluster_product/${productDir}"
-                    common.infoMsg("Generating product " + product + " from " + templateDir + " to " + templateOutputDir)
-
-                    sh "rm -rf ${templateOutputDir} || true"
-                    sh "mkdir -p ${templateOutputDir}"
-                    sh "mkdir -p ${outputDestination}"
-
-                    python.setupCookiecutterVirtualenv(cutterEnv)
-                    python.buildCookiecutterTemplate(templateDir, COOKIECUTTER_TEMPLATE_CONTEXT, templateOutputDir, cutterEnv, templateBaseDir)
-                    sh "mv -v ${templateOutputDir}/${clusterName}/* ${outputDestination}"
-                } else {
-                    common.warningMsg("Product " + product + " is disabled")
-                }
+            } else {
+                templateOutputDir = "${env.WORKSPACE}/output/${product}"
+                productDir = product
             }
 
-            stage('Generate new SaltMaster node') {
-                def nodeFile = "${modelEnv}/nodes/cfg01.${clusterDomain}.yml"
-                def nodeString = """classes:
+            if (product == "infra" || (templateContext.default_context["${product}_enabled"]
+                && templateContext.default_context["${product}_enabled"].toBoolean())) {
+
+                templateDir = "${templateEnv}/cluster_product/${productDir}"
+                common.infoMsg("Generating product " + product + " from " + templateDir + " to " + templateOutputDir)
+
+                sh "rm -rf ${templateOutputDir} || true"
+                sh "mkdir -p ${templateOutputDir}"
+                sh "mkdir -p ${outputDestination}"
+
+                python.setupCookiecutterVirtualenv(cutterEnv)
+                python.buildCookiecutterTemplate(templateDir, COOKIECUTTER_TEMPLATE_CONTEXT, templateOutputDir, cutterEnv, templateBaseDir)
+                sh "mv -v ${templateOutputDir}/${clusterName}/* ${outputDestination}"
+            } else {
+                common.warningMsg("Product " + product + " is disabled")
+            }
+        }
+
+        stage('Generate new SaltMaster node') {
+            def nodeFile = "${modelEnv}/nodes/cfg01.${clusterDomain}.yml"
+            def nodeString = """classes:
 - cluster.${clusterName}.infra.config
 parameters:
   _param:
@@ -105,73 +105,72 @@
       name: cfg01
       domain: ${clusterDomain}
 """
-                sh "mkdir -p ${modelEnv}/nodes/"
-                writeFile(file: nodeFile, text: nodeString)
+            sh "mkdir -p ${modelEnv}/nodes/"
+            writeFile(file: nodeFile, text: nodeString)
 
-                git.commitGitChanges(modelEnv, "Create model ${clusterName}", "${user}@localhost", "${user}")
-            }
-
-            stage("Test") {
-                if (SHARED_RECLASS_URL != "" && TEST_MODEL && TEST_MODEL.toBoolean()) {
-                    sh("cp -r ${modelEnv} ${testEnv}")
-                    saltModelTesting.setupAndTestNode("cfg01.${clusterDomain}", "", testEnv)
-                }
-            }
-
-            stage("Generate config drive") {
-                // apt package genisoimage is required for this stage
-
-                // download create-config-drive
-                def config_drive_script_url = "https://raw.githubusercontent.com/pupapaik/virt-utils/master/create-config-drive"
-                def user_data_script_url = "https://raw.githubusercontent.com/mceloud/scripts/master/master_config.sh"
-
-                sh "wget -O create-config-drive ${config_drive_script_url} && chmod +x create-config-drive"
-                sh "wget -O user_data.sh ${user_data_script_url}"
-
-
-                // load data from model
-                def smc = [:]
-                smc['SALT_MASTER_MINION_ID'] = "cfg.${clusterDomain}"
-                smc['SALT_MASTER_DEPLOY_IP'] = templateContext['default_context']['salt_master_management_address']
-                smc['DEPLOY_NETWORK_GW'] = templateContext['default_context']['deploy_network_gateway']
-                smc['DEPLOY_NETWORK_NETMASK'] = templateContext['default_context']['deploy_network_netmask']
-                smc['DNS_SERVERS'] = templateContext['default_context']['dns_server01']
-
-                for (i in common.entries(smc)) {
-                    sh "sed -i \"s,export ${i[0]}=.*,export ${i[0]}=${i[1]},\" user_data.sh"
-                }
-
-                // create config-drive
-                sh "./create-config-drive --user-data user_data.sh --hostname cfg --model ${modelEnv} cfg.${clusterDomain}-config.iso"
-
-                // save iso to artifacts
-                archiveArtifacts artifacts: "cfg.${clusterDomain}-config.iso"
-            }
-
-            stage ('Save changes reclass model') {
-
-                sh(returnStatus: true, script: "tar -zcf ${clusterName}.tar.gz -C ${modelEnv} .")
-                archiveArtifacts artifacts: "${clusterName}.tar.gz"
-
-
-                if (EMAIL_ADDRESS != null && EMAIL_ADDRESS != "") {
-                     emailext(to: EMAIL_ADDRESS,
-                              attachmentsPattern: "${clusterName}.tar.gz",
-                              body: "Mirantis Jenkins\n\nRequested reclass model ${clusterName} has been created and attached to this email.\nEnjoy!\n\nMirantis",
-                              subject: "Your Salt model ${clusterName}")
-                }
-            }
-
-        } catch (Throwable e) {
-             // If there was an error or exception thrown, the build failed
-             currentBuild.result = "FAILURE"
-             throw e
-        } finally {
-            stage ('Clean workspace directories') {
-                sh(returnStatus: true, script: "rm -rf ${templateEnv}")
-                sh(returnStatus: true, script: "rm -rf ${modelEnv}")
-            }
-             // common.sendNotification(currentBuild.result,"",["slack"])
+            git.commitGitChanges(modelEnv, "Create model ${clusterName}", "${user}@localhost", "${user}")
         }
+
+        stage("Test") {
+            if (SHARED_RECLASS_URL != "" && TEST_MODEL && TEST_MODEL.toBoolean()) {
+                sh("cp -r ${modelEnv} ${testEnv}")
+                saltModelTesting.setupAndTestNode("cfg01.${clusterDomain}", "", testEnv)
+            }
+        }
+
+        stage("Generate config drive") {
+            // apt package genisoimage is required for this stage
+
+            // download create-config-drive
+            def config_drive_script_url = "https://raw.githubusercontent.com/pupapaik/virt-utils/master/create-config-drive"
+            def user_data_script_url = "https://raw.githubusercontent.com/mceloud/scripts/master/master_config.sh"
+
+            sh "wget -O create-config-drive ${config_drive_script_url} && chmod +x create-config-drive"
+            sh "wget -O user_data.sh ${user_data_script_url}"
+
+
+            // load data from model
+            def smc = [:]
+            smc['SALT_MASTER_MINION_ID'] = "cfg.${clusterDomain}"
+            smc['SALT_MASTER_DEPLOY_IP'] = templateContext['default_context']['salt_master_management_address']
+            smc['DEPLOY_NETWORK_GW'] = templateContext['default_context']['deploy_network_gateway']
+            smc['DEPLOY_NETWORK_NETMASK'] = templateContext['default_context']['deploy_network_netmask']
+            smc['DNS_SERVERS'] = templateContext['default_context']['dns_server01']
+
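+            // rewrite the exported defaults in user_data.sh with the values collected above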
+            for (i in common.entries(smc)) {
+                sh "sed -i \"s,export ${i[0]}=.*,export ${i[0]}=${i[1]},\" user_data.sh"
+            }
+
+            // create config-drive
+            sh "./create-config-drive --user-data user_data.sh --hostname cfg --model ${modelEnv} cfg.${clusterDomain}-config.iso"
+
+            // save iso to artifacts
+            archiveArtifacts artifacts: "cfg.${clusterDomain}-config.iso"
+        }
+
+        stage ('Save changes to reclass model') {
+
+            sh(returnStatus: true, script: "tar -zcf ${clusterName}.tar.gz -C ${modelEnv} .")
+            archiveArtifacts artifacts: "${clusterName}.tar.gz"
+
+
+            if (EMAIL_ADDRESS != null && EMAIL_ADDRESS != "") {
+                 emailext(to: EMAIL_ADDRESS,
+                          attachmentsPattern: "${clusterName}.tar.gz",
+                          body: "Mirantis Jenkins\n\nRequested reclass model ${clusterName} has been created and attached to this email.\nEnjoy!\n\nMirantis",
+                          subject: "Your Salt model ${clusterName}")
+            }
+        }
+
+    } catch (Throwable e) {
+        // If there was an error or exception thrown, the build failed
+        currentBuild.result = "FAILURE"
+        throw e
+    } finally {
+        stage ('Clean workspace directories') {
+            sh(returnStatus: true, script: "rm -rf ${templateEnv}")
+            sh(returnStatus: true, script: "rm -rf ${modelEnv}")
+        }
+        // common.sendNotification(currentBuild.result,"",["slack"])
     }
 }
diff --git a/lab-pipeline.groovy b/lab-pipeline.groovy
index 9f4571c..c605421 100644
--- a/lab-pipeline.groovy
+++ b/lab-pipeline.groovy
@@ -81,311 +81,309 @@
 
 _MAX_PERMITTED_STACKS = 2
 
-timestamps {
-    node {
-        // try to get STACK_INSTALL or fallback to INSTALL if exists
-        try {
-          def temporary = STACK_INSTALL
-        } catch (MissingPropertyException e) {
-          try {
-            STACK_INSTALL = INSTALL
-            env['STACK_INSTALL'] = INSTALL
-          } catch (MissingPropertyException e2) {
-            common.errorMsg("Property STACK_INSTALL or INSTALL not found!")
-          }
-        }
-        try {
-            //
-            // Prepare machines
-            //
-            stage ('Create infrastructure') {
-
-                if (STACK_TYPE == 'heat') {
-                    // value defaults
-                    def openstackCloud
-                    def openstackVersion = OPENSTACK_API_CLIENT ? OPENSTACK_API_CLIENT : 'liberty'
-                    def openstackEnv = "${env.WORKSPACE}/venv"
-
-                    if (STACK_REUSE.toBoolean() == true && STACK_NAME == '') {
-                        error("If you want to reuse existing stack you need to provide it's name")
-                    }
-
-                    if (STACK_REUSE.toBoolean() == false) {
-                        // Don't allow to set custom heat stack name
-                        wrap([$class: 'BuildUser']) {
-                            if (env.BUILD_USER_ID) {
-                                STACK_NAME = "${env.BUILD_USER_ID}-${JOB_NAME}-${BUILD_NUMBER}"
-                            } else {
-                                STACK_NAME = "jenkins-${JOB_NAME}-${BUILD_NUMBER}"
-                            }
-                            currentBuild.description = STACK_NAME
-                        }
-                    }
-
-                    // set description
-                    currentBuild.description = "${STACK_NAME}"
-
-                    // get templates
-                    git.checkoutGitRepository('template', STACK_TEMPLATE_URL, STACK_TEMPLATE_BRANCH, STACK_TEMPLATE_CREDENTIALS)
-
-                    // create openstack env
-                    openstack.setupOpenstackVirtualenv(openstackEnv, openstackVersion)
-                    openstackCloud = openstack.createOpenstackEnv(
-                        OPENSTACK_API_URL, OPENSTACK_API_CREDENTIALS,
-                        OPENSTACK_API_PROJECT, OPENSTACK_API_PROJECT_DOMAIN,
-                        OPENSTACK_API_PROJECT_ID, OPENSTACK_API_USER_DOMAIN,
-                        OPENSTACK_API_VERSION)
-                    openstack.getKeystoneToken(openstackCloud, openstackEnv)
-                    //
-                    // Verify possibility of create stack for given user and stack type
-                    //
-                    wrap([$class: 'BuildUser']) {
-                        if (env.BUILD_USER_ID && !env.BUILD_USER_ID.equals("jenkins") && !env.BUILD_USER_ID.equals("mceloud") && !STACK_REUSE.toBoolean()) {
-                            def existingStacks = openstack.getStacksForNameContains(openstackCloud, "${env.BUILD_USER_ID}-${JOB_NAME}", openstackEnv)
-                            if(existingStacks.size() >= _MAX_PERMITTED_STACKS){
-                                STACK_DELETE = "false"
-                                throw new Exception("You cannot create new stack, you already have ${_MAX_PERMITTED_STACKS} stacks of this type (${JOB_NAME}). \nStack names: ${existingStacks}")
-                            }
-                        }
-                    }
-                    // launch stack
-                    if (STACK_REUSE.toBoolean() == false) {
-                        stage('Launch new Heat stack') {
-                            // create stack
-                            envParams = [
-                                'instance_zone': HEAT_STACK_ZONE,
-                                'public_net': HEAT_STACK_PUBLIC_NET
-                            ]
-                            try {
-                                envParams.put('cfg_reclass_branch', STACK_RECLASS_BRANCH)
-                                envParams.put('cfg_reclass_address', STACK_RECLASS_ADDRESS)
-                            } catch (MissingPropertyException e) {
-                                common.infoMsg("Property STACK_RECLASS_BRANCH or STACK_RECLASS_ADDRESS not found! Using default values from template.")
-                            }
-                            openstack.createHeatStack(openstackCloud, STACK_NAME, STACK_TEMPLATE, envParams, HEAT_STACK_ENVIRONMENT, openstackEnv)
-                        }
-                    }
-
-                    // get SALT_MASTER_URL
-                    saltMasterHost = openstack.getHeatStackOutputParam(openstackCloud, STACK_NAME, 'salt_master_ip', openstackEnv)
-                    currentBuild.description = "${STACK_NAME}: ${saltMasterHost}"
-
-                    SALT_MASTER_URL = "http://${saltMasterHost}:6969"
-                }
-            }
-
-            //
-            // Connect to Salt master
-            //
-
-            def saltMaster
-            stage('Connect to Salt API') {
-                saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
-            }
-
-            // Set up override params
-            if (env.getEnvironment().containsKey('SALT_OVERRIDES')) {
-                stage('Set Salt overrides') {
-                    salt.setSaltOverrides(saltMaster,  SALT_OVERRIDES)
-                }
-            }
-
-            //
-            // Install
-            //
-
-            if (common.checkContains('STACK_INSTALL', 'core')) {
-                stage('Install core infrastructure') {
-                    orchestrate.installFoundationInfra(saltMaster)
-
-                    if (common.checkContains('STACK_INSTALL', 'kvm')) {
-                        orchestrate.installInfraKvm(saltMaster)
-                        orchestrate.installFoundationInfra(saltMaster)
-                    }
-
-                    orchestrate.validateFoundationInfra(saltMaster)
-                }
-            }
-
-            // install k8s
-            if (common.checkContains('STACK_INSTALL', 'k8s')) {
-
-                // install infra libs for k8s
-                stage('Install Kubernetes infra') {
-                    orchestrate.installKubernetesInfra(saltMaster)
-                }
-
-                // If k8s install with contrail network manager then contrail need to be install first
-                if (common.checkContains('STACK_INSTALL', 'contrail')) {
-                    stage('Install Contrail for Kubernetes') {
-                        orchestrate.installContrailNetwork(saltMaster)
-                        orchestrate.installContrailCompute(saltMaster)
-                        orchestrate.installKubernetesContrailCompute(saltMaster)
-                    }
-                }
-
-                stage('Install Kubernetes control') {
-                    orchestrate.installKubernetesControl(saltMaster)
-                }
-            }
-
-            // install openstack
-            if (common.checkContains('STACK_INSTALL', 'openstack')) {
-                // install Infra and control, tests, ...
-
-                stage('Install OpenStack infra') {
-                    orchestrate.installOpenstackInfra(saltMaster)
-                }
-
-                stage('Install OpenStack control') {
-                    orchestrate.installOpenstackControl(saltMaster)
-                }
-
-                stage('Install OpenStack network') {
-
-                    if (common.checkContains('STACK_INSTALL', 'contrail')) {
-                        orchestrate.installContrailNetwork(saltMaster)
-                    } else if (common.checkContains('STACK_INSTALL', 'ovs')) {
-                        orchestrate.installOpenstackNetwork(saltMaster)
-                    }
-
-                    salt.runSaltProcessStep(saltMaster, 'I@keystone:server', 'cmd.run', ['. /root/keystonerc; neutron net-list'])
-                    salt.runSaltProcessStep(saltMaster, 'I@keystone:server', 'cmd.run', ['. /root/keystonerc; nova net-list'])
-                }
-
-                if (salt.testTarget(saltMaster, 'I@ironic:conductor')){
-                    stage('Install OpenStack Ironic conductor') {
-                        orchestrate.installIronicConductor(saltMaster)
-                    }
-                }
-
-
-                stage('Install OpenStack compute') {
-                    orchestrate.installOpenstackCompute(saltMaster)
-
-                    if (common.checkContains('STACK_INSTALL', 'contrail')) {
-                        orchestrate.installContrailCompute(saltMaster)
-                    }
-                }
-
-            }
-
-
-            if (common.checkContains('STACK_INSTALL', 'sl-legacy')) {
-                stage('Install StackLight v1') {
-                    orchestrate.installStacklightv1Control(saltMaster)
-                    orchestrate.installStacklightv1Client(saltMaster)
-                }
-            }
-
-            if (common.checkContains('STACK_INSTALL', 'stacklight')) {
-                stage('Install StackLight') {
-                    orchestrate.installDockerSwarm(saltMaster)
-                    orchestrate.installStacklight(saltMaster)
-                }
-            }
-
-            //
-            // Test
-            //
-            def artifacts_dir = '_artifacts/'
-
-            if (common.checkContains('STACK_TEST', 'k8s')) {
-                stage('Run k8s bootstrap tests') {
-                    def image = 'tomkukral/k8s-scripts'
-                    def output_file = image.replaceAll('/', '-') + '.output'
-
-                    // run image
-                    test.runConformanceTests(saltMaster, TEST_K8S_API_SERVER, image)
-
-                    // collect output
-                    sh "mkdir -p ${artifacts_dir}"
-                    file_content = salt.getFileContent(saltMaster, 'ctl01*', '/tmp/' + output_file)
-                    writeFile file: "${artifacts_dir}${output_file}", text: file_content
-                    sh "cat ${artifacts_dir}${output_file}"
-
-                    // collect artifacts
-                    archiveArtifacts artifacts: "${artifacts_dir}${output_file}"
-                }
-
-                stage('Run k8s conformance e2e tests') {
-                    //test.runConformanceTests(saltMaster, TEST_K8S_API_SERVER, TEST_K8S_CONFORMANCE_IMAGE)
-
-                    def image = TEST_K8S_CONFORMANCE_IMAGE
-                    def output_file = image.replaceAll('/', '-') + '.output'
-
-                    // run image
-                    test.runConformanceTests(saltMaster, TEST_K8S_API_SERVER, image)
-
-                    // collect output
-                    sh "mkdir -p ${artifacts_dir}"
-                    file_content = salt.getFileContent(saltMaster, 'ctl01*', '/tmp/' + output_file)
-                    writeFile file: "${artifacts_dir}${output_file}", text: file_content
-                    sh "cat ${artifacts_dir}${output_file}"
-
-                    // collect artifacts
-                    archiveArtifacts artifacts: "${artifacts_dir}${output_file}"
-                }
-            }
-
-            if (common.checkContains('STACK_TEST', 'openstack')) {
-                if (common.checkContains('TEST_DOCKER_INSTALL', 'true')) {
-                    test.install_docker(saltMaster, TEST_TEMPEST_TARGET)
-                }
-                stage('Run OpenStack tests') {
-                    test.runTempestTests(saltMaster, TEST_TEMPEST_IMAGE, TEST_TEMPEST_TARGET, TEST_TEMPEST_PATTERN)
-                }
-
-                stage('Copy Tempest results to config node') {
-                    test.copyTempestResults(saltMaster, TEST_TEMPEST_TARGET)
-                }
-            }
-
-            if (common.checkContains('STACK_INSTALL', 'finalize')) {
-                stage('Finalize') {
-                    salt.runSaltProcessStep(saltMaster, '*', 'state.apply', [], null, true)
-                }
-            }
-        } catch (Throwable e) {
-            currentBuild.result = 'FAILURE'
-            throw e
-        } finally {
-
-
-            //
-            // Clean
-            //
+node {
+    // try to get STACK_INSTALL or fall back to INSTALL if it exists
+    try {
+      def temporary = STACK_INSTALL
+    } catch (MissingPropertyException e) {
+      try {
+        STACK_INSTALL = INSTALL
+        env['STACK_INSTALL'] = INSTALL
+      } catch (MissingPropertyException e2) {
+        common.errorMsg("Property STACK_INSTALL or INSTALL not found!")
+      }
+    }
+    try {
+        //
+        // Prepare machines
+        //
+        stage ('Create infrastructure') {
 
             if (STACK_TYPE == 'heat') {
-                // send notification
-                common.sendNotification(currentBuild.result, STACK_NAME, ["slack"])
+                // value defaults
+                def openstackCloud
+                def openstackVersion = OPENSTACK_API_CLIENT ? OPENSTACK_API_CLIENT : 'liberty'
+                def openstackEnv = "${env.WORKSPACE}/venv"
 
-                if (STACK_DELETE.toBoolean() == true) {
-                    common.errorMsg('Heat job cleanup triggered')
-                    stage('Trigger cleanup job') {
-                        build(job: STACK_CLEANUP_JOB, parameters: [
-                            [$class: 'StringParameterValue', name: 'STACK_NAME', value: STACK_NAME],
-                            [$class: 'StringParameterValue', name: 'STACK_TYPE', value: STACK_TYPE],
-                            [$class: 'StringParameterValue', name: 'OPENSTACK_API_URL', value: OPENSTACK_API_URL],
-                            [$class: 'StringParameterValue', name: 'OPENSTACK_API_CREDENTIALS', value: OPENSTACK_API_CREDENTIALS],
-                            [$class: 'StringParameterValue', name: 'OPENSTACK_API_PROJECT', value: OPENSTACK_API_PROJECT],
-                            [$class: 'StringParameterValue', name: 'OPENSTACK_API_PROJECT_DOMAIN', value: OPENSTACK_API_PROJECT_DOMAIN],
-                            [$class: 'StringParameterValue', name: 'OPENSTACK_API_PROJECT_ID', value: OPENSTACK_API_PROJECT_ID],
-                            [$class: 'StringParameterValue', name: 'OPENSTACK_API_USER_DOMAIN', value: OPENSTACK_API_USER_DOMAIN],
-                            [$class: 'StringParameterValue', name: 'OPENSTACK_API_CLIENT', value: OPENSTACK_API_CLIENT],
-                            [$class: 'StringParameterValue', name: 'OPENSTACK_API_VERSION', value: OPENSTACK_API_VERSION]
-                        ])
+                if (STACK_REUSE.toBoolean() == true && STACK_NAME == '') {
+                    error("If you want to reuse existing stack you need to provide it's name")
+                }
+
+                if (STACK_REUSE.toBoolean() == false) {
+                    // Don't allow setting a custom Heat stack name
+                    wrap([$class: 'BuildUser']) {
+                        if (env.BUILD_USER_ID) {
+                            STACK_NAME = "${env.BUILD_USER_ID}-${JOB_NAME}-${BUILD_NUMBER}"
+                        } else {
+                            STACK_NAME = "jenkins-${JOB_NAME}-${BUILD_NUMBER}"
+                        }
+                        currentBuild.description = STACK_NAME
                     }
-                } else {
-                    if (currentBuild.result == 'FAILURE') {
-                        common.errorMsg("Deploy job FAILED and was not deleted. Please fix the problem and delete stack on you own.")
+                }
 
-                        if (SALT_MASTER_URL) {
-                            common.errorMsg("Salt master URL: ${SALT_MASTER_URL}")
+                // set description
+                currentBuild.description = "${STACK_NAME}"
+
+                // get templates
+                git.checkoutGitRepository('template', STACK_TEMPLATE_URL, STACK_TEMPLATE_BRANCH, STACK_TEMPLATE_CREDENTIALS)
+
+                // create openstack env
+                openstack.setupOpenstackVirtualenv(openstackEnv, openstackVersion)
+                openstackCloud = openstack.createOpenstackEnv(
+                    OPENSTACK_API_URL, OPENSTACK_API_CREDENTIALS,
+                    OPENSTACK_API_PROJECT, OPENSTACK_API_PROJECT_DOMAIN,
+                    OPENSTACK_API_PROJECT_ID, OPENSTACK_API_USER_DOMAIN,
+                    OPENSTACK_API_VERSION)
+                openstack.getKeystoneToken(openstackCloud, openstackEnv)
+                //
+                // Verify that the given user is allowed to create another stack of this type
+                //
+                wrap([$class: 'BuildUser']) {
+                    if (env.BUILD_USER_ID && !env.BUILD_USER_ID.equals("jenkins") && !env.BUILD_USER_ID.equals("mceloud") && !STACK_REUSE.toBoolean()) {
+                        def existingStacks = openstack.getStacksForNameContains(openstackCloud, "${env.BUILD_USER_ID}-${JOB_NAME}", openstackEnv)
+                        if(existingStacks.size() >= _MAX_PERMITTED_STACKS){
+                            STACK_DELETE = "false"
+                            throw new Exception("You cannot create new stack, you already have ${_MAX_PERMITTED_STACKS} stacks of this type (${JOB_NAME}). \nStack names: ${existingStacks}")
                         }
                     }
-
                 }
+                // launch stack
+                if (STACK_REUSE.toBoolean() == false) {
+                    stage('Launch new Heat stack') {
+                        // create stack
+                        envParams = [
+                            'instance_zone': HEAT_STACK_ZONE,
+                            'public_net': HEAT_STACK_PUBLIC_NET
+                        ]
+                        try {
+                            envParams.put('cfg_reclass_branch', STACK_RECLASS_BRANCH)
+                            envParams.put('cfg_reclass_address', STACK_RECLASS_ADDRESS)
+                        } catch (MissingPropertyException e) {
+                            common.infoMsg("Property STACK_RECLASS_BRANCH or STACK_RECLASS_ADDRESS not found! Using default values from template.")
+                        }
+                        openstack.createHeatStack(openstackCloud, STACK_NAME, STACK_TEMPLATE, envParams, HEAT_STACK_ENVIRONMENT, openstackEnv)
+                    }
+                }
+
+                // get SALT_MASTER_URL
+                saltMasterHost = openstack.getHeatStackOutputParam(openstackCloud, STACK_NAME, 'salt_master_ip', openstackEnv)
+                currentBuild.description = "${STACK_NAME}: ${saltMasterHost}"
+
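+                // build the salt-api URL from the Heat stack output; the pipeline assumes salt-api listens on port 6969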
+                SALT_MASTER_URL = "http://${saltMasterHost}:6969"
+            }
+        }
+
+        //
+        // Connect to Salt master
+        //
+
+        def saltMaster
+        stage('Connect to Salt API') {
+            saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+        }
+
+        // Set up override params
+        if (env.getEnvironment().containsKey('SALT_OVERRIDES')) {
+            stage('Set Salt overrides') {
+                salt.setSaltOverrides(saltMaster, SALT_OVERRIDES)
+            }
+        }
+
+        //
+        // Install
+        //
+
+        if (common.checkContains('STACK_INSTALL', 'core')) {
+            stage('Install core infrastructure') {
+                orchestrate.installFoundationInfra(saltMaster)
+
+                if (common.checkContains('STACK_INSTALL', 'kvm')) {
+                    orchestrate.installInfraKvm(saltMaster)
+                    orchestrate.installFoundationInfra(saltMaster)
+                }
+
+                orchestrate.validateFoundationInfra(saltMaster)
+            }
+        }
+
+        // install k8s
+        if (common.checkContains('STACK_INSTALL', 'k8s')) {
+
+            // install infra libs for k8s
+            stage('Install Kubernetes infra') {
+                orchestrate.installKubernetesInfra(saltMaster)
+            }
+
+            // If k8s is installed with the Contrail network manager, Contrail needs to be installed first
+            if (common.checkContains('STACK_INSTALL', 'contrail')) {
+                stage('Install Contrail for Kubernetes') {
+                    orchestrate.installContrailNetwork(saltMaster)
+                    orchestrate.installContrailCompute(saltMaster)
+                    orchestrate.installKubernetesContrailCompute(saltMaster)
+                }
+            }
+
+            stage('Install Kubernetes control') {
+                orchestrate.installKubernetesControl(saltMaster)
+            }
+        }
+
+        // install openstack
+        if (common.checkContains('STACK_INSTALL', 'openstack')) {
+            // install Infra and control, tests, ...
+
+            stage('Install OpenStack infra') {
+                orchestrate.installOpenstackInfra(saltMaster)
+            }
+
+            stage('Install OpenStack control') {
+                orchestrate.installOpenstackControl(saltMaster)
+            }
+
+            stage('Install OpenStack network') {
+
+                if (common.checkContains('STACK_INSTALL', 'contrail')) {
+                    orchestrate.installContrailNetwork(saltMaster)
+                } else if (common.checkContains('STACK_INSTALL', 'ovs')) {
+                    orchestrate.installOpenstackNetwork(saltMaster)
+                }
+
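+                // basic smoke check: list networks via neutron and nova from the Keystone server node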
+                salt.runSaltProcessStep(saltMaster, 'I@keystone:server', 'cmd.run', ['. /root/keystonerc; neutron net-list'])
+                salt.runSaltProcessStep(saltMaster, 'I@keystone:server', 'cmd.run', ['. /root/keystonerc; nova net-list'])
+            }
+
+            if (salt.testTarget(saltMaster, 'I@ironic:conductor')) {
+                stage('Install OpenStack Ironic conductor') {
+                    orchestrate.installIronicConductor(saltMaster)
+                }
+            }
+
+
+            stage('Install OpenStack compute') {
+                orchestrate.installOpenstackCompute(saltMaster)
+
+                if (common.checkContains('STACK_INSTALL', 'contrail')) {
+                    orchestrate.installContrailCompute(saltMaster)
+                }
+            }
+
+        }
+
+
+        if (common.checkContains('STACK_INSTALL', 'sl-legacy')) {
+            stage('Install StackLight v1') {
+                orchestrate.installStacklightv1Control(saltMaster)
+                orchestrate.installStacklightv1Client(saltMaster)
+            }
+        }
+
+        if (common.checkContains('STACK_INSTALL', 'stacklight')) {
+            stage('Install StackLight') {
+                orchestrate.installDockerSwarm(saltMaster)
+                orchestrate.installStacklight(saltMaster)
+            }
+        }
+
+        //
+        // Test
+        //
+        def artifacts_dir = '_artifacts/'
+
+        if (common.checkContains('STACK_TEST', 'k8s')) {
+            stage('Run k8s bootstrap tests') {
+                def image = 'tomkukral/k8s-scripts'
+                def output_file = image.replaceAll('/', '-') + '.output'
+
+                // run image
+                test.runConformanceTests(saltMaster, TEST_K8S_API_SERVER, image)
+
+                // collect output
+                sh "mkdir -p ${artifacts_dir}"
+                file_content = salt.getFileContent(saltMaster, 'ctl01*', '/tmp/' + output_file)
+                writeFile file: "${artifacts_dir}${output_file}", text: file_content
+                sh "cat ${artifacts_dir}${output_file}"
+
+                // collect artifacts
+                archiveArtifacts artifacts: "${artifacts_dir}${output_file}"
+            }
+
+            stage('Run k8s conformance e2e tests') {
+                //test.runConformanceTests(saltMaster, TEST_K8S_API_SERVER, TEST_K8S_CONFORMANCE_IMAGE)
+
+                def image = TEST_K8S_CONFORMANCE_IMAGE
+                def output_file = image.replaceAll('/', '-') + '.output'
+
+                // run image
+                test.runConformanceTests(saltMaster, TEST_K8S_API_SERVER, image)
+
+                // collect output
+                sh "mkdir -p ${artifacts_dir}"
+                file_content = salt.getFileContent(saltMaster, 'ctl01*', '/tmp/' + output_file)
+                writeFile file: "${artifacts_dir}${output_file}", text: file_content
+                sh "cat ${artifacts_dir}${output_file}"
+
+                // collect artifacts
+                archiveArtifacts artifacts: "${artifacts_dir}${output_file}"
+            }
+        }
+
+        if (common.checkContains('STACK_TEST', 'openstack')) {
+            if (common.checkContains('TEST_DOCKER_INSTALL', 'true')) {
+                test.install_docker(saltMaster, TEST_TEMPEST_TARGET)
+            }
+            stage('Run OpenStack tests') {
+                test.runTempestTests(saltMaster, TEST_TEMPEST_IMAGE, TEST_TEMPEST_TARGET, TEST_TEMPEST_PATTERN)
+            }
+
+            stage('Copy Tempest results to config node') {
+                test.copyTempestResults(saltMaster, TEST_TEMPEST_TARGET)
+            }
+        }
+
+        if (common.checkContains('STACK_INSTALL', 'finalize')) {
+            stage('Finalize') {
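+                // state.apply with no state name runs the full highstate on every minion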
+                salt.runSaltProcessStep(saltMaster, '*', 'state.apply', [], null, true)
+            }
+        }
+    } catch (Throwable e) {
+        currentBuild.result = 'FAILURE'
+        throw e
+    } finally {
+
+
+        //
+        // Clean
+        //
+
+        if (STACK_TYPE == 'heat') {
+            // send notification
+            common.sendNotification(currentBuild.result, STACK_NAME, ["slack"])
+
+            if (STACK_DELETE.toBoolean() == true) {
+                common.errorMsg('Heat job cleanup triggered')
+                stage('Trigger cleanup job') {
+                    build(job: STACK_CLEANUP_JOB, parameters: [
+                        [$class: 'StringParameterValue', name: 'STACK_NAME', value: STACK_NAME],
+                        [$class: 'StringParameterValue', name: 'STACK_TYPE', value: STACK_TYPE],
+                        [$class: 'StringParameterValue', name: 'OPENSTACK_API_URL', value: OPENSTACK_API_URL],
+                        [$class: 'StringParameterValue', name: 'OPENSTACK_API_CREDENTIALS', value: OPENSTACK_API_CREDENTIALS],
+                        [$class: 'StringParameterValue', name: 'OPENSTACK_API_PROJECT', value: OPENSTACK_API_PROJECT],
+                        [$class: 'StringParameterValue', name: 'OPENSTACK_API_PROJECT_DOMAIN', value: OPENSTACK_API_PROJECT_DOMAIN],
+                        [$class: 'StringParameterValue', name: 'OPENSTACK_API_PROJECT_ID', value: OPENSTACK_API_PROJECT_ID],
+                        [$class: 'StringParameterValue', name: 'OPENSTACK_API_USER_DOMAIN', value: OPENSTACK_API_USER_DOMAIN],
+                        [$class: 'StringParameterValue', name: 'OPENSTACK_API_CLIENT', value: OPENSTACK_API_CLIENT],
+                        [$class: 'StringParameterValue', name: 'OPENSTACK_API_VERSION', value: OPENSTACK_API_VERSION]
+                    ])
+                }
+            } else {
+                if (currentBuild.result == 'FAILURE') {
+                    common.errorMsg("Deploy job FAILED and was not deleted. Please fix the problem and delete stack on you own.")
+
+                    if (SALT_MASTER_URL) {
+                        common.errorMsg("Salt master URL: ${SALT_MASTER_URL}")
+                    }
+                }
+
             }
         }
     }
diff --git a/opencontrail-upgrade.groovy b/opencontrail-upgrade.groovy
index a01affe..6bcb788 100644
--- a/opencontrail-upgrade.groovy
+++ b/opencontrail-upgrade.groovy
@@ -40,9 +40,9 @@
     salt.printSaltCommandResult(out)
     // wait until $check is in correct state
     if ( check == "nodetool status" ) {
-        salt.commandStatus(saltMaster, target, check, 'Status=Up')  
+        salt.commandStatus(saltMaster, target, check, 'Status=Up')
     } else if ( check == "contrail-status" ) {
-        salt.commandStatus(saltMaster, target, "${check} | grep -v == | grep -v \'disabled on boot\' | grep -v nodemgr | grep -v active | grep -v backup", null, false)  
+        salt.commandStatus(saltMaster, target, "${check} | grep -v == | grep -v \'disabled on boot\' | grep -v nodemgr | grep -v active | grep -v backup", null, false)
     }
 
     //out = salt.runSaltCommand(saltMaster, 'local', ['expression': target, 'type': 'compound'], command, null, check, null)
@@ -50,443 +50,441 @@
     //input message: "Please check the output of \'${check}\' and continue if it is correct."
 }
 
-timestamps {
-    node() {
+node() {
 
-        stage('Connect to Salt API') {
-            saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
-        }
+    stage('Connect to Salt API') {
+        saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+    }
 
-        if (STAGE_CONTROLLERS_UPGRADE.toBoolean() == true && !errorOccured) {
+    if (STAGE_CONTROLLERS_UPGRADE.toBoolean() == true && !errorOccured) {
 
-            stage('Opencontrail controllers upgrade') {
+        stage('Opencontrail controllers upgrade') {
 
-                oc_component_repo = salt.runSaltProcessStep(saltMaster, 'I@opencontrail:control and *01*', 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
+            oc_component_repo = salt.runSaltProcessStep(saltMaster, 'I@opencontrail:control and *01*', 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
 
-                oc_component_repo = oc_component_repo['return'][0].values()[0]
-
-                try {
-                    salt.runSaltProcessStep(saltMaster, 'I@opencontrail:control', 'cmd.shell', ["rm ${oc_component_repo}"], null, true)
-                    salt.runSaltProcessStep(saltMaster, 'I@opencontrail:control', 'saltutil.refresh_pillar', [], null, true)
-                    salt.enforceState(saltMaster, 'I@opencontrail:control', 'linux.system.repo')
-                } catch (Exception er) {
-                    errorOccured = true
-                    common.errorMsg("Opencontrail component on I@opencontrail:control probably failed to be replaced. Please check it in ${oc_component_repo} before continuing.")
-                    return
-                }
-
-                try {
-                    salt.cmdRun(saltMaster, 'I@opencontrail:control', "su root -c '/usr/local/bin/zookeeper-backup-runner.sh'")
-                } catch (Exception er) {
-                    common.errorMsg('Zookeeper failed to backup. Please fix it before continuing.')
-                    return
-                }
-
-                try {
-                    salt.cmdRun(saltMaster, 'I@cassandra:backup:client', "su root -c '/usr/local/bin/cassandra-backup-runner-call.sh'")
-                } catch (Exception er) {
-                    common.errorMsg('Cassandra failed to backup. Please fix it before continuing.')
-                    return
-                }
-
-                args = 'apt install contrail-database -y;'
-                check = 'nodetool status'
-
-                // ntw01
-                runCommonCommands('I@opencontrail:control and *01*', command, args, check, salt, saltMaster, common)
-                // ntw02
-                runCommonCommands('I@opencontrail:control and *02*', command, args, check, salt, saltMaster, common)
-                // ntw03
-                runCommonCommands('I@opencontrail:control and *03*', command, args, check, salt, saltMaster, common)
-
-                args = "apt install -o Dpkg::Options::=\"--force-confold\" ${CONTROL_PKGS} -y --force-yes;"
-                check = 'contrail-status'
-
-                // ntw01
-                runCommonCommands('I@opencontrail:control and *01*', command, args, check, salt, saltMaster, common)
-                // ntw02
-                runCommonCommands('I@opencontrail:control and *02*', command, args, check, salt, saltMaster, common)
-                // ntw03
-                runCommonCommands('I@opencontrail:control and *03*', command, args, check, salt, saltMaster, common)
-
-                try {
-                    salt.enforceState(saltMaster, 'I@opencontrail:control', 'opencontrail')
-                } catch (Exception er) {
-                    common.errorMsg('Opencontrail state was executed on I@opencontrail:control and failed please fix it manually.')
-                }
-
-                out = salt.runSaltCommand(saltMaster, 'local', ['expression': 'I@opencontrail:control', 'type': 'compound'], command, null, check, null)
-                salt.printSaltCommandResult(out)
-
-                common.warningMsg('Please check \'show bgp summary\' on your bgp router if all bgp peers are in healthy state.')
-            }
-        }
-
-        if (STAGE_ANALYTICS_UPGRADE.toBoolean() == true && !errorOccured) {
-
-            stage('Ask for manual confirmation') {
-                input message: "Do you want to continue with the Opencontrail analytic nodes upgrade?"
-            }
-
-            stage('Opencontrail analytics upgrade') {
-
-                oc_component_repo = salt.runSaltProcessStep(saltMaster, 'I@opencontrail:collector and *01*', 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
-
-                oc_component_repo = oc_component_repo['return'][0].values()[0]
-
-                try {
-                    salt.runSaltProcessStep(saltMaster, 'I@opencontrail:collector', 'cmd.shell', ["rm ${oc_component_repo}"], null, true)
-                    salt.runSaltProcessStep(saltMaster, 'I@opencontrail:collector', 'saltutil.refresh_pillar', [], null, true)
-                    salt.enforceState(saltMaster, 'I@opencontrail:collector', 'linux.system.repo')
-                } catch (Exception er) {
-                    errorOccured = true
-                    common.errorMsg("Opencontrail component on I@opencontrail:collector probably failed to be replaced. Please check it in ${oc_component_repo} before continuing.")
-                    return
-                }
-
-                args = 'apt install contrail-database -y;'
-                check = 'nodetool status'
-
-                // nal01
-                runCommonCommands('I@opencontrail:collector and *01*', command, args, check, salt, saltMaster, common)
-                // nal02
-                runCommonCommands('I@opencontrail:collector and *02*', command, args, check, salt, saltMaster, common)
-                // nal03
-                runCommonCommands('I@opencontrail:collector and *03*', command, args, check, salt, saltMaster, common)
-
-                args = "apt install -o Dpkg::Options::=\"--force-confold\" ${ANALYTIC_PKGS} -y --force-yes;"
-                check = 'contrail-status'
-
-                // nal01
-                runCommonCommands('I@opencontrail:collector and *01*', command, args, check, salt, saltMaster, common)
-                // nal02
-                runCommonCommands('I@opencontrail:collector and *02*', command, args, check, salt, saltMaster, common)
-                // nal03
-                runCommonCommands('I@opencontrail:collector and *03*', command, args, check, salt, saltMaster, common)
-
-                try {
-                    salt.enforceState(saltMaster, 'I@opencontrail:collector', 'opencontrail')
-                } catch (Exception er) {
-                    common.errorMsg('Opencontrail state was executed on I@opencontrail:collector and failed please fix it manually.')
-                }
-
-                out = salt.runSaltCommand(saltMaster, 'local', ['expression': 'I@opencontrail:collector', 'type': 'compound'], command, null, check, null)
-                salt.printSaltCommandResult(out)
-            }
-        }
-
-        if (STAGE_COMPUTES_UPGRADE.toBoolean() == true && !errorOccured) {
+            oc_component_repo = oc_component_repo['return'][0].values()[0]
 
             try {
-
-                stage('List targeted compute servers') {
-                    minions = salt.getMinions(saltMaster, COMPUTE_TARGET_SERVERS)
-
-                    if (minions.isEmpty()) {
-                        throw new Exception("No minion was targeted")
-                    }
-
-                    targetLiveSubset = minions.subList(0, Integer.valueOf(COMPUTE_TARGET_SUBSET_LIVE)).join(' or ')
-                    targetLiveSubsetProbe = minions.subList(0, probe).join(' or ')
-
-                    targetLiveAll = minions.join(' or ')
-                    common.infoMsg("Found nodes: ${targetLiveAll}")
-                    common.infoMsg("Selected sample nodes: ${targetLiveSubset}")
-                }
-
-                stage('Confirm upgrade on sample nodes') {
-                    input message: "Do you want to continue with the Opencontrail compute upgrade on the following sample nodes? ${targetLiveSubset}"
-                }
-
-                stage("Opencontrail compute upgrade on sample nodes") {
-
-                    oc_component_repo = salt.runSaltProcessStep(saltMaster, targetLiveSubset, 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
-                    oc_component_repo = oc_component_repo['return'][0].values()[0]
-
-                    try {
-                        salt.runSaltProcessStep(saltMaster, targetLiveSubset, 'cmd.shell', ["rm ${oc_component_repo}"], null, true)
-                        salt.runSaltProcessStep(saltMaster, targetLiveSubset, 'saltutil.refresh_pillar', [], null, true)
-                        salt.enforceState(saltMaster, targetLiveSubset, 'linux.system.repo')
-                    } catch (Exception er) {
-                        errorOccured = true
-                        common.errorMsg("Opencontrail component on ${targetLiveSubset} probably failed to be replaced. Please check it in ${oc_component_repo} before continuing.")
-                        return
-                    }
-
-                    args = "export DEBIAN_FRONTEND=noninteractive; apt install -o Dpkg::Options::=\"--force-confold\" -o Dpkg::Options::=\"--force-confdef\" ${CMP_PKGS}  -y;"
-                    check = 'contrail-status'
-
-                    out = salt.runSaltCommand(saltMaster, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, args, null)
-                    salt.printSaltCommandResult(out)
-
-                    try {
-                        salt.enforceState(saltMaster, targetLiveSubset, 'opencontrail')
-                    } catch (Exception er) {
-                        common.errorMsg("Opencontrail state was executed on ${targetLiveSubset} and failed please fix it manually.")
-                    }
-
-                    salt.runSaltProcessStep(saltMaster, targetLiveSubset, 'cmd.shell', ["${KERNEL_MODULE_RELOAD}"], null, true)
-
-                    //sleep(10)
-                    salt.commandStatus(saltMaster, targetLiveSubset, "${check} | grep -v == | grep -v active", null, false)
-
-                    out = salt.runSaltCommand(saltMaster, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, check, null)
-                    salt.printSaltCommandResult(out)
-                }
-
-                stage('Confirm upgrade on all targeted nodes') {
-                    input message: "Do you want to continue with the Opencontrail compute upgrade on all the targeted nodes? ${targetLiveAll} nodes?"
-                }
-                stage("Opencontrail compute upgrade on all targeted nodes") {
-
-                    oc_component_repo = salt.runSaltProcessStep(saltMaster, targetLiveAll, 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
-                    oc_component_repo = oc_component_repo['return'][0].values()[0]
-
-                    try {
-                        salt.runSaltProcessStep(saltMaster, targetLiveAll, 'cmd.shell', ["rm ${oc_component_repo}"], null, true)
-                        salt.runSaltProcessStep(saltMaster, targetLiveAll, 'saltutil.refresh_pillar', [], null, true)
-                        salt.enforceState(saltMaster, targetLiveAll, 'linux.system.repo')
-                    } catch (Exception er) {
-                        common.errorMsg("Opencontrail component on ${targetLiveAll} probably failed to be replaced. Please check it in ${oc_component_repo} before continuing.")
-                        return
-                    }
-
-                    args = "export DEBIAN_FRONTEND=noninteractive; apt install -o Dpkg::Options::=\"--force-confold\" -o Dpkg::Options::=\"--force-confdef\" ${CMP_PKGS}  -y;"
-                    check = 'contrail-status'
-
-                    out = salt.runSaltCommand(saltMaster, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, args, null)
-                    salt.printSaltCommandResult(out)
-
-                    try {
-                        salt.enforceState(saltMaster, targetLiveAll, 'opencontrail')
-                    } catch (Exception er) {
-                        common.errorMsg("Opencontrail state was executed on ${targetLiveAll} and failed please fix it manually.")
-                    }
-
-                    salt.runSaltProcessStep(saltMaster, targetLiveAll, 'cmd.shell', ["${KERNEL_MODULE_RELOAD}"], null, true)
-                    //sleep(10)
-                    salt.commandStatus(saltMaster, targetLiveAll, "${check} | grep -v == | grep -v active", null, false)
-
-                    out = salt.runSaltCommand(saltMaster, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, check, null)
-                    salt.printSaltCommandResult(out)
-                }
-
-            } catch (Throwable e) {
-                // If there was an error or exception thrown, the build failed
-                currentBuild.result = "FAILURE"
-                throw e
+                salt.runSaltProcessStep(saltMaster, 'I@opencontrail:control', 'cmd.shell', ["rm ${oc_component_repo}"], null, true)
+                salt.runSaltProcessStep(saltMaster, 'I@opencontrail:control', 'saltutil.refresh_pillar', [], null, true)
+                salt.enforceState(saltMaster, 'I@opencontrail:control', 'linux.system.repo')
+            } catch (Exception er) {
+                errorOccured = true
+                common.errorMsg("Opencontrail component on I@opencontrail:control probably failed to be replaced. Please check it in ${oc_component_repo} before continuing.")
+                return
             }
-        }
-
-
-        if (STAGE_CONTROLLERS_ROLLBACK.toBoolean() == true && !errorOccured) {
-
-            stage('Ask for manual confirmation') {
-                input message: "Do you want to continue with the Opencontrail control nodes rollback?"
-            }
-
-           stage('Opencontrail controllers rollback') {
-
-                oc_component_repo = salt.runSaltProcessStep(saltMaster, 'I@opencontrail:control and *01*', 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
-                oc_component_repo = oc_component_repo['return'][0].values()[0]
-
-                try {
-                    salt.runSaltProcessStep(saltMaster, 'I@opencontrail:control', 'cmd.shell', ["rm ${oc_component_repo}"], null, true)
-                    salt.runSaltProcessStep(saltMaster, 'I@opencontrail:control', 'saltutil.refresh_pillar', [], null, true)
-                    salt.enforceState(saltMaster, 'I@opencontrail:control', 'linux.system.repo')
-                } catch (Exception er) {
-                    errorOccured = true
-                    common.errorMsg("Opencontrail component on I@opencontrail:control probably failed to be replaced. Please check it in ${oc_component_repo} before continuing.")
-                    return
-                }
-
-                args = 'apt install contrail-database -y --force-yes;'
-                check = 'nodetool status'
-
-                // ntw01
-                runCommonCommands('I@opencontrail:control and *01*', command, args, check, salt, saltMaster, common)
-                // ntw02
-                runCommonCommands('I@opencontrail:control and *02*', command, args, check, salt, saltMaster, common)
-                // ntw03
-                runCommonCommands('I@opencontrail:control and *03*', command, args, check, salt, saltMaster, common)
-
-                args = "apt install -o Dpkg::Options::=\"--force-confold\" ${CONTROL_PKGS} -y --force-yes;"
-                check = 'contrail-status'
-
-                // ntw01
-                runCommonCommands('I@opencontrail:control and *01*', command, args, check, salt, saltMaster, common)
-                // ntw02
-                runCommonCommands('I@opencontrail:control and *02*', command, args, check, salt, saltMaster, common)
-                // ntw03
-                runCommonCommands('I@opencontrail:control and *03*', command, args, check, salt, saltMaster, common)
-
-                try {
-                    salt.enforceState(saltMaster, 'I@opencontrail:control', 'opencontrail')
-                } catch (Exception er) {
-                    common.errorMsg('Opencontrail state was executed on I@opencontrail:control and failed please fix it manually.')
-                }
-
-                out = salt.runSaltCommand(saltMaster, 'local', ['expression': 'I@opencontrail:control', 'type': 'compound'], command, null, check, null)
-                salt.printSaltCommandResult(out)
-
-                common.warningMsg('Please check \'show bgp summary\' on your bgp router if all bgp peers are in healthy state.')
-            }
-        }
-
-        if (STAGE_ANALYTICS_ROLLBACK.toBoolean() == true && !errorOccured) {
-
-            stage('Ask for manual confirmation') {
-                input message: "Do you want to continue with the Opencontrail analytic nodes rollback?"
-            }
-
-            stage('Opencontrail analytics rollback') {
-
-                oc_component_repo = salt.runSaltProcessStep(saltMaster, 'I@opencontrail:collector and *01*', 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
-                oc_component_repo = oc_component_repo['return'][0].values()[0]
-
-                try {
-                    salt.runSaltProcessStep(saltMaster, 'I@opencontrail:collector', 'cmd.shell', ["rm ${oc_component_repo}"], null, true)
-                    salt.runSaltProcessStep(saltMaster, 'I@opencontrail:collector', 'saltutil.refresh_pillar', [], null, true)
-                    salt.enforceState(saltMaster, 'I@opencontrail:collector', 'linux.system.repo')
-                } catch (Exception er) {
-                    errorOccured = true
-                    common.errorMsg("Opencontrail component on I@opencontrail:collector probably failed to be replaced. Please check it in ${oc_component_repo} before continuing.")
-                    return
-                }
-
-                args = 'apt install contrail-database -y --force-yes;'
-                check = 'nodetool status'
-
-                // nal01
-                runCommonCommands('I@opencontrail:collector and *01*', command, args, check, salt, saltMaster, common)
-                // nal02
-                runCommonCommands('I@opencontrail:collector and *02*', command, args, check, salt, saltMaster, common)
-                // nal03
-                runCommonCommands('I@opencontrail:collector and *03*', command, args, check, salt, saltMaster, common)
-
-                args = "apt install -o Dpkg::Options::=\"--force-confold\" ${ANALYTIC_PKGS} -y --force-yes;"
-                check = 'contrail-status'
-
-                // nal01
-                runCommonCommands('I@opencontrail:collector and *01*', command, args, check, salt, saltMaster, common)
-                // nal02
-                runCommonCommands('I@opencontrail:collector and *02*', command, args, check, salt, saltMaster, common)
-                // nal03
-                runCommonCommands('I@opencontrail:collector and *03*', command, args, check, salt, saltMaster, common)
-
-                try {
-                    salt.enforceState(saltMaster, 'I@opencontrail:collector', 'opencontrail')
-                } catch (Exception er) {
-                    common.errorMsg('Opencontrail state was executed on I@opencontrail:collector and failed please fix it manually.')
-                }
-
-                out = salt.runSaltCommand(saltMaster, 'local', ['expression': 'I@opencontrail:collector', 'type': 'compound'], command, null, check, null)
-                salt.printSaltCommandResult(out)
-            }
-        }
-
-        if (STAGE_COMPUTES_ROLLBACK.toBoolean() == true && !errorOccured) {
 
             try {
-
-                stage('List targeted compute servers') {
-                    minions = salt.getMinions(saltMaster, COMPUTE_TARGET_SERVERS)
-
-                    if (minions.isEmpty()) {
-                        throw new Exception("No minion was targeted")
-                    }
-
-                    targetLiveSubset = minions.subList(0, Integer.valueOf(COMPUTE_TARGET_SUBSET_LIVE)).join(' or ')
-                    targetLiveSubsetProbe = minions.subList(0, probe).join(' or ')
-
-                    targetLiveAll = minions.join(' or ')
-                    common.infoMsg("Found nodes: ${targetLiveAll}")
-                    common.infoMsg("Selected sample nodes: ${targetLiveSubset}")
-                }
-
-                stage('Confirm rollback on sample nodes') {
-                    input message: "Do you want to continue with the Opencontrail compute rollback on the following sample nodes? ${targetLiveSubset}"
-                }
-
-                stage("Opencontrail compute rollback on sample nodes") {
-
-                    oc_component_repo = salt.runSaltProcessStep(saltMaster, targetLiveSubset, 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
-                    oc_component_repo = oc_component_repo['return'][0].values()[0]
-
-                    try {
-                        salt.runSaltProcessStep(saltMaster, targetLiveSubset, 'cmd.shell', ["rm ${oc_component_repo}"], null, true)
-                        salt.runSaltProcessStep(saltMaster, targetLiveSubset, 'saltutil.refresh_pillar', [], null, true)
-                        salt.enforceState(saltMaster, targetLiveSubset, 'linux.system.repo')
-                    } catch (Exception er) {
-                        errorOccured = true
-                        common.errorMsg("Opencontrail component on ${targetLiveSubset} probably failed to be replaced. Please check it in ${oc_component_repo} before continuing.")
-                        return
-                    }
-
-                    args = "export DEBIAN_FRONTEND=noninteractive; apt install --allow-downgrades -o Dpkg::Options::=\"--force-confold\" -o Dpkg::Options::=\"--force-confdef\" ${CMP_PKGS}  -y;"
-                    check = 'contrail-status'
-
-                    out = salt.runSaltCommand(saltMaster, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, args, null)
-                    salt.printSaltCommandResult(out)
-
-                    try {
-                        salt.enforceState(saltMaster, targetLiveSubset, 'opencontrail')
-                    } catch (Exception er) {
-                        common.errorMsg("Opencontrail state was executed on ${targetLiveSubset} and failed please fix it manually.")
-                    }
-
-                    salt.runSaltProcessStep(saltMaster, targetLiveSubset, 'cmd.shell', ["${KERNEL_MODULE_RELOAD}"], null, true)
-                    //sleep(10)
-                    salt.commandStatus(saltMaster, targetLiveSubset, "${check} | grep -v == | grep -v active", null, false)
-
-                    out = salt.runSaltCommand(saltMaster, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, check, null)
-                    salt.printSaltCommandResult(out)
-                }
-
-                stage('Confirm rollback on all targeted nodes') {
-                    input message: "Do you want to continue with the Opencontrail compute upgrade on all the targeted nodes? ${targetLiveAll} nodes?"
-                }
-
-                stage("Opencontrail compute upgrade on all targeted nodes") {
-
-                    oc_component_repo = salt.runSaltProcessStep(saltMaster, targetLiveAll, 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
-                    oc_component_repo = oc_component_repo['return'][0].values()[0]
-
-                    try {
-                        salt.runSaltProcessStep(saltMaster, targetLiveAll, 'cmd.shell', ["rm ${oc_component_repo}"], null, true)
-                        salt.runSaltProcessStep(saltMaster, targetLiveAll, 'saltutil.refresh_pillar', [], null, true)
-                        salt.enforceState(saltMaster, targetLiveAll, 'linux.system.repo')
-                    } catch (Exception er) {
-                        common.errorMsg("Opencontrail component on ${targetLiveAll} probably failed to be replaced. Please check it in ${oc_component_repo} before continuing.")
-                        return
-                    }
-
-                    args = "export DEBIAN_FRONTEND=noninteractive; apt install --allow-downgrades -o Dpkg::Options::=\"--force-confold\" -o Dpkg::Options::=\"--force-confdef\" ${CMP_PKGS} -y;"
-                    check = 'contrail-status'
-
-                    out = salt.runSaltCommand(saltMaster, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, args, null)
-                    salt.printSaltCommandResult(out)
-
-                    try {
-                        salt.enforceState(saltMaster, targetLiveAll, 'opencontrail')
-                    } catch (Exception er) {
-                        common.errorMsg("Opencontrail state was executed on ${targetLiveAll} and failed please fix it manually.")
-                    }
-
-                    salt.runSaltProcessStep(saltMaster, targetLiveAll, 'cmd.shell', ["${KERNEL_MODULE_RELOAD}"], null, true)
-
-                    //sleep(10)
-                    salt.commandStatus(saltMaster, targetLiveAll, "${check} | grep -v == | grep -v active", null, false)
-
-                    out = salt.runSaltCommand(saltMaster, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, check, null)
-                    salt.printSaltCommandResult(out)
-                }
-
-            } catch (Throwable e) {
-                // If there was an error or exception thrown, the build failed
-                currentBuild.result = "FAILURE"
-                throw e
+                salt.cmdRun(saltMaster, 'I@opencontrail:control', "su root -c '/usr/local/bin/zookeeper-backup-runner.sh'")
+            } catch (Exception er) {
+                common.errorMsg('Zookeeper failed to backup. Please fix it before continuing.')
+                return
             }
+
+            try {
+                salt.cmdRun(saltMaster, 'I@cassandra:backup:client', "su root -c '/usr/local/bin/cassandra-backup-runner-call.sh'")
+            } catch (Exception er) {
+                common.errorMsg('Cassandra failed to backup. Please fix it before continuing.')
+                return
+            }
+
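+            // upgrade contrail-database controller by controller; runCommonCommands waits until 'nodetool status' reports the node as Up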
+            args = 'apt install contrail-database -y;'
+            check = 'nodetool status'
+
+            // ntw01
+            runCommonCommands('I@opencontrail:control and *01*', command, args, check, salt, saltMaster, common)
+            // ntw02
+            runCommonCommands('I@opencontrail:control and *02*', command, args, check, salt, saltMaster, common)
+            // ntw03
+            runCommonCommands('I@opencontrail:control and *03*', command, args, check, salt, saltMaster, common)
+
+            args = "apt install -o Dpkg::Options::=\"--force-confold\" ${CONTROL_PKGS} -y --force-yes;"
+            check = 'contrail-status'
+
+            // ntw01
+            runCommonCommands('I@opencontrail:control and *01*', command, args, check, salt, saltMaster, common)
+            // ntw02
+            runCommonCommands('I@opencontrail:control and *02*', command, args, check, salt, saltMaster, common)
+            // ntw03
+            runCommonCommands('I@opencontrail:control and *03*', command, args, check, salt, saltMaster, common)
+
+            try {
+                salt.enforceState(saltMaster, 'I@opencontrail:control', 'opencontrail')
+            } catch (Exception er) {
+                common.errorMsg('Opencontrail state was executed on I@opencontrail:control and failed. Please fix it manually.')
+            }
+
+            out = salt.runSaltCommand(saltMaster, 'local', ['expression': 'I@opencontrail:control', 'type': 'compound'], command, null, check, null)
+            salt.printSaltCommandResult(out)
+
+            common.warningMsg('Please check \'show bgp summary\' on your BGP router to verify that all BGP peers are in a healthy state.')
+        }
+    }
+
+    if (STAGE_ANALYTICS_UPGRADE.toBoolean() == true && !errorOccured) {
+
+        stage('Ask for manual confirmation') {
+            input message: "Do you want to continue with the Opencontrail analytic nodes upgrade?"
+        }
+
+        stage('Opencontrail analytics upgrade') {
+
+            oc_component_repo = salt.runSaltProcessStep(saltMaster, 'I@opencontrail:collector and *01*', 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
+
+            oc_component_repo = oc_component_repo['return'][0].values()[0]
+
+            try {
+                salt.runSaltProcessStep(saltMaster, 'I@opencontrail:collector', 'cmd.shell', ["rm ${oc_component_repo}"], null, true)
+                salt.runSaltProcessStep(saltMaster, 'I@opencontrail:collector', 'saltutil.refresh_pillar', [], null, true)
+                salt.enforceState(saltMaster, 'I@opencontrail:collector', 'linux.system.repo')
+            } catch (Exception er) {
+                errorOccured = true
+                common.errorMsg("Opencontrail component on I@opencontrail:collector probably failed to be replaced. Please check it in ${oc_component_repo} before continuing.")
+                return
+            }
+
+            args = 'apt install contrail-database -y;'
+            check = 'nodetool status'
+
+            // nal01
+            runCommonCommands('I@opencontrail:collector and *01*', command, args, check, salt, saltMaster, common)
+            // nal02
+            runCommonCommands('I@opencontrail:collector and *02*', command, args, check, salt, saltMaster, common)
+            // nal03
+            runCommonCommands('I@opencontrail:collector and *03*', command, args, check, salt, saltMaster, common)
+
+            args = "apt install -o Dpkg::Options::=\"--force-confold\" ${ANALYTIC_PKGS} -y --force-yes;"
+            check = 'contrail-status'
+
+            // nal01
+            runCommonCommands('I@opencontrail:collector and *01*', command, args, check, salt, saltMaster, common)
+            // nal02
+            runCommonCommands('I@opencontrail:collector and *02*', command, args, check, salt, saltMaster, common)
+            // nal03
+            runCommonCommands('I@opencontrail:collector and *03*', command, args, check, salt, saltMaster, common)
+
+            try {
+                salt.enforceState(saltMaster, 'I@opencontrail:collector', 'opencontrail')
+            } catch (Exception er) {
+                common.errorMsg('Opencontrail state was executed on I@opencontrail:collector and failed. Please fix it manually.')
+            }
+
+            out = salt.runSaltCommand(saltMaster, 'local', ['expression': 'I@opencontrail:collector', 'type': 'compound'], command, null, check, null)
+            salt.printSaltCommandResult(out)
+        }
+    }
+
+    if (STAGE_COMPUTES_UPGRADE.toBoolean() == true && !errorOccured) {
+
+        try {
+
+            stage('List targeted compute servers') {
+                minions = salt.getMinions(saltMaster, COMPUTE_TARGET_SERVERS)
+
+                if (minions.isEmpty()) {
+                    throw new Exception("No minion was targeted")
+                }
+
+                targetLiveSubset = minions.subList(0, Integer.valueOf(COMPUTE_TARGET_SUBSET_LIVE)).join(' or ')
+                targetLiveSubsetProbe = minions.subList(0, probe).join(' or ')
+
+                targetLiveAll = minions.join(' or ')
+                common.infoMsg("Found nodes: ${targetLiveAll}")
+                common.infoMsg("Selected sample nodes: ${targetLiveSubset}")
+            }
+
+            stage('Confirm upgrade on sample nodes') {
+                input message: "Do you want to continue with the Opencontrail compute upgrade on the following sample nodes? ${targetLiveSubset}"
+            }
+
+            stage("Opencontrail compute upgrade on sample nodes") {
+
+                oc_component_repo = salt.runSaltProcessStep(saltMaster, targetLiveSubset, 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
+                oc_component_repo = oc_component_repo['return'][0].values()[0]
+
+                try {
+                    salt.runSaltProcessStep(saltMaster, targetLiveSubset, 'cmd.shell', ["rm ${oc_component_repo}"], null, true)
+                    salt.runSaltProcessStep(saltMaster, targetLiveSubset, 'saltutil.refresh_pillar', [], null, true)
+                    salt.enforceState(saltMaster, targetLiveSubset, 'linux.system.repo')
+                } catch (Exception er) {
+                    errorOccured = true
+                    common.errorMsg("Opencontrail component on ${targetLiveSubset} probably failed to be replaced. Please check it in ${oc_component_repo} before continuing.")
+                    return
+                }
+
+                args = "export DEBIAN_FRONTEND=noninteractive; apt install -o Dpkg::Options::=\"--force-confold\" -o Dpkg::Options::=\"--force-confdef\" ${CMP_PKGS}  -y;"
+                check = 'contrail-status'
+
+                out = salt.runSaltCommand(saltMaster, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, args, null)
+                salt.printSaltCommandResult(out)
+
+                try {
+                    salt.enforceState(saltMaster, targetLiveSubset, 'opencontrail')
+                } catch (Exception er) {
+                    common.errorMsg("Opencontrail state was executed on ${targetLiveSubset} and failed please fix it manually.")
+                }
+
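+                // run the command supplied in the KERNEL_MODULE_RELOAD job parameter so the upgraded kernel modules are reloaded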
+                salt.runSaltProcessStep(saltMaster, targetLiveSubset, 'cmd.shell', ["${KERNEL_MODULE_RELOAD}"], null, true)
+
+                //sleep(10)
+                salt.commandStatus(saltMaster, targetLiveSubset, "${check} | grep -v == | grep -v active", null, false)
+
+                out = salt.runSaltCommand(saltMaster, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, check, null)
+                salt.printSaltCommandResult(out)
+            }
+
+            stage('Confirm upgrade on all targeted nodes') {
+                input message: "Do you want to continue with the Opencontrail compute upgrade on all the targeted nodes? ${targetLiveAll} nodes?"
+            }
+            stage("Opencontrail compute upgrade on all targeted nodes") {
+
+                oc_component_repo = salt.runSaltProcessStep(saltMaster, targetLiveAll, 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
+                oc_component_repo = oc_component_repo['return'][0].values()[0]
+
+                try {
+                    salt.runSaltProcessStep(saltMaster, targetLiveAll, 'cmd.shell', ["rm ${oc_component_repo}"], null, true)
+                    salt.runSaltProcessStep(saltMaster, targetLiveAll, 'saltutil.refresh_pillar', [], null, true)
+                    salt.enforceState(saltMaster, targetLiveAll, 'linux.system.repo')
+                } catch (Exception er) {
+                    common.errorMsg("Opencontrail component on ${targetLiveAll} probably failed to be replaced. Please check it in ${oc_component_repo} before continuing.")
+                    return
+                }
+
+                args = "export DEBIAN_FRONTEND=noninteractive; apt install -o Dpkg::Options::=\"--force-confold\" -o Dpkg::Options::=\"--force-confdef\" ${CMP_PKGS}  -y;"
+                check = 'contrail-status'
+
+                out = salt.runSaltCommand(saltMaster, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, args, null)
+                salt.printSaltCommandResult(out)
+
+                try {
+                    salt.enforceState(saltMaster, targetLiveAll, 'opencontrail')
+                } catch (Exception er) {
+                    common.errorMsg("Opencontrail state was executed on ${targetLiveAll} and failed please fix it manually.")
+                }
+
+                salt.runSaltProcessStep(saltMaster, targetLiveAll, 'cmd.shell', ["${KERNEL_MODULE_RELOAD}"], null, true)
+                //sleep(10)
+                salt.commandStatus(saltMaster, targetLiveAll, "${check} | grep -v == | grep -v active", null, false)
+
+                out = salt.runSaltCommand(saltMaster, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, check, null)
+                salt.printSaltCommandResult(out)
+            }
+
+        } catch (Throwable e) {
+            // If there was an error or exception thrown, the build failed
+            currentBuild.result = "FAILURE"
+            throw e
+        }
+    }
+
+
+    if (STAGE_CONTROLLERS_ROLLBACK.toBoolean() == true && !errorOccured) {
+
+        stage('Ask for manual confirmation') {
+            input message: "Do you want to continue with the Opencontrail control nodes rollback?"
+        }
+
+        stage('Opencontrail controllers rollback') {
+
+            oc_component_repo = salt.runSaltProcessStep(saltMaster, 'I@opencontrail:control and *01*', 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
+            oc_component_repo = oc_component_repo['return'][0].values()[0]
+
+            try {
+                salt.runSaltProcessStep(saltMaster, 'I@opencontrail:control', 'cmd.shell', ["rm ${oc_component_repo}"], null, true)
+                salt.runSaltProcessStep(saltMaster, 'I@opencontrail:control', 'saltutil.refresh_pillar', [], null, true)
+                salt.enforceState(saltMaster, 'I@opencontrail:control', 'linux.system.repo')
+            } catch (Exception er) {
+                errorOccured = true
+                common.errorMsg("Opencontrail component on I@opencontrail:control probably failed to be replaced. Please check it in ${oc_component_repo} before continuing.")
+                return
+            }
+
+            args = 'apt install contrail-database -y --force-yes;'
+            check = 'nodetool status'
+
+            // ntw01
+            runCommonCommands('I@opencontrail:control and *01*', command, args, check, salt, saltMaster, common)
+            // ntw02
+            runCommonCommands('I@opencontrail:control and *02*', command, args, check, salt, saltMaster, common)
+            // ntw03
+            runCommonCommands('I@opencontrail:control and *03*', command, args, check, salt, saltMaster, common)
+
+            args = "apt install -o Dpkg::Options::=\"--force-confold\" ${CONTROL_PKGS} -y --force-yes;"
+            check = 'contrail-status'
+
+            // ntw01
+            runCommonCommands('I@opencontrail:control and *01*', command, args, check, salt, saltMaster, common)
+            // ntw02
+            runCommonCommands('I@opencontrail:control and *02*', command, args, check, salt, saltMaster, common)
+            // ntw03
+            runCommonCommands('I@opencontrail:control and *03*', command, args, check, salt, saltMaster, common)
+
+            try {
+                salt.enforceState(saltMaster, 'I@opencontrail:control', 'opencontrail')
+            } catch (Exception er) {
+                common.errorMsg('Opencontrail state was executed on I@opencontrail:control and failed. Please fix it manually.')
+            }
+
+            out = salt.runSaltCommand(saltMaster, 'local', ['expression': 'I@opencontrail:control', 'type': 'compound'], command, null, check, null)
+            salt.printSaltCommandResult(out)
+
+            common.warningMsg('Please check \'show bgp summary\' on your BGP router to verify that all BGP peers are in a healthy state.')
+        }
+    }
+
+    if (STAGE_ANALYTICS_ROLLBACK.toBoolean() == true && !errorOccured) {
+
+        stage('Ask for manual confirmation') {
+            input message: "Do you want to continue with the Opencontrail analytic nodes rollback?"
+        }
+
+        stage('Opencontrail analytics rollback') {
+
+            oc_component_repo = salt.runSaltProcessStep(saltMaster, 'I@opencontrail:collector and *01*', 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
+            oc_component_repo = oc_component_repo['return'][0].values()[0]
+
+            try {
+                salt.runSaltProcessStep(saltMaster, 'I@opencontrail:collector', 'cmd.shell', ["rm ${oc_component_repo}"], null, true)
+                salt.runSaltProcessStep(saltMaster, 'I@opencontrail:collector', 'saltutil.refresh_pillar', [], null, true)
+                salt.enforceState(saltMaster, 'I@opencontrail:collector', 'linux.system.repo')
+            } catch (Exception er) {
+                errorOccured = true
+                common.errorMsg("Opencontrail component on I@opencontrail:collector probably failed to be replaced. Please check it in ${oc_component_repo} before continuing.")
+                return
+            }
+
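+            // Install the contrail-database package on each analytics node in turn, verifying the Cassandra ring with 'nodetool status'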
+            args = 'apt install contrail-database -y --force-yes;'
+            check = 'nodetool status'
+
+            // nal01
+            runCommonCommands('I@opencontrail:collector and *01*', command, args, check, salt, saltMaster, common)
+            // nal02
+            runCommonCommands('I@opencontrail:collector and *02*', command, args, check, salt, saltMaster, common)
+            // nal03
+            runCommonCommands('I@opencontrail:collector and *03*', command, args, check, salt, saltMaster, common)
+
+            args = "apt install -o Dpkg::Options::=\"--force-confold\" ${ANALYTIC_PKGS} -y --force-yes;"
+            check = 'contrail-status'
+
+            // nal01
+            runCommonCommands('I@opencontrail:collector and *01*', command, args, check, salt, saltMaster, common)
+            // nal02
+            runCommonCommands('I@opencontrail:collector and *02*', command, args, check, salt, saltMaster, common)
+            // nal03
+            runCommonCommands('I@opencontrail:collector and *03*', command, args, check, salt, saltMaster, common)
+
+            try {
+                salt.enforceState(saltMaster, 'I@opencontrail:collector', 'opencontrail')
+            } catch (Exception er) {
+                common.errorMsg('Opencontrail state was executed on I@opencontrail:collector and failed. Please fix it manually.')
+            }
+
+            out = salt.runSaltCommand(saltMaster, 'local', ['expression': 'I@opencontrail:collector', 'type': 'compound'], command, null, check, null)
+            salt.printSaltCommandResult(out)
+        }
+    }
+
+    if (STAGE_COMPUTES_ROLLBACK.toBoolean() == true && !errorOccured) {
+
+        try {
+
+            stage('List targeted compute servers') {
+                minions = salt.getMinions(saltMaster, COMPUTE_TARGET_SERVERS)
+
+                if (minions.isEmpty()) {
+                    throw new Exception("No minion was targeted")
+                }
+
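+                // Pick the first COMPUTE_TARGET_SUBSET_LIVE minions as a sample set to roll back before touching all computes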
+                targetLiveSubset = minions.subList(0, Integer.valueOf(COMPUTE_TARGET_SUBSET_LIVE)).join(' or ')
+                targetLiveSubsetProbe = minions.subList(0, probe).join(' or ')
+
+                targetLiveAll = minions.join(' or ')
+                common.infoMsg("Found nodes: ${targetLiveAll}")
+                common.infoMsg("Selected sample nodes: ${targetLiveSubset}")
+            }
+
+            stage('Confirm rollback on sample nodes') {
+                input message: "Do you want to continue with the Opencontrail compute rollback on the following sample nodes? ${targetLiveSubset}"
+            }
+
+            stage("Opencontrail compute rollback on sample nodes") {
+
+                oc_component_repo = salt.runSaltProcessStep(saltMaster, targetLiveSubset, 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
+                oc_component_repo = oc_component_repo['return'][0].values()[0]
+
+                try {
+                    salt.runSaltProcessStep(saltMaster, targetLiveSubset, 'cmd.shell', ["rm ${oc_component_repo}"], null, true)
+                    salt.runSaltProcessStep(saltMaster, targetLiveSubset, 'saltutil.refresh_pillar', [], null, true)
+                    salt.enforceState(saltMaster, targetLiveSubset, 'linux.system.repo')
+                } catch (Exception er) {
+                    errorOccured = true
+                    common.errorMsg("Opencontrail component on ${targetLiveSubset} probably failed to be replaced. Please check it in ${oc_component_repo} before continuing.")
+                    return
+                }
+
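+                // Downgrade the compute packages (CMP_PKGS) non-interactively, keeping existing configuration files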
+                args = "export DEBIAN_FRONTEND=noninteractive; apt install --allow-downgrades -o Dpkg::Options::=\"--force-confold\" -o Dpkg::Options::=\"--force-confdef\" ${CMP_PKGS}  -y;"
+                check = 'contrail-status'
+
+                out = salt.runSaltCommand(saltMaster, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, args, null)
+                salt.printSaltCommandResult(out)
+
+                try {
+                    salt.enforceState(saltMaster, targetLiveSubset, 'opencontrail')
+                } catch (Exception er) {
+                    common.errorMsg("Opencontrail state was executed on ${targetLiveSubset} and failed please fix it manually.")
+                }
+
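+                // Reload the kernel module (KERNEL_MODULE_RELOAD) and check 'contrail-status' for services that are not active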
+                salt.runSaltProcessStep(saltMaster, targetLiveSubset, 'cmd.shell', ["${KERNEL_MODULE_RELOAD}"], null, true)
+                //sleep(10)
+                salt.commandStatus(saltMaster, targetLiveSubset, "${check} | grep -v == | grep -v active", null, false)
+
+                out = salt.runSaltCommand(saltMaster, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, check, null)
+                salt.printSaltCommandResult(out)
+            }
+
+            stage('Confirm rollback on all targeted nodes') {
+                input message: "Do you want to continue with the Opencontrail compute upgrade on all the targeted nodes? ${targetLiveAll} nodes?"
+            }
+
+            stage("Opencontrail compute upgrade on all targeted nodes") {
+
+                oc_component_repo = salt.runSaltProcessStep(saltMaster, targetLiveAll, 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
+                oc_component_repo = oc_component_repo['return'][0].values()[0]
+
+                try {
+                    salt.runSaltProcessStep(saltMaster, targetLiveAll, 'cmd.shell', ["rm ${oc_component_repo}"], null, true)
+                    salt.runSaltProcessStep(saltMaster, targetLiveAll, 'saltutil.refresh_pillar', [], null, true)
+                    salt.enforceState(saltMaster, targetLiveAll, 'linux.system.repo')
+                } catch (Exception er) {
+                    common.errorMsg("Opencontrail component on ${targetLiveAll} probably failed to be replaced. Please check it in ${oc_component_repo} before continuing.")
+                    return
+                }
+
+                args = "export DEBIAN_FRONTEND=noninteractive; apt install --allow-downgrades -o Dpkg::Options::=\"--force-confold\" -o Dpkg::Options::=\"--force-confdef\" ${CMP_PKGS} -y;"
+                check = 'contrail-status'
+
+                out = salt.runSaltCommand(saltMaster, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, args, null)
+                salt.printSaltCommandResult(out)
+
+                try {
+                    salt.enforceState(saltMaster, targetLiveAll, 'opencontrail')
+                } catch (Exception er) {
+                    common.errorMsg("Opencontrail state was executed on ${targetLiveAll} and failed please fix it manually.")
+                }
+
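+                // Same kernel module reload and 'contrail-status' check, now on all targeted compute nodes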
+                salt.runSaltProcessStep(saltMaster, targetLiveAll, 'cmd.shell', ["${KERNEL_MODULE_RELOAD}"], null, true)
+
+                //sleep(10)
+                salt.commandStatus(saltMaster, targetLiveAll, "${check} | grep -v == | grep -v active", null, false)
+
+                out = salt.runSaltCommand(saltMaster, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, check, null)
+                salt.printSaltCommandResult(out)
+            }
+
+        } catch (Throwable e) {
+            // If there was an error or exception thrown, the build failed
+            currentBuild.result = "FAILURE"
+            throw e
         }
     }
 }
diff --git a/openstack-control-upgrade.groovy b/openstack-control-upgrade.groovy
index 7bf646c..9680f24 100644
--- a/openstack-control-upgrade.groovy
+++ b/openstack-control-upgrade.groovy
@@ -16,549 +16,396 @@
 
 def saltMaster
 
-timestamps {
-    node() {
+node() {
 
-        stage('Connect to Salt API') {
-            saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
-        }
+    stage('Connect to Salt API') {
+        saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+    }
 
-        if (STAGE_TEST_UPGRADE.toBoolean() == true) {
-            stage('Test upgrade') {
+    if (STAGE_TEST_UPGRADE.toBoolean() == true) {
+        stage('Test upgrade') {
 
 
-                try {
-                    salt.enforceState(saltMaster, 'I@salt:master', 'reclass')
-                } catch (Exception e) {
-                    common.warningMsg("Some parts of Reclass state failed. The most probable reasons were uncommited changes. We should continue to run")
-                }
+            try {
+                salt.enforceState(saltMaster, 'I@salt:master', 'reclass')
+            } catch (Exception e) {
+                common.warningMsg("Some parts of Reclass state failed. The most probable reasons were uncommited changes. We should continue to run")
+            }
 
-                try {
-                    salt.runSaltProcessStep(saltMaster, '*', 'saltutil.refresh_pillar', [], null, true)
-                } catch (Exception e) {
-                    common.warningMsg("No response from some minions. We should continue to run")
-                }
-                
-                try {
-                    salt.runSaltProcessStep(saltMaster, '*', 'saltutil.sync_all', [], null, true)
-                } catch (Exception e) {
-                    common.warningMsg("No response from some minions. We should continue to run")
-                }
+            try {
+                salt.runSaltProcessStep(saltMaster, '*', 'saltutil.refresh_pillar', [], null, true)
+            } catch (Exception e) {
+                common.warningMsg("No response from some minions. We should continue to run")
+            }
 
-                def _pillar = salt.getGrain(saltMaster, 'I@salt:master', 'domain')
-                def domain = _pillar['return'][0].values()[0].values()[0]
-                print(_pillar)
-                print(domain)
+            try {
+                salt.runSaltProcessStep(saltMaster, '*', 'saltutil.sync_all', [], null, true)
+            } catch (Exception e) {
+                common.warningMsg("No response from some minions. We should continue to run")
+            }
 
-                // read backupninja variable
-                _pillar = salt.getPillar(saltMaster, 'I@backupninja:client', '_param:backupninja_backup_host')
-                def backupninja_backup_host = _pillar['return'][0].values()[0]
-                print(_pillar)
-                print(backupninja_backup_host)
+            def _pillar = salt.getGrain(saltMaster, 'I@salt:master', 'domain')
+            def domain = _pillar['return'][0].values()[0].values()[0]
+            print(_pillar)
+            print(domain)
 
-                _pillar = salt.getGrain(saltMaster, 'I@salt:control', 'id')
-                def kvm01 = _pillar['return'][0].values()[0].values()[0]
-                print(_pillar)
-                print(kvm01)
+            // read backupninja variable
+            _pillar = salt.getPillar(saltMaster, 'I@backupninja:client', '_param:backupninja_backup_host')
+            def backupninja_backup_host = _pillar['return'][0].values()[0]
+            print(_pillar)
+            print(backupninja_backup_host)
 
-                _pillar = salt.getPillar(saltMaster, "${kvm01}", 'salt:control:cluster:internal:node:upg01:provider')
-                def upgNodeProvider = _pillar['return'][0].values()[0]
-                print(_pillar)
-                print(upgNodeProvider)
+            _pillar = salt.getGrain(saltMaster, 'I@salt:control', 'id')
+            def kvm01 = _pillar['return'][0].values()[0].values()[0]
+            print(_pillar)
+            print(kvm01)
+
+            _pillar = salt.getPillar(saltMaster, "${kvm01}", 'salt:control:cluster:internal:node:upg01:provider')
+            def upgNodeProvider = _pillar['return'][0].values()[0]
+            print(_pillar)
+            print(upgNodeProvider)
 
 
-                salt.runSaltProcessStep(saltMaster, "${upgNodeProvider}", 'virt.destroy', ["upg01.${domain}"], null, true)
-                salt.runSaltProcessStep(saltMaster, "${upgNodeProvider}", 'virt.undefine', ["upg01.${domain}"], null, true)
-
-                
-                try {
-                    salt.cmdRun(saltMaster, 'I@salt:master', "salt-key -d upg01.${domain} -y")
-                } catch (Exception e) {
-                    common.warningMsg("upg01.${domain} does not match any accepted, unaccepted or rejected keys. The key did not exist yet or was already removed. We should continue to run")
-                }
+            salt.runSaltProcessStep(saltMaster, "${upgNodeProvider}", 'virt.destroy', ["upg01.${domain}"], null, true)
+            salt.runSaltProcessStep(saltMaster, "${upgNodeProvider}", 'virt.undefine', ["upg01.${domain}"], null, true)
 
 
-                // salt 'kvm02*' state.sls salt.control
-                salt.enforceState(saltMaster, "${upgNodeProvider}", 'salt.control')
+            try {
+                salt.cmdRun(saltMaster, 'I@salt:master', "salt-key -d upg01.${domain} -y")
+            } catch (Exception e) {
+                common.warningMsg("upg01.${domain} does not match any accepted, unaccepted or rejected keys. The key did not exist yet or was already removed. We should continue to run")
+            }
 
-                // wait until upg node is registered in salt-key
-                salt.minionPresent(saltMaster, 'I@salt:master', 'upg01')
 
-                // salt '*' saltutil.refresh_pillar
-                salt.runSaltProcessStep(saltMaster, 'upg*', 'saltutil.refresh_pillar', [], null, true)
-                // salt '*' saltutil.sync_all
-                salt.runSaltProcessStep(saltMaster, 'upg*', 'saltutil.sync_all', [], null, true)
+            // salt 'kvm02*' state.sls salt.control
+            salt.enforceState(saltMaster, "${upgNodeProvider}", 'salt.control')
 
-                // salt "upg*" state.sls linux,openssh,salt.minion,ntp,rsyslog
-                try {
-                    salt.enforceState(saltMaster, 'upg*', ['linux', 'openssh', 'salt.minion', 'ntp', 'rsyslog'])
-                } catch (Exception e) {
-                    common.warningMsg('Received no response because salt-minion was restarted. We should continue to run')
-                }
+            // wait until upg node is registered in salt-key
+            salt.minionPresent(saltMaster, 'I@salt:master', 'upg01')
+
+            // salt '*' saltutil.refresh_pillar
+            salt.runSaltProcessStep(saltMaster, 'upg*', 'saltutil.refresh_pillar', [], null, true)
+            // salt '*' saltutil.sync_all
+            salt.runSaltProcessStep(saltMaster, 'upg*', 'saltutil.sync_all', [], null, true)
+
+            // salt "upg*" state.sls linux,openssh,salt.minion,ntp,rsyslog
+            try {
                 salt.enforceState(saltMaster, 'upg*', ['linux', 'openssh', 'salt.minion', 'ntp', 'rsyslog'])
+            } catch (Exception e) {
+                common.warningMsg('Received no response because salt-minion was restarted. We should continue to run')
+            }
+            salt.enforceState(saltMaster, 'upg*', ['linux', 'openssh', 'salt.minion', 'ntp', 'rsyslog'])
 
-                // salt "upg*" state.sls rabbitmq
-                salt.enforceState(saltMaster, 'upg*', ['rabbitmq', 'memcached'])
-                try {
-                    salt.enforceState(saltMaster, 'I@backupninja:client', ['openssh.client', 'salt.minion'])
-                } catch (Exception e) {
-                    common.warningMsg('salt-minion was restarted. We should continue to run')
-                }
-                try {
-                    salt.enforceState(saltMaster, 'I@backupninja:server', ['salt.minion'])
-                } catch (Exception e) {
-                    common.warningMsg('salt-minion was restarted. We should continue to run')
-                }
-                // salt '*' state.apply salt.minion.grains
-                //salt.enforceState(saltMaster, '*', 'salt.minion.grains')
-                // salt -C 'I@backupninja:server' state.sls backupninja
-                salt.enforceState(saltMaster, 'I@backupninja:server', 'backupninja')
-                // salt -C 'I@backupninja:client' state.sls backupninja
-                salt.enforceState(saltMaster, 'I@backupninja:client', 'backupninja')
-                salt.runSaltProcessStep(saltMaster, 'I@backupninja:client', 'ssh.rm_known_host', ["root", "${backupninja_backup_host}"], null, true)
-                try {
-                    salt.cmdRun(saltMaster, 'I@backupninja:client', "arp -d ${backupninja_backup_host}")
-                } catch (Exception e) {
-                    common.warningMsg('The ARP entry does not exist. We should continue to run.')
-                }
-                salt.runSaltProcessStep(saltMaster, 'I@backupninja:client', 'ssh.set_known_host', ["root", "${backupninja_backup_host}"], null, true)
-                salt.cmdRun(saltMaster, 'I@backupninja:client', 'backupninja -n --run /etc/backup.d/101.mysql')
-                salt.cmdRun(saltMaster, 'I@backupninja:client', 'backupninja -n --run /etc/backup.d/200.backup.rsync > /tmp/backupninjalog')
+            // salt "upg*" state.sls rabbitmq
+            salt.enforceState(saltMaster, 'upg*', ['rabbitmq', 'memcached'])
+            try {
+                salt.enforceState(saltMaster, 'I@backupninja:client', ['openssh.client', 'salt.minion'])
+            } catch (Exception e) {
+                common.warningMsg('salt-minion was restarted. We should continue to run')
+            }
+            try {
+                salt.enforceState(saltMaster, 'I@backupninja:server', ['salt.minion'])
+            } catch (Exception e) {
+                common.warningMsg('salt-minion was restarted. We should continue to run')
+            }
+            // salt '*' state.apply salt.minion.grains
+            //salt.enforceState(saltMaster, '*', 'salt.minion.grains')
+            // salt -C 'I@backupninja:server' state.sls backupninja
+            salt.enforceState(saltMaster, 'I@backupninja:server', 'backupninja')
+            // salt -C 'I@backupninja:client' state.sls backupninja
+            salt.enforceState(saltMaster, 'I@backupninja:client', 'backupninja')
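+            // Refresh the backup host's SSH known_hosts entry (and clear any stale ARP record) before running the backup handlers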
+            salt.runSaltProcessStep(saltMaster, 'I@backupninja:client', 'ssh.rm_known_host', ["root", "${backupninja_backup_host}"], null, true)
+            try {
+                salt.cmdRun(saltMaster, 'I@backupninja:client', "arp -d ${backupninja_backup_host}")
+            } catch (Exception e) {
+                common.warningMsg('The ARP entry does not exist. We should continue to run.')
+            }
+            salt.runSaltProcessStep(saltMaster, 'I@backupninja:client', 'ssh.set_known_host', ["root", "${backupninja_backup_host}"], null, true)
+            salt.cmdRun(saltMaster, 'I@backupninja:client', 'backupninja -n --run /etc/backup.d/101.mysql')
+            salt.cmdRun(saltMaster, 'I@backupninja:client', 'backupninja -n --run /etc/backup.d/200.backup.rsync > /tmp/backupninjalog')
 
-                salt.enforceState(saltMaster, 'I@xtrabackup:server', 'xtrabackup')
-                salt.enforceState(saltMaster, 'I@xtrabackup:client', 'openssh.client')
-                salt.cmdRun(saltMaster, 'I@xtrabackup:client', "su root -c 'salt-call state.sls xtrabackup'")
-                salt.cmdRun(saltMaster, 'I@xtrabackup:client', "su root -c '/usr/local/bin/innobackupex-runner.sh'")
+            salt.enforceState(saltMaster, 'I@xtrabackup:server', 'xtrabackup')
+            salt.enforceState(saltMaster, 'I@xtrabackup:client', 'openssh.client')
+            salt.cmdRun(saltMaster, 'I@xtrabackup:client', "su root -c 'salt-call state.sls xtrabackup'")
+            salt.cmdRun(saltMaster, 'I@xtrabackup:client', "su root -c '/usr/local/bin/innobackupex-runner.sh'")
 
-                def databases = salt.cmdRun(saltMaster, 'I@mysql:client','salt-call mysql.db_list | grep upgrade | awk \'/-/ {print \$2}\'')
-                if(databases && databases != ""){
-                    def databasesList = databases['return'][0].values()[0].trim().tokenize("\n")
-                    for( i = 0; i < databasesList.size(); i++){
-                        if(databasesList[i].toLowerCase().contains('upgrade')){
-                            salt.runSaltProcessStep(saltMaster, 'I@mysql:client', 'mysql.db_remove', ["${databasesList[i]}"], null, true)
-                            common.warningMsg("removing database ${databasesList[i]}")
-                            salt.runSaltProcessStep(saltMaster, 'I@mysql:client', 'file.remove', ["/root/mysql/flags/${databasesList[i]}-installed"], null, true)
-                        }
+            def databases = salt.cmdRun(saltMaster, 'I@mysql:client','salt-call mysql.db_list | grep upgrade | awk \'/-/ {print \$2}\'')
+            if(databases && databases != ""){
+                def databasesList = databases['return'][0].values()[0].trim().tokenize("\n")
+                for( i = 0; i < databasesList.size(); i++){
+                    if(databasesList[i].toLowerCase().contains('upgrade')){
+                        salt.runSaltProcessStep(saltMaster, 'I@mysql:client', 'mysql.db_remove', ["${databasesList[i]}"], null, true)
+                        common.warningMsg("removing database ${databasesList[i]}")
+                        salt.runSaltProcessStep(saltMaster, 'I@mysql:client', 'file.remove', ["/root/mysql/flags/${databasesList[i]}-installed"], null, true)
                     }
-                    salt.enforceState(saltMaster, 'I@mysql:client', 'mysql.client')
-                }else{
-                    common.errorMsg("No _upgrade databases were returned")
                 }
+                salt.enforceState(saltMaster, 'I@mysql:client', 'mysql.client')
+            }else{
+                common.errorMsg("No _upgrade databases were returned")
+            }
 
-                try {
-                    salt.enforceState(saltMaster, 'upg*', 'keystone.server')
-                    salt.runSaltProcessStep(saltMaster, 'upg*', 'service.restart', ['apache2'], null, true)
-                } catch (Exception e) {
-                    common.warningMsg('Restarting Apache2')
-                    salt.runSaltProcessStep(saltMaster, 'upg*', 'service.restart', ['apache2'], null, true)
+            try {
+                salt.enforceState(saltMaster, 'upg*', 'keystone.server')
+                salt.runSaltProcessStep(saltMaster, 'upg*', 'service.restart', ['apache2'], null, true)
+            } catch (Exception e) {
+                common.warningMsg('Restarting Apache2')
+                salt.runSaltProcessStep(saltMaster, 'upg*', 'service.restart', ['apache2'], null, true)
+            }
+            try {
+                salt.enforceState(saltMaster, 'upg*', 'keystone.client')
+            } catch (Exception e) {
+                common.warningMsg('running keystone.client state again')
+                salt.enforceState(saltMaster, 'upg*', 'keystone.client')
+            }
+            try {
+                salt.enforceState(saltMaster, 'upg*', 'glance')
+            } catch (Exception e) {
+                common.warningMsg('running glance state again')
+                salt.enforceState(saltMaster, 'upg*', 'glance')
+            }
+            salt.enforceState(saltMaster, 'upg*', 'keystone.server')
+            try {
+                salt.enforceState(saltMaster, 'upg*', 'nova')
+            } catch (Exception e) {
+                common.warningMsg('running nova state again')
+                salt.enforceState(saltMaster, 'upg*', 'nova')
+            }
+            // run nova state again as sometimes nova does not enforce itself for some reason
+            try {
+                salt.enforceState(saltMaster, 'upg*', 'nova')
+            } catch (Exception e) {
+                common.warningMsg('running nova state again')
+                salt.enforceState(saltMaster, 'upg*', 'nova')
+            }
+            try {
+                salt.enforceState(saltMaster, 'upg*', 'cinder')
+            } catch (Exception e) {
+                common.warningMsg('running cinder state again')
+                salt.enforceState(saltMaster, 'upg*', 'cinder')
+            }
+            try {
+                salt.enforceState(saltMaster, 'upg*', 'neutron')
+            } catch (Exception e) {
+                common.warningMsg('running neutron state again')
+                salt.enforceState(saltMaster, 'upg*', 'neutron')
+            }
+            try {
+                salt.enforceState(saltMaster, 'upg*', 'heat')
+            } catch (Exception e) {
+                common.warningMsg('running heat state again')
+                salt.enforceState(saltMaster, 'upg*', 'heat')
+            }
+            salt.cmdRun(saltMaster, 'upg01*', '. /root/keystonercv3; openstack service list; openstack image list; openstack flavor list; openstack compute service list; openstack server list; openstack network list; openstack volume list; openstack orchestration service list')
+
+            if (STAGE_TEST_UPGRADE.toBoolean() == true && STAGE_REAL_UPGRADE.toBoolean() == true) {
+                stage('Ask for manual confirmation') {
+                    input message: "Do you want to continue with upgrade?"
                 }
+            }
+        }
+    }
+
+    if (STAGE_REAL_UPGRADE.toBoolean() == true) {
+        stage('Real upgrade') {
+            // # actual upgrade
+
+            _pillar = salt.getGrain(saltMaster, 'I@salt:master', 'domain')
+            domain = _pillar['return'][0].values()[0].values()[0]
+            print(_pillar)
+            print(domain)
+
+            _pillar = salt.getGrain(saltMaster, 'I@salt:control', 'id')
+            kvm01 = _pillar['return'][0].values()[0].values()[0]
+            print(_pillar)
+            print(kvm01)
+
+            def errorOccured = false
+
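+            // Resolve which KVM host runs each control and proxy VM so the virt.* calls target the right hypervisor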
+            _pillar = salt.getPillar(saltMaster, "${kvm01}", 'salt:control:cluster:internal:node:ctl01:provider')
+            def ctl01NodeProvider = _pillar['return'][0].values()[0]
+
+            _pillar = salt.getPillar(saltMaster, "${kvm01}", 'salt:control:cluster:internal:node:ctl02:provider')
+            def ctl02NodeProvider = _pillar['return'][0].values()[0]
+
+            _pillar = salt.getPillar(saltMaster, "${kvm01}", 'salt:control:cluster:internal:node:ctl03:provider')
+            def ctl03NodeProvider = _pillar['return'][0].values()[0]
+
+            _pillar = salt.getPillar(saltMaster, "${kvm01}", 'salt:control:cluster:internal:node:prx01:provider')
+            def prx01NodeProvider = _pillar['return'][0].values()[0]
+
+            _pillar = salt.getPillar(saltMaster, "${kvm01}", 'salt:control:cluster:internal:node:prx02:provider')
+            def prx02NodeProvider = _pillar['return'][0].values()[0]
+
+
+            salt.runSaltProcessStep(saltMaster, "${prx01NodeProvider}", 'virt.destroy', ["prx01.${domain}"], null, true)
+            salt.runSaltProcessStep(saltMaster, "${prx02NodeProvider}", 'virt.destroy', ["prx02.${domain}"], null, true)
+            salt.runSaltProcessStep(saltMaster, "${ctl01NodeProvider}", 'virt.destroy', ["ctl01.${domain}"], null, true)
+            salt.runSaltProcessStep(saltMaster, "${ctl02NodeProvider}", 'virt.destroy', ["ctl02.${domain}"], null, true)
+            salt.runSaltProcessStep(saltMaster, "${ctl03NodeProvider}", 'virt.destroy', ["ctl03.${domain}"], null, true)
+
+
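+            // Keep a one-time backup of each VM's system disk so it can be restored by the rollback stage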
+            try {
+                salt.cmdRun(saltMaster, "${prx01NodeProvider}", "[ ! -f /root/prx01.${domain}.qcow2.bak ] && cp /var/lib/libvirt/images/prx01.${domain}/system.qcow2 ./prx01.${domain}.qcow2.bak")
+            } catch (Exception e) {
+                common.warningMsg('File already exists')
+            }
+            try {
+                salt.cmdRun(saltMaster, "${prx02NodeProvider}", "[ ! -f /root/prx02.${domain}.qcow2.bak ] && cp /var/lib/libvirt/images/prx02.${domain}/system.qcow2 ./prx02.${domain}.qcow2.bak")
+            } catch (Exception e) {
+                common.warningMsg('File already exists')
+            }
+            try {
+                salt.cmdRun(saltMaster, "${ctl01NodeProvider}", "[ ! -f /root/ctl01.${domain}.qcow2.bak ] && cp /var/lib/libvirt/images/ctl01.${domain}/system.qcow2 ./ctl01.${domain}.qcow2.bak")
+            } catch (Exception e) {
+                common.warningMsg('File already exists')
+            }
+            try {
+                salt.cmdRun(saltMaster, "${ctl02NodeProvider}", "[ ! -f /root/ctl02.${domain}.qcow2.bak ] && cp /var/lib/libvirt/images/ctl02.${domain}/system.qcow2 ./ctl02.${domain}.qcow2.bak")
+            } catch (Exception e) {
+                common.warningMsg('File already exists')
+            }
+            try {
+                salt.cmdRun(saltMaster, "${ctl03NodeProvider}", "[ ! -f /root/ctl03.${domain}.qcow2.bak ] && cp /var/lib/libvirt/images/ctl03.${domain}/system.qcow2 ./ctl03.${domain}.qcow2.bak")
+            } catch (Exception e) {
+                common.warningMsg('File already exists')
+            }
+
+
+            salt.runSaltProcessStep(saltMaster, "${prx01NodeProvider}", 'virt.undefine', ["prx01.${domain}"], null, true)
+            salt.runSaltProcessStep(saltMaster, "${prx02NodeProvider}", 'virt.undefine', ["prx02.${domain}"], null, true)
+            salt.runSaltProcessStep(saltMaster, "${ctl01NodeProvider}", 'virt.undefine', ["ctl01.${domain}"], null, true)
+            salt.runSaltProcessStep(saltMaster, "${ctl02NodeProvider}", 'virt.undefine', ["ctl02.${domain}"], null, true)
+            salt.runSaltProcessStep(saltMaster, "${ctl03NodeProvider}", 'virt.undefine', ["ctl03.${domain}"], null, true)
+
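+            // Take a fresh database backup before the old control plane VMs are removed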
+            salt.cmdRun(saltMaster, 'I@xtrabackup:client', "su root -c '/usr/local/bin/innobackupex-runner.sh'")
+
+            try {
+                salt.cmdRun(saltMaster, 'I@salt:master', "salt-key -d ctl01.${domain},ctl02.${domain},ctl03.${domain},prx01.${domain},prx02.${domain} -y")
+            } catch (Exception e) {
+                common.warningMsg('Some of the keys did not match any accepted, unaccepted or rejected keys. They were probably already removed. We should continue to run')
+            }
+
+            // salt 'kvm*' state.sls salt.control
+            salt.enforceState(saltMaster, 'I@salt:control', 'salt.control')
+
+            // wait until ctl and prx nodes are registered in salt-key
+            salt.minionPresent(saltMaster, 'I@salt:master', 'ctl01')
+            salt.minionPresent(saltMaster, 'I@salt:master', 'ctl02')
+            salt.minionPresent(saltMaster, 'I@salt:master', 'ctl03')
+            salt.minionPresent(saltMaster, 'I@salt:master', 'prx01')
+            salt.minionPresent(saltMaster, 'I@salt:master', 'prx02')
+
+
+            // salt '*' saltutil.refresh_pillar
+            salt.runSaltProcessStep(saltMaster, '*', 'saltutil.refresh_pillar', [], null, true)
+            // salt '*' saltutil.sync_all
+            salt.runSaltProcessStep(saltMaster, '*', 'saltutil.sync_all', [], null, true)
+
+            try {
+                salt.enforceState(saltMaster, 'ctl* or prx*', ['linux', 'openssh', 'salt.minion', 'ntp', 'rsyslog'])
+            } catch (Exception e) {
+                common.warningMsg('Received no response because salt-minion was restarted. We should continue to run')
+            }
+            salt.enforceState(saltMaster, 'ctl* or prx*', ['linux', 'openssh', 'salt.minion', 'ntp', 'rsyslog'])
+
+            // salt 'ctl*' state.sls keepalived
+            // salt 'ctl*' state.sls haproxy
+            salt.enforceState(saltMaster, 'ctl*', ['keepalived', 'haproxy'])
+            // salt 'ctl*' service.restart rsyslog
+            salt.runSaltProcessStep(saltMaster, 'ctl*', 'service.restart', ['rsyslog'], null, true)
+            // salt "ctl*" state.sls memcached
+            // salt "ctl*" state.sls keystone.server
+            try {
                 try {
-                    salt.enforceState(saltMaster, 'upg*', 'keystone.client')
+                    salt.enforceState(saltMaster, 'ctl*', ['memcached', 'keystone.server'])
+                    salt.runSaltProcessStep(saltMaster, 'ctl*', 'service.restart', ['apache2'], null, true)
+                } catch (Exception e) {
+                    common.warningMsg('Restarting Apache2 and enforcing keystone.server state again')
+                    salt.runSaltProcessStep(saltMaster, 'ctl*', 'service.restart', ['apache2'], null, true)
+                    salt.enforceState(saltMaster, 'ctl*', 'keystone.server')
+                }
+                // salt 'ctl01*' state.sls keystone.client
+                try {
+                    salt.enforceState(saltMaster, 'I@keystone:client and ctl*', 'keystone.client')
                 } catch (Exception e) {
                     common.warningMsg('running keystone.client state again')
-                    salt.enforceState(saltMaster, 'upg*', 'keystone.client')
+                    salt.enforceState(saltMaster, 'I@keystone:client and ctl*', 'keystone.client')
                 }
                 try {
-                    salt.enforceState(saltMaster, 'upg*', 'glance')
+                    salt.enforceState(saltMaster, 'ctl*', 'glance')
                 } catch (Exception e) {
                     common.warningMsg('running glance state again')
-                    salt.enforceState(saltMaster, 'upg*', 'glance')
-                }
-                salt.enforceState(saltMaster, 'upg*', 'keystone.server')
+                    salt.enforceState(saltMaster, 'ctl*', 'glance')
+                }
+                // salt 'ctl*' state.sls glusterfs.client
+                salt.enforceState(saltMaster, 'ctl*', 'glusterfs.client')
+                // salt 'ctl*' state.sls keystone.server
+                salt.enforceState(saltMaster, 'ctl*', 'keystone.server')
+                // salt 'ctl*' state.sls nova
                 try {
-                    salt.enforceState(saltMaster, 'upg*', 'nova')
+                    salt.enforceState(saltMaster, 'ctl*', 'nova')
                 } catch (Exception e) {
                     common.warningMsg('running nova state again')
-                    salt.enforceState(saltMaster, 'upg*', 'nova')
+                    salt.enforceState(saltMaster, 'ctl*', 'nova')
                 }
-                // run nova state again as sometimes nova does not enforce itself for some reason
+                // salt 'ctl*' state.sls cinder
                 try {
-                    salt.enforceState(saltMaster, 'upg*', 'nova')
-                } catch (Exception e) {
-                    common.warningMsg('running nova state again')
-                    salt.enforceState(saltMaster, 'upg*', 'nova')
-                }
-                try {
-                    salt.enforceState(saltMaster, 'upg*', 'cinder')
+                    salt.enforceState(saltMaster, 'ctl*', 'cinder')
                 } catch (Exception e) {
                     common.warningMsg('running cinder state again')
-                    salt.enforceState(saltMaster, 'upg*', 'cinder')
-                }                
+                    salt.enforceState(saltMaster, 'ctl*', 'cinder')
+                }
                 try {
-                    salt.enforceState(saltMaster, 'upg*', 'neutron')
+                    salt.enforceState(saltMaster, 'ctl*', 'neutron')
                 } catch (Exception e) {
                     common.warningMsg('running neutron state again')
-                    salt.enforceState(saltMaster, 'upg*', 'neutron')
+                    salt.enforceState(saltMaster, 'ctl*', 'neutron')
                 }
+                // salt 'ctl*' state.sls heat
                 try {
-                    salt.enforceState(saltMaster, 'upg*', 'heat')
+                    salt.enforceState(saltMaster, 'ctl*', 'heat')
                 } catch (Exception e) {
                     common.warningMsg('running heat state again')
-                    salt.enforceState(saltMaster, 'upg*', 'heat')
-                }
-                salt.cmdRun(saltMaster, 'upg01*', '. /root/keystonercv3; openstack service list; openstack image list; openstack flavor list; openstack compute service list; openstack server list; openstack network list; openstack volume list; openstack orchestration service list')
-
-                if (STAGE_TEST_UPGRADE.toBoolean() == true && STAGE_REAL_UPGRADE.toBoolean() == true) {
-                    stage('Ask for manual confirmation') {
-                        input message: "Do you want to continue with upgrade?"
-                    }
-                }
-            }
-        }
-
-        if (STAGE_REAL_UPGRADE.toBoolean() == true) {
-            stage('Real upgrade') {
-                // # actual upgrade
-
-                _pillar = salt.getGrain(saltMaster, 'I@salt:master', 'domain')
-                domain = _pillar['return'][0].values()[0].values()[0]
-                print(_pillar)
-                print(domain)
-
-                _pillar = salt.getGrain(saltMaster, 'I@salt:control', 'id')
-                kvm01 = _pillar['return'][0].values()[0].values()[0]
-                print(_pillar)
-                print(kvm01)
-
-                def errorOccured = false
-
-                _pillar = salt.getPillar(saltMaster, "${kvm01}", 'salt:control:cluster:internal:node:ctl01:provider')
-                def ctl01NodeProvider = _pillar['return'][0].values()[0]
-
-                _pillar = salt.getPillar(saltMaster, "${kvm01}", 'salt:control:cluster:internal:node:ctl02:provider')
-                def ctl02NodeProvider = _pillar['return'][0].values()[0]
-
-                _pillar = salt.getPillar(saltMaster, "${kvm01}", 'salt:control:cluster:internal:node:ctl03:provider')
-                def ctl03NodeProvider = _pillar['return'][0].values()[0]
-
-                _pillar = salt.getPillar(saltMaster, "${kvm01}", 'salt:control:cluster:internal:node:prx01:provider')
-                def prx01NodeProvider = _pillar['return'][0].values()[0]
-
-                _pillar = salt.getPillar(saltMaster, "${kvm01}", 'salt:control:cluster:internal:node:prx02:provider')
-                def prx02NodeProvider = _pillar['return'][0].values()[0]
-
-
-                salt.runSaltProcessStep(saltMaster, "${prx01NodeProvider}", 'virt.destroy', ["prx01.${domain}"], null, true)
-                salt.runSaltProcessStep(saltMaster, "${prx02NodeProvider}", 'virt.destroy', ["prx02.${domain}"], null, true)
-                salt.runSaltProcessStep(saltMaster, "${ctl01NodeProvider}", 'virt.destroy', ["ctl01.${domain}"], null, true)
-                salt.runSaltProcessStep(saltMaster, "${ctl02NodeProvider}", 'virt.destroy', ["ctl02.${domain}"], null, true)
-                salt.runSaltProcessStep(saltMaster, "${ctl03NodeProvider}", 'virt.destroy', ["ctl03.${domain}"], null, true)
-
-
-                try {
-                    salt.cmdRun(saltMaster, "${prx01NodeProvider}", "[ ! -f /root/prx01.${domain}.qcow2.bak ] && cp /var/lib/libvirt/images/prx01.${domain}/system.qcow2 ./prx01.${domain}.qcow2.bak")
-                } catch (Exception e) {
-                    common.warningMsg('File already exists')
-                }
-                try {
-                    salt.cmdRun(saltMaster, "${prx02NodeProvider}", "[ ! -f /root/prx02.${domain}.qcow2.bak ] && cp /var/lib/libvirt/images/prx02.${domain}/system.qcow2 ./prx02.${domain}.qcow2.bak")
-                } catch (Exception e) {
-                    common.warningMsg('File already exists')
-                }
-                try {
-                    salt.cmdRun(saltMaster, "${ctl01NodeProvider}", "[ ! -f /root/ctl01.${domain}.qcow2.bak ] && cp /var/lib/libvirt/images/ctl01.${domain}/system.qcow2 ./ctl01.${domain}.qcow2.bak")
-                } catch (Exception e) {
-                    common.warningMsg('File already exists')
-                }
-                try {
-                    salt.cmdRun(saltMaster, "${ctl02NodeProvider}", "[ ! -f /root/ctl02.${domain}.qcow2.bak ] && cp /var/lib/libvirt/images/ctl02.${domain}/system.qcow2 ./ctl02.${domain}.qcow2.bak")
-                } catch (Exception e) {
-                    common.warningMsg('File already exists')
-                }
-                try {
-                    salt.cmdRun(saltMaster, "${ctl03NodeProvider}", "[ ! -f /root/ctl03.${domain}.qcow2.bak ] && cp /var/lib/libvirt/images/ctl03.${domain}/system.qcow2 ./ctl03.${domain}.qcow2.bak")
-                } catch (Exception e) {
-                    common.warningMsg('File already exists')
+                    salt.enforceState(saltMaster, 'ctl*', 'heat')
                 }
 
-
-                salt.runSaltProcessStep(saltMaster, "${prx01NodeProvider}", 'virt.undefine', ["prx01.${domain}"], null, true)
-                salt.runSaltProcessStep(saltMaster, "${prx02NodeProvider}", 'virt.undefine', ["prx02.${domain}"], null, true)
-                salt.runSaltProcessStep(saltMaster, "${ctl01NodeProvider}", 'virt.undefine', ["ctl01.${domain}"], null, true)
-                salt.runSaltProcessStep(saltMaster, "${ctl02NodeProvider}", 'virt.undefine', ["ctl02.${domain}"], null, true)
-                salt.runSaltProcessStep(saltMaster, "${ctl03NodeProvider}", 'virt.undefine', ["ctl03.${domain}"], null, true)
-
-                salt.cmdRun(saltMaster, 'I@xtrabackup:client', "su root -c '/usr/local/bin/innobackupex-runner.sh'")
-
-                try {
-                    salt.cmdRun(saltMaster, 'I@salt:master', "salt-key -d ctl01.${domain},ctl02.${domain},ctl03.${domain},prx01.${domain},prx02.${domain} -y")
-                } catch (Exception e) {
-                    common.warningMsg('does not match any accepted, unaccepted or rejected keys. They were probably already removed. We should continue to run')
-                }
-
-                // salt 'kvm*' state.sls salt.control
-                salt.enforceState(saltMaster, 'I@salt:control', 'salt.control')
-
-                // wait until ctl and prx nodes are registered in salt-key
-                salt.minionPresent(saltMaster, 'I@salt:master', 'ctl01')
-                salt.minionPresent(saltMaster, 'I@salt:master', 'ctl02')
-                salt.minionPresent(saltMaster, 'I@salt:master', 'ctl03')
-                salt.minionPresent(saltMaster, 'I@salt:master', 'prx01')
-                salt.minionPresent(saltMaster, 'I@salt:master', 'prx02')
-
-
-                // salt '*' saltutil.refresh_pillar
-                salt.runSaltProcessStep(saltMaster, '*', 'saltutil.refresh_pillar', [], null, true)
-                // salt '*' saltutil.sync_all
-                salt.runSaltProcessStep(saltMaster, '*', 'saltutil.sync_all', [], null, true)
-
-                try {
-                    salt.enforceState(saltMaster, 'ctl* or prx* or ctl*', ['linux', 'openssh', 'salt.minion', 'ntp', 'rsyslog'])
-                } catch (Exception e) {
-                    common.warningMsg('Received no response because salt-minion was restarted. We should continue to run')
-                }
-                salt.enforceState(saltMaster, 'ctl* or prx* or ctl*', ['linux', 'openssh', 'salt.minion', 'ntp', 'rsyslog'])
-
-                // salt 'ctl*' state.sls keepalived
-                // salt 'ctl*' state.sls haproxy
-                salt.enforceState(saltMaster, 'ctl*', ['keepalived', 'haproxy'])
-                // salt 'ctl*' service.restart rsyslog
-                salt.runSaltProcessStep(saltMaster, 'ctl*', 'service.restart', ['rsyslog'], null, true)
-                // salt "ctl*" state.sls memcached
-                // salt "ctl*" state.sls keystone.server
-                try {
-                    try {
-                        salt.enforceState(saltMaster, 'ctl*', ['memcached', 'keystone.server'])
-                        salt.runSaltProcessStep(saltMaster, 'ctl*', 'service.restart', ['apache2'], null, true)
-                    } catch (Exception e) {
-                        common.warningMsg('Restarting Apache2 and enforcing keystone.server state again')
-                        salt.runSaltProcessStep(saltMaster, 'ctl*', 'service.restart', ['apache2'], null, true)
-                        salt.enforceState(saltMaster, 'ctl*', 'keystone.server')
-                    }
-                    // salt 'ctl01*' state.sls keystone.client
-                    try {
-                        salt.enforceState(saltMaster, 'I@keystone:client and ctl*', 'keystone.client')
-                    } catch (Exception e) {
-                        common.warningMsg('running keystone.client state again')
-                        salt.enforceState(saltMaster, 'I@keystone:client and ctl*', 'keystone.client')
-                    } 
-                    try {
-                        salt.enforceState(saltMaster, 'ctl*', 'glance')
-                    } catch (Exception e) {
-                        common.warningMsg('running glance state again')
-                        salt.enforceState(saltMaster, 'ctl*', 'glance')
-                    }                // salt 'ctl*' state.sls glusterfs.client
-                    salt.enforceState(saltMaster, 'ctl*', 'glusterfs.client')
-                    // salt 'ctl*' state.sls keystone.server
-                    salt.enforceState(saltMaster, 'ctl*', 'keystone.server')
-                    // salt 'ctl*' state.sls nova
-                    try {
-                        salt.enforceState(saltMaster, 'ctl*', 'nova')
-                    } catch (Exception e) {
-                        common.warningMsg('running nova state again')
-                        salt.enforceState(saltMaster, 'ctl*', 'nova')
-                    }
-                    // salt 'ctl*' state.sls cinder
-                    try {
-                        salt.enforceState(saltMaster, 'ctl*', 'cinder')
-                    } catch (Exception e) {
-                        common.warningMsg('running cinder state again')
-                        salt.enforceState(saltMaster, 'ctl*', 'cinder')
-                    }                
-                    try {
-                        salt.enforceState(saltMaster, 'ctl*', 'neutron')
-                    } catch (Exception e) {
-                        common.warningMsg('running neutron state again')
-                        salt.enforceState(saltMaster, 'ctl*', 'neutron')
-                    }
-                    // salt 'ctl*' state.sls heat
-                    try {
-                        salt.enforceState(saltMaster, 'ctl*', 'heat')
-                    } catch (Exception e) {
-                        common.warningMsg('running heat state again')
-                        salt.enforceState(saltMaster, 'ctl*', 'heat')
-                    }
-
-                } catch (Exception e) {
-                    errorOccured = true
-                    common.warningMsg('Some states that require syncdb failed. Restoring production databases')
-
-                    // database restore section
-                    try {
-                        salt.runSaltProcessStep(saltMaster, 'I@galera:slave', 'service.stop', ['mysql'], null, true)
-                    } catch (Exception er) {
-                        common.warningMsg('Mysql service already stopped')
-                    }
-                    try {
-                        salt.runSaltProcessStep(saltMaster, 'I@galera:master', 'service.stop', ['mysql'], null, true)
-                    } catch (Exception er) {
-                        common.warningMsg('Mysql service already stopped')
-                    }
-                    try {
-                        salt.cmdRun(saltMaster, 'I@galera:slave', "rm /var/lib/mysql/ib_logfile*")
-                    } catch (Exception er) {
-                        common.warningMsg('Files are not present')
-                    }
-                    try {
-                        salt.cmdRun(saltMaster, 'I@galera:master', "mkdir /root/mysql/mysql.bak")
-                    } catch (Exception er) {
-                        common.warningMsg('Directory already exists')
-                    }
-                    try {
-                        salt.cmdRun(saltMaster, 'I@galera:master', "rm -rf /root/mysql/mysql.bak/*")
-                    } catch (Exception er) {
-                        common.warningMsg('Directory already empty')
-                    }
-                    try {
-                        salt.cmdRun(saltMaster, 'I@galera:master', "mv /var/lib/mysql/* /root/mysql/mysql.bak")
-                    } catch (Exception er) {
-                        common.warningMsg('Files were already moved')
-                    }
-                    try {
-                        salt.runSaltProcessStep(saltMaster, 'I@galera:master', 'file.remove', ["/var/lib/mysql/.galera_bootstrap"], null, true)
-                    } catch (Exception er) {
-                        common.warningMsg('File is not present')
-                    }
-                    salt.cmdRun(saltMaster, 'I@galera:master', "sed -i '/gcomm/c\\wsrep_cluster_address=\"gcomm://\"' /etc/mysql/my.cnf")
-                    _pillar = salt.getPillar(saltMaster, "I@galera:master", 'xtrabackup:client:backup_dir')
-                    backup_dir = _pillar['return'][0].values()[0]
-                    if(backup_dir == null || backup_dir.isEmpty()) { backup_dir='/var/backups/mysql/xtrabackup' }
-                    print(backup_dir)
-                    salt.runSaltProcessStep(saltMaster, 'I@galera:master', 'file.remove', ["${backup_dir}/dbrestored"], null, true)
-                    salt.cmdRun(saltMaster, 'I@xtrabackup:client', "su root -c 'salt-call state.sls xtrabackup'")
-                    salt.runSaltProcessStep(saltMaster, 'I@galera:master', 'service.start', ['mysql'], null, true)
-
-                    // wait until mysql service on galera master is up
-                    salt.commandStatus(saltMaster, 'I@galera:master', 'service mysql status', 'running')
-
-                    salt.runSaltProcessStep(saltMaster, 'I@galera:slave', 'service.start', ['mysql'], null, true)
-                    //
-
-                    common.errorMsg("Stage Real control upgrade failed")
-                }
-                if(!errorOccured){
-
-                    ceph = null
-
-                    try {
-                        ceph = salt.cmdRun(saltMaster, 'ctl*', "salt-call grains.item roles | grep ceph.client")
-
-                    } catch (Exception er) {
-                        common.infoMsg("Ceph is not used")
-                    }
-
-                    if(ceph != null) {
-                        try {
-                            salt.enforceState(saltMaster, 'ctl*', 'ceph.client')
-                        } catch (Exception er) {
-                            common.warningMsg("Ceph client state on controllers failed. Please fix it manually")
-                        }
-                    }
-
-                    // salt 'cmp*' cmd.run 'service nova-compute restart'
-                    salt.runSaltProcessStep(saltMaster, 'cmp*', 'service.restart', ['nova-compute'], null, true)
-                    salt.runSaltProcessStep(saltMaster, 'ctl*', 'service.restart', ['nova-conductor'], null, true)
-                    salt.runSaltProcessStep(saltMaster, 'ctl*', 'service.restart', ['nova-scheduler'], null, true)
-
-
-                    // salt 'prx*' state.sls linux,openssh,salt.minion,ntp,rsyslog
-                    // salt 'ctl*' state.sls keepalived
-                    // salt 'prx*' state.sls keepalived
-                    salt.enforceState(saltMaster, 'prx*', 'keepalived')
-                    // salt 'prx*' state.sls horizon
-                    salt.enforceState(saltMaster, 'prx*', 'horizon')
-                    // salt 'prx*' state.sls nginx
-                    salt.enforceState(saltMaster, 'prx*', 'nginx')
-                    // salt "prx*" state.sls memcached
-                    salt.enforceState(saltMaster, 'prx*', 'memcached')
-
-                    try {
-                        salt.enforceHighstate(saltMaster, 'ctl*')
-                    } catch (Exception er) {
-                        common.errorMsg("Highstate was executed on controller nodes but something failed. Please check it and fix it accordingly.")
-                    }
-
-                    try {
-                        salt.enforceHighstate(saltMaster, 'prx*')
-                    } catch (Exception er) {
-                        common.errorMsg("Highstate was executed on proxy nodes but something failed. Please check it and fix it accordingly.")
-                    }
-
-                    salt.cmdRun(saltMaster, 'ctl01*', '. /root/keystonercv3; openstack service list; openstack image list; openstack flavor list; openstack compute service list; openstack server list; openstack network list; openstack volume list; openstack orchestration service list')
-                }
-            }
-
-            if (STAGE_REAL_UPGRADE.toBoolean() == true && STAGE_ROLLBACK_UPGRADE.toBoolean() == true) {
-                stage('Ask for manual confirmation') {
-                    input message: "Please verify if the control upgrade was successful. If it did not succeed, in the worst scenario, you can click YES to continue with control-upgrade-rollback. Do you want to continue with the rollback?"
-                }
-            }
-        }
-
-        if (STAGE_ROLLBACK_UPGRADE.toBoolean() == true) {
-            stage('Rollback upgrade') {
-
-                stage('Ask for manual confirmation') {
-                    input message: "Do you really want to continue with the rollback?"
-                }
-
-                _pillar = salt.getGrain(saltMaster, 'I@salt:master', 'domain')
-                domain = _pillar['return'][0].values()[0].values()[0]
-                print(_pillar)
-                print(domain)
-
-                _pillar = salt.getGrain(saltMaster, 'I@salt:control', 'id')
-                kvm01 = _pillar['return'][0].values()[0].values()[0]
-                print(_pillar)
-                print(kvm01)
-
-                _pillar = salt.getPillar(saltMaster, "${kvm01}", 'salt:control:cluster:internal:node:ctl01:provider')
-                def ctl01NodeProvider = _pillar['return'][0].values()[0]
-
-                _pillar = salt.getPillar(saltMaster, "${kvm01}", 'salt:control:cluster:internal:node:ctl02:provider')
-                def ctl02NodeProvider = _pillar['return'][0].values()[0]
-
-                _pillar = salt.getPillar(saltMaster, "${kvm01}", 'salt:control:cluster:internal:node:ctl03:provider')
-                def ctl03NodeProvider = _pillar['return'][0].values()[0]
-
-                _pillar = salt.getPillar(saltMaster, "${kvm01}", 'salt:control:cluster:internal:node:prx01:provider')
-                def prx01NodeProvider = _pillar['return'][0].values()[0]
-
-                _pillar = salt.getPillar(saltMaster, "${kvm01}", 'salt:control:cluster:internal:node:prx02:provider')
-                def prx02NodeProvider = _pillar['return'][0].values()[0]
-
-                salt.runSaltProcessStep(saltMaster, "${prx01NodeProvider}", 'virt.destroy', ["prx01.${domain}"], null, true)
-                salt.runSaltProcessStep(saltMaster, "${prx02NodeProvider}", 'virt.destroy', ["prx02.${domain}"], null, true)
-                salt.runSaltProcessStep(saltMaster, "${ctl01NodeProvider}", 'virt.destroy', ["ctl01.${domain}"], null, true)
-                salt.runSaltProcessStep(saltMaster, "${ctl02NodeProvider}", 'virt.destroy', ["ctl02.${domain}"], null, true)
-                salt.runSaltProcessStep(saltMaster, "${ctl03NodeProvider}", 'virt.destroy', ["ctl03.${domain}"], null, true)
-
-                salt.runSaltProcessStep(saltMaster, "${prx01NodeProvider}", 'file.copy', ["/root/prx01.${domain}.qcow2.bak", "/var/lib/libvirt/images/prx01.${domain}/system.qcow2"], null, true)
-                salt.runSaltProcessStep(saltMaster, "${prx02NodeProvider}", 'file.copy', ["/root/prx02.${domain}.qcow2.bak", "/var/lib/libvirt/images/prx02.${domain}/system.qcow2"], null, true)
-                salt.runSaltProcessStep(saltMaster, "${ctl01NodeProvider}", 'file.copy', ["/root/ctl01.${domain}.qcow2.bak", "/var/lib/libvirt/images/ctl01.${domain}/system.qcow2"], null, true)
-                salt.runSaltProcessStep(saltMaster, "${ctl02NodeProvider}", 'file.copy', ["/root/ctl02.${domain}.qcow2.bak", "/var/lib/libvirt/images/ctl02.${domain}/system.qcow2"], null, true)
-                salt.runSaltProcessStep(saltMaster, "${ctl03NodeProvider}", 'file.copy', ["/root/ctl03.${domain}.qcow2.bak", "/var/lib/libvirt/images/ctl03.${domain}/system.qcow2"], null, true)
-
-                try {
-                    salt.cmdRun(saltMaster, 'I@salt:master', "salt-key -d ctl01.${domain},ctl02.${domain},ctl03.${domain},prx01.${domain},prx02.${domain} -y")
-                } catch (Exception e) {
-                    common.warningMsg('does not match any accepted, unaccepted or rejected keys. They were probably already removed. We should continue to run')
-                }
+            } catch (Exception e) {
+                errorOccured = true
+                common.warningMsg('Some states that require syncdb failed. Restoring production databases')
 
                 // database restore section
                 try {
                     salt.runSaltProcessStep(saltMaster, 'I@galera:slave', 'service.stop', ['mysql'], null, true)
-                } catch (Exception e) {
+                } catch (Exception er) {
                     common.warningMsg('Mysql service already stopped')
                 }
                 try {
                     salt.runSaltProcessStep(saltMaster, 'I@galera:master', 'service.stop', ['mysql'], null, true)
-                } catch (Exception e) {
+                } catch (Exception er) {
                     common.warningMsg('Mysql service already stopped')
                 }
                 try {
                     salt.cmdRun(saltMaster, 'I@galera:slave', "rm /var/lib/mysql/ib_logfile*")
-                } catch (Exception e) {
+                } catch (Exception er) {
                     common.warningMsg('Files are not present')
                 }
                 try {
-                    salt.cmdRun(saltMaster, 'I@galera:master', "rm -rf /var/lib/mysql/*")
-                } catch (Exception e) {
+                    salt.cmdRun(saltMaster, 'I@galera:master', "mkdir -p /root/mysql/mysql.bak")
+                } catch (Exception er) {
+                    common.warningMsg('Directory already exists')
+                }
+                try {
+                    salt.cmdRun(saltMaster, 'I@galera:master', "rm -rf /root/mysql/mysql.bak/*")
+                } catch (Exception er) {
                     common.warningMsg('Directory already empty')
                 }
                 try {
+                    salt.cmdRun(saltMaster, 'I@galera:master', "mv /var/lib/mysql/* /root/mysql/mysql.bak")
+                } catch (Exception er) {
+                    common.warningMsg('Files were already moved')
+                }
+                try {
                     salt.runSaltProcessStep(saltMaster, 'I@galera:master', 'file.remove', ["/var/lib/mysql/.galera_bootstrap"], null, true)
-                } catch (Exception e) {
+                } catch (Exception er) {
                     common.warningMsg('File is not present')
                 }
                 salt.cmdRun(saltMaster, 'I@galera:master', "sed -i '/gcomm/c\\wsrep_cluster_address=\"gcomm://\"' /etc/mysql/my.cnf")
@@ -576,27 +423,178 @@
                 salt.runSaltProcessStep(saltMaster, 'I@galera:slave', 'service.start', ['mysql'], null, true)
                 //
 
-                salt.runSaltProcessStep(saltMaster, "${prx01NodeProvider}", 'virt.start', ["prx01.${domain}"], null, true)
-                salt.runSaltProcessStep(saltMaster, "${prx02NodeProvider}", 'virt.start', ["prx02.${domain}"], null, true)
-                salt.runSaltProcessStep(saltMaster, "${ctl01NodeProvider}", 'virt.start', ["ctl01.${domain}"], null, true)
-                salt.runSaltProcessStep(saltMaster, "${ctl02NodeProvider}", 'virt.start', ["ctl02.${domain}"], null, true)
-                salt.runSaltProcessStep(saltMaster, "${ctl03NodeProvider}", 'virt.start', ["ctl03.${domain}"], null, true)
+                common.errorMsg("Stage Real control upgrade failed")
+            }
+            if(!errorOccured){
+
+                ceph = null
+
+                try {
+                    ceph = salt.cmdRun(saltMaster, 'ctl*', "salt-call grains.item roles | grep ceph.client")
+
+                } catch (Exception er) {
+                    common.infoMsg("Ceph is not used")
+                }
+
+                if(ceph != null) {
+                    try {
+                        salt.enforceState(saltMaster, 'ctl*', 'ceph.client')
+                    } catch (Exception er) {
+                        common.warningMsg("Ceph client state on controllers failed. Please fix it manually")
+                    }
+                }
 
                 // salt 'cmp*' cmd.run 'service nova-compute restart'
                 salt.runSaltProcessStep(saltMaster, 'cmp*', 'service.restart', ['nova-compute'], null, true)
-
-                // wait until ctl and prx nodes are registered in salt-key
-                salt.minionPresent(saltMaster, 'I@salt:master', 'ctl01')
-                salt.minionPresent(saltMaster, 'I@salt:master', 'ctl02')
-                salt.minionPresent(saltMaster, 'I@salt:master', 'ctl03')
-                salt.minionPresent(saltMaster, 'I@salt:master', 'prx01')
-                salt.minionPresent(saltMaster, 'I@salt:master', 'prx02')
-
                 salt.runSaltProcessStep(saltMaster, 'ctl*', 'service.restart', ['nova-conductor'], null, true)
                 salt.runSaltProcessStep(saltMaster, 'ctl*', 'service.restart', ['nova-scheduler'], null, true)
 
-                salt.cmdRun(saltMaster, 'ctl01*', '. /root/keystonerc; nova service-list; glance image-list; nova flavor-list; nova hypervisor-list; nova list; neutron net-list; cinder list; heat service-list')
+
+                // salt 'prx*' state.sls linux,openssh,salt.minion,ntp,rsyslog
+                // salt 'ctl*' state.sls keepalived
+                // salt 'prx*' state.sls keepalived
+                salt.enforceState(saltMaster, 'prx*', 'keepalived')
+                // salt 'prx*' state.sls horizon
+                salt.enforceState(saltMaster, 'prx*', 'horizon')
+                // salt 'prx*' state.sls nginx
+                salt.enforceState(saltMaster, 'prx*', 'nginx')
+                // salt "prx*" state.sls memcached
+                salt.enforceState(saltMaster, 'prx*', 'memcached')
+
+                try {
+                    salt.enforceHighstate(saltMaster, 'ctl*')
+                } catch (Exception er) {
+                    common.errorMsg("Highstate was executed on controller nodes but something failed. Please check it and fix it accordingly.")
+                }
+
+                try {
+                    salt.enforceHighstate(saltMaster, 'prx*')
+                } catch (Exception er) {
+                    common.errorMsg("Highstate was executed on proxy nodes but something failed. Please check it and fix it accordingly.")
+                }
+
+                salt.cmdRun(saltMaster, 'ctl01*', '. /root/keystonercv3; openstack service list; openstack image list; openstack flavor list; openstack compute service list; openstack server list; openstack network list; openstack volume list; openstack orchestration service list')
             }
         }
+
+        if (STAGE_REAL_UPGRADE.toBoolean() == true && STAGE_ROLLBACK_UPGRADE.toBoolean() == true) {
+            stage('Ask for manual confirmation') {
+                input message: "Please verify if the control upgrade was successful. If it did not succeed, in the worst scenario, you can click YES to continue with control-upgrade-rollback. Do you want to continue with the rollback?"
+            }
+        }
+    }
+
+    if (STAGE_ROLLBACK_UPGRADE.toBoolean() == true) {
+        stage('Rollback upgrade') {
+
+            stage('Ask for manual confirmation') {
+                input message: "Do you really want to continue with the rollback?"
+            }
+
+            _pillar = salt.getGrain(saltMaster, 'I@salt:master', 'domain')
+            domain = _pillar['return'][0].values()[0].values()[0]
+            print(_pillar)
+            print(domain)
+
+            _pillar = salt.getGrain(saltMaster, 'I@salt:control', 'id')
+            kvm01 = _pillar['return'][0].values()[0].values()[0]
+            print(_pillar)
+            print(kvm01)
+
+            _pillar = salt.getPillar(saltMaster, "${kvm01}", 'salt:control:cluster:internal:node:ctl01:provider')
+            def ctl01NodeProvider = _pillar['return'][0].values()[0]
+
+            _pillar = salt.getPillar(saltMaster, "${kvm01}", 'salt:control:cluster:internal:node:ctl02:provider')
+            def ctl02NodeProvider = _pillar['return'][0].values()[0]
+
+            _pillar = salt.getPillar(saltMaster, "${kvm01}", 'salt:control:cluster:internal:node:ctl03:provider')
+            def ctl03NodeProvider = _pillar['return'][0].values()[0]
+
+            _pillar = salt.getPillar(saltMaster, "${kvm01}", 'salt:control:cluster:internal:node:prx01:provider')
+            def prx01NodeProvider = _pillar['return'][0].values()[0]
+
+            _pillar = salt.getPillar(saltMaster, "${kvm01}", 'salt:control:cluster:internal:node:prx02:provider')
+            def prx02NodeProvider = _pillar['return'][0].values()[0]
+
+            salt.runSaltProcessStep(saltMaster, "${prx01NodeProvider}", 'virt.destroy', ["prx01.${domain}"], null, true)
+            salt.runSaltProcessStep(saltMaster, "${prx02NodeProvider}", 'virt.destroy', ["prx02.${domain}"], null, true)
+            salt.runSaltProcessStep(saltMaster, "${ctl01NodeProvider}", 'virt.destroy', ["ctl01.${domain}"], null, true)
+            salt.runSaltProcessStep(saltMaster, "${ctl02NodeProvider}", 'virt.destroy', ["ctl02.${domain}"], null, true)
+            salt.runSaltProcessStep(saltMaster, "${ctl03NodeProvider}", 'virt.destroy', ["ctl03.${domain}"], null, true)
+
+            salt.runSaltProcessStep(saltMaster, "${prx01NodeProvider}", 'file.copy', ["/root/prx01.${domain}.qcow2.bak", "/var/lib/libvirt/images/prx01.${domain}/system.qcow2"], null, true)
+            salt.runSaltProcessStep(saltMaster, "${prx02NodeProvider}", 'file.copy', ["/root/prx02.${domain}.qcow2.bak", "/var/lib/libvirt/images/prx02.${domain}/system.qcow2"], null, true)
+            salt.runSaltProcessStep(saltMaster, "${ctl01NodeProvider}", 'file.copy', ["/root/ctl01.${domain}.qcow2.bak", "/var/lib/libvirt/images/ctl01.${domain}/system.qcow2"], null, true)
+            salt.runSaltProcessStep(saltMaster, "${ctl02NodeProvider}", 'file.copy', ["/root/ctl02.${domain}.qcow2.bak", "/var/lib/libvirt/images/ctl02.${domain}/system.qcow2"], null, true)
+            salt.runSaltProcessStep(saltMaster, "${ctl03NodeProvider}", 'file.copy', ["/root/ctl03.${domain}.qcow2.bak", "/var/lib/libvirt/images/ctl03.${domain}/system.qcow2"], null, true)
+
+            try {
+                salt.cmdRun(saltMaster, 'I@salt:master', "salt-key -d ctl01.${domain},ctl02.${domain},ctl03.${domain},prx01.${domain},prx02.${domain} -y")
+            } catch (Exception e) {
+                common.warningMsg('Salt keys do not match any accepted, unaccepted or rejected keys. They were probably already removed. Continuing with the rollback.')
+            }
+
+            // database restore section
+            try {
+                salt.runSaltProcessStep(saltMaster, 'I@galera:slave', 'service.stop', ['mysql'], null, true)
+            } catch (Exception e) {
+                common.warningMsg('Mysql service already stopped')
+            }
+            try {
+                salt.runSaltProcessStep(saltMaster, 'I@galera:master', 'service.stop', ['mysql'], null, true)
+            } catch (Exception e) {
+                common.warningMsg('Mysql service already stopped')
+            }
+            try {
+                salt.cmdRun(saltMaster, 'I@galera:slave', "rm /var/lib/mysql/ib_logfile*")
+            } catch (Exception e) {
+                common.warningMsg('Files are not present')
+            }
+            try {
+                salt.cmdRun(saltMaster, 'I@galera:master', "rm -rf /var/lib/mysql/*")
+            } catch (Exception e) {
+                common.warningMsg('Directory already empty')
+            }
+            try {
+                salt.runSaltProcessStep(saltMaster, 'I@galera:master', 'file.remove', ["/var/lib/mysql/.galera_bootstrap"], null, true)
+            } catch (Exception e) {
+                common.warningMsg('File is not present')
+            }
+            salt.cmdRun(saltMaster, 'I@galera:master', "sed -i '/gcomm/c\\wsrep_cluster_address=\"gcomm://\"' /etc/mysql/my.cnf")
+            _pillar = salt.getPillar(saltMaster, "I@galera:master", 'xtrabackup:client:backup_dir')
+            backup_dir = _pillar['return'][0].values()[0]
+            if(backup_dir == null || backup_dir.isEmpty()) { backup_dir='/var/backups/mysql/xtrabackup' }
+            print(backup_dir)
+            salt.runSaltProcessStep(saltMaster, 'I@galera:master', 'file.remove', ["${backup_dir}/dbrestored"], null, true)
+            salt.cmdRun(saltMaster, 'I@xtrabackup:client', "su root -c 'salt-call state.sls xtrabackup'")
+            salt.runSaltProcessStep(saltMaster, 'I@galera:master', 'service.start', ['mysql'], null, true)
+
+            // wait until mysql service on galera master is up
+            salt.commandStatus(saltMaster, 'I@galera:master', 'service mysql status', 'running')
+
+            salt.runSaltProcessStep(saltMaster, 'I@galera:slave', 'service.start', ['mysql'], null, true)
+            //
+
+            salt.runSaltProcessStep(saltMaster, "${prx01NodeProvider}", 'virt.start', ["prx01.${domain}"], null, true)
+            salt.runSaltProcessStep(saltMaster, "${prx02NodeProvider}", 'virt.start', ["prx02.${domain}"], null, true)
+            salt.runSaltProcessStep(saltMaster, "${ctl01NodeProvider}", 'virt.start', ["ctl01.${domain}"], null, true)
+            salt.runSaltProcessStep(saltMaster, "${ctl02NodeProvider}", 'virt.start', ["ctl02.${domain}"], null, true)
+            salt.runSaltProcessStep(saltMaster, "${ctl03NodeProvider}", 'virt.start', ["ctl03.${domain}"], null, true)
+
+            // salt 'cmp*' cmd.run 'service nova-compute restart'
+            salt.runSaltProcessStep(saltMaster, 'cmp*', 'service.restart', ['nova-compute'], null, true)
+
+            // wait until ctl and prx nodes are registered in salt-key
+            salt.minionPresent(saltMaster, 'I@salt:master', 'ctl01')
+            salt.minionPresent(saltMaster, 'I@salt:master', 'ctl02')
+            salt.minionPresent(saltMaster, 'I@salt:master', 'ctl03')
+            salt.minionPresent(saltMaster, 'I@salt:master', 'prx01')
+            salt.minionPresent(saltMaster, 'I@salt:master', 'prx02')
+
+            salt.runSaltProcessStep(saltMaster, 'ctl*', 'service.restart', ['nova-conductor'], null, true)
+            salt.runSaltProcessStep(saltMaster, 'ctl*', 'service.restart', ['nova-scheduler'], null, true)
+
+            salt.cmdRun(saltMaster, 'ctl01*', '. /root/keystonerc; nova service-list; glance image-list; nova flavor-list; nova hypervisor-list; nova list; neutron net-list; cinder list; heat service-list')
+        }
     }
 }
diff --git a/restore-cassandra.groovy b/restore-cassandra.groovy
index 4a9c89e..89afc6c 100644
--- a/restore-cassandra.groovy
+++ b/restore-cassandra.groovy
@@ -13,70 +13,68 @@
 
 def saltMaster
 
-timestamps {
-    node() {
+node() {
 
-        stage('Connect to Salt API') {
-            saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+    stage('Connect to Salt API') {
+        saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+    }
+
+    stage('Start restore') {
+        // # actual upgrade
+
+        stage('Ask for manual confirmation') {
+            input message: "Are you sure you have the correct backups ready? Do you really want to continue to restore Cassandra?"
+        }
+        // Cassandra restore section
+        try {
+            salt.runSaltProcessStep(saltMaster, 'I@opencontrail:control', 'service.stop', ['supervisor-database'], null, true)
+        } catch (Exception er) {
+            common.warningMsg('Supervisor-database service already stopped')
+        }
+        try {
+            salt.cmdRun(saltMaster, 'I@opencontrail:control', "mkdir -p /root/cassandra/cassandra.bak")
+        } catch (Exception er) {
+            common.warningMsg('Directory already exists')
         }
 
-        stage('Start restore') {
-            // # actual upgrade
-
-            stage('Ask for manual confirmation') {
-                input message: "Are you sure you have the correct backups ready? Do you really want to continue to restore Cassandra?"
-            }
-            // Cassandra restore section
-            try {
-                salt.runSaltProcessStep(saltMaster, 'I@opencontrail:control', 'service.stop', ['supervisor-database'], null, true)
-            } catch (Exception er) {
-                common.warningMsg('Supervisor-database service already stopped')
-            }
-            try {
-                salt.cmdRun(saltMaster, 'I@opencontrail:control', "mkdir -p /root/cassandra/cassandra.bak")
-            } catch (Exception er) {
-                common.warningMsg('Directory already exists')
-            }
-
-            try {
-                salt.cmdRun(saltMaster, 'I@opencontrail:control', "mv /var/lib/cassandra/* /root/cassandra/cassandra.bak")
-            } catch (Exception er) {
-                common.warningMsg('Files were already moved')
-            }
-            try {
-                salt.cmdRun(saltMaster, 'I@opencontrail:control', "rm -rf /var/lib/cassandra/*")
-            } catch (Exception er) {
-                common.warningMsg('Directory already empty')
-            }
-
-            _pillar = salt.getPillar(saltMaster, "I@cassandra:backup:client", 'cassandra:backup:backup_dir')
-            backup_dir = _pillar['return'][0].values()[0]
-            if(backup_dir == null || backup_dir.isEmpty()) { backup_dir='/var/backups/cassandra' }
-            print(backup_dir)
-            salt.runSaltProcessStep(saltMaster, 'I@cassandra:backup:client', 'file.remove', ["${backup_dir}/dbrestored"], null, true)
-
-            salt.runSaltProcessStep(saltMaster, 'I@cassandra:backup:client', 'service.start', ['supervisor-database'], null, true)
-
-            // wait until supervisor-database service is up
-            salt.commandStatus(saltMaster, 'I@cassandra:backup:client', 'service supervisor-database status', 'running')
-
-            // performs restore
-            salt.cmdRun(saltMaster, 'I@cassandra:backup:client', "su root -c 'salt-call state.sls cassandra'")
-            salt.runSaltProcessStep(saltMaster, 'I@cassandra:backup:client', 'system.reboot', null, null, true, 5)
-            salt.runSaltProcessStep(saltMaster, 'I@opencontrail:control and not I@cassandra:backup:client', 'system.reboot', null, null, true, 5)
-
-            // wait until supervisor-database service is up
-            salt.commandStatus(saltMaster, 'I@cassandra:backup:client', 'service supervisor-database status', 'running')
-            salt.commandStatus(saltMaster, 'I@opencontrail:control and not I@cassandra:backup:client', 'service supervisor-database status', 'running')
-            sleep(5)
-
-            salt.runSaltProcessStep(saltMaster, 'I@opencontrail:control', 'service.restart', ['supervisor-database'], null, true)
-
-            // wait until contrail-status is up
-            salt.commandStatus(saltMaster, 'I@opencontrail:control', "contrail-status | grep -v == | grep -v \'disabled on boot\' | grep -v nodemgr | grep -v active | grep -v backup", null, false)
-            
-            salt.cmdRun(saltMaster, 'I@opencontrail:control', "nodetool status")
-            salt.cmdRun(saltMaster, 'I@opencontrail:control', "contrail-status")
+        try {
+            salt.cmdRun(saltMaster, 'I@opencontrail:control', "mv /var/lib/cassandra/* /root/cassandra/cassandra.bak")
+        } catch (Exception er) {
+            common.warningMsg('Files were already moved')
         }
+        try {
+            salt.cmdRun(saltMaster, 'I@opencontrail:control', "rm -rf /var/lib/cassandra/*")
+        } catch (Exception er) {
+            common.warningMsg('Directory already empty')
+        }
+
+        _pillar = salt.getPillar(saltMaster, "I@cassandra:backup:client", 'cassandra:backup:backup_dir')
+        backup_dir = _pillar['return'][0].values()[0]
+        if(backup_dir == null || backup_dir.isEmpty()) { backup_dir='/var/backups/cassandra' }
+        print(backup_dir)
+        salt.runSaltProcessStep(saltMaster, 'I@cassandra:backup:client', 'file.remove', ["${backup_dir}/dbrestored"], null, true)
+
+        salt.runSaltProcessStep(saltMaster, 'I@cassandra:backup:client', 'service.start', ['supervisor-database'], null, true)
+
+        // wait until supervisor-database service is up
+        salt.commandStatus(saltMaster, 'I@cassandra:backup:client', 'service supervisor-database status', 'running')
+
+        // performs restore
+        salt.cmdRun(saltMaster, 'I@cassandra:backup:client', "su root -c 'salt-call state.sls cassandra'")
+        salt.runSaltProcessStep(saltMaster, 'I@cassandra:backup:client', 'system.reboot', null, null, true, 5)
+        salt.runSaltProcessStep(saltMaster, 'I@opencontrail:control and not I@cassandra:backup:client', 'system.reboot', null, null, true, 5)
+
+        // wait until supervisor-database service is up
+        salt.commandStatus(saltMaster, 'I@cassandra:backup:client', 'service supervisor-database status', 'running')
+        salt.commandStatus(saltMaster, 'I@opencontrail:control and not I@cassandra:backup:client', 'service supervisor-database status', 'running')
+        sleep(5)
+
+        salt.runSaltProcessStep(saltMaster, 'I@opencontrail:control', 'service.restart', ['supervisor-database'], null, true)
+
+        // wait until contrail-status is up
+        salt.commandStatus(saltMaster, 'I@opencontrail:control', "contrail-status | grep -v == | grep -v \'disabled on boot\' | grep -v nodemgr | grep -v active | grep -v backup", null, false)
+
+        salt.cmdRun(saltMaster, 'I@opencontrail:control', "nodetool status")
+        salt.cmdRun(saltMaster, 'I@opencontrail:control', "contrail-status")
     }
 }
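The restore pipelines in this change repeatedly block on salt.commandStatus until a target service reports the expected state (for example 'service supervisor-database status' returning 'running'). That helper lives in com.mirantis.mk.Salt and is not part of this diff; the loop below is only a hedged, standalone Groovy sketch of the polling idea, with the retry count and delay picked arbitrarily for illustration.

    // Illustrative polling loop; not the actual com.mirantis.mk.Salt.commandStatus implementation.
    def waitForOutput(Closure runCheck, String expected, int retries = 30, int delaySec = 10) {
        for (int i = 0; i < retries; i++) {
            def out = runCheck()                          // e.g. run 'service mysql status' via Salt
            if (out != null && out.toString().contains(expected)) {
                return true                               // expected substring seen, stop waiting
            }
            sleep(delaySec * 1000)                        // plain Groovy sleep takes milliseconds
        }
        throw new RuntimeException("Timed out waiting for '${expected}'")
    }

With such a helper, waitForOutput({ salt.cmdRun(saltMaster, 'I@galera:master', 'service mysql status') }, 'running') would approximate the commandStatus calls used above and below, assuming cmdRun returns the command output.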
diff --git a/restore-zookeeper.groovy b/restore-zookeeper.groovy
index 3e5da6c..3ead8aa 100644
--- a/restore-zookeeper.groovy
+++ b/restore-zookeeper.groovy
@@ -13,82 +13,77 @@
 
 def saltMaster
 
-timestamps {
-    node() {
+node() {
 
-        stage('Connect to Salt API') {
-            saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+    stage('Connect to Salt API') {
+        saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+    }
+
+    stage('Start restore') {
+        // # actual upgrade
+
+        stage('Ask for manual confirmation') {
+            input message: "Are you sure you have the correct backups ready? Do you really want to continue to restore Zookeeper?"
+        }
+        // Zookeeper restore section
+        try {
+            salt.runSaltProcessStep(saltMaster, 'I@opencontrail:control', 'service.stop', ['supervisor-config'], null, true)
+        } catch (Exception er) {
+            common.warningMsg('Supervisor-config service already stopped')
+        }
+        try {
+            salt.runSaltProcessStep(saltMaster, 'I@opencontrail:control', 'service.stop', ['supervisor-control'], null, true)
+        } catch (Exception er) {
+            common.warningMsg('Supervisor-control service already stopped')
+        }
+        try {
+            salt.runSaltProcessStep(saltMaster, 'I@opencontrail:control', 'service.stop', ['zookeeper'], null, true)
+        } catch (Exception er) {
+            common.warningMsg('Zookeeper service already stopped')
+        }
+        //sleep(5)
+        // wait until zookeeper service is down
+        salt.commandStatus(saltMaster, 'I@opencontrail:control', 'service zookeeper status', 'stop')
+
+        try {
+            salt.cmdRun(saltMaster, 'I@opencontrail:control', "mkdir -p /root/zookeeper/zookeeper.bak")
+        } catch (Exception er) {
+            common.warningMsg('Directory already exists')
         }
 
-        stage('Start restore') {
-            // # actual upgrade
-
-            stage('Ask for manual confirmation') {
-                input message: "Are you sure you have the correct backups ready? Do you really want to continue to restore Zookeeper?"
-            }
-            // Zookeeper restore section
-            try {
-                salt.runSaltProcessStep(saltMaster, 'I@opencontrail:control', 'service.stop', ['supervisor-config'], null, true)
-            } catch (Exception er) {
-                common.warningMsg('Supervisor-config service already stopped')
-            }
-            try {
-                salt.runSaltProcessStep(saltMaster, 'I@opencontrail:control', 'service.stop', ['supervisor-control'], null, true)
-            } catch (Exception er) {
-                common.warningMsg('Supervisor-control service already stopped')
-            }
-            try {
-                salt.runSaltProcessStep(saltMaster, 'I@opencontrail:control', 'service.stop', ['zookeeper'], null, true)
-            } catch (Exception er) {
-                common.warningMsg('Zookeeper service already stopped')
-            }
-            //sleep(5)
-            // wait until zookeeper service is down
-            salt.commandStatus(saltMaster, 'I@opencontrail:control', 'service zookeeper status', 'stop')
-
-            try {
-                salt.cmdRun(saltMaster, 'I@opencontrail:control', "mkdir -p /root/zookeeper/zookeeper.bak")
-            } catch (Exception er) {
-                common.warningMsg('Directory already exists')
-            }
-
-            try {
-                salt.cmdRun(saltMaster, 'I@opencontrail:control', "mv /var/lib/zookeeper/version-2/* /root/zookeeper/zookeeper.bak")
-            } catch (Exception er) {
-                common.warningMsg('Files were already moved')
-            }
-            try {
-                salt.cmdRun(saltMaster, 'I@opencontrail:control', "rm -rf /var/lib/zookeeper/version-2/*")
-            } catch (Exception er) {
-                common.warningMsg('Directory already empty')
-            }
-
-            _pillar = salt.getPillar(saltMaster, "I@opencontrail:control", 'zookeeper:backup:backup_dir')
-            backup_dir = _pillar['return'][0].values()[0]
-            if(backup_dir == null || backup_dir.isEmpty()) { backup_dir='/var/backups/zookeeper' }
-            print(backup_dir)
-            salt.runSaltProcessStep(saltMaster, 'I@opencontrail:control', 'file.remove', ["${backup_dir}/dbrestored"], null, true)
-
-            // performs restore
-            salt.cmdRun(saltMaster, 'I@opencontrail:control', "su root -c 'salt-call state.sls zookeeper'")
-
-            salt.runSaltProcessStep(saltMaster, 'I@opencontrail:control', 'service.start', ['zookeeper'], null, true)
-            salt.runSaltProcessStep(saltMaster, 'I@opencontrail:control', 'service.start', ['supervisor-config'], null, true)
-            salt.runSaltProcessStep(saltMaster, 'I@opencontrail:control', 'service.start', ['supervisor-control'], null, true)
-
-            // wait until contrail-status is up
-            salt.commandStatus(saltMaster, 'I@opencontrail:control', "contrail-status | grep -v == | grep -v \'disabled on boot\' | grep -v nodemgr | grep -v active | grep -v backup", null, false)
-            
-            salt.cmdRun(saltMaster, 'I@opencontrail:control', "ls /var/lib/zookeeper/version-2")
-            try {
-                salt.cmdRun(saltMaster, 'I@opencontrail:control', "echo stat | nc localhost 2181")
-            } catch (Exception er) {
-                common.warningMsg('Check which node is zookeeper leader')
-            }
-            salt.cmdRun(saltMaster, 'I@opencontrail:control', "contrail-status")
+        try {
+            salt.cmdRun(saltMaster, 'I@opencontrail:control', "mv /var/lib/zookeeper/version-2/* /root/zookeeper/zookeeper.bak")
+        } catch (Exception er) {
+            common.warningMsg('Files were already moved')
         }
+        try {
+            salt.cmdRun(saltMaster, 'I@opencontrail:control', "rm -rf /var/lib/zookeeper/version-2/*")
+        } catch (Exception er) {
+            common.warningMsg('Directory already empty')
+        }
+
+        _pillar = salt.getPillar(saltMaster, "I@opencontrail:control", 'zookeeper:backup:backup_dir')
+        backup_dir = _pillar['return'][0].values()[0]
+        if(backup_dir == null || backup_dir.isEmpty()) { backup_dir='/var/backups/zookeeper' }
+        print(backup_dir)
+        salt.runSaltProcessStep(saltMaster, 'I@opencontrail:control', 'file.remove', ["${backup_dir}/dbrestored"], null, true)
+
+        // performs restore
+        salt.cmdRun(saltMaster, 'I@opencontrail:control', "su root -c 'salt-call state.sls zookeeper'")
+
+        salt.runSaltProcessStep(saltMaster, 'I@opencontrail:control', 'service.start', ['zookeeper'], null, true)
+        salt.runSaltProcessStep(saltMaster, 'I@opencontrail:control', 'service.start', ['supervisor-config'], null, true)
+        salt.runSaltProcessStep(saltMaster, 'I@opencontrail:control', 'service.start', ['supervisor-control'], null, true)
+
+        // wait until contrail-status is up
+        salt.commandStatus(saltMaster, 'I@opencontrail:control', "contrail-status | grep -v == | grep -v \'disabled on boot\' | grep -v nodemgr | grep -v active | grep -v backup", null, false)
+
+        salt.cmdRun(saltMaster, 'I@opencontrail:control', "ls /var/lib/zookeeper/version-2")
+        try {
+            salt.cmdRun(saltMaster, 'I@opencontrail:control', "echo stat | nc localhost 2181")
+        } catch (Exception er) {
+            common.warningMsg('Check which node is zookeeper leader')
+        }
+        salt.cmdRun(saltMaster, 'I@opencontrail:control', "contrail-status")
     }
 }
-
-
-
diff --git a/test-cookiecutter-reclass.groovy b/test-cookiecutter-reclass.groovy
index cff994d..db1086a 100644
--- a/test-cookiecutter-reclass.groovy
+++ b/test-cookiecutter-reclass.groovy
@@ -92,82 +92,80 @@
   gerritRef = null
 }
 
-timestamps {
-    node("python&&docker") {
-        def templateEnv = "${env.WORKSPACE}"
-        def cutterEnv = "${env.WORKSPACE}/cutter"
-        def jinjaEnv = "${env.WORKSPACE}/jinja"
+node("python&&docker") {
+    def templateEnv = "${env.WORKSPACE}"
+    def cutterEnv = "${env.WORKSPACE}/cutter"
+    def jinjaEnv = "${env.WORKSPACE}/jinja"
 
-        try {
-            stage("Cleanup") {
-                sh("rm -rf * || true")
-            }
-
-            stage ('Download Cookiecutter template') {
-                if (gerritRef) {
-                    def gerritChange = gerrit.getGerritChange(GERRIT_NAME, GERRIT_HOST, GERRIT_CHANGE_NUMBER, CREDENTIALS_ID)
-                    merged = gerritChange.status == "MERGED"
-                    if(!merged){
-                        checkouted = gerrit.gerritPatchsetCheckout ([
-                            credentialsId : CREDENTIALS_ID
-                        ])
-                    } else{
-                        common.successMsg("Change ${GERRIT_CHANGE_NUMBER} is already merged, no need to gate them")
-                    }
-                } else {
-                    git.checkoutGitRepository(templateEnv, COOKIECUTTER_TEMPLATE_URL, COOKIECUTTER_TEMPLATE_BRANCH, CREDENTIALS_ID)
-                }
-            }
-
-            stage("Setup") {
-                python.setupCookiecutterVirtualenv(cutterEnv)
-            }
-
-            def contextFiles
-            dir("${templateEnv}/contexts") {
-                contextFiles = findFiles(glob: "*.yml")
-            }
-
-            def contextFileList = []
-            for (int i = 0; i < contextFiles.size(); i++) {
-                contextFileList << contextFiles[i]
-            }
-
-            stage("generate-model") {
-                for (contextFile in contextFileList) {
-                    generateModel(contextFile, cutterEnv)
-                }
-            }
-
-            dir("${env.WORKSPACE}") {
-                sh(returnStatus: true, script: "tar -zcvf model.tar.gz -C model .")
-                archiveArtifacts artifacts: "model.tar.gz"
-            }
-
-            stage("test-nodes") {
-                def partitions = common.partitionList(contextFileList, PARALLEL_NODE_GROUP_SIZE.toInteger())
-                def buildSteps = [:]
-                for (int i = 0; i < partitions.size(); i++) {
-                    def partition = partitions[i]
-                    buildSteps.put("partition-${i}", new HashMap<String,org.jenkinsci.plugins.workflow.cps.CpsClosure2>())
-                    for(int k = 0; k < partition.size; k++){
-                        def basename = sh(script: "basename ${partition[k]} .yml", returnStdout: true).trim()
-                        def testEnv = "${env.WORKSPACE}/model/${basename}"
-                        buildSteps.get("partition-${i}").put(basename, { testModel(basename, testEnv) })
-                    }
-                }
-                common.serial(buildSteps)
-            }
-
-            stage ('Clean workspace directories') {
-                sh(returnStatus: true, script: "rm -rfv * > /dev/null || true")
-            }
-
-        } catch (Throwable e) {
-             currentBuild.result = "FAILURE"
-             throw e
-        } finally {
-            common.sendNotification(currentBuild.result,"",["slack"])
+    try {
+        stage("Cleanup") {
+            sh("rm -rf * || true")
         }
+
+        stage ('Download Cookiecutter template') {
+            if (gerritRef) {
+                def gerritChange = gerrit.getGerritChange(GERRIT_NAME, GERRIT_HOST, GERRIT_CHANGE_NUMBER, CREDENTIALS_ID)
+                merged = gerritChange.status == "MERGED"
+                if(!merged){
+                    checkouted = gerrit.gerritPatchsetCheckout ([
+                        credentialsId : CREDENTIALS_ID
+                    ])
+                } else {
+                    common.successMsg("Change ${GERRIT_CHANGE_NUMBER} is already merged, no need to gate it")
+                }
+            } else {
+                git.checkoutGitRepository(templateEnv, COOKIECUTTER_TEMPLATE_URL, COOKIECUTTER_TEMPLATE_BRANCH, CREDENTIALS_ID)
+            }
+        }
+
+        stage("Setup") {
+            python.setupCookiecutterVirtualenv(cutterEnv)
+        }
+
+        def contextFiles
+        dir("${templateEnv}/contexts") {
+            contextFiles = findFiles(glob: "*.yml")
+        }
+
+        def contextFileList = []
+        for (int i = 0; i < contextFiles.size(); i++) {
+            contextFileList << contextFiles[i]
+        }
+
+        stage("generate-model") {
+            for (contextFile in contextFileList) {
+                generateModel(contextFile, cutterEnv)
+            }
+        }
+
+        dir("${env.WORKSPACE}") {
+            sh(returnStatus: true, script: "tar -zcvf model.tar.gz -C model .")
+            archiveArtifacts artifacts: "model.tar.gz"
+        }
+
+        stage("test-nodes") {
+            def partitions = common.partitionList(contextFileList, PARALLEL_NODE_GROUP_SIZE.toInteger())
+            def buildSteps = [:]
+            for (int i = 0; i < partitions.size(); i++) {
+                def partition = partitions[i]
+                buildSteps.put("partition-${i}", new HashMap<String,org.jenkinsci.plugins.workflow.cps.CpsClosure2>())
+                for(int k = 0; k < partition.size; k++){
+                    def basename = sh(script: "basename ${partition[k]} .yml", returnStdout: true).trim()
+                    def testEnv = "${env.WORKSPACE}/model/${basename}"
+                    buildSteps.get("partition-${i}").put(basename, { testModel(basename, testEnv) })
+                }
+            }
+            common.serial(buildSteps)
+        }
+
+        stage ('Clean workspace directories') {
+            sh(returnStatus: true, script: "rm -rfv * > /dev/null || true")
+        }
+
+    } catch (Throwable e) {
+         currentBuild.result = "FAILURE"
+         throw e
+    } finally {
+        common.sendNotification(currentBuild.result,"",["slack"])
     }
 }
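test-cookiecutter-reclass.groovy above groups the discovered context files with common.partitionList(contextFileList, PARALLEL_NODE_GROUP_SIZE.toInteger()) and then feeds each group's test closures to common.serial. Those helpers come from the com.mirantis.mk shared library and are not shown in this diff; as a rough sketch of the same grouping idea in plain Groovy, using the built-in collate method rather than the library's actual implementation:

    // Split a flat list of context files into batches of a given size.
    def contexts = ['aaa.yml', 'bbb.yml', 'ccc.yml', 'ddd.yml', 'eee.yml']
    def groupSize = 2
    def partitions = contexts.collate(groupSize)           // [[aaa.yml, bbb.yml], [ccc.yml, ddd.yml], [eee.yml]]

    partitions.eachWithIndex { partition, i ->
        def batch = [:]
        partition.each { contextFile ->
            def basename = contextFile - ~/\.yml$/          // strip the .yml suffix, like the basename call above
            batch[basename] = { println "would run testModel for ${basename}" }
        }
        // The real pipeline hands the 'batch' map of closures to common.serial();
        // here each step is simply executed in order.
        batch.each { name, step -> step() }
    }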
diff --git a/test-openstack-component-pipeline.groovy b/test-openstack-component-pipeline.groovy
index d3504d3..f6ba0b7 100644
--- a/test-openstack-component-pipeline.groovy
+++ b/test-openstack-component-pipeline.groovy
@@ -14,18 +14,15 @@
 def common = new com.mirantis.mk.Common()
 def gerrit = new com.mirantis.mk.Gerrit()
 
+node {
+    def cred = common.getCredentials(CREDENTIALS_ID, 'key')
+    def gerritChange = gerrit.getGerritChange(cred.username, GERRIT_HOST, GERRIT_CHANGE_NUMBER, CREDENTIALS_ID, true)
 
-timestamps {
-    node {
-        def cred = common.getCredentials(CREDENTIALS_ID, 'key')
-        def gerritChange = gerrit.getGerritChange(cred.username, GERRIT_HOST, GERRIT_CHANGE_NUMBER, CREDENTIALS_ID, true)
-
-        stage('Trigger deploy job') {
-            build(job: STACK_DEPLOY_JOB, parameters: [
-                [$class: 'StringParameterValue', name: 'OPENSTACK_API_PROJECT', value: 'mcp-oscore'],
-                [$class: 'StringParameterValue', name: 'STACK_TEST', value: ''],
-                [$class: 'BooleanParameterValue', name: 'TEST_DOCKER_INSTALL', value: false]
-            ])
-        }
+    stage('Trigger deploy job') {
+        build(job: STACK_DEPLOY_JOB, parameters: [
+            [$class: 'StringParameterValue', name: 'OPENSTACK_API_PROJECT', value: 'mcp-oscore'],
+            [$class: 'StringParameterValue', name: 'STACK_TEST', value: ''],
+            [$class: 'BooleanParameterValue', name: 'TEST_DOCKER_INSTALL', value: false]
+        ])
     }
 }
diff --git a/xtrabackup-restore-mysql-db.groovy b/xtrabackup-restore-mysql-db.groovy
index 345f280..6401789 100644
--- a/xtrabackup-restore-mysql-db.groovy
+++ b/xtrabackup-restore-mysql-db.groovy
@@ -13,74 +13,69 @@
 
 def saltMaster
 
-timestamps {
-    node() {
+node() {
 
-        stage('Connect to Salt API') {
-            saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+    stage('Connect to Salt API') {
+        saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+    }
+
+    stage('Start restore') {
+        // # actual upgrade
+
+        stage('Ask for manual confirmation') {
+            input message: "Are you sure you have the correct backups ready? Do you really want to continue to restore mysql db?"
         }
-
-        stage('Start restore') {
-            // # actual upgrade
-
-            stage('Ask for manual confirmation') {
-                input message: "Are you sure you have the correct backups ready? Do you really want to continue to restore mysql db?"
-            }
-            // database restore section
-            try {
-                salt.runSaltProcessStep(saltMaster, 'I@galera:slave', 'service.stop', ['mysql'], null, true)
-            } catch (Exception er) {
-                common.warningMsg('Mysql service already stopped')
-            }
-            try {
-                salt.runSaltProcessStep(saltMaster, 'I@galera:master', 'service.stop', ['mysql'], null, true)
-            } catch (Exception er) {
-                common.warningMsg('Mysql service already stopped')
-            }
-            try {
-                salt.cmdRun(saltMaster, 'I@galera:slave', "rm /var/lib/mysql/ib_logfile*")
-            } catch (Exception er) {
-                common.warningMsg('Files are not present')
-            }
-            try {
-                salt.cmdRun(saltMaster, 'I@galera:master', "mkdir -p /root/mysql/mysql.bak")
-            } catch (Exception er) {
-                common.warningMsg('Directory already exists')
-            }
-            try {
-                salt.cmdRun(saltMaster, 'I@galera:master', "mv /var/lib/mysql/* /root/mysql/mysql.bak")
-            } catch (Exception er) {
-                common.warningMsg('Files were already moved')
-            }
-            try {
-                salt.cmdRun(saltMaster, 'I@galera:master', "rm -rf /var/lib/mysql/*")
-            } catch (Exception er) {
-                common.warningMsg('Directory already empty')
-            }
-            try {
-                salt.runSaltProcessStep(saltMaster, 'I@galera:master', 'file.remove', ["/var/lib/mysql/.galera_bootstrap"], null, true)
-            } catch (Exception er) {
-                common.warningMsg('File is not present')
-            }
-            salt.cmdRun(saltMaster, 'I@galera:master', "sed -i '/gcomm/c\\wsrep_cluster_address=\"gcomm://\"' /etc/mysql/my.cnf")
-            _pillar = salt.getPillar(saltMaster, "I@galera:master", 'xtrabackup:client:backup_dir')
-            backup_dir = _pillar['return'][0].values()[0]
-            if(backup_dir == null || backup_dir.isEmpty()) { backup_dir='/var/backups/mysql/xtrabackup' }
-            print(backup_dir)
-            salt.runSaltProcessStep(saltMaster, 'I@galera:master', 'file.remove', ["${backup_dir}/dbrestored"], null, true)
-            salt.cmdRun(saltMaster, 'I@xtrabackup:client', "su root -c 'salt-call state.sls xtrabackup'")
-            salt.runSaltProcessStep(saltMaster, 'I@galera:master', 'service.start', ['mysql'], null, true)
-
-            // wait until mysql service on galera master is up
-            salt.commandStatus(saltMaster, 'I@galera:master', 'service mysql status', 'running')
-
-            salt.runSaltProcessStep(saltMaster, 'I@galera:slave', 'service.start', ['mysql'], null, true)
-            sleep(15)
-            salt.cmdRun(saltMaster, 'I@galera:master', "su root -c 'salt-call mysql.status | grep -A1 wsrep_cluster_size'")
-
+        // database restore section
+        try {
+            salt.runSaltProcessStep(saltMaster, 'I@galera:slave', 'service.stop', ['mysql'], null, true)
+        } catch (Exception er) {
+            common.warningMsg('Mysql service already stopped')
         }
+        try {
+            salt.runSaltProcessStep(saltMaster, 'I@galera:master', 'service.stop', ['mysql'], null, true)
+        } catch (Exception er) {
+            common.warningMsg('Mysql service already stopped')
+        }
+        try {
+            salt.cmdRun(saltMaster, 'I@galera:slave', "rm /var/lib/mysql/ib_logfile*")
+        } catch (Exception er) {
+            common.warningMsg('Files are not present')
+        }
+        try {
+            salt.cmdRun(saltMaster, 'I@galera:master', "mkdir -p /root/mysql/mysql.bak")
+        } catch (Exception er) {
+            common.warningMsg('Directory already exists')
+        }
+        try {
+            salt.cmdRun(saltMaster, 'I@galera:master', "mv /var/lib/mysql/* /root/mysql/mysql.bak")
+        } catch (Exception er) {
+            common.warningMsg('Files were already moved')
+        }
+        try {
+            salt.cmdRun(saltMaster, 'I@galera:master', "rm -rf /var/lib/mysql/*")
+        } catch (Exception er) {
+            common.warningMsg('Directory already empty')
+        }
+        try {
+            salt.runSaltProcessStep(saltMaster, 'I@galera:master', 'file.remove', ["/var/lib/mysql/.galera_bootstrap"], null, true)
+        } catch (Exception er) {
+            common.warningMsg('File is not present')
+        }
+        salt.cmdRun(saltMaster, 'I@galera:master', "sed -i '/gcomm/c\\wsrep_cluster_address=\"gcomm://\"' /etc/mysql/my.cnf")
+        _pillar = salt.getPillar(saltMaster, "I@galera:master", 'xtrabackup:client:backup_dir')
+        backup_dir = _pillar['return'][0].values()[0]
+        if(backup_dir == null || backup_dir.isEmpty()) { backup_dir='/var/backups/mysql/xtrabackup' }
+        print(backup_dir)
+        salt.runSaltProcessStep(saltMaster, 'I@galera:master', 'file.remove', ["${backup_dir}/dbrestored"], null, true)
+        salt.cmdRun(saltMaster, 'I@xtrabackup:client', "su root -c 'salt-call state.sls xtrabackup'")
+        salt.runSaltProcessStep(saltMaster, 'I@galera:master', 'service.start', ['mysql'], null, true)
+
+        // wait until mysql service on galera master is up
+        salt.commandStatus(saltMaster, 'I@galera:master', 'service mysql status', 'running')
+
+        salt.runSaltProcessStep(saltMaster, 'I@galera:slave', 'service.start', ['mysql'], null, true)
+        sleep(15)
+        salt.cmdRun(saltMaster, 'I@galera:master', "su root -c 'salt-call mysql.status | grep -A1 wsrep_cluster_size'")
+
     }
 }
-
-
-
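With the timestamps wrappers removed from all of these pipelines, timestamped console output can still be applied selectively where it is useful. A minimal scripted-pipeline sketch, assuming the Jenkins Timestamper plugin (which provides the timestamps step deleted throughout this diff) is still installed:

    node() {
        stage('Connect to Salt API') {
            // runs without timestamp prefixes on its console output
        }
        timestamps {
            stage('Start restore') {
                // only output produced inside this block gets timestamp prefixes
            }
        }
    }

Recent Timestamper releases also expose a global option to timestamp all Pipeline builds without any per-job wrapper; whether that is relied on instead is not stated here.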