Added 12h timeout to all pipelines

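Each pipeline body is now wrapped in a 12-hour timeout so that a hung
build gets aborted instead of holding its executor indefinitely. The
existing stages are only re-indented inside the new wrapper; stage
logic, error handling and Slack notification stay unchanged.

A minimal sketch of the wrapper applied to each script (the node label
varies per pipeline, 'docker' is used here only for illustration, and a
few scripts have no try/catch block at all):

    timeout(time: 12, unit: 'HOURS') {
        node('docker') {
            try {
                // existing pipeline stages run here unchanged
            } catch (Throwable e) {
                // mark the build as failed and propagate the error
                currentBuild.result = "FAILURE"
                throw e
            } finally {
                // common is the shared pipeline library object
                common.sendNotification(currentBuild.result, "", ["slack"])
            }
        }
    }
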
Change-Id: I085fcbda322d0877d5ffebd002fc109577788c29
diff --git a/aptly-promote-pipeline.groovy b/aptly-promote-pipeline.groovy
index 99a07e1..c196ccd 100644
--- a/aptly-promote-pipeline.groovy
+++ b/aptly-promote-pipeline.groovy
@@ -22,27 +22,27 @@
 } catch (MissingPropertyException e) {
     storages = ['local']
 }
+timeout(time: 12, unit: 'HOURS') {
+  node() {
+    try{
+      stage("promote") {
+        lock("aptly-api") {
+          for (storage in storages) {
 
-node() {
-  try{
-    stage("promote") {
-      lock("aptly-api") {
-        for (storage in storages) {
-
-          if (storage == "local") {
-            storage = ""
+            if (storage == "local") {
+              storage = ""
+            }
+            aptly.promotePublish(APTLY_URL, SOURCE, TARGET, RECREATE, components, packages, DIFF_ONLY, '-d --timeout 600', DUMP_PUBLISH.toBoolean(), storage)
           }
-
-          aptly.promotePublish(APTLY_URL, SOURCE, TARGET, RECREATE, components, packages, DIFF_ONLY, '-d --timeout 600', DUMP_PUBLISH.toBoolean(), storage)
         }
       }
+    } catch (Throwable e) {
+       // If there was an error or exception thrown, the build failed
+       currentBuild.result = "FAILURE"
+       currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+       throw e
+    } finally {
+       common.sendNotification(currentBuild.result,"",["slack"])
     }
-  } catch (Throwable e) {
-     // If there was an error or exception thrown, the build failed
-     currentBuild.result = "FAILURE"
-     currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
-     throw e
-  } finally {
-     common.sendNotification(currentBuild.result,"",["slack"])
   }
-}
+}
\ No newline at end of file
diff --git a/artifactory-promote-docker-image.groovy b/artifactory-promote-docker-image.groovy
index e278f05..3a33da6 100644
--- a/artifactory-promote-docker-image.groovy
+++ b/artifactory-promote-docker-image.groovy
@@ -42,26 +42,27 @@
 
 String img_src_name, img_src_tag
 String img_dst_name, img_dst_tag
+timeout(time: 12, unit: 'HOURS') {
+    node(slave_label) {
+        (img_src_name, img_src_tag) = image_src.tokenize(_colon)
+        (img_dst_name, img_dst_tag) = image_dst.tokenize(_colon)
 
-node(slave_label) {
-    (img_src_name, img_src_tag) = image_src.tokenize(_colon)
-    (img_dst_name, img_dst_tag) = image_dst.tokenize(_colon)
+        String api_req = JsonOutput.toJson([
+            targetRepo: repo_dst,
+            dockerRepository: img_src_name,
+            targetDockerRepository: img_dst_name,
+            tag: img_src_tag,
+            targetTag: img_dst_tag,
+            copy: copy_image,
+        ])
 
-    String api_req = JsonOutput.toJson([
-        targetRepo: repo_dst,
-        dockerRepository: img_src_name,
-        targetDockerRepository: img_dst_name,
-        tag: img_src_tag,
-        targetTag: img_dst_tag,
-        copy: copy_image,
-    ])
-
-    withCredentials([usernameColonPassword(credentialsId: artifactory_creds, variable: 'USERPASS')]) {
-        sh """
-            curl -fLsS \
-                -u \$USERPASS \
-                -X POST -d '${api_req}' -H 'Content-Type: application/json' \
-                '${artifactory_url}api/docker/${repo_src}/v2/promote'
-        """
+        withCredentials([usernameColonPassword(credentialsId: artifactory_creds, variable: 'USERPASS')]) {
+            sh """
+                curl -fLsS \
+                    -u \$USERPASS \
+                    -X POST -d '${api_req}' -H 'Content-Type: application/json' \
+                    '${artifactory_url}api/docker/${repo_src}/v2/promote'
+            """
+        }
     }
 }
diff --git a/build-debian-packages-influxdb-relay.groovy b/build-debian-packages-influxdb-relay.groovy
index 6040849..3a9bc03 100644
--- a/build-debian-packages-influxdb-relay.groovy
+++ b/build-debian-packages-influxdb-relay.groovy
@@ -5,102 +5,103 @@
 
 def timestamp = common.getDatetime()
 def version = "1.0~${timestamp}"
+timeout(time: 12, unit: 'HOURS') {
+    node('docker') {
+        try{
 
-node('docker') {
-    try{
+            stage("cleanup") {
+                sh("rm -rf * || true")
+            }
 
-        stage("cleanup") {
-            sh("rm -rf * || true")
-        }
-
-        def workingDir = "src/github.com/influxdata"
-        stage("checkout") {
-            git.checkoutGitRepository(
-                "${workingDir}/influxdb-relay",
-                "${SOURCE_URL}",
-                SOURCE_BRANCH,
-                SOURCE_CREDENTIALS,
-                true,
-                30,
-                1
-            )
-        }
-
-        try {
-
-            def jenkinsUID = sh (
-                script: 'id -u',
-                returnStdout: true
-            ).trim()
-            def imgName = "${OS}-${DIST}-${ARCH}"
-            def img
-
-            stage("build image") {
-                img = docker.build(
-                    "${imgName}:${timestamp}",
-                    [
-                        "--build-arg uid=${jenkinsUID}",
-                        "--build-arg timestamp=${timestamp}",
-                        "-f ${workingDir}/influxdb-relay/docker/${OS}-${DIST}-${ARCH}.Dockerfile",
-                        "."
-                    ].join(' ')
+            def workingDir = "src/github.com/influxdata"
+            stage("checkout") {
+                git.checkoutGitRepository(
+                    "${workingDir}/influxdb-relay",
+                    "${SOURCE_URL}",
+                    SOURCE_BRANCH,
+                    SOURCE_CREDENTIALS,
+                    true,
+                    30,
+                    1
                 )
             }
-            stage("build package") {
-                img.inside{
-                    sh("""wget https://storage.googleapis.com/golang/go1.9.linux-amd64.tar.gz &&
-                        tar xf go1.9.linux-amd64.tar.gz &&
-                        export GOROOT=\$PWD/go &&
-                        export PATH=\$PATH:\$GOROOT/bin &&
-                        export GOPATH=\$PWD &&
-                        cd src/github.com/influxdata/influxdb-relay &&
-                        ./build.py --package --version=\"${version}\" --platform=linux --arch=amd64""")
-                }
-                archiveArtifacts artifacts: "${workingDir}/influxdb-relay/build/*.deb"
-            }
-            if (UPLOAD_APTLY.toBoolean()) {
-                lock("aptly-api") {
-                    stage("upload") {
-                        def buildSteps = [:]
-                        def debFiles = sh script: "ls ${workingDir}/influxdb-relay/build/*.deb", returnStdout: true
-                        def debFilesArray = debFiles.trim().tokenize()
-                        def workspace = common.getWorkspace()
-                        for (int i = 0; i < debFilesArray.size(); i++) {
 
-                            def debFile = debFilesArray[i];
-                            buildSteps[debFiles[i]] = aptly.uploadPackageStep(
-                                "${workspace}/"+debFile,
-                                APTLY_URL,
-                                APTLY_REPO,
-                                true
-                            )
+            try {
+
+                def jenkinsUID = sh (
+                    script: 'id -u',
+                    returnStdout: true
+                ).trim()
+                def imgName = "${OS}-${DIST}-${ARCH}"
+                def img
+
+                stage("build image") {
+                    img = docker.build(
+                        "${imgName}:${timestamp}",
+                        [
+                            "--build-arg uid=${jenkinsUID}",
+                            "--build-arg timestamp=${timestamp}",
+                            "-f ${workingDir}/influxdb-relay/docker/${OS}-${DIST}-${ARCH}.Dockerfile",
+                            "."
+                        ].join(' ')
+                    )
+                }
+                stage("build package") {
+                    img.inside{
+                        sh("""wget https://storage.googleapis.com/golang/go1.9.linux-amd64.tar.gz &&
+                            tar xf go1.9.linux-amd64.tar.gz &&
+                            export GOROOT=\$PWD/go &&
+                            export PATH=\$PATH:\$GOROOT/bin &&
+                            export GOPATH=\$PWD &&
+                            cd src/github.com/influxdata/influxdb-relay &&
+                            ./build.py --package --version=\"${version}\" --platform=linux --arch=amd64""")
+                    }
+                    archiveArtifacts artifacts: "${workingDir}/influxdb-relay/build/*.deb"
+                }
+                if (UPLOAD_APTLY.toBoolean()) {
+                    lock("aptly-api") {
+                        stage("upload") {
+                            def buildSteps = [:]
+                            def debFiles = sh script: "ls ${workingDir}/influxdb-relay/build/*.deb", returnStdout: true
+                            def debFilesArray = debFiles.trim().tokenize()
+                            def workspace = common.getWorkspace()
+                            for (int i = 0; i < debFilesArray.size(); i++) {
+
+                                def debFile = debFilesArray[i];
+                                buildSteps[debFiles[i]] = aptly.uploadPackageStep(
+                                    "${workspace}/"+debFile,
+                                    APTLY_URL,
+                                    APTLY_REPO,
+                                    true
+                                )
+                            }
+                            parallel buildSteps
                         }
-                        parallel buildSteps
-                    }
-                    stage("publish") {
-                        aptly.snapshotRepo(APTLY_URL, APTLY_REPO, timestamp)
-                        aptly.publish(APTLY_URL)
+                        stage("publish") {
+                            aptly.snapshotRepo(APTLY_URL, APTLY_REPO, timestamp)
+                            aptly.publish(APTLY_URL)
+                        }
                     }
                 }
+
+            } catch (Exception e) {
+                currentBuild.result = 'FAILURE'
+                println "Cleaning up docker images"
+                sh("docker images | grep -E '[-:\\ ]+${timestamp}[\\.\\ /\$]+' | awk '{print \$3}' | xargs docker rmi -f || true")
+                throw e
             }
 
-        } catch (Exception e) {
-            currentBuild.result = 'FAILURE'
-            println "Cleaning up docker images"
-            sh("docker images | grep -E '[-:\\ ]+${timestamp}[\\.\\ /\$]+' | awk '{print \$3}' | xargs docker rmi -f || true")
-            throw e
+        } catch (Throwable e) {
+           // If there was an exception thrown, the build failed
+           currentBuild.result = "FAILURE"
+           currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+           throw e
+        } finally {
+           common.sendNotification(currentBuild.result,"",["slack"])
+
+           if (currentBuild.result != 'FAILURE') {
+              sh("rm -rf *")
+           }
         }
-
-    } catch (Throwable e) {
-       // If there was an exception thrown, the build failed
-       currentBuild.result = "FAILURE"
-       currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
-       throw e
-    } finally {
-       common.sendNotification(currentBuild.result,"",["slack"])
-
-       if (currentBuild.result != 'FAILURE') {
-          sh("rm -rf *")
-       }
     }
 }
diff --git a/build-debian-packages-jmx-exporter.groovy b/build-debian-packages-jmx-exporter.groovy
index 71f626e..d6e7fbd 100644
--- a/build-debian-packages-jmx-exporter.groovy
+++ b/build-debian-packages-jmx-exporter.groovy
@@ -5,73 +5,74 @@
 
 def timestamp = common.getDatetime()
 def javaversion = "8"
+timeout(time: 12, unit: 'HOURS') {
+    node('docker') {
+        try {
+            def img = dockerLib.getImage("tcpcloud/debian-build-ubuntu-${DIST}")
 
-node('docker') {
-    try {
-        def img = dockerLib.getImage("tcpcloud/debian-build-ubuntu-${DIST}")
-
-        if ("${DIST}" == "trusty") {
-        	javaversion = "7"
-        }
-
-        img.inside ("-u root:root") {
-            sh("rm -rf * || true")
-        }
-
-        stage("checkout") {
-            git.checkoutGitRepository(
-                "jmx-exporter-${timestamp}",
-                "${SOURCE_URL}",
-                SOURCE_BRANCH,
-                SOURCE_CREDENTIALS,
-                true,
-                30,
-                1
-            )
-        }
-
-        img.inside ("-u root:root") {
-            stage("Build") {
-                sh("sed -i \"s/TIMESTAMP/${timestamp}/g\" \$(find ./ -name pom.xml)")
-                sh("sudo apt-get update && sudo apt-get install -y openjdk-${javaversion}-jdk maven")
-                sh("cd jmx-exporter-${timestamp} && mvn package")
+            if ("${DIST}" == "trusty") {
+                javaversion = "7"
             }
-        }
 
-        if (UPLOAD_APTLY.toBoolean()) {
-            stage("upload package") {
-                def buildSteps = [:]
-                def debFiles = sh script: "find ./ -name *.deb", returnStdout: true
-                def debFilesArray = debFiles.trim().tokenize()
-                def workspace = common.getWorkspace()
-                for (int i = 0; i < debFilesArray.size(); i++) {
-                    def debFile = debFilesArray[i];
-                    buildSteps[debFiles[i]] = aptly.uploadPackageStep(
-                        "${workspace}/"+debFile,
-                        APTLY_URL,
-                        APTLY_REPO,
-                        true
-                    )
+            img.inside ("-u root:root") {
+                sh("rm -rf * || true")
+            }
+
+            stage("checkout") {
+                git.checkoutGitRepository(
+                    "jmx-exporter-${timestamp}",
+                    "${SOURCE_URL}",
+                    SOURCE_BRANCH,
+                    SOURCE_CREDENTIALS,
+                    true,
+                    30,
+                    1
+                )
+            }
+
+            img.inside ("-u root:root") {
+                stage("Build") {
+                    sh("sed -i \"s/TIMESTAMP/${timestamp}/g\" \$(find ./ -name pom.xml)")
+                    sh("sudo apt-get update && sudo apt-get install -y openjdk-${javaversion}-jdk maven")
+                    sh("cd jmx-exporter-${timestamp} && mvn package")
                 }
-                parallel buildSteps
             }
 
-            stage("publish") {
-                aptly.snapshotRepo(APTLY_URL, APTLY_REPO, timestamp)
-                aptly.publish(APTLY_URL)
+            if (UPLOAD_APTLY.toBoolean()) {
+                stage("upload package") {
+                    def buildSteps = [:]
+                    def debFiles = sh script: "find ./ -name *.deb", returnStdout: true
+                    def debFilesArray = debFiles.trim().tokenize()
+                    def workspace = common.getWorkspace()
+                    for (int i = 0; i < debFilesArray.size(); i++) {
+                        def debFile = debFilesArray[i];
+                        buildSteps[debFiles[i]] = aptly.uploadPackageStep(
+                            "${workspace}/"+debFile,
+                            APTLY_URL,
+                            APTLY_REPO,
+                            true
+                        )
+                    }
+                    parallel buildSteps
+                }
+
+                stage("publish") {
+                    aptly.snapshotRepo(APTLY_URL, APTLY_REPO, timestamp)
+                    aptly.publish(APTLY_URL)
+                }
             }
-        }
 
-        img.inside ("-u root:root") {
-            sh("rm -rf * || true")
-        }
+            img.inside ("-u root:root") {
+                sh("rm -rf * || true")
+            }
 
-    } catch (Throwable e) {
-       // If there was an exception thrown, the build failed
-       currentBuild.result = "FAILURE"
-       currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
-       throw e
-    } finally {
-       common.sendNotification(currentBuild.result,"",["slack"])
+        } catch (Throwable e) {
+           // If there was an exception thrown, the build failed
+           currentBuild.result = "FAILURE"
+           currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+           throw e
+        } finally {
+           common.sendNotification(currentBuild.result,"",["slack"])
+        }
     }
 }
diff --git a/build-debian-packages-libvirt-exporter.groovy b/build-debian-packages-libvirt-exporter.groovy
index d373961..2051504 100644
--- a/build-debian-packages-libvirt-exporter.groovy
+++ b/build-debian-packages-libvirt-exporter.groovy
@@ -5,80 +5,81 @@
 
 def timestamp = common.getDatetime()
 def version = "0.1~${timestamp}"
+timeout(time: 12, unit: 'HOURS') {
+    node('docker') {
+        try{
 
-node('docker') {
-    try{
-
-        stage("cleanup") {
-            sh("rm -rf * || true")
-        }
-
-        stage("checkout") {
-            git.checkoutGitRepository(
-                "libvirt-exporter-${version}",
-                "${SOURCE_URL}",
-                SOURCE_BRANCH,
-                SOURCE_CREDENTIALS,
-                true,
-                30,
-                1
-            )
-        }
-
-        stage("build binary") {
-            dir("libvirt-exporter-${version}") {
-                sh("sed -i 's/VERSION/${version}/g' debian/changelog && ./build_static.sh")
+            stage("cleanup") {
+                sh("rm -rf * || true")
             }
-        }
 
-        def img = dockerLib.getImage("tcpcloud/debian-build-ubuntu-${DIST}")
-        stage("build package") {
-            img.inside("-u root:root") {
-                sh("apt-get update && apt-get install ruby ruby-dev && gem install fpm")
-                sh("cd libvirt-exporter-${version} && scripts/build.py --package --version=\"${version}\" --platform=linux --arch=amd64")
+            stage("checkout") {
+                git.checkoutGitRepository(
+                    "libvirt-exporter-${version}",
+                    "${SOURCE_URL}",
+                    SOURCE_BRANCH,
+                    SOURCE_CREDENTIALS,
+                    true,
+                    30,
+                    1
+                )
             }
-            archiveArtifacts artifacts: "libvirt-exporter-${version}/build/*.deb"
-        }
 
-        if (UPLOAD_APTLY.toBoolean()) {
-            lock("aptly-api") {
-                stage("upload") {
-                    def buildSteps = [:]
-                    def debFiles = sh(script: "ls libvirt-exporter-${version}/build/*.deb", returnStdout: true)
-                    def debFilesArray = debFiles.trim().tokenize()
-                    def workspace = common.getWorkspace()
-                    for (int i = 0; i < debFilesArray.size(); i++) {
-                        def debFile = debFilesArray[i];
-                        buildSteps[debFiles[i]] = aptly.uploadPackageStep(
-                            "${workspace}/"+debFile,
-                            APTLY_URL,
-                            APTLY_REPO,
-                            true
-                        )
+            stage("build binary") {
+                dir("libvirt-exporter-${version}") {
+                    sh("sed -i 's/VERSION/${version}/g' debian/changelog && ./build_static.sh")
+                }
+            }
+
+            def img = dockerLib.getImage("tcpcloud/debian-build-ubuntu-${DIST}")
+            stage("build package") {
+                img.inside("-u root:root") {
+                    sh("apt-get update && apt-get install ruby ruby-dev && gem install fpm")
+                    sh("cd libvirt-exporter-${version} && scripts/build.py --package --version=\"${version}\" --platform=linux --arch=amd64")
+                }
+                archiveArtifacts artifacts: "libvirt-exporter-${version}/build/*.deb"
+            }
+
+            if (UPLOAD_APTLY.toBoolean()) {
+                lock("aptly-api") {
+                    stage("upload") {
+                        def buildSteps = [:]
+                        def debFiles = sh(script: "ls libvirt-exporter-${version}/build/*.deb", returnStdout: true)
+                        def debFilesArray = debFiles.trim().tokenize()
+                        def workspace = common.getWorkspace()
+                        for (int i = 0; i < debFilesArray.size(); i++) {
+                            def debFile = debFilesArray[i];
+                            buildSteps[debFiles[i]] = aptly.uploadPackageStep(
+                                "${workspace}/"+debFile,
+                                APTLY_URL,
+                                APTLY_REPO,
+                                true
+                            )
+                        }
+                        parallel buildSteps
                     }
-                    parallel buildSteps
-                }
-                stage("publish") {
-                    aptly.snapshotRepo(APTLY_URL, APTLY_REPO, timestamp)
-                    aptly.publish(APTLY_URL)
+                    stage("publish") {
+                        aptly.snapshotRepo(APTLY_URL, APTLY_REPO, timestamp)
+                        aptly.publish(APTLY_URL)
+                    }
                 }
             }
+
+            img.inside("-u root:root") {
+                sh("rm -rf * || true")
+            }
+
+        } catch (Throwable e) {
+           // If there was an exception thrown, the build failed
+           currentBuild.result = "FAILURE"
+           currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+           throw e
+        } finally {
+           common.sendNotification(currentBuild.result,"",["slack"])
+
+           if (currentBuild.result != 'FAILURE') {
+              sh("rm -rf *")
+           }
         }
-
-        img.inside("-u root:root") {
-            sh("rm -rf * || true")
-        }
-
-    } catch (Throwable e) {
-       // If there was an exception thrown, the build failed
-       currentBuild.result = "FAILURE"
-       currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
-       throw e
-    } finally {
-       common.sendNotification(currentBuild.result,"",["slack"])
-
-       if (currentBuild.result != 'FAILURE') {
-          sh("rm -rf *")
-       }
     }
 }
diff --git a/build-debian-packages-pipeline.groovy b/build-debian-packages-pipeline.groovy
index 8bfbff6..8d46e8e 100644
--- a/build-debian-packages-pipeline.groovy
+++ b/build-debian-packages-pipeline.groovy
@@ -43,113 +43,116 @@
 }
 
 def timestamp = common.getDatetime()
-node("docker") {
-  try{
-    stage("checkout") {
-      sh("rm -rf src || true")
-      dir("src") {
-        def pollBranches = [[name:SOURCE_BRANCH]]
-        if (debian_branch) {
-          pollBranches.add([name:DEBIAN_BRANCH])
+timeout(time: 12, unit: 'HOURS') {
+  node("docker") {
+    try{
+      stage("checkout") {
+        sh("rm -rf src || true")
+        dir("src") {
+          def pollBranches = [[name:SOURCE_BRANCH]]
+          if (debian_branch) {
+            pollBranches.add([name:DEBIAN_BRANCH])
+          }
+          def extensions = [[$class: 'CleanCheckout']]
+          def userRemoteConfigs = [[credentialsId: SOURCE_CREDENTIALS, url: SOURCE_URL]]
+          // Checkout specified refspec to local branch
+          if (common.validInputParam('SOURCE_REFSPEC')) {
+            extensions.add([$class: 'BuildChooserSetting', buildChooser: [$class: 'GerritTriggerBuildChooser']])
+            extensions.add([$class: 'LocalBranch', localBranch: SOURCE_BRANCH])
+            userRemoteConfigs[0]['refspec'] = SOURCE_REFSPEC
+          }
+          checkout changelog: true, poll: false,
+            scm: [$class: 'GitSCM', branches: pollBranches, doGenerateSubmoduleConfigurations: false,
+            extensions: extensions,  submoduleCfg: [], userRemoteConfigs: userRemoteConfigs]
+          if (debian_branch){
+            /* There are 2 schemas of build spec keeping:
+                   1. Separate branch with build specs. I.e. debian/xenial
+                   2. Separate directory with specs.
+               Logic below makes package build compatible with both schemas.
+            */
+            def retStatus = sh(script: 'git checkout ' + DEBIAN_BRANCH, returnStatus: true)
+            if (retStatus != 0) {
+              common.warningMsg("Cannot checkout ${DEBIAN_BRANCH} branch. Going to build package by ${SOURCE_BRANCH} branch.")
+            }
+          }
         }
-        def extensions = [[$class: 'CleanCheckout']]
-        def userRemoteConfigs = [[credentialsId: SOURCE_CREDENTIALS, url: SOURCE_URL]]
-        // Checkout specified refspec to local branch
+        debian.cleanup(OS+":"+DIST)
+      }
+      stage("build-source") {
+        // If SOURCE_REFSPEC is defined refspec will be checked out to local branch and need to build it instead of origin branch.
         if (common.validInputParam('SOURCE_REFSPEC')) {
-          extensions.add([$class: 'BuildChooserSetting', buildChooser: [$class: 'GerritTriggerBuildChooser']])
-          extensions.add([$class: 'LocalBranch', localBranch: SOURCE_BRANCH])
-          userRemoteConfigs[0]['refspec'] = SOURCE_REFSPEC
+          debian.buildSource("src", OS+":"+DIST, snapshot, 'Jenkins', 'autobuild@mirantis.com', revisionPostfix, '')
+        } else {
+          debian.buildSource("src", OS+":"+DIST, snapshot, 'Jenkins', 'autobuild@mirantis.com', revisionPostfix)
         }
-        checkout changelog: true, poll: false,
-          scm: [$class: 'GitSCM', branches: pollBranches, doGenerateSubmoduleConfigurations: false,
-          extensions: extensions,  submoduleCfg: [], userRemoteConfigs: userRemoteConfigs]
-        if (debian_branch){
-          /* There are 2 schemas of build spec keeping:
-                 1. Separate branch with build specs. I.e. debian/xenial
-                 2. Separate directory with specs.
-             Logic below makes package build compatible with both schemas.
-          */
-          def retStatus = sh(script: 'git checkout ' + DEBIAN_BRANCH, returnStatus: true)
-          if (retStatus != 0) {
-            common.warningMsg("Cannot checkout ${DEBIAN_BRANCH} branch. Going to build package by ${SOURCE_BRANCH} branch.")
+        archiveArtifacts artifacts: "build-area/*.dsc"
+        archiveArtifacts artifacts: "build-area/*_source.changes"
+        archiveArtifacts artifacts: "build-area/*.tar.*"
+      }
+      stage("build-binary") {
+        dsc = sh script: "ls build-area/*.dsc", returnStdout: true
+        if(common.validInputParam("PRE_BUILD_SCRIPT")) {
+          writeFile([file:"pre_build_script.sh", text: env['PRE_BUILD_SCRIPT']])
+        }
+        debian.buildBinary(
+          dsc.trim(),
+          OS+":"+DIST,
+          EXTRA_REPO_URL,
+          EXTRA_REPO_KEY_URL
+        )
+        archiveArtifacts artifacts: "build-area/*.deb"
+      }
+
+      if (lintianCheck) {
+        stage("lintian") {
+          changes = sh script: "ls build-area/*_"+ARCH+".changes", returnStdout: true
+          try {
+            debian.runLintian(changes.trim(), OS, OS+":"+DIST)
+          } catch (Exception e) {
+            println "[WARN] Lintian returned non-zero exit status"
+            currentBuild.result = 'UNSTABLE'
           }
         }
       }
-      debian.cleanup(OS+":"+DIST)
-    }
-    stage("build-source") {
-      // If SOURCE_REFSPEC is defined refspec will be checked out to local branch and need to build it instead of origin branch.
-      if (common.validInputParam('SOURCE_REFSPEC')) {
-        debian.buildSource("src", OS+":"+DIST, snapshot, 'Jenkins', 'autobuild@mirantis.com', revisionPostfix, '')
-      } else {
-        debian.buildSource("src", OS+":"+DIST, snapshot, 'Jenkins', 'autobuild@mirantis.com', revisionPostfix)
-      }
-      archiveArtifacts artifacts: "build-area/*.dsc"
-      archiveArtifacts artifacts: "build-area/*_source.changes"
-      archiveArtifacts artifacts: "build-area/*.tar.*"
-    }
-    stage("build-binary") {
-      dsc = sh script: "ls build-area/*.dsc", returnStdout: true
-      if(common.validInputParam("PRE_BUILD_SCRIPT")) {
-        writeFile([file:"pre_build_script.sh", text: env['PRE_BUILD_SCRIPT']])
-      }
-      debian.buildBinary(
-        dsc.trim(),
-        OS+":"+DIST,
-        EXTRA_REPO_URL,
-        EXTRA_REPO_KEY_URL
-      )
-      archiveArtifacts artifacts: "build-area/*.deb"
-    }
 
-    if (lintianCheck) {
-      stage("lintian") {
-        changes = sh script: "ls build-area/*_"+ARCH+".changes", returnStdout: true
-        try {
-          debian.runLintian(changes.trim(), OS, OS+":"+DIST)
-        } catch (Exception e) {
-          println "[WARN] Lintian returned non-zero exit status"
-          currentBuild.result = 'UNSTABLE'
-        }
-      }
-    }
-
-    if (uploadAptly) {
-      lock("aptly-api") {
-        stage("upload") {
-          buildSteps = [:]
-          debFiles = sh script: "ls build-area/*.deb", returnStdout: true
-          for (file in debFiles.tokenize()) {
-            workspace = common.getWorkspace()
-            def fh = new File((workspace+"/"+file).trim())
-            buildSteps[fh.name.split('_')[0]] = aptly.uploadPackageStep(
-                  "build-area/"+fh.name,
-                  APTLY_URL,
-                  APTLY_REPO,
-                  true
-              )
+      if (uploadAptly) {
+        lock("aptly-api") {
+          stage("upload") {
+            buildSteps = [:]
+            debFiles = sh script: "ls build-area/*.deb", returnStdout: true
+            for (file in debFiles.tokenize()) {
+              workspace = common.getWorkspace()
+              def fh = new File((workspace+"/"+file).trim())
+              buildSteps[fh.name.split('_')[0]] = aptly.uploadPackageStep(
+                    "build-area/"+fh.name,
+                    APTLY_URL,
+                    APTLY_REPO,
+                    true
+                )
+            }
+            parallel buildSteps
           }
-          parallel buildSteps
-        }
 
-        stage("publish") {
-          aptly.snapshotRepo(APTLY_URL, APTLY_REPO, timestamp)
-          aptly.publish(APTLY_URL)
+          stage("publish") {
+            aptly.snapshotRepo(APTLY_URL, APTLY_REPO, timestamp)
+            aptly.publish(APTLY_URL)
+          }
         }
       }
-    }
-    if (uploadPpa) {
-      stage("upload launchpad") {
-        debian.importGpgKey("launchpad-private")
-        debian.uploadPpa(PPA, "build-area", "launchpad-private")
+      if (uploadPpa) {
+        stage("upload launchpad") {
+          debian.importGpgKey("launchpad-private")
+          debian.uploadPpa(PPA, "build-area", "launchpad-private")
+        }
       }
+    } catch (Throwable e) {
+       // If there was an error or exception thrown, the build failed
+       currentBuild.result = "FAILURE"
+       currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+       throw e
+    } finally {
+       common.sendNotification(currentBuild.result,"",["slack"])
     }
-  } catch (Throwable e) {
-     // If there was an error or exception thrown, the build failed
-     currentBuild.result = "FAILURE"
-     currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
-     throw e
-  } finally {
-     common.sendNotification(currentBuild.result,"",["slack"])
   }
 }
+
diff --git a/build-debian-packages-prometheus-relay.groovy b/build-debian-packages-prometheus-relay.groovy
index 1044371..45e5b10 100644
--- a/build-debian-packages-prometheus-relay.groovy
+++ b/build-debian-packages-prometheus-relay.groovy
@@ -5,104 +5,105 @@
 
 def timestamp = common.getDatetime()
 def version = "0.1~${timestamp}"
+timeout(time: 12, unit: 'HOURS') {
+    node('docker') {
+        try{
 
-node('docker') {
-    try{
+            stage("cleanup") {
+                sh("rm -rf * || true")
+            }
 
-        stage("cleanup") {
-            sh("rm -rf * || true")
-        }
-
-        def workingDir = "src/gerrit.mcp.mirantis.net/debian"
-        stage("checkout") {
-            git.checkoutGitRepository(
-                "${workingDir}/prometheus-relay",
-                "${SOURCE_URL}",
-                SOURCE_BRANCH,
-                SOURCE_CREDENTIALS,
-                true,
-                30,
-                1
-            )
-        }
-
-        try {
-
-            def jenkinsUID = sh (
-                script: 'id -u',
-                returnStdout: true
-            ).trim()
-            def imgName = "${OS}-${DIST}-${ARCH}"
-            def img
-
-            stage("build image") {
-                img = docker.build(
-                    "${imgName}:${timestamp}",
-                    [
-                        "--build-arg uid=${jenkinsUID}",
-                        "--build-arg timestamp=${timestamp}",
-                        "-f ${workingDir}/prometheus-relay/docker/${OS}-${DIST}-${ARCH}.Dockerfile",
-                        "."
-                    ].join(' ')
+            def workingDir = "src/gerrit.mcp.mirantis.net/debian"
+            stage("checkout") {
+                git.checkoutGitRepository(
+                    "${workingDir}/prometheus-relay",
+                    "${SOURCE_URL}",
+                    SOURCE_BRANCH,
+                    SOURCE_CREDENTIALS,
+                    true,
+                    30,
+                    1
                 )
             }
-            stage("build package") {
-                img.inside{
-                    sh("""wget https://storage.googleapis.com/golang/go1.8.1.linux-amd64.tar.gz &&
-                        tar xf go1.8.1.linux-amd64.tar.gz &&
-                        export GOROOT=\$PWD/go &&
-                        export GOPATH=\$PWD &&
-                        export PATH=\$PATH:\$GOPATH/bin:\$GOROOT/bin &&
-                        cd src/gerrit.mcp.mirantis.net/debian/prometheus-relay &&
-                        make""")
-                }
-                archiveArtifacts artifacts: "${workingDir}/prometheus-relay/build/*.deb"
-            }
-            if (UPLOAD_APTLY.toBoolean()) {
-                lock("aptly-api") {
-                    stage("upload") {
-                        def buildSteps = [:]
-                        def debFiles = sh script: "ls ${workingDir}/prometheus-relay/build/*.deb", returnStdout: true
-                        def debFilesArray = debFiles.trim().tokenize()
-                        def workspace = common.getWorkspace()
-                        for (int i = 0; i < debFilesArray.size(); i++) {
 
-                            def debFile = debFilesArray[i];
-                            buildSteps[debFiles[i]] = aptly.uploadPackageStep(
-                                "${workspace}/"+debFile,
-                                APTLY_URL,
-                                APTLY_REPO,
-                                true
-                            )
+            try {
+
+                def jenkinsUID = sh (
+                    script: 'id -u',
+                    returnStdout: true
+                ).trim()
+                def imgName = "${OS}-${DIST}-${ARCH}"
+                def img
+
+                stage("build image") {
+                    img = docker.build(
+                        "${imgName}:${timestamp}",
+                        [
+                            "--build-arg uid=${jenkinsUID}",
+                            "--build-arg timestamp=${timestamp}",
+                            "-f ${workingDir}/prometheus-relay/docker/${OS}-${DIST}-${ARCH}.Dockerfile",
+                            "."
+                        ].join(' ')
+                    )
+                }
+                stage("build package") {
+                    img.inside{
+                        sh("""wget https://storage.googleapis.com/golang/go1.8.1.linux-amd64.tar.gz &&
+                            tar xf go1.8.1.linux-amd64.tar.gz &&
+                            export GOROOT=\$PWD/go &&
+                            export GOPATH=\$PWD &&
+                            export PATH=\$PATH:\$GOPATH/bin:\$GOROOT/bin &&
+                            cd src/gerrit.mcp.mirantis.net/debian/prometheus-relay &&
+                            make""")
+                    }
+                    archiveArtifacts artifacts: "${workingDir}/prometheus-relay/build/*.deb"
+                }
+                if (UPLOAD_APTLY.toBoolean()) {
+                    lock("aptly-api") {
+                        stage("upload") {
+                            def buildSteps = [:]
+                            def debFiles = sh script: "ls ${workingDir}/prometheus-relay/build/*.deb", returnStdout: true
+                            def debFilesArray = debFiles.trim().tokenize()
+                            def workspace = common.getWorkspace()
+                            for (int i = 0; i < debFilesArray.size(); i++) {
+
+                                def debFile = debFilesArray[i];
+                                buildSteps[debFiles[i]] = aptly.uploadPackageStep(
+                                    "${workspace}/"+debFile,
+                                    APTLY_URL,
+                                    APTLY_REPO,
+                                    true
+                                )
+                            }
+                            parallel buildSteps
                         }
-                        parallel buildSteps
-                    }
-                    stage("publish") {
-                        aptly.snapshotRepo(APTLY_URL, APTLY_REPO, timestamp)
-                        aptly.publish(APTLY_URL)
-                    }
+                        stage("publish") {
+                            aptly.snapshotRepo(APTLY_URL, APTLY_REPO, timestamp)
+                            aptly.publish(APTLY_URL)
+                        }
 
-                    stage("rebuild docker images") {
-                        build job: "docker-build-images-prometheus", parameters: []
+                        stage("rebuild docker images") {
+                            build job: "docker-build-images-prometheus", parameters: []
+                        }
                     }
                 }
+
+            } catch (Exception e) {
+                currentBuild.result = 'FAILURE'
+                throw e
             }
 
-        } catch (Exception e) {
-            currentBuild.result = 'FAILURE'
-            throw e
+        } catch (Throwable e) {
+           // If there was an exception thrown, the build failed
+           currentBuild.result = "FAILURE"
+           currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+           throw e
+        } finally {
+           common.sendNotification(currentBuild.result,"",["slack"])
+
+           if (currentBuild.result != 'FAILURE') {
+              sh("rm -rf *")
+           }
         }
-
-    } catch (Throwable e) {
-       // If there was an exception thrown, the build failed
-       currentBuild.result = "FAILURE"
-       currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
-       throw e
-    } finally {
-       common.sendNotification(currentBuild.result,"",["slack"])
-
-       if (currentBuild.result != 'FAILURE') {
-          sh("rm -rf *")
-       }
     }
 }
diff --git a/build-debian-packages-telegraf.groovy b/build-debian-packages-telegraf.groovy
index efcddaa..566c097 100644
--- a/build-debian-packages-telegraf.groovy
+++ b/build-debian-packages-telegraf.groovy
@@ -4,106 +4,107 @@
 def aptly = new com.mirantis.mk.Aptly()
 
 def timestamp = common.getDatetime()
+timeout(time: 12, unit: 'HOURS') {
+    node('docker') {
+        try{
 
-node('docker') {
-    try{
+            stage("cleanup") {
+                sh("rm -rf * || true")
+            }
 
-        stage("cleanup") {
-            sh("rm -rf * || true")
-        }
-
-        def workingDir = "src/github.com/influxdata"
-        stage("checkout") {
-            git.checkoutGitRepository(
-                "${workingDir}/telegraf",
-                "${SOURCE_URL}",
-                SOURCE_BRANCH,
-                SOURCE_CREDENTIALS,
-                true,
-                30,
-                1
-            )
-        }
-
-        try {
-
-            def jenkinsUID = sh (
-                script: 'id -u',
-                returnStdout: true
-            ).trim()
-            def imgName = "${OS}-${DIST}-${ARCH}"
-            def img
-
-            stage("build image") {
-                img = docker.build(
-                    "${imgName}:${timestamp}",
-                    [
-                        "--build-arg uid=${jenkinsUID}",
-                        "--build-arg timestamp=${timestamp}",
-                        "-f ${workingDir}/telegraf/docker/${OS}-${DIST}-${ARCH}.Dockerfile",
-                        "."
-                    ].join(' ')
+            def workingDir = "src/github.com/influxdata"
+            stage("checkout") {
+                git.checkoutGitRepository(
+                    "${workingDir}/telegraf",
+                    "${SOURCE_URL}",
+                    SOURCE_BRANCH,
+                    SOURCE_CREDENTIALS,
+                    true,
+                    30,
+                    1
                 )
             }
-            stage("build package") {
-                img.inside{
-                    sh("""wget https://storage.googleapis.com/golang/go1.9.2.linux-amd64.tar.gz &&
-                        tar xf go1.9.2.linux-amd64.tar.gz &&
-                        export GOROOT=\$PWD/go &&
-                        export PATH=\$PATH:\$GOROOT/bin &&
-                        export GOPATH=\$PWD &&
-                        cd src/github.com/influxdata/telegraf &&
-                        scripts/build.py --package --platform=linux --arch=amd64""")
-                }
-                archiveArtifacts artifacts: "${workingDir}/telegraf/build/*.deb"
-            }
-            if (UPLOAD_APTLY.toBoolean()) {
-                lock("aptly-api") {
-                    stage("upload") {
-                        def buildSteps = [:]
-                        def debFiles = sh script: "ls ${workingDir}/telegraf/build/*.deb", returnStdout: true
-                        def debFilesArray = debFiles.trim().tokenize()
-                        def workspace = common.getWorkspace()
-                        for (int i = 0; i < debFilesArray.size(); i++) {
 
-                            def debFile = debFilesArray[i];
-                            buildSteps[debFiles[i]] = aptly.uploadPackageStep(
-                                "${workspace}/"+debFile,
-                                APTLY_URL,
-                                APTLY_REPO,
-                                true
-                            )
+            try {
+
+                def jenkinsUID = sh (
+                    script: 'id -u',
+                    returnStdout: true
+                ).trim()
+                def imgName = "${OS}-${DIST}-${ARCH}"
+                def img
+
+                stage("build image") {
+                    img = docker.build(
+                        "${imgName}:${timestamp}",
+                        [
+                            "--build-arg uid=${jenkinsUID}",
+                            "--build-arg timestamp=${timestamp}",
+                            "-f ${workingDir}/telegraf/docker/${OS}-${DIST}-${ARCH}.Dockerfile",
+                            "."
+                        ].join(' ')
+                    )
+                }
+                stage("build package") {
+                    img.inside{
+                        sh("""wget https://storage.googleapis.com/golang/go1.9.2.linux-amd64.tar.gz &&
+                            tar xf go1.9.2.linux-amd64.tar.gz &&
+                            export GOROOT=\$PWD/go &&
+                            export PATH=\$PATH:\$GOROOT/bin &&
+                            export GOPATH=\$PWD &&
+                            cd src/github.com/influxdata/telegraf &&
+                            scripts/build.py --package --platform=linux --arch=amd64""")
+                    }
+                    archiveArtifacts artifacts: "${workingDir}/telegraf/build/*.deb"
+                }
+                if (UPLOAD_APTLY.toBoolean()) {
+                    lock("aptly-api") {
+                        stage("upload") {
+                            def buildSteps = [:]
+                            def debFiles = sh script: "ls ${workingDir}/telegraf/build/*.deb", returnStdout: true
+                            def debFilesArray = debFiles.trim().tokenize()
+                            def workspace = common.getWorkspace()
+                            for (int i = 0; i < debFilesArray.size(); i++) {
+
+                                def debFile = debFilesArray[i];
+                                buildSteps[debFiles[i]] = aptly.uploadPackageStep(
+                                    "${workspace}/"+debFile,
+                                    APTLY_URL,
+                                    APTLY_REPO,
+                                    true
+                                )
+                            }
+                            parallel buildSteps
                         }
-                        parallel buildSteps
-                    }
-                    stage("publish") {
-                        aptly.snapshotRepo(APTLY_URL, APTLY_REPO, timestamp)
-                        aptly.publish(APTLY_URL)
-                    }
+                        stage("publish") {
+                            aptly.snapshotRepo(APTLY_URL, APTLY_REPO, timestamp)
+                            aptly.publish(APTLY_URL)
+                        }
 
-                    stage("rebuild docker images") {
-                        build job: "docker-build-images-prometheus", parameters: []
+                        stage("rebuild docker images") {
+                            build job: "docker-build-images-prometheus", parameters: []
+                        }
                     }
                 }
+
+            } catch (Exception e) {
+                currentBuild.result = 'FAILURE'
+                println "Cleaning up docker images"
+                sh("docker images | grep -E '[-:\\ ]+${timestamp}[\\.\\ /\$]+' | awk '{print \$3}' | xargs docker rmi -f || true")
+                throw e
             }
 
-        } catch (Exception e) {
-            currentBuild.result = 'FAILURE'
-            println "Cleaning up docker images"
-            sh("docker images | grep -E '[-:\\ ]+${timestamp}[\\.\\ /\$]+' | awk '{print \$3}' | xargs docker rmi -f || true")
-            throw e
+        } catch (Throwable e) {
+           // If there was an exception thrown, the build failed
+           currentBuild.result = "FAILURE"
+           currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+           throw e
+        } finally {
+           common.sendNotification(currentBuild.result,"",["slack"])
+
+           if (currentBuild.result != 'FAILURE') {
+              sh("rm -rf *")
+           }
         }
-
-    } catch (Throwable e) {
-       // If there was an exception thrown, the build failed
-       currentBuild.result = "FAILURE"
-       currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
-       throw e
-    } finally {
-       common.sendNotification(currentBuild.result,"",["slack"])
-
-       if (currentBuild.result != 'FAILURE') {
-          sh("rm -rf *")
-       }
     }
 }
diff --git a/build-extra-dpdk-pipeline.groovy b/build-extra-dpdk-pipeline.groovy
index 357a9ad..4d096ae 100644
--- a/build-extra-dpdk-pipeline.groovy
+++ b/build-extra-dpdk-pipeline.groovy
@@ -9,59 +9,60 @@
 } catch (MissingPropertyException e) {
   binaryPackages = ""
 }
-
-node("docker") {
-  try {
-    def workspace = common.getWorkspace()
-    stage("checkout") {
-      sh("test -d debs && rm -rf debs || true")
-      sh("test -d build && rm -rf build || true")
-      git.checkoutGitRepository(
-                            ".",
-                            SOURCE_URL,
-                            SOURCE_BRANCH,
-                            SOURCE_CREDENTIALS,
-                            false,
-                            30,
-                            1
-                        )
-    }
-    stage("build") {
-      if (binaryPackages == "all" || binaryPackages == "") {
-        sh("docker run -v " + workspace + ":" + workspace + " -w " + workspace + " --rm=true --privileged "+OS+":" + DIST +
-            " /bin/bash -c 'apt-get update && apt-get install -y packaging-dev && ./build-debs.sh " + DIST + "'")
-      } else {
-        sh("docker run -v " + workspace + ":" + workspace + " -w " + workspace + " --rm=true --privileged "+OS+":" + DIST +
-            " /bin/bash -c 'apt-get update && apt-get install -y packaging-dev && ./build-debs.sh " + DIST + " " + binaryPackages + "'")
+timeout(time: 12, unit: 'HOURS') {
+  node("docker") {
+    try {
+      def workspace = common.getWorkspace()
+      stage("checkout") {
+        sh("test -d debs && rm -rf debs || true")
+        sh("test -d build && rm -rf build || true")
+        git.checkoutGitRepository(
+                              ".",
+                              SOURCE_URL,
+                              SOURCE_BRANCH,
+                              SOURCE_CREDENTIALS,
+                              false,
+                              30,
+                              1
+                          )
       }
-      archiveArtifacts artifacts: "debs/${DIST}-${ARCH}/*.deb"
-    }
-    lock("aptly-api") {
-      stage("upload") {
-        buildSteps = [:]
-        debFiles = sh script: "ls debs/"+DIST+"-"+ARCH+"/*.deb", returnStdout: true
-        for (file in debFiles.tokenize()) {
-            def fh = new File((workspace+"/"+file).trim())
-            buildSteps[fh.name.split('_')[0]] = aptly.uploadPackageStep(
-                "debs/"+DIST+"-"+ARCH+"/"+fh.name,
-                APTLY_URL,
-                APTLY_REPO,
-                true
-            )
+      stage("build") {
+        if (binaryPackages == "all" || binaryPackages == "") {
+          sh("docker run -v " + workspace + ":" + workspace + " -w " + workspace + " --rm=true --privileged "+OS+":" + DIST +
+              " /bin/bash -c 'apt-get update && apt-get install -y packaging-dev && ./build-debs.sh " + DIST + "'")
+        } else {
+          sh("docker run -v " + workspace + ":" + workspace + " -w " + workspace + " --rm=true --privileged "+OS+":" + DIST +
+              " /bin/bash -c 'apt-get update && apt-get install -y packaging-dev && ./build-debs.sh " + DIST + " " + binaryPackages + "'")
         }
-        parallel buildSteps
+        archiveArtifacts artifacts: "debs/${DIST}-${ARCH}/*.deb"
       }
-      stage("publish") {
-          aptly.snapshotRepo(APTLY_URL, APTLY_REPO, timestamp)
-          aptly.publish(APTLY_URL)
+      lock("aptly-api") {
+        stage("upload") {
+          buildSteps = [:]
+          debFiles = sh script: "ls debs/"+DIST+"-"+ARCH+"/*.deb", returnStdout: true
+          for (file in debFiles.tokenize()) {
+              def fh = new File((workspace+"/"+file).trim())
+              buildSteps[fh.name.split('_')[0]] = aptly.uploadPackageStep(
+                  "debs/"+DIST+"-"+ARCH+"/"+fh.name,
+                  APTLY_URL,
+                  APTLY_REPO,
+                  true
+              )
+          }
+          parallel buildSteps
+        }
+        stage("publish") {
+            aptly.snapshotRepo(APTLY_URL, APTLY_REPO, timestamp)
+            aptly.publish(APTLY_URL)
+        }
+       }
+       } catch (Throwable e) {
+         // If there was an error or exception thrown, the build failed
+         currentBuild.result = "FAILURE"
+         currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+         throw e
+      } finally {
+         common.sendNotification(currentBuild.result,"",["slack"])
       }
-     }
-     } catch (Throwable e) {
-       // If there was an error or exception thrown, the build failed
-       currentBuild.result = "FAILURE"
-       currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
-       throw e
-    } finally {
-       common.sendNotification(currentBuild.result,"",["slack"])
-    }
+  }
 }
diff --git a/ceph-add-node.groovy b/ceph-add-node.groovy
index b233050..f2af894 100644
--- a/ceph-add-node.groovy
+++ b/ceph-add-node.groovy
@@ -16,61 +16,62 @@
 def python = new com.mirantis.mk.Python()
 
 def pepperEnv = "pepperEnv"
+timeout(time: 12, unit: 'HOURS') {
+    node("python") {
 
-node("python") {
+        // create connection to salt master
+        python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
 
-    // create connection to salt master
-    python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
-
-    matches = ["osd", "mon", "rgw"]
-    def found = false
-    for (s in matches) {
-        if (HOST_TYPE.toLowerCase() == s) {
-            found = true
-        }
-    }
-
-    if (!found) {
-        common.errorMsg("No such HOST_TYPE was found. Please insert one of the following types: mon/osd/rgw")
-        throw new InterruptedException()
-    }
-
-    if (HOST_TYPE.toLowerCase() != 'osd') {
-
-        // launch VMs
-        stage('Launch VMs') {
-            salt.enforceState(pepperEnv, 'I@salt:control', 'salt.control', true)
-
-            // wait till the HOST appears in salt-key on salt-master
-            salt.minionPresent(pepperEnv, 'I@salt:master', HOST)
-        }
-    }
-
-    // run basic states
-    stage('Install infra') {
-        orchestrate.installFoundationInfraOnTarget(pepperEnv, HOST)
-    }
-
-    if (HOST_TYPE.toLowerCase() == 'osd') {
-
-        // Install Ceph osd
-        stage('Install Ceph OSD') {
-            orchestrate.installCephOsd(pepperEnv, HOST)
-        }
-    } else if (HOST_TYPE.toLowerCase() == 'mon') {
-        // Install Ceph mon
-        stage('Install Ceph MON') {
-            salt.enforceState(pepperEnv, 'I@ceph:common', 'ceph.common', true)
-            // install Ceph Mons
-            salt.enforceState(pepperEnv, 'I@ceph:mon', 'ceph.mon', true)
-            if (salt.testTarget(pepperEnv, 'I@ceph:mgr')) {
-                salt.enforceState(pepperEnv, 'I@ceph:mgr', 'ceph.mgr', true)
+        matches = ["osd", "mon", "rgw"]
+        def found = false
+        for (s in matches) {
+            if (HOST_TYPE.toLowerCase() == s) {
+                found = true
             }
         }
-    } else if (HOST_TYPE.toLowerCase() == 'rgw') {
-        // Install Ceph rgw
-        stage('Install Ceph RGW') {
-            salt.enforceState(pepperEnv, 'I@ceph:radosgw', ['keepalived', 'haproxy', 'ceph.radosgw'], true)
+
+        if (!found) {
+            common.errorMsg("No such HOST_TYPE was found. Please insert one of the following types: mon/osd/rgw")
+            throw new InterruptedException()
+        }
+
+        if (HOST_TYPE.toLowerCase() != 'osd') {
+
+            // launch VMs
+            stage('Launch VMs') {
+                salt.enforceState(pepperEnv, 'I@salt:control', 'salt.control', true)
+
+                // wait till the HOST appears in salt-key on salt-master
+                salt.minionPresent(pepperEnv, 'I@salt:master', HOST)
+            }
+        }
+
+        // run basic states
+        stage('Install infra') {
+            orchestrate.installFoundationInfraOnTarget(pepperEnv, HOST)
+        }
+
+        if (HOST_TYPE.toLowerCase() == 'osd') {
+
+            // Install Ceph osd
+            stage('Install Ceph OSD') {
+                orchestrate.installCephOsd(pepperEnv, HOST)
+            }
+        } else if (HOST_TYPE.toLowerCase() == 'mon') {
+            // Install Ceph mon
+            stage('Install Ceph MON') {
+                salt.enforceState(pepperEnv, 'I@ceph:common', 'ceph.common', true)
+                // install Ceph Mons
+                salt.enforceState(pepperEnv, 'I@ceph:mon', 'ceph.mon', true)
+                if (salt.testTarget(pepperEnv, 'I@ceph:mgr')) {
+                    salt.enforceState(pepperEnv, 'I@ceph:mgr', 'ceph.mgr', true)
+                }
+            }
+        } else if (HOST_TYPE.toLowerCase() == 'rgw') {
+            // Install Ceph rgw
+            stage('Install Ceph RGW') {
+                salt.enforceState(pepperEnv, 'I@ceph:radosgw', ['keepalived', 'haproxy', 'ceph.radosgw'], true)
+            }
         }
     }
 }
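
The hunks above all make the same change: the entire scripted-pipeline body, previously a bare node() block, is re-indented under a global 12-hour build timeout. A minimal sketch of the resulting structure, with a hypothetical doWork() standing in for the real stages:

timeout(time: 12, unit: 'HOURS') {
    node("python") {
        stage('example') {
            // every stage inside node() now counts against the 12h limit;
            // once it is exceeded, Jenkins aborts the remaining steps
            doWork()   // hypothetical placeholder for the pipeline's actual work
        }
    }
}
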
diff --git a/ceph-backend-migration.groovy b/ceph-backend-migration.groovy
index f5c99a4..7a5821d 100644
--- a/ceph-backend-migration.groovy
+++ b/ceph-backend-migration.groovy
@@ -109,171 +109,172 @@
         sleep(10)
     }
 }
+timeout(time: 12, unit: 'HOURS') {
+    node("python") {
 
-node("python") {
+        // create connection to salt master
+        python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
 
-    // create connection to salt master
-    python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+        if (MIGRATION_METHOD == 'per-osd') {
 
-    if (MIGRATION_METHOD == 'per-osd') {
-
-        if (flags.size() > 0) {
-            stage('Set cluster flags') {
-                for (flag in flags) {
-                    runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd set ' + flag)
-                }
-            }
-        }
-
-        def target_hosts = salt.getMinions(pepperEnv, TARGET)
-
-        for (tgt in target_hosts) {
-            def osd_ids = []
-
-            // get list of osd disks of the tgt
-            salt.runSaltProcessStep(pepperEnv, tgt, 'saltutil.sync_grains', [], null, true, 5)
-            def ceph_disks = salt.getGrain(pepperEnv, tgt, 'ceph')['return'][0].values()[0].values()[0]['ceph_disk']
-
-            for (i in ceph_disks) {
-                def osd_id = i.getKey().toString()
-                if (osd_id in osds || OSD == '*') {
-                    osd_ids.add('osd.' + osd_id)
-                    print("Will migrate " + osd_id)
-                } else {
-                    print("Skipping " + osd_id)
+            if (flags.size() > 0) {
+                stage('Set cluster flags') {
+                    for (flag in flags) {
+                        runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd set ' + flag)
+                    }
                 }
             }
 
-            for (osd_id in osd_ids) {
+            def target_hosts = salt.getMinions(pepperEnv, TARGET)
 
-                def id = osd_id.replaceAll('osd.', '')
-                def backend = runCephCommand(pepperEnv, ADMIN_HOST, "ceph osd metadata ${id} | grep osd_objectstore")['return'][0].values()[0]
+            for (tgt in target_hosts) {
+                def osd_ids = []
 
-                if (backend.contains(ORIGIN_BACKEND.toLowerCase())) {
+                // get list of osd disks of the tgt
+                salt.runSaltProcessStep(pepperEnv, tgt, 'saltutil.sync_grains', [], null, true, 5)
+                def ceph_disks = salt.getGrain(pepperEnv, tgt, 'ceph')['return'][0].values()[0].values()[0]['ceph_disk']
 
-                    // wait for healthy cluster before manipulating with osds
-                    if (WAIT_FOR_HEALTHY.toBoolean() == true) {
-                        waitForHealthy(pepperEnv)
-                    }
-
-                    // `ceph osd out <id> <id>`
-                    stage('Set OSDs out') {
-                            runCephCommand(pepperEnv, ADMIN_HOST, "ceph osd out ${osd_id}")
-                    }
-
-                    if (WAIT_FOR_HEALTHY.toBoolean() == true) {
-                        sleep(5)
-                        waitForHealthy(pepperEnv)
-                    }
-
-                    // stop osd daemons
-                    stage('Stop OSD daemons') {
-                        salt.runSaltProcessStep(pepperEnv, tgt, 'service.stop', ['ceph-osd@' + osd_id.replaceAll('osd.', '')],  null, true)
-                    }
-
-                    // remove keyring `ceph auth del osd.3`
-                    stage('Remove OSD keyrings from auth') {
-                        runCephCommand(pepperEnv, ADMIN_HOST, 'ceph auth del ' + osd_id)
-                    }
-
-                    // remove osd `ceph osd rm osd.3`
-                    stage('Remove OSDs') {
-                        runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd rm ' + osd_id)
-                    }
-
-                    def dmcrypt = ""
-                    try {
-                        dmcrypt = runCephCommand(pepperEnv, tgt, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep dmcrypt")['return'][0].values()[0]
-                    } catch (Exception e) {
-                        common.warningMsg(e)
-                    }
-
-                    if (dmcrypt?.trim()) {
-                        def mount = runCephCommand(pepperEnv, tgt, "lsblk -rp | grep /var/lib/ceph/osd/ceph-${id} -B1")['return'][0].values()[0]
-                        dev = mount.split()[0].replaceAll("[0-9]","")
-
-                        // remove partition tables
-                        stage('dd part tables') {
-                            runCephCommand(pepperEnv, tgt, "dd if=/dev/zero of=${dev} bs=512 count=1 conv=notrunc")
-                        }
-
-                        // remove journal, block_db, block_wal partition `parted /dev/sdj rm 3`
-                        removeJournalOrBlockPartitions(pepperEnv, tgt, id)
-
-                        // reboot
-                        stage('reboot and wait') {
-                            salt.runSaltProcessStep(pepperEnv, tgt, 'system.reboot', null, null, true, 5)
-                            salt.minionsReachable(pepperEnv, 'I@salt:master', tgt)
-                            sleep(10)
-                        }
-
-                        // zap disks `ceph-disk zap /dev/sdi`
-                        stage('Zap devices') {
-                            try {
-                                runCephCommand(pepperEnv, tgt, 'ceph-disk zap ' + dev)
-                            } catch (Exception e) {
-                                common.warningMsg(e)
-                            }
-                            runCephCommand(pepperEnv, tgt, 'ceph-disk zap ' + dev)
-                        }
-
+                for (i in ceph_disks) {
+                    def osd_id = i.getKey().toString()
+                    if (osd_id in osds || OSD == '*') {
+                        osd_ids.add('osd.' + osd_id)
+                        print("Will migrate " + osd_id)
                     } else {
+                        print("Skipping " + osd_id)
+                    }
+                }
 
-                        def mount = runCephCommand(pepperEnv, tgt, "mount | grep /var/lib/ceph/osd/ceph-${id}")['return'][0].values()[0]
-                        dev = mount.split()[0].replaceAll("[0-9]","")
+                for (osd_id in osd_ids) {
 
-                        // remove journal, block_db, block_wal partition `parted /dev/sdj rm 3`
-                        removeJournalOrBlockPartitions(pepperEnv, tgt, id)
+                    def id = osd_id.replaceAll('osd.', '')
+                    def backend = runCephCommand(pepperEnv, ADMIN_HOST, "ceph osd metadata ${id} | grep osd_objectstore")['return'][0].values()[0]
 
-                        // umount `umount /dev/sdi1`
-                        stage('Umount devices') {
-                            runCephCommand(pepperEnv, tgt, "umount /var/lib/ceph/osd/ceph-${id}")
+                    if (backend.contains(ORIGIN_BACKEND.toLowerCase())) {
+
+                        // wait for a healthy cluster before manipulating the OSDs
+                        if (WAIT_FOR_HEALTHY.toBoolean() == true) {
+                            waitForHealthy(pepperEnv)
                         }
 
-                        // zap disks `ceph-disk zap /dev/sdi`
-                        stage('Zap device') {
-                            runCephCommand(pepperEnv, tgt, 'ceph-disk zap ' + dev)
+                        // `ceph osd out <id> <id>`
+                        stage('Set OSDs out') {
+                            runCephCommand(pepperEnv, ADMIN_HOST, "ceph osd out ${osd_id}")
                         }
-                    }
 
-                    // Deploy Ceph OSD
-                    stage('Deploy Ceph OSD') {
-                        salt.runSaltProcessStep(pepperEnv, tgt, 'saltutil.refresh_pillar', [], null, true, 5)
-                        salt.enforceState(pepperEnv, tgt, 'ceph.osd', true)
-                    }
-
-                    if (PER_OSD_CONTROL.toBoolean() == true) {
-                        stage("Verify backend version for osd.${id}") {
+                        if (WAIT_FOR_HEALTHY.toBoolean() == true) {
                             sleep(5)
-                            runCephCommand(pepperEnv, tgt, "ceph osd metadata ${id} | grep osd_objectstore")
-                            runCephCommand(pepperEnv, tgt, "ceph -s")
+                            waitForHealthy(pepperEnv)
                         }
 
-                        stage('Ask for manual confirmation') {
-                            input message: "From the verification commands above, please check the backend version of osd.${id} and ceph status. If it is correct, Do you want to continue to migrate next osd?"
+                        // stop osd daemons
+                        stage('Stop OSD daemons') {
+                            salt.runSaltProcessStep(pepperEnv, tgt, 'service.stop', ['ceph-osd@' + osd_id.replaceAll('osd.', '')],  null, true)
+                        }
+
+                        // remove keyring `ceph auth del osd.3`
+                        stage('Remove OSD keyrings from auth') {
+                            runCephCommand(pepperEnv, ADMIN_HOST, 'ceph auth del ' + osd_id)
+                        }
+
+                        // remove osd `ceph osd rm osd.3`
+                        stage('Remove OSDs') {
+                            runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd rm ' + osd_id)
+                        }
+
+                        def dmcrypt = ""
+                        try {
+                            dmcrypt = runCephCommand(pepperEnv, tgt, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep dmcrypt")['return'][0].values()[0]
+                        } catch (Exception e) {
+                            common.warningMsg(e)
+                        }
+
+                        if (dmcrypt?.trim()) {
+                            def mount = runCephCommand(pepperEnv, tgt, "lsblk -rp | grep /var/lib/ceph/osd/ceph-${id} -B1")['return'][0].values()[0]
+                            dev = mount.split()[0].replaceAll("[0-9]","")
+
+                            // remove partition tables
+                            stage('dd part tables') {
+                                runCephCommand(pepperEnv, tgt, "dd if=/dev/zero of=${dev} bs=512 count=1 conv=notrunc")
+                            }
+
+                            // remove journal, block_db, block_wal partition `parted /dev/sdj rm 3`
+                            removeJournalOrBlockPartitions(pepperEnv, tgt, id)
+
+                            // reboot
+                            stage('reboot and wait') {
+                                salt.runSaltProcessStep(pepperEnv, tgt, 'system.reboot', null, null, true, 5)
+                                salt.minionsReachable(pepperEnv, 'I@salt:master', tgt)
+                                sleep(10)
+                            }
+
+                            // zap disks `ceph-disk zap /dev/sdi`
+                            stage('Zap devices') {
+                                try {
+                                    runCephCommand(pepperEnv, tgt, 'ceph-disk zap ' + dev)
+                                } catch (Exception e) {
+                                    common.warningMsg(e)
+                                }
+                                runCephCommand(pepperEnv, tgt, 'ceph-disk zap ' + dev)
+                            }
+
+                        } else {
+
+                            def mount = runCephCommand(pepperEnv, tgt, "mount | grep /var/lib/ceph/osd/ceph-${id}")['return'][0].values()[0]
+                            dev = mount.split()[0].replaceAll("[0-9]","")
+
+                            // remove journal, block_db, block_wal partition `parted /dev/sdj rm 3`
+                            removeJournalOrBlockPartitions(pepperEnv, tgt, id)
+
+                            // umount `umount /dev/sdi1`
+                            stage('Umount devices') {
+                                runCephCommand(pepperEnv, tgt, "umount /var/lib/ceph/osd/ceph-${id}")
+                            }
+
+                            // zap disks `ceph-disk zap /dev/sdi`
+                            stage('Zap device') {
+                                runCephCommand(pepperEnv, tgt, 'ceph-disk zap ' + dev)
+                            }
+                        }
+
+                        // Deploy Ceph OSD
+                        stage('Deploy Ceph OSD') {
+                            salt.runSaltProcessStep(pepperEnv, tgt, 'saltutil.refresh_pillar', [], null, true, 5)
+                            salt.enforceState(pepperEnv, tgt, 'ceph.osd', true)
+                        }
+
+                        if (PER_OSD_CONTROL.toBoolean() == true) {
+                            stage("Verify backend version for osd.${id}") {
+                                sleep(5)
+                                runCephCommand(pepperEnv, tgt, "ceph osd metadata ${id} | grep osd_objectstore")
+                                runCephCommand(pepperEnv, tgt, "ceph -s")
+                            }
+
+                            stage('Ask for manual confirmation') {
+                                input message: "From the verification commands above, please check the backend version of osd.${id} and the ceph status. If they are correct, do you want to continue migrating the next OSD?"
+                            }
                         }
                     }
                 }
-            }
-            if (PER_OSD_HOST_CONTROL.toBoolean() == true) {
-                stage("Verify backend versions") {
-                    sleep(5)
-                    runCephCommand(pepperEnv, tgt, "ceph osd metadata | grep osd_objectstore -B2")
-                    runCephCommand(pepperEnv, tgt, "ceph -s")
+                if (PER_OSD_HOST_CONTROL.toBoolean() == true) {
+                    stage("Verify backend versions") {
+                        sleep(5)
+                        runCephCommand(pepperEnv, tgt, "ceph osd metadata | grep osd_objectstore -B2")
+                        runCephCommand(pepperEnv, tgt, "ceph -s")
+                    }
+
+                    stage('Ask for manual confirmation') {
+                        input message: "From the verification commands above, please check the ceph status and the backend version of the OSDs on this host. If they are correct, do you want to continue migrating the next OSD host?"
+                    }
                 }
 
-                stage('Ask for manual confirmation') {
-                    input message: "From the verification command above, please check the ceph status and backend version of osds on this host. If it is correct, Do you want to continue to migrate next OSD host?"
-                }
             }
-
-        }
-        // remove cluster flags
-        if (flags.size() > 0) {
-            stage('Unset cluster flags') {
-                for (flag in flags) {
-                    common.infoMsg('Removing flag ' + flag)
-                    runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd unset ' + flag)
+            // remove cluster flags
+            if (flags.size() > 0) {
+                stage('Unset cluster flags') {
+                    for (flag in flags) {
+                        common.infoMsg('Removing flag ' + flag)
+                        runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd unset ' + flag)
+                    }
                 }
             }
         }
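
In the per-osd branch above, an OSD is migrated only while its reported objectstore still matches ORIGIN_BACKEND. A short sketch of that check, reusing runCephCommand and the pipeline's parameter names; the sample id and backend value are assumptions:

def id = '3'                         // hypothetical OSD id taken from the ceph_disk grain
def ORIGIN_BACKEND = 'filestore'     // e.g. when migrating filestore -> bluestore
def backend = runCephCommand(pepperEnv, ADMIN_HOST,
    "ceph osd metadata ${id} | grep osd_objectstore")['return'][0].values()[0]
if (backend.contains(ORIGIN_BACKEND.toLowerCase())) {
    // still on the origin backend: take the OSD out, stop it, wipe its devices,
    // then re-enforce ceph.osd so it comes back on the target backend
}
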
diff --git a/ceph-remove-node.groovy b/ceph-remove-node.groovy
index b34de91..e616a28 100644
--- a/ceph-remove-node.groovy
+++ b/ceph-remove-node.groovy
@@ -55,250 +55,251 @@
         sleep(10)
     }
 }
+timeout(time: 12, unit: 'HOURS') {
+    node("python") {
 
-node("python") {
+        // create connection to salt master
+        python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
 
-    // create connection to salt master
-    python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
-
-    matches = ["osd", "mon", "rgw"]
-    def found = false
-    for (s in matches) {
-        if (HOST_TYPE.toLowerCase() == s) {
-            found = true
-        }
-    }
-
-    if (!found) {
-        common.errorMsg("No such HOST_TYPE was found. Please insert one of the following types: mon/osd/rgw")
-        throw new InterruptedException()
-    }
-
-    stage('Refresh_pillar') {
-        salt.runSaltProcessStep(pepperEnv, '*', 'saltutil.refresh_pillar', [], null, true, 5)
-    }
-
-    //  split minion id on '.' and remove '*'
-    def target = HOST.split("\\.")[0].replace("*", "")
-
-    salt.runSaltProcessStep(pepperEnv, 'I@salt:master', 'saltutil.sync_grains', [], null, true, 5)
-    def _pillar = salt.getGrain(pepperEnv, 'I@salt:master', 'domain')
-    domain = _pillar['return'][0].values()[0].values()[0]
-
-    if (HOST_TYPE.toLowerCase() == 'rgw') {
-        // Remove Ceph rgw
-        stage('Remove Ceph RGW') {
-            salt.enforceState(pepperEnv, 'I@ceph:radosgw', ['keepalived', 'haproxy'], true)
-        }
-    }
-
-    if (HOST_TYPE.toLowerCase() != 'osd') {
-
-        // virsh destroy rgw04.deploy-name.local; virsh undefine rgw04.deploy-name.local;
-        stage('Destroy/Undefine VM') {
-            _pillar = salt.getGrain(pepperEnv, 'I@salt:control', 'id')
-            def kvm01 = _pillar['return'][0].values()[0].values()[0]
-
-            _pillar = salt.getPillar(pepperEnv, "${kvm01}", "salt:control:cluster:internal:node:${target}:provider")
-            def targetProvider = _pillar['return'][0].values()[0]
-
-            salt.cmdRun(pepperEnv, "${targetProvider}", "virsh destroy ${target}.${domain}")
-            salt.cmdRun(pepperEnv, "${targetProvider}", "virsh undefine ${target}.${domain}")
-        }
-    } else if (HOST_TYPE.toLowerCase() == 'osd') {
-        def osd_ids = []
-
-        // get list of osd disks of the host
-        salt.runSaltProcessStep(pepperEnv, HOST, 'saltutil.sync_grains', [], null, true, 5)
-        def ceph_disks = salt.getGrain(pepperEnv, HOST, 'ceph')['return'][0].values()[0].values()[0]['ceph_disk']
-
-        for (i in ceph_disks) {
-            def osd_id = i.getKey().toString()
-            osd_ids.add('osd.' + osd_id)
-            print("Will delete " + osd_id)
-        }
-
-        // `ceph osd out <id> <id>`
-        stage('Set OSDs out') {
-                runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd out ' + osd_ids.join(' '))
-        }
-
-        // wait for healthy cluster
-        if (WAIT_FOR_HEALTHY.toBoolean() == true) {
-            sleep(5)
-            waitForHealthy(pepperEnv)
-        }
-
-        // stop osd daemons
-        stage('Stop OSD daemons') {
-            for (i in osd_ids) {
-                salt.runSaltProcessStep(pepperEnv, HOST, 'service.stop', ['ceph-osd@' + i.replaceAll('osd.', '')],  null, true)
+        matches = ["osd", "mon", "rgw"]
+        def found = false
+        for (s in matches) {
+            if (HOST_TYPE.toLowerCase() == s) {
+                found = true
             }
         }
 
-        // `ceph osd crush remove osd.2`
-        stage('Remove OSDs from CRUSH') {
-            for (i in osd_ids) {
-                runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd crush remove ' + i)
+        if (!found) {
+            common.errorMsg("Unsupported HOST_TYPE. Please use one of the following types: mon/osd/rgw")
+            throw new InterruptedException()
+        }
+
+        stage('Refresh_pillar') {
+            salt.runSaltProcessStep(pepperEnv, '*', 'saltutil.refresh_pillar', [], null, true, 5)
+        }
+
+        //  split minion id on '.' and remove '*'
+        def target = HOST.split("\\.")[0].replace("*", "")
+
+        salt.runSaltProcessStep(pepperEnv, 'I@salt:master', 'saltutil.sync_grains', [], null, true, 5)
+        def _pillar = salt.getGrain(pepperEnv, 'I@salt:master', 'domain')
+        domain = _pillar['return'][0].values()[0].values()[0]
+
+        if (HOST_TYPE.toLowerCase() == 'rgw') {
+            // Remove Ceph rgw
+            stage('Remove Ceph RGW') {
+                salt.enforceState(pepperEnv, 'I@ceph:radosgw', ['keepalived', 'haproxy'], true)
             }
         }
 
-        // remove keyring `ceph auth del osd.3`
-        stage('Remove OSD keyrings from auth') {
-            for (i in osd_ids) {
-                runCephCommand(pepperEnv, ADMIN_HOST, 'ceph auth del ' + i)
+        if (HOST_TYPE.toLowerCase() != 'osd') {
+
+            // virsh destroy rgw04.deploy-name.local; virsh undefine rgw04.deploy-name.local;
+            stage('Destroy/Undefine VM') {
+                _pillar = salt.getGrain(pepperEnv, 'I@salt:control', 'id')
+                def kvm01 = _pillar['return'][0].values()[0].values()[0]
+
+                _pillar = salt.getPillar(pepperEnv, "${kvm01}", "salt:control:cluster:internal:node:${target}:provider")
+                def targetProvider = _pillar['return'][0].values()[0]
+
+                salt.cmdRun(pepperEnv, "${targetProvider}", "virsh destroy ${target}.${domain}")
+                salt.cmdRun(pepperEnv, "${targetProvider}", "virsh undefine ${target}.${domain}")
+            }
+        } else if (HOST_TYPE.toLowerCase() == 'osd') {
+            def osd_ids = []
+
+            // get list of osd disks of the host
+            salt.runSaltProcessStep(pepperEnv, HOST, 'saltutil.sync_grains', [], null, true, 5)
+            def ceph_disks = salt.getGrain(pepperEnv, HOST, 'ceph')['return'][0].values()[0].values()[0]['ceph_disk']
+
+            for (i in ceph_disks) {
+                def osd_id = i.getKey().toString()
+                osd_ids.add('osd.' + osd_id)
+                print("Will delete " + osd_id)
+            }
+
+            // `ceph osd out <id> <id>`
+            stage('Set OSDs out') {
+                runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd out ' + osd_ids.join(' '))
+            }
+
+            // wait for healthy cluster
+            if (WAIT_FOR_HEALTHY.toBoolean() == true) {
+                sleep(5)
+                waitForHealthy(pepperEnv)
+            }
+
+            // stop osd daemons
+            stage('Stop OSD daemons') {
+                for (i in osd_ids) {
+                    salt.runSaltProcessStep(pepperEnv, HOST, 'service.stop', ['ceph-osd@' + i.replaceAll('osd.', '')],  null, true)
+                }
+            }
+
+            // `ceph osd crush remove osd.2`
+            stage('Remove OSDs from CRUSH') {
+                for (i in osd_ids) {
+                    runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd crush remove ' + i)
+                }
+            }
+
+            // remove keyring `ceph auth del osd.3`
+            stage('Remove OSD keyrings from auth') {
+                for (i in osd_ids) {
+                    runCephCommand(pepperEnv, ADMIN_HOST, 'ceph auth del ' + i)
+                }
+            }
+
+            // remove osd `ceph osd rm osd.3`
+            stage('Remove OSDs') {
+                for (i in osd_ids) {
+                    runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd rm ' + i)
+                }
+            }
+
+            for (osd_id in osd_ids) {
+
+                id = osd_id.replaceAll('osd.', '')
+                def dmcrypt = ""
+                try {
+                    dmcrypt = runCephCommand(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep dmcrypt")['return'][0].values()[0]
+                } catch (Exception e) {
+                    common.warningMsg(e)
+                }
+
+                if (dmcrypt?.trim()) {
+                    mount = runCephCommand(pepperEnv, HOST, "lsblk -rp | grep /var/lib/ceph/osd/ceph-${id} -B1")['return'][0].values()[0]
+                    dev = mount.split()[0].replaceAll("[0-9]","")
+
+                    // remove partition tables
+                    stage("dd part table on ${dev}") {
+                        runCephCommand(pepperEnv, HOST, "dd if=/dev/zero of=${dev} bs=512 count=1 conv=notrunc")
+                    }
+
+                }
+                // remove journal, block_db, block_wal partition `parted /dev/sdj rm 3`
+                stage('Remove journal / block_db / block_wal partition') {
+                    def partition_uuid = ""
+                    def journal_partition_uuid = ""
+                    def block_db_partition_uuid = ""
+                    def block_wal_partition_uuid = ""
+                    try {
+                        journal_partition_uuid = runCephCommand(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep journal | grep partuuid")
+                        journal_partition_uuid = journal_partition_uuid.toString().trim().split("\n")[0].substring(journal_partition_uuid.toString().trim().lastIndexOf("/")+1)
+                    } catch (Exception e) {
+                        common.infoMsg(e)
+                    }
+                    try {
+                        block_db_partition_uuid = runCephCommand(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep 'block.db' | grep partuuid")
+                        block_db_partition_uuid = block_db_partition_uuid.toString().trim().split("\n")[0].substring(block_db_partition_uuid.toString().trim().lastIndexOf("/")+1)
+                    } catch (Exception e) {
+                        common.infoMsg(e)
+                    }
+
+                    try {
+                        block_wal_partition_uuid = runCephCommand(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep 'block.wal' | grep partuuid")
+                        block_wal_partition_uuid = block_wal_partition_uuid.toString().trim().split("\n")[0].substring(block_wal_partition_uuid.toString().trim().lastIndexOf("/")+1)
+                    } catch (Exception e) {
+                        common.infoMsg(e)
+                    }
+
+                    // choose which partition_uuid to remove (e.g. 2c76f144-f412-481e-b150-4046212ca932)
+                    if (journal_partition_uuid?.trim()) {
+                        partition_uuid = journal_partition_uuid
+                    } else if (block_db_partition_uuid?.trim()) {
+                        partition_uuid = block_db_partition_uuid
+                    }
+
+                    // if disk has journal, block_db or block_wal on different disk, then remove the partition
+                    if (partition_uuid?.trim()) {
+                        removePartition(pepperEnv, HOST, partition_uuid)
+                    }
+                    if (block_wal_partition_uuid?.trim()) {
+                        removePartition(pepperEnv, HOST, block_wal_partition_uuid)
+                    }
+                }
+            }
+
+            // purge Ceph pkgs
+            stage('Purge Ceph OSD pkgs') {
+                runCephCommand(pepperEnv, HOST, 'apt purge ceph-base ceph-common ceph-fuse ceph-mds ceph-osd python-cephfs librados2 python-rados -y')
+            }
+
+            stage('Remove OSD host from crushmap') {
+                def hostname = runCephCommand(pepperEnv, HOST, "hostname -s")['return'][0].values()[0].split('\n')[0]
+                try {
+                    runCephCommand(pepperEnv, ADMIN_HOST, "ceph osd crush remove ${hostname}")
+                } catch (Exception e) {
+                    common.warningMsg(e)
+                }
+            }
+
+            // stop salt-minion service and move its configuration
+            stage('Stop salt-minion') {
+                salt.cmdRun(pepperEnv, HOST, "mv /etc/salt/minion.d/minion.conf minion.conf")
+                salt.runSaltProcessStep(pepperEnv, HOST, 'service.stop', ['salt-minion'], [], null, true, 5)
             }
         }
 
-        // remove osd `ceph osd rm osd.3`
-        stage('Remove OSDs') {
-            for (i in osd_ids) {
-                runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd rm ' + i)
-            }
-        }
-
-        for (osd_id in osd_ids) {
-
-            id = osd_id.replaceAll('osd.', '')
-            def dmcrypt = ""
+        stage('Remove salt-key') {
             try {
-                dmcrypt = runCephCommand(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep dmcrypt")['return'][0].values()[0]
+                salt.cmdRun(pepperEnv, 'I@salt:master', "salt-key -d ${target}.${domain} -y")
             } catch (Exception e) {
                 common.warningMsg(e)
             }
-
-            if (dmcrypt?.trim()) {
-                mount = runCephCommand(pepperEnv, HOST, "lsblk -rp | grep /var/lib/ceph/osd/ceph-${id} -B1")['return'][0].values()[0]
-                dev = mount.split()[0].replaceAll("[0-9]","")
-
-                // remove partition tables
-                stage("dd part table on ${dev}") {
-                    runCephCommand(pepperEnv, HOST, "dd if=/dev/zero of=${dev} bs=512 count=1 conv=notrunc")
-                }
-
-            }
-            // remove journal, block_db, block_wal partition `parted /dev/sdj rm 3`
-            stage('Remove journal / block_db / block_wal partition') {
-                def partition_uuid = ""
-                def journal_partition_uuid = ""
-                def block_db_partition_uuid = ""
-                def block_wal_partition_uuid = ""
-                try {
-                    journal_partition_uuid = runCephCommand(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep journal | grep partuuid")
-                    journal_partition_uuid = journal_partition_uuid.toString().trim().split("\n")[0].substring(journal_partition_uuid.toString().trim().lastIndexOf("/")+1)
-                } catch (Exception e) {
-                    common.infoMsg(e)
-                }
-                try {
-                    block_db_partition_uuid = runCephCommand(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep 'block.db' | grep partuuid")
-                    block_db_partition_uuid = block_db_partition_uuid.toString().trim().split("\n")[0].substring(block_db_partition_uuid.toString().trim().lastIndexOf("/")+1)
-                } catch (Exception e) {
-                    common.infoMsg(e)
-                }
-
-                try {
-                    block_wal_partition_uuid = runCephCommand(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep 'block.wal' | grep partuuid")
-                    block_wal_partition_uuid = block_wal_partition_uuid.toString().trim().split("\n")[0].substring(block_wal_partition_uuid.toString().trim().lastIndexOf("/")+1)
-                } catch (Exception e) {
-                    common.infoMsg(e)
-                }
-
-                // set partition_uuid = 2c76f144-f412-481e-b150-4046212ca932
-                if (journal_partition_uuid?.trim()) {
-                    partition_uuid = journal_partition_uuid
-                } else if (block_db_partition_uuid?.trim()) {
-                    partition_uuid = block_db_partition_uuid
-                }
-
-                // if disk has journal, block_db or block_wal on different disk, then remove the partition
-                if (partition_uuid?.trim()) {
-                    removePartition(pepperEnv, HOST, partition_uuid)
-                }
-                if (block_wal_partition_uuid?.trim()) {
-                    removePartition(pepperEnv, HOST, block_wal_partition_uuid)
-                }
-            }
-        }
-
-        // purge Ceph pkgs
-        stage('Purge Ceph OSD pkgs') {
-            runCephCommand(pepperEnv, HOST, 'apt purge ceph-base ceph-common ceph-fuse ceph-mds ceph-osd python-cephfs librados2 python-rados -y')
-        }
-
-        stage('Remove OSD host from crushmap') {
-            def hostname = runCephCommand(pepperEnv, HOST, "hostname -s")['return'][0].values()[0].split('\n')[0]
             try {
-                runCephCommand(pepperEnv, ADMIN_HOST, "ceph osd crush remove ${hostname}")
+                salt.cmdRun(pepperEnv, 'I@salt:master', "rm /srv/salt/reclass/nodes/_generated/${target}.${domain}.yml")
             } catch (Exception e) {
                 common.warningMsg(e)
             }
         }
 
-        // stop salt-minion service and move its configuration
-        stage('Stop salt-minion') {
-            salt.cmdRun(pepperEnv, HOST, "mv /etc/salt/minion.d/minion.conf minion.conf")
-            salt.runSaltProcessStep(pepperEnv, HOST, 'service.stop', ['salt-minion'], [], null, true, 5)
-        }
-    }
-
-    stage('Remove salt-key') {
-        try {
-            salt.cmdRun(pepperEnv, 'I@salt:master', "salt-key -d ${target}.${domain} -y")
-        } catch (Exception e) {
-            common.warningMsg(e)
-        }
-        try {
-            salt.cmdRun(pepperEnv, 'I@salt:master', "rm /srv/salt/reclass/nodes/_generated/${target}.${domain}.yml")
-        } catch (Exception e) {
-            common.warningMsg(e)
-        }
-    }
-
-    stage('Remove keyring') {
-        def keyring = ""
-        def keyring_lines = ""
-        try {
-            keyring_lines = runCephCommand(pepperEnv, ADMIN_HOST, "ceph auth list | grep ${target}")['return'][0].values()[0].split('\n')
-        } catch (Exception e) {
-            common.warningMsg(e)
-        }
-        for (line in keyring_lines) {
-            if (line.toLowerCase().contains(target.toLowerCase())) {
-                keyring = line
-                break
-            }
-        }
-        if (keyring?.trim()) {
-            runCephCommand(pepperEnv, ADMIN_HOST, "ceph auth del ${keyring}")
-        }
-    }
-
-    if (HOST_TYPE.toLowerCase() == 'mon') {
-        // Update Monmap
-        stage('Update monmap') {
-            runCephCommand(pepperEnv, 'I@ceph:mon', "ceph mon getmap -o monmap.backup")
+        stage('Remove keyring') {
+            def keyring = ""
+            def keyring_lines = ""
             try {
-                runCephCommand(pepperEnv, 'I@ceph:mon', "ceph mon remove ${target}")
+                keyring_lines = runCephCommand(pepperEnv, ADMIN_HOST, "ceph auth list | grep ${target}")['return'][0].values()[0].split('\n')
             } catch (Exception e) {
                 common.warningMsg(e)
             }
-            runCephCommand(pepperEnv, 'I@ceph:mon', "monmaptool /tmp/monmap --rm ${target}")
-        }
-
-        def target_hosts = salt.getMinions(pepperEnv, 'I@ceph:common')
-        print target_hosts
-
-        // Update configs
-        stage('Update Ceph configs') {
-            for (tgt in target_hosts) {
-                salt.enforceState(pepperEnv, tgt, 'ceph.common', true)
+            for (line in keyring_lines) {
+                if (line.toLowerCase().contains(target.toLowerCase())) {
+                    keyring = line
+                    break
+                }
+            }
+            if (keyring?.trim()) {
+                runCephCommand(pepperEnv, ADMIN_HOST, "ceph auth del ${keyring}")
             }
         }
-    }
 
-    if (HOST_TYPE.toLowerCase() == 'osd' && GENERATE_CRUSHMAP.toBoolean() == true) {
-        stage('Generate CRUSHMAP') {
-            salt.enforceState(pepperEnv, 'I@ceph:setup:crush', 'ceph.setup.crush', true)
+        if (HOST_TYPE.toLowerCase() == 'mon') {
+            // Update Monmap
+            stage('Update monmap') {
+                runCephCommand(pepperEnv, 'I@ceph:mon', "ceph mon getmap -o monmap.backup")
+                try {
+                    runCephCommand(pepperEnv, 'I@ceph:mon', "ceph mon remove ${target}")
+                } catch (Exception e) {
+                    common.warningMsg(e)
+                }
+                runCephCommand(pepperEnv, 'I@ceph:mon', "monmaptool /tmp/monmap --rm ${target}")
+            }
+
+            def target_hosts = salt.getMinions(pepperEnv, 'I@ceph:common')
+            print target_hosts
+
+            // Update configs
+            stage('Update Ceph configs') {
+                for (tgt in target_hosts) {
+                    salt.enforceState(pepperEnv, tgt, 'ceph.common', true)
+                }
+            }
+        }
+
+        if (HOST_TYPE.toLowerCase() == 'osd' && GENERATE_CRUSHMAP.toBoolean() == true) {
+            stage('Generate CRUSHMAP') {
+                salt.enforceState(pepperEnv, 'I@ceph:setup:crush', 'ceph.setup.crush', true)
+            }
         }
     }
 }
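
The 'Remove journal / block_db / block_wal partition' stage above recovers a partition UUID from the symlink targets under /var/lib/ceph/osd/ceph-<id>/. A sketch of the same parsing on a sample line (the ls output shown is an assumption):

// sample output of: ls -la /var/lib/ceph/osd/ceph-3/ | grep journal | grep partuuid
def out = 'lrwxrwxrwx 1 ceph ceph 58 Jan  1 00:00 journal -> /dev/disk/by-partuuid/2c76f144-f412-481e-b150-4046212ca932'
// keep the first line and take everything after the last '/', i.e. the partuuid itself
def partition_uuid = out.trim().split("\n")[0].substring(out.trim().lastIndexOf("/") + 1)
assert partition_uuid == '2c76f144-f412-481e-b150-4046212ca932'
// removePartition(pepperEnv, HOST, partition_uuid) then drops that partition with parted
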
diff --git a/ceph-remove-osd.groovy b/ceph-remove-osd.groovy
index 8483f3a..04a176b 100644
--- a/ceph-remove-osd.groovy
+++ b/ceph-remove-osd.groovy
@@ -57,149 +57,150 @@
         sleep(10)
     }
 }
+timeout(time: 12, unit: 'HOURS') {
+    node("python") {
 
-node("python") {
+        // create connection to salt master
+        python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
 
-    // create connection to salt master
-    python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
-
-    if (flags.size() > 0) {
-        stage('Set cluster flags') {
-            for (flag in flags) {
-                runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd set ' + flag)
+        if (flags.size() > 0) {
+            stage('Set cluster flags') {
+                for (flag in flags) {
+                    runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd set ' + flag)
+                }
             }
         }
-    }
 
-    def osd_ids = []
+        def osd_ids = []
 
-    // get list of osd disks of the host
-    salt.runSaltProcessStep(pepperEnv, HOST, 'saltutil.sync_grains', [], null, true, 5)
-    def ceph_disks = salt.getGrain(pepperEnv, HOST, 'ceph')['return'][0].values()[0].values()[0]['ceph_disk']
-    common.prettyPrint(ceph_disks)
+        // get list of osd disks of the host
+        salt.runSaltProcessStep(pepperEnv, HOST, 'saltutil.sync_grains', [], null, true, 5)
+        def ceph_disks = salt.getGrain(pepperEnv, HOST, 'ceph')['return'][0].values()[0].values()[0]['ceph_disk']
+        common.prettyPrint(ceph_disks)
 
-    for (i in ceph_disks) {
-        def osd_id = i.getKey().toString()
-        if (osd_id in osds || OSD == '*') {
-            osd_ids.add('osd.' + osd_id)
-            print("Will delete " + osd_id)
-        } else {
-            print("Skipping " + osd_id)
-        }
-    }
-
-    // wait for healthy cluster
-    if (WAIT_FOR_HEALTHY.toBoolean() == true) {
-        waitForHealthy(pepperEnv)
-    }
-
-    // `ceph osd out <id> <id>`
-    stage('Set OSDs out') {
-        runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd out ' + osd_ids.join(' '))
-    }
-
-    // wait for healthy cluster
-    if (WAIT_FOR_HEALTHY.toBoolean() == true) {
-        sleep(5)
-        waitForHealthy(pepperEnv)
-    }
-
-    // stop osd daemons
-    stage('Stop OSD daemons') {
-        for (i in osd_ids) {
-            salt.runSaltProcessStep(pepperEnv, HOST, 'service.stop', ['ceph-osd@' + i.replaceAll('osd.', '')],  null, true)
-        }
-    }
-
-    // `ceph osd crush remove osd.2`
-    stage('Remove OSDs from CRUSH') {
-        for (i in osd_ids) {
-            runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd crush remove ' + i)
-        }
-    }
-
-    // remove keyring `ceph auth del osd.3`
-    stage('Remove OSD keyrings from auth') {
-        for (i in osd_ids) {
-            runCephCommand(pepperEnv, ADMIN_HOST, 'ceph auth del ' + i)
-        }
-    }
-
-    // remove osd `ceph osd rm osd.3`
-    stage('Remove OSDs') {
-        for (i in osd_ids) {
-            runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd rm ' + i)
-        }
-    }
-
-    for (osd_id in osd_ids) {
-
-        id = osd_id.replaceAll('osd.', '')
-        def dmcrypt = ""
-        try {
-            dmcrypt = runCephCommand(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep dmcrypt")['return'][0].values()[0]
-        } catch (Exception e) {
-            common.warningMsg(e)
-        }
-
-        if (dmcrypt?.trim()) {
-            mount = runCephCommand(pepperEnv, HOST, "lsblk -rp | grep /var/lib/ceph/osd/ceph-${id} -B1")['return'][0].values()[0]
-            dev = mount.split()[0].replaceAll("[0-9]","")
-
-            // remove partition tables
-            stage("dd part table on ${dev}") {
-                runCephCommand(pepperEnv, HOST, "dd if=/dev/zero of=${dev} bs=512 count=1 conv=notrunc")
+        for (i in ceph_disks) {
+            def osd_id = i.getKey().toString()
+            if (osd_id in osds || OSD == '*') {
+                osd_ids.add('osd.' + osd_id)
+                print("Will delete " + osd_id)
+            } else {
+                print("Skipping " + osd_id)
             }
-
         }
-        // remove journal, block_db, block_wal partition `parted /dev/sdj rm 3`
-        stage('Remove journal / block_db / block_wal partition') {
-            def partition_uuid = ""
-            def journal_partition_uuid = ""
-            def block_db_partition_uuid = ""
-            def block_wal_partition_uuid = ""
+
+        // wait for healthy cluster
+        if (WAIT_FOR_HEALTHY.toBoolean() == true) {
+            waitForHealthy(pepperEnv)
+        }
+
+        // `ceph osd out <id> <id>`
+        stage('Set OSDs out') {
+            runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd out ' + osd_ids.join(' '))
+        }
+
+        // wait for healthy cluster
+        if (WAIT_FOR_HEALTHY.toBoolean() == true) {
+            sleep(5)
+            waitForHealthy(pepperEnv)
+        }
+
+        // stop osd daemons
+        stage('Stop OSD daemons') {
+            for (i in osd_ids) {
+                salt.runSaltProcessStep(pepperEnv, HOST, 'service.stop', ['ceph-osd@' + i.replaceAll('osd.', '')],  null, true)
+            }
+        }
+
+        // `ceph osd crush remove osd.2`
+        stage('Remove OSDs from CRUSH') {
+            for (i in osd_ids) {
+                runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd crush remove ' + i)
+            }
+        }
+
+        // remove keyring `ceph auth del osd.3`
+        stage('Remove OSD keyrings from auth') {
+            for (i in osd_ids) {
+                runCephCommand(pepperEnv, ADMIN_HOST, 'ceph auth del ' + i)
+            }
+        }
+
+        // remove osd `ceph osd rm osd.3`
+        stage('Remove OSDs') {
+            for (i in osd_ids) {
+                runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd rm ' + i)
+            }
+        }
+
+        for (osd_id in osd_ids) {
+
+            id = osd_id.replaceAll('osd.', '')
+            def dmcrypt = ""
             try {
-                journal_partition_uuid = runCephCommand(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep journal | grep partuuid")
-                journal_partition_uuid = journal_partition_uuid.toString().trim().split("\n")[0].substring(journal_partition_uuid.toString().trim().lastIndexOf("/")+1)
+                dmcrypt = runCephCommand(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep dmcrypt")['return'][0].values()[0]
             } catch (Exception e) {
-                common.infoMsg(e)
-            }
-            try {
-                block_db_partition_uuid = runCephCommand(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep 'block.db' | grep partuuid")
-                block_db_partition_uuid = block_db_partition_uuid.toString().trim().split("\n")[0].substring(block_db_partition_uuid.toString().trim().lastIndexOf("/")+1)
-            } catch (Exception e) {
-                common.infoMsg(e)
+                common.warningMsg(e)
             }
 
-            try {
-                block_wal_partition_uuid = runCephCommand(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep 'block.wal' | grep partuuid")
-                block_wal_partition_uuid = block_wal_partition_uuid.toString().trim().split("\n")[0].substring(block_wal_partition_uuid.toString().trim().lastIndexOf("/")+1)
-            } catch (Exception e) {
-                common.infoMsg(e)
-            }
+            if (dmcrypt?.trim()) {
+                mount = runCephCommand(pepperEnv, HOST, "lsblk -rp | grep /var/lib/ceph/osd/ceph-${id} -B1")['return'][0].values()[0]
+                dev = mount.split()[0].replaceAll("[0-9]","")
 
-            // set partition_uuid = 2c76f144-f412-481e-b150-4046212ca932
-            if (journal_partition_uuid?.trim()) {
-                partition_uuid = journal_partition_uuid
-            } else if (block_db_partition_uuid?.trim()) {
-                partition_uuid = block_db_partition_uuid
-            }
+                // remove partition tables
+                stage("dd part table on ${dev}") {
+                    runCephCommand(pepperEnv, HOST, "dd if=/dev/zero of=${dev} bs=512 count=1 conv=notrunc")
+                }
 
-            // if disk has journal, block_db or block_wal on different disk, then remove the partition
-            if (partition_uuid?.trim()) {
-                removePartition(pepperEnv, HOST, partition_uuid)
             }
-            if (block_wal_partition_uuid?.trim()) {
-                removePartition(pepperEnv, HOST, block_wal_partition_uuid)
+            // remove journal, block_db, block_wal partition `parted /dev/sdj rm 3`
+            stage('Remove journal / block_db / block_wal partition') {
+                def partition_uuid = ""
+                def journal_partition_uuid = ""
+                def block_db_partition_uuid = ""
+                def block_wal_partition_uuid = ""
+                try {
+                    journal_partition_uuid = runCephCommand(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep journal | grep partuuid")
+                    journal_partition_uuid = journal_partition_uuid.toString().trim().split("\n")[0].substring(journal_partition_uuid.toString().trim().lastIndexOf("/")+1)
+                } catch (Exception e) {
+                    common.infoMsg(e)
+                }
+                try {
+                    block_db_partition_uuid = runCephCommand(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep 'block.db' | grep partuuid")
+                    block_db_partition_uuid = block_db_partition_uuid.toString().trim().split("\n")[0].substring(block_db_partition_uuid.toString().trim().lastIndexOf("/")+1)
+                } catch (Exception e) {
+                    common.infoMsg(e)
+                }
+
+                try {
+                    block_wal_partition_uuid = runCephCommand(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep 'block.wal' | grep partuuid")
+                    block_wal_partition_uuid = block_wal_partition_uuid.toString().trim().split("\n")[0].substring(block_wal_partition_uuid.toString().trim().lastIndexOf("/")+1)
+                } catch (Exception e) {
+                    common.infoMsg(e)
+                }
+
+                // choose which partition_uuid to remove (e.g. 2c76f144-f412-481e-b150-4046212ca932)
+                if (journal_partition_uuid?.trim()) {
+                    partition_uuid = journal_partition_uuid
+                } else if (block_db_partition_uuid?.trim()) {
+                    partition_uuid = block_db_partition_uuid
+                }
+
+                // if disk has journal, block_db or block_wal on different disk, then remove the partition
+                if (partition_uuid?.trim()) {
+                    removePartition(pepperEnv, HOST, partition_uuid)
+                }
+                if (block_wal_partition_uuid?.trim()) {
+                    removePartition(pepperEnv, HOST, block_wal_partition_uuid)
+                }
             }
         }
-    }
-    // remove cluster flags
-    if (flags.size() > 0) {
-        stage('Unset cluster flags') {
-            for (flag in flags) {
-                common.infoMsg('Removing flag ' + flag)
-                runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd unset ' + flag)
+        // remove cluster flags
+        if (flags.size() > 0) {
+            stage('Unset cluster flags') {
+                for (flag in flags) {
+                    common.infoMsg('Removing flag ' + flag)
+                    runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd unset ' + flag)
+                }
             }
         }
     }
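
The OSD removal above is bracketed by the usual set/work/unset handling of cluster flags, so data does not start rebalancing while OSDs are being taken out. A sketch of that pattern; the flag values shown are only typical examples of what CLUSTER_FLAGS might contain:

def flags = ['noout', 'norebalance']   // hypothetical CLUSTER_FLAGS values
if (flags.size() > 0) {
    stage('Set cluster flags') {
        for (flag in flags) {
            runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd set ' + flag)
        }
    }
}
// ... take OSDs out, stop daemons, remove them from CRUSH, auth and the osdmap ...
if (flags.size() > 0) {
    stage('Unset cluster flags') {
        for (flag in flags) {
            common.infoMsg('Removing flag ' + flag)
            runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd unset ' + flag)
        }
    }
}
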
diff --git a/ceph-replace-failed-osd.groovy b/ceph-replace-failed-osd.groovy
index 0a27dc5..93b6573 100644
--- a/ceph-replace-failed-osd.groovy
+++ b/ceph-replace-failed-osd.groovy
@@ -44,189 +44,190 @@
         sleep(10)
     }
 }
+timeout(time: 12, unit: 'HOURS') {
+    node("python") {
 
-node("python") {
+        // create connection to salt master
+        python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
 
-    // create connection to salt master
-    python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+        def osd_ids = []
 
-    def osd_ids = []
+        for (osd_id in osds) {
+            osd_ids.add('osd.' + osd_id)
+            print("Will delete " + osd_id)
+        }
 
-    for (osd_id in osds) {
-        osd_ids.add('osd.' + osd_id)
-        print("Will delete " + osd_id)
-    }
+        // `ceph osd out <id> <id>`
+        stage('Set OSDs out') {
+            runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd out ' + osd_ids.join(' '))
+        }
 
-    // `ceph osd out <id> <id>`
-    stage('Set OSDs out') {
-        runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd out ' + osd_ids.join(' '))
-    }
-
-    // wait for healthy cluster
-    if (WAIT_FOR_HEALTHY.toBoolean() == true) {
-        sleep(5)
-        waitForHealthy(pepperEnv)
-    }
+        // wait for healthy cluster
+        if (WAIT_FOR_HEALTHY.toBoolean() == true) {
+            sleep(5)
+            waitForHealthy(pepperEnv)
+        }
 
 
-    if (flags.size() > 0) {
-        stage('Set cluster flags') {
-            for (flag in flags) {
-                runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd set ' + flag)
+        if (flags.size() > 0) {
+            stage('Set cluster flags') {
+                for (flag in flags) {
+                    runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd set ' + flag)
+                }
             }
         }
-    }
 
-    // stop osd daemons
-    stage('Stop OSD daemons') {
-        for (i in osd_ids) {
-            salt.runSaltProcessStep(pepperEnv, HOST, 'service.stop', ['ceph-osd@' + i.replaceAll('osd.', '')],  null, true)
+        // stop osd daemons
+        stage('Stop OSD daemons') {
+            for (i in osd_ids) {
+                salt.runSaltProcessStep(pepperEnv, HOST, 'service.stop', ['ceph-osd@' + i.replaceAll('osd.', '')],  null, true)
+            }
         }
-    }
-    /*
-    // `ceph osd crush remove osd.2`
-    stage('Remove OSDs from CRUSH') {
-        for (i in osd_ids) {
-            runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd crush remove ' + i)
+        /*
+        // `ceph osd crush remove osd.2`
+        stage('Remove OSDs from CRUSH') {
+            for (i in osd_ids) {
+                runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd crush remove ' + i)
+            }
         }
-    }
 
-    // wait for pgs to rebalance
-    if (WAIT_FOR_PG_REBALANCE.toBoolean() == true) {
-        stage('Waiting for pgs to rebalance') {
-            while (true) {
-                def status = runCephCommand(pepperEnv, ADMIN_HOST, 'ceph -s')['return'][0].values()[0]
-                if (!status.contains('degraded')) {
-                    common.infoMsg('PGs rebalanced')
-                    break;
+        // wait for pgs to rebalance
+        if (WAIT_FOR_PG_REBALANCE.toBoolean() == true) {
+            stage('Waiting for pgs to rebalance') {
+                while (true) {
+                    def status = runCephCommand(pepperEnv, ADMIN_HOST, 'ceph -s')['return'][0].values()[0]
+                    if (!status.contains('degraded')) {
+                        common.infoMsg('PGs rebalanced')
+                        break;
+                    }
+                    sleep(10)
                 }
+            }
+        }
+        */
+        // remove keyring `ceph auth del osd.3`
+        stage('Remove OSD keyrings from auth') {
+            for (i in osd_ids) {
+                runCephCommand(pepperEnv, ADMIN_HOST, 'ceph auth del ' + i)
+            }
+        }
+
+        // remove osd `ceph osd rm osd.3`
+        stage('Remove OSDs') {
+            for (i in osd_ids) {
+                runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd rm ' + i)
+            }
+        }
+
+        if (DMCRYPT.toBoolean() == true) {
+
+            // remove partition tables
+            stage('dd part tables') {
+                for (dev in devices) {
+                    runCephCommand(pepperEnv, HOST, "dd if=/dev/zero of=${dev} bs=512 count=1 conv=notrunc")
+                }
+            }
+
+            // remove journal, block_db or block_wal partition `parted /dev/sdj rm 3`
+            stage('Remove journal / block_db / block_wal partitions') {
+                for (partition in journals_blockdbs_blockwals) {
+                    if (partition?.trim()) {
+                        // dev = /dev/sdi
+                        def dev = partition.replaceAll("[0-9]", "")
+                        // part_id = 2
+                        def part_id = partition.substring(partition.lastIndexOf("/")+1).replaceAll("[^0-9]", "")
+                        try {
+                            runCephCommand(pepperEnv, HOST, "Ignore | parted ${dev} rm ${part_id}")
+                        } catch (Exception e) {
+                            common.warningMsg(e)
+                        }
+                    }
+                }
+            }
+
+            // reboot
+            stage('reboot and wait') {
+                salt.runSaltProcessStep(pepperEnv, HOST, 'system.reboot', null, null, true, 5)
+                salt.minionsReachable(pepperEnv, 'I@salt:master', HOST)
                 sleep(10)
             }
-        }
-    }
-    */
-    // remove keyring `ceph auth del osd.3`
-    stage('Remove OSD keyrings from auth') {
-        for (i in osd_ids) {
-            runCephCommand(pepperEnv, ADMIN_HOST, 'ceph auth del ' + i)
-        }
-    }
 
-    // remove osd `ceph osd rm osd.3`
-    stage('Remove OSDs') {
-        for (i in osd_ids) {
-            runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd rm ' + i)
-        }
-    }
 
-    if (DMCRYPT.toBoolean() == true) {
 
-        // remove partition tables
-        stage('dd part tables') {
-            for (dev in devices) {
-                runCephCommand(pepperEnv, HOST, "dd if=/dev/zero of=${dev} bs=512 count=1 conv=notrunc")
-            }
-        }
-
-        // remove journal, block_db or block_wal partition `parted /dev/sdj rm 3`
-        stage('Remove journal / block_db / block_wal partitions') {
-            for (partition in journals_blockdbs_blockwals) {
-                if (partition?.trim()) {
-                    // dev = /dev/sdi
-                    def dev = partition.replaceAll("[0-9]", "")
-                    // part_id = 2
-                    def part_id = partition.substring(partition.lastIndexOf("/")+1).replaceAll("[^0-9]", "")
+            // zap disks `ceph-disk zap /dev/sdi`
+            stage('Zap devices') {
+                for (dev in devices) {
                     try {
-                        runCephCommand(pepperEnv, HOST, "Ignore | parted ${dev} rm ${part_id}")
+                        runCephCommand(pepperEnv, HOST, 'ceph-disk zap ' + dev)
                     } catch (Exception e) {
                         common.warningMsg(e)
                     }
-                }
-            }
-        }
-
-        // reboot
-        stage('reboot and wait') {
-            salt.runSaltProcessStep(pepperEnv, HOST, 'system.reboot', null, null, true, 5)
-            salt.minionsReachable(pepperEnv, 'I@salt:master', HOST)
-            sleep(10)
-        }
-
-
-
-        // zap disks `ceph-disk zap /dev/sdi`
-        stage('Zap devices') {
-            for (dev in devices) {
-                try {
                     runCephCommand(pepperEnv, HOST, 'ceph-disk zap ' + dev)
-                } catch (Exception e) {
-                    common.warningMsg(e)
                 }
-                runCephCommand(pepperEnv, HOST, 'ceph-disk zap ' + dev)
             }
-        }
 
-    } else {
+        } else {
 
-        // umount `umount /dev/sdi1`
-        stage('Umount devices') {
-            for (dev in devices) {
-                runCephCommand(pepperEnv, HOST, 'umount ' + dev + '1')
+            // umount `umount /dev/sdi1`
+            stage('Umount devices') {
+                for (dev in devices) {
+                    runCephCommand(pepperEnv, HOST, 'umount ' + dev + '1')
+                }
             }
-        }
 
-        // zap disks `ceph-disk zap /dev/sdi`
-        stage('Zap devices') {
-            for (dev in devices) {
-                runCephCommand(pepperEnv, HOST, 'ceph-disk zap ' + dev)
+            // zap disks `ceph-disk zap /dev/sdi`
+            stage('Zap devices') {
+                for (dev in devices) {
+                    runCephCommand(pepperEnv, HOST, 'ceph-disk zap ' + dev)
+                }
             }
-        }
 
-        // remove journal, block_db or block_wal partition `parted /dev/sdj rm 3`
-        stage('Remove journal / block_db / block_wal partitions') {
-            for (partition in journals_blockdbs_blockwals) {
-                if (partition?.trim()) {
-                    // dev = /dev/sdi
-                    def dev = partition.replaceAll("[0-9]", "")
-                    // part_id = 2
-                    def part_id = partition.substring(partition.lastIndexOf("/")+1).replaceAll("[^0-9]", "")
-                    try {
-                        runCephCommand(pepperEnv, HOST, "parted ${dev} rm ${part_id}")
-                    } catch (Exception e) {
-                        common.warningMsg(e)
+            // remove journal, block_db or block_wal partition `parted /dev/sdj rm 3`
+            stage('Remove journal / block_db / block_wal partitions') {
+                for (partition in journals_blockdbs_blockwals) {
+                    if (partition?.trim()) {
+                        // dev = /dev/sdi
+                        def dev = partition.replaceAll("[0-9]", "")
+                        // part_id = 2
+                        def part_id = partition.substring(partition.lastIndexOf("/")+1).replaceAll("[^0-9]", "")
+                        try {
+                            runCephCommand(pepperEnv, HOST, "parted ${dev} rm ${part_id}")
+                        } catch (Exception e) {
+                            common.warningMsg(e)
+                        }
                     }
                 }
             }
         }
-    }
 
-    // Deploy failed Ceph OSD
-    stage('Deploy Ceph OSD') {
-        salt.enforceState(pepperEnv, HOST, 'ceph.osd', true)
-    }
+        // Deploy failed Ceph OSD
+        stage('Deploy Ceph OSD') {
+            salt.enforceState(pepperEnv, HOST, 'ceph.osd', true)
+        }
 
-    // remove cluster flags
-    if (flags.size() > 0) {
-        stage('Unset cluster flags') {
-            for (flag in flags) {
-                common.infoMsg('Removing flag ' + flag)
-                runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd unset ' + flag)
+        // remove cluster flags
+        if (flags.size() > 0) {
+            stage('Unset cluster flags') {
+                for (flag in flags) {
+                    common.infoMsg('Removing flag ' + flag)
+                    runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd unset ' + flag)
+                }
             }
         }
-    }
 
-    /*
-    if (ENFORCE_CRUSHMAP.toBoolean() == true) {
+        /*
+        if (ENFORCE_CRUSHMAP.toBoolean() == true) {
 
-        // enforce crushmap `crushtool -c /etc/ceph/crushmap -o /etc/ceph/crushmap.compiled; ceph osd setcrushmap -i /etc/ceph/crushmap.compiled`
-        stage('Enforce crushmap') {
+            // enforce crushmap `crushtool -c /etc/ceph/crushmap -o /etc/ceph/crushmap.compiled; ceph osd setcrushmap -i /etc/ceph/crushmap.compiled`
+            stage('Enforce crushmap') {
 
-            stage('Ask for manual confirmation') {
-                input message: "Are you sure that your ADMIN_HOST has correct /etc/ceph/crushmap file? Click proceed to compile and enforce crushmap."
+                stage('Ask for manual confirmation') {
+                    input message: "Are you sure that your ADMIN_HOST has correct /etc/ceph/crushmap file? Click proceed to compile and enforce crushmap."
+                }
+                runCephCommand(pepperEnv, ADMIN_HOST, 'crushtool -c /etc/ceph/crushmap -o /etc/ceph/crushmap.compiled')
+                runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd setcrushmap -i /etc/ceph/crushmap.compiled')
             }
-            runCephCommand(pepperEnv, ADMIN_HOST, 'crushtool -c /etc/ceph/crushmap -o /etc/ceph/crushmap.compiled')
-            runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd setcrushmap -i /etc/ceph/crushmap.compiled')
         }
+        */
     }
-    */
-}
+}
\ No newline at end of file
diff --git a/ceph-upgrade.groovy b/ceph-upgrade.groovy
index 8323e41..ec232b8 100644
--- a/ceph-upgrade.groovy
+++ b/ceph-upgrade.groovy
@@ -141,83 +141,84 @@
     sleep(5)
     return
 }
+timeout(time: 12, unit: 'HOURS') {
+    node("python") {
 
-node("python") {
+        // create connection to salt master
+        python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
 
-    // create connection to salt master
-    python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
-
-    if (BACKUP_ENABLED.toBoolean() == true) {
-        if (STAGE_UPGRADE_MON.toBoolean() == true) {
-            backup(pepperEnv, 'mon')
-        }
-        if (STAGE_UPGRADE_RGW.toBoolean() == true) {
-            backup(pepperEnv, 'radosgw')
-        }
-        if (STAGE_UPGRADE_OSD.toBoolean() == true) {
-            backup(pepperEnv, 'osd')
-        }
-    }
-
-    if (flags.size() > 0) {
-        stage('Set cluster flags') {
-            for (flag in flags) {
-                runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd set ' + flag)
+        if (BACKUP_ENABLED.toBoolean() == true) {
+            if (STAGE_UPGRADE_MON.toBoolean() == true) {
+                backup(pepperEnv, 'mon')
+            }
+            if (STAGE_UPGRADE_RGW.toBoolean() == true) {
+                backup(pepperEnv, 'radosgw')
+            }
+            if (STAGE_UPGRADE_OSD.toBoolean() == true) {
+                backup(pepperEnv, 'osd')
             }
         }
-    }
 
-    if (STAGE_UPGRADE_MON.toBoolean() == true) {
-        upgrade(pepperEnv, 'mon')
-    }
-
-    if (STAGE_UPGRADE_MGR.toBoolean() == true) {
-        upgrade(pepperEnv, 'mgr')
-    }
-
-    if (STAGE_UPGRADE_OSD.toBoolean() == true) {
-        upgrade(pepperEnv, 'osd')
-    }
-
-    if (STAGE_UPGRADE_RGW.toBoolean() == true) {
-        upgrade(pepperEnv, 'radosgw')
-    }
-
-    if (STAGE_UPGRADE_CLIENT.toBoolean() == true) {
-        upgrade(pepperEnv, 'common')
-    }
-
-    // remove cluster flags
-    if (flags.size() > 0) {
-        stage('Unset cluster flags') {
-            for (flag in flags) {
-                if (!flag.contains('sortbitwise')) {
-                    common.infoMsg('Removing flag ' + flag)
-                    runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd unset ' + flag)
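+        // set any requested cluster flags before the upgrade; they are removed again (except sortbitwise) in the 'Unset cluster flags' stage below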
+        if (flags.size() > 0) {
+            stage('Set cluster flags') {
+                for (flag in flags) {
+                    runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd set ' + flag)
                 }
-
             }
         }
-    }
 
-    if (STAGE_FINALIZE.toBoolean() == true) {
-        stage("Finalize ceph version upgrade") {
-            runCephCommand(pepperEnv, ADMIN_HOST, "ceph osd require-osd-release ${TARGET_RELEASE}")
-            try {
-                runCephCommand(pepperEnv, ADMIN_HOST, "ceph osd set-require-min-compat-client ${ORIGIN_RELEASE}")
-            } catch (Exception e) {
-                common.warningMsg(e)
-            }
-            try {
-                runCephCommand(pepperEnv, ADMIN_HOST, "ceph osd crush tunables optimal")
-            } catch (Exception e) {
-                common.warningMsg(e)
+        if (STAGE_UPGRADE_MON.toBoolean() == true) {
+            upgrade(pepperEnv, 'mon')
+        }
+
+        if (STAGE_UPGRADE_MGR.toBoolean() == true) {
+            upgrade(pepperEnv, 'mgr')
+        }
+
+        if (STAGE_UPGRADE_OSD.toBoolean() == true) {
+            upgrade(pepperEnv, 'osd')
+        }
+
+        if (STAGE_UPGRADE_RGW.toBoolean() == true) {
+            upgrade(pepperEnv, 'radosgw')
+        }
+
+        if (STAGE_UPGRADE_CLIENT.toBoolean() == true) {
+            upgrade(pepperEnv, 'common')
+        }
+
+        // remove cluster flags
+        if (flags.size() > 0) {
+            stage('Unset cluster flags') {
+                for (flag in flags) {
+                    if (!flag.contains('sortbitwise')) {
+                        common.infoMsg('Removing flag ' + flag)
+                        runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd unset ' + flag)
+                    }
+
+                }
             }
         }
-    }
 
-    // wait for healthy cluster
-    if (WAIT_FOR_HEALTHY.toBoolean() == true) {
-        waitForHealthy(pepperEnv)
+        if (STAGE_FINALIZE.toBoolean() == true) {
+            stage("Finalize ceph version upgrade") {
+                runCephCommand(pepperEnv, ADMIN_HOST, "ceph osd require-osd-release ${TARGET_RELEASE}")
+                try {
+                    runCephCommand(pepperEnv, ADMIN_HOST, "ceph osd set-require-min-compat-client ${ORIGIN_RELEASE}")
+                } catch (Exception e) {
+                    common.warningMsg(e)
+                }
+                try {
+                    runCephCommand(pepperEnv, ADMIN_HOST, "ceph osd crush tunables optimal")
+                } catch (Exception e) {
+                    common.warningMsg(e)
+                }
+            }
+        }
+
+        // wait for healthy cluster
+        if (WAIT_FOR_HEALTHY.toBoolean() == true) {
+            waitForHealthy(pepperEnv)
+        }
     }
 }
diff --git a/change-config.groovy b/change-config.groovy
index 0b4538c..35cc99f 100644
--- a/change-config.groovy
+++ b/change-config.groovy
@@ -24,79 +24,80 @@
 def minions
 def result
 def states
+timeout(time: 12, unit: 'HOURS') {
+    node() {
+        try {
 
-node() {
-    try {
-
-        if (TARGET_STATES != "") {
-            states = TARGET_STATES
-        }
-        else {
-            states = null
-        }
-
-        stage('Setup virtualenv for Pepper') {
-            python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
-        }
-
-        if (common.validInputParam("PULL_MODEL") && PULL_MODEL.toBoolean() == true) {
-            stage('Update the reclass cluster model') {
-                def saltMasterTarget = ['expression': 'I@salt:master', 'type': 'compound']
-                result = salt.runSaltCommand(pepperEnv, 'local', saltMasterTarget, 'state.apply', null, "reclass.storage.data")
-                salt.checkResult(result)
-            }
-        }
-
-        stage('List target servers') {
-            minions = salt.getMinions(pepperEnv, TARGET_SERVERS)
-            if (minions.isEmpty()) {
-                throw new Exception("No minion was targeted")
-            }
-            if (TARGET_SUBSET_TEST != "") {
-                targetTestSubset = ['expression': minions.subList(0, Integer.valueOf(TARGET_SUBSET_TEST)).join(' or '), 'type': 'compound']
+            if (TARGET_STATES != "") {
+                states = TARGET_STATES
             }
             else {
-                targetTestSubset = ['expression': minions.join(' or '), 'type': 'compound']
+                states = null
             }
-            targetLiveSubset = ['expression': minions.subList(0, Integer.valueOf(TARGET_SUBSET_LIVE)).join(' or '), 'type': 'compound']
-            targetLiveAll = ['expression': minions.join(' or '), 'type': 'compound']
-            common.infoMsg("Found nodes: ${targetLiveAll.expression}")
-            common.infoMsg("Selected test nodes: ${targetTestSubset.expression}")
-            common.infoMsg("Selected sample nodes: ${targetLiveSubset.expression}")
-        }
 
-        stage('Test config changes') {
-            def kwargs = [
-                'test': true
-            ]
-            result = salt.runSaltCommand(pepperEnv, 'local', targetTestSubset, 'state.apply', null, states, kwargs)
-            salt.checkResult(result)
-        }
-
-        stage('Confirm live changes on sample') {
-            timeout(time: 2, unit: 'HOURS') {
-               input message: "Approve live config change on ${targetLiveSubset.expression} nodes?"
+            stage('Setup virtualenv for Pepper') {
+                python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
             }
-        }
 
-        stage('Apply config changes on sample') {
-            result = salt.runSaltCommand(pepperEnv, 'local', targetLiveSubset, 'state.apply', null, states)
-            salt.checkResult(result)
-        }
-
-        stage('Confirm live changes on all nodes') {
-            timeout(time: 2, unit: 'HOURS') {
-               input message: "Approve live config change on ${targetLiveAll.expression} nodes?"
+            if (common.validInputParam("PULL_MODEL") && PULL_MODEL.toBoolean() == true) {
+                stage('Update the reclass cluster model') {
+                    def saltMasterTarget = ['expression': 'I@salt:master', 'type': 'compound']
+                    result = salt.runSaltCommand(pepperEnv, 'local', saltMasterTarget, 'state.apply', null, "reclass.storage.data")
+                    salt.checkResult(result)
+                }
             }
-        }
 
-        stage('Apply config changes on all nodes') {
-            result = salt.runSaltCommand(pepperEnv, 'local', targetLiveAll, 'state.apply', null, states)
-            salt.checkResult(result)
-        }
+            stage('List target servers') {
+                minions = salt.getMinions(pepperEnv, TARGET_SERVERS)
+                if (minions.isEmpty()) {
+                    throw new Exception("No minion was targeted")
+                }
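+                // build compound targets: a test subset for the dry run, a small live sample, and the full set of matched minions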
+                if (TARGET_SUBSET_TEST != "") {
+                    targetTestSubset = ['expression': minions.subList(0, Integer.valueOf(TARGET_SUBSET_TEST)).join(' or '), 'type': 'compound']
+                }
+                else {
+                    targetTestSubset = ['expression': minions.join(' or '), 'type': 'compound']
+                }
+                targetLiveSubset = ['expression': minions.subList(0, Integer.valueOf(TARGET_SUBSET_LIVE)).join(' or '), 'type': 'compound']
+                targetLiveAll = ['expression': minions.join(' or '), 'type': 'compound']
+                common.infoMsg("Found nodes: ${targetLiveAll.expression}")
+                common.infoMsg("Selected test nodes: ${targetTestSubset.expression}")
+                common.infoMsg("Selected sample nodes: ${targetLiveSubset.expression}")
+            }
 
-    } catch (Throwable e) {
-        currentBuild.result = 'FAILURE'
-        throw e
+            stage('Test config changes') {
+                def kwargs = [
+                    'test': true
+                ]
+                result = salt.runSaltCommand(pepperEnv, 'local', targetTestSubset, 'state.apply', null, states, kwargs)
+                salt.checkResult(result)
+            }
+
+            stage('Confirm live changes on sample') {
+                timeout(time: 2, unit: 'HOURS') {
+                   input message: "Approve live config change on ${targetLiveSubset.expression} nodes?"
+                }
+            }
+
+            stage('Apply config changes on sample') {
+                result = salt.runSaltCommand(pepperEnv, 'local', targetLiveSubset, 'state.apply', null, states)
+                salt.checkResult(result)
+            }
+
+            stage('Confirm live changes on all nodes') {
+                timeout(time: 2, unit: 'HOURS') {
+                   input message: "Approve live config change on ${targetLiveAll.expression} nodes?"
+                }
+            }
+
+            stage('Apply config changes on all nodes') {
+                result = salt.runSaltCommand(pepperEnv, 'local', targetLiveAll, 'state.apply', null, states)
+                salt.checkResult(result)
+            }
+
+        } catch (Throwable e) {
+            currentBuild.result = 'FAILURE'
+            throw e
+        }
     }
 }
diff --git a/cicd-lab-pipeline.groovy b/cicd-lab-pipeline.groovy
index 9fc42d3..d985b3f 100644
--- a/cicd-lab-pipeline.groovy
+++ b/cicd-lab-pipeline.groovy
@@ -35,320 +35,321 @@
 
 def pepperEnv = "pepperEnv"
 _MAX_PERMITTED_STACKS = 2
-
-node {
-    try {
-        // connection objects
-        def openstackCloud
-
-        // value defaults
-        def openstackVersion = OPENSTACK_API_CLIENT ? OPENSTACK_API_CLIENT : 'liberty'
-        def openstackEnv = "${env.WORKSPACE}/venv"
-
+timeout(time: 12, unit: 'HOURS') {
+    node {
         try {
-            sshPubKey = SSH_PUBLIC_KEY
-        } catch (MissingPropertyException e) {
-            sshPubKey = false
-        }
+            // connection objects
+            def openstackCloud
 
-        if (HEAT_STACK_REUSE.toBoolean() == true && HEAT_STACK_NAME == '') {
-            error("If you want to reuse existing stack you need to provide it's name")
-        }
+            // value defaults
+            def openstackVersion = OPENSTACK_API_CLIENT ? OPENSTACK_API_CLIENT : 'liberty'
+            def openstackEnv = "${env.WORKSPACE}/venv"
 
-        if (HEAT_STACK_REUSE.toBoolean() == false) {
-            // Don't allow to set custom heat stack name
-            wrap([$class: 'BuildUser']) {
-                if (env.BUILD_USER_ID) {
-                    HEAT_STACK_NAME = "${env.BUILD_USER_ID}-${JOB_NAME}-${BUILD_NUMBER}"
-                } else {
-                    HEAT_STACK_NAME = "jenkins-${JOB_NAME}-${BUILD_NUMBER}"
-                }
-                currentBuild.description = HEAT_STACK_NAME
+            try {
+                sshPubKey = SSH_PUBLIC_KEY
+            } catch (MissingPropertyException e) {
+                sshPubKey = false
             }
-        }
 
-        //
-        // Bootstrap
-        //
+            if (HEAT_STACK_REUSE.toBoolean() == true && HEAT_STACK_NAME == '') {
+                error("If you want to reuse existing stack you need to provide it's name")
+            }
 
-        stage ('Download Heat templates') {
-            git.checkoutGitRepository('template', HEAT_TEMPLATE_URL, HEAT_TEMPLATE_BRANCH, HEAT_TEMPLATE_CREDENTIALS)
-        }
+            if (HEAT_STACK_REUSE.toBoolean() == false) {
+                // Don't allow setting a custom heat stack name
+                wrap([$class: 'BuildUser']) {
+                    if (env.BUILD_USER_ID) {
+                        HEAT_STACK_NAME = "${env.BUILD_USER_ID}-${JOB_NAME}-${BUILD_NUMBER}"
+                    } else {
+                        HEAT_STACK_NAME = "jenkins-${JOB_NAME}-${BUILD_NUMBER}"
+                    }
+                    currentBuild.description = HEAT_STACK_NAME
+                }
+            }
 
-        stage('Install OpenStack CLI') {
-            openstack.setupOpenstackVirtualenv(openstackEnv, openstackVersion)
-        }
+            //
+            // Bootstrap
+            //
 
-        stage('Connect to OpenStack cloud') {
-            openstackCloud = openstack.createOpenstackEnv(
-                OPENSTACK_API_URL, OPENSTACK_API_CREDENTIALS,
-                OPENSTACK_API_PROJECT, OPENSTACK_API_PROJECT_DOMAIN,
-                OPENSTACK_API_PROJECT_ID, OPENSTACK_API_USER_DOMAIN,
-                OPENSTACK_API_VERSION)
-            openstack.getKeystoneToken(openstackCloud, openstackEnv)
-            wrap([$class: 'BuildUser']) {
-                if (env.BUILD_USER_ID && !env.BUILD_USER_ID.equals("jenkins") && !HEAT_STACK_REUSE.toBoolean()) {
-                    def existingStacks = openstack.getStacksForNameContains(openstackCloud, "${env.BUILD_USER_ID}-${JOB_NAME}", openstackEnv)
-                    if(existingStacks.size() >= _MAX_PERMITTED_STACKS){
-                        HEAT_STACK_DELETE = "false"
-                        throw new Exception("You cannot create new stack, you already have ${_MAX_PERMITTED_STACKS} stacks of this type (${JOB_NAME}). \nStack names: ${existingStacks}")
+            stage ('Download Heat templates') {
+                git.checkoutGitRepository('template', HEAT_TEMPLATE_URL, HEAT_TEMPLATE_BRANCH, HEAT_TEMPLATE_CREDENTIALS)
+            }
+
+            stage('Install OpenStack CLI') {
+                openstack.setupOpenstackVirtualenv(openstackEnv, openstackVersion)
+            }
+
+            stage('Connect to OpenStack cloud') {
+                openstackCloud = openstack.createOpenstackEnv(
+                    OPENSTACK_API_URL, OPENSTACK_API_CREDENTIALS,
+                    OPENSTACK_API_PROJECT, OPENSTACK_API_PROJECT_DOMAIN,
+                    OPENSTACK_API_PROJECT_ID, OPENSTACK_API_USER_DOMAIN,
+                    OPENSTACK_API_VERSION)
+                openstack.getKeystoneToken(openstackCloud, openstackEnv)
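+                // enforce the per-user stack limit for human-triggered builds that are not reusing an existing stack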
+                wrap([$class: 'BuildUser']) {
+                    if (env.BUILD_USER_ID && !env.BUILD_USER_ID.equals("jenkins") && !HEAT_STACK_REUSE.toBoolean()) {
+                        def existingStacks = openstack.getStacksForNameContains(openstackCloud, "${env.BUILD_USER_ID}-${JOB_NAME}", openstackEnv)
+                        if(existingStacks.size() >= _MAX_PERMITTED_STACKS){
+                            HEAT_STACK_DELETE = "false"
+                            throw new Exception("You cannot create new stack, you already have ${_MAX_PERMITTED_STACKS} stacks of this type (${JOB_NAME}). \nStack names: ${existingStacks}")
+                        }
                     }
                 }
             }
-        }
 
-        if (HEAT_STACK_REUSE.toBoolean() == false) {
-            stage('Launch new Heat stack') {
-                envParams = [
-                    'cluster_zone': HEAT_STACK_ZONE,
-                    'cluster_public_net': HEAT_STACK_PUBLIC_NET
-                ]
-                openstack.createHeatStack(openstackCloud, HEAT_STACK_NAME, HEAT_STACK_TEMPLATE, envParams, HEAT_STACK_ENVIRONMENT, openstackEnv)
-            }
-        }
-
-        stage('Connect to Salt master') {
-            def saltMasterPort
-            try {
-                saltMasterPort = SALT_MASTER_PORT
-            } catch (MissingPropertyException e) {
-                saltMasterPort = 6969
-            }
-            saltMasterHost = openstack.getHeatStackOutputParam(openstackCloud, HEAT_STACK_NAME, 'salt_master_ip', openstackEnv)
-            currentBuild.description = "${HEAT_STACK_NAME}: ${saltMasterHost}"
-            saltMasterUrl = "http://${saltMasterHost}:${saltMasterPort}"
-            python.setupPepperVirtualenv(pepperEnv, saltMasterUrl, SALT_MASTER_CREDENTIALS)
-        }
-
-        //
-        // Install
-        //
-
-        stage('Install core infra') {
-            // salt.master, reclass
-            // refresh_pillar
-            // sync_all
-            // linux,openssh,salt.minion.ntp
-
-            orchestrate.installFoundationInfra(pepperEnv)
-            orchestrate.validateFoundationInfra(pepperEnv)
-        }
-
-        stage("Deploy GlusterFS") {
-            salt.enforceState(pepperEnv, 'I@glusterfs:server', 'glusterfs.server.service', true)
-            retry(2) {
-                salt.enforceState(pepperEnv, 'ci01*', 'glusterfs.server.setup', true)
-            }
-            sleep(5)
-            salt.enforceState(pepperEnv, 'I@glusterfs:client', 'glusterfs.client', true)
-
-            timeout(5) {
-                println "Waiting for GlusterFS volumes to get mounted.."
-                salt.cmdRun(pepperEnv, 'I@glusterfs:client', 'while true; do systemctl -a|grep "GlusterFS File System"|grep -v mounted >/dev/null || break; done')
-            }
-            print common.prettyPrint(salt.cmdRun(pepperEnv, 'I@glusterfs:client', 'mount|grep fuse.glusterfs || echo "Command failed"'))
-        }
-
-        stage("Deploy GlusterFS") {
-            salt.enforceState(pepperEnv, 'I@haproxy:proxy', 'haproxy,keepalived')
-        }
-
-        stage("Setup Docker Swarm") {
-            salt.enforceState(pepperEnv, 'I@docker:host', 'docker.host', true)
-            salt.enforceState(pepperEnv, 'I@docker:swarm:role:master', 'docker.swarm', true)
-            salt.enforceState(pepperEnv, 'I@docker:swarm:role:master', 'salt', true)
-            salt.runSaltProcessStep(pepperEnv, 'I@docker:swarm:role:master', 'mine.flush')
-            salt.runSaltProcessStep(pepperEnv, 'I@docker:swarm:role:master', 'mine.update')
-            salt.enforceState(pepperEnv, 'I@docker:swarm', 'docker.swarm', true)
-            print common.prettyPrint(salt.cmdRun(pepperEnv, 'I@docker:swarm:role:master', 'docker node ls'))
-        }
-
-        stage("Configure OSS services") {
-            salt.enforceState(pepperEnv, 'I@devops_portal:config', 'devops_portal.config')
-            salt.enforceState(pepperEnv, 'I@rundeck:server', 'rundeck.server')
-        }
-
-        stage("Deploy Docker services") {
-            // We need /etc/aptly-publisher.yaml to be present before
-            // services are deployed
-            // XXX: for some weird unknown reason, refresh_pillar is
-            // required to execute here
-            salt.runSaltProcessStep(pepperEnv, 'I@aptly:publisher', 'saltutil.refresh_pillar', [], null, true)
-            salt.enforceState(pepperEnv, 'I@aptly:publisher', 'aptly.publisher', true)
-            retry(3) {
-                sleep(5)
-                salt.enforceState(pepperEnv, 'I@docker:swarm:role:master', 'docker.client')
-            }
-            // XXX: Workaround to have `/var/lib/jenkins` on all
-            // nodes where are jenkins_slave services are created.
-            salt.runSaltProcessStep(pepperEnv, 'I@docker:swarm', 'cmd.run', ['mkdir -p /var/lib/jenkins'])
-        }
-
-        stage("Configure CI/CD services") {
-            salt.syncAll(pepperEnv, '*')
-
-            // Aptly
-            timeout(10) {
-                println "Waiting for Aptly to come up.."
-                retry(2) {
-                    // XXX: retry to workaround magical VALUE_TRIMMED
-                    // response from salt master + to give slow cloud some
-                    // more time to settle down
-                    salt.cmdRun(pepperEnv, 'I@aptly:server', 'while true; do curl -sf http://172.16.10.254:8084/api/version >/dev/null && break; done')
+            if (HEAT_STACK_REUSE.toBoolean() == false) {
+                stage('Launch new Heat stack') {
+                    envParams = [
+                        'cluster_zone': HEAT_STACK_ZONE,
+                        'cluster_public_net': HEAT_STACK_PUBLIC_NET
+                    ]
+                    openstack.createHeatStack(openstackCloud, HEAT_STACK_NAME, HEAT_STACK_TEMPLATE, envParams, HEAT_STACK_ENVIRONMENT, openstackEnv)
                 }
             }
-            salt.enforceState(pepperEnv, 'I@aptly:server', 'aptly', true)
 
-            // OpenLDAP
-            timeout(10) {
-                println "Waiting for OpenLDAP to come up.."
-                salt.cmdRun(pepperEnv, 'I@openldap:client', 'while true; do curl -sf ldap://172.16.10.254 >/dev/null && break; done')
-            }
-            salt.enforceState(pepperEnv, 'I@openldap:client', 'openldap', true)
-
-            // Gerrit
-            timeout(10) {
-                println "Waiting for Gerrit to come up.."
-                salt.cmdRun(pepperEnv, 'I@gerrit:client', 'while true; do curl -sf 172.16.10.254:8080 >/dev/null && break; done')
-            }
-            salt.enforceState(pepperEnv, 'I@gerrit:client', 'gerrit', true)
-
-            // Jenkins
-            timeout(10) {
-                println "Waiting for Jenkins to come up.."
-                salt.cmdRun(pepperEnv, 'I@jenkins:client', 'while true; do curl -sf 172.16.10.254:8081 >/dev/null && break; done')
-            }
-            retry(2) {
-                // XXX: needs retry as first run installs python-jenkins
-                // thus make jenkins modules available for second run
-                salt.enforceState(pepperEnv, 'I@jenkins:client', 'jenkins', true)
-            }
-
-            // Postgres client - initialize OSS services databases
-            timeout(300){
-                println "Waiting for postgresql database to come up.."
-                salt.cmdRun(pepperEnv, 'I@postgresql:client', 'while true; do if docker service logs postgresql_postgresql-db | grep "ready to accept"; then break; else sleep 5; fi; done')
-            }
-            // XXX: first run usually fails on some inserts, but we need to create databases at first 
-            salt.enforceState(pepperEnv, 'I@postgresql:client', 'postgresql.client', true, false)
-
-            // Setup postgres database with integration between
-            // Pushkin notification service and Security Monkey security audit service
-            timeout(10) {
-                println "Waiting for Pushkin to come up.."
-                salt.cmdRun(pepperEnv, 'I@postgresql:client', 'while true; do curl -sf 172.16.10.254:8887/apps >/dev/null && break; done')
-            }
-            salt.enforceState(pepperEnv, 'I@postgresql:client', 'postgresql.client', true)
-
-            // Rundeck
-            timeout(10) {
-                println "Waiting for Rundeck to come up.."
-                salt.cmdRun(pepperEnv, 'I@rundeck:client', 'while true; do curl -sf 172.16.10.254:4440 >/dev/null && break; done')
-            }
-            salt.enforceState(pepperEnv, 'I@rundeck:client', 'rundeck.client', true)
-
-            // Elasticsearch
-            timeout(10) {
-                println 'Waiting for Elasticsearch to come up..'
-                salt.cmdRun(pepperEnv, 'I@elasticsearch:client', 'while true; do curl -sf 172.16.10.254:9200 >/dev/null && break; done')
-            }
-            retry(3){
-              sleep(10)
-              // XXX: first run sometimes fails on update indexes, so we need to wait
-              salt.enforceState(pepperEnv, 'I@elasticsearch:client', 'elasticsearch.client', true)
-            }
-        }
-
-        stage("Finalize") {
-            //
-            // Deploy user's ssh key
-            //
-            def adminUser
-            def authorizedKeysFile
-            def adminUserCmdOut = salt.cmdRun(pepperEnv, 'I@salt:master', "[ ! -d /home/ubuntu ] || echo 'ubuntu user exists'")
-            if (adminUserCmdOut =~ /ubuntu user exists/) {
-                adminUser = "ubuntu"
-                authorizedKeysFile = "/home/ubuntu/.ssh/authorized_keys"
-            } else {
-                adminUser = "root"
-                authorizedKeysFile = "/root/.ssh/authorized_keys"
-            }
-
-            if (sshPubKey) {
-                println "Deploying provided ssh key at ${authorizedKeysFile}"
-                salt.cmdRun(pepperEnv, '*', "echo '${sshPubKey}' | tee -a ${authorizedKeysFile}")
-            }
-
-            //
-            // Generate docs
-            //
-            try {
+            stage('Connect to Salt master') {
+                def saltMasterPort
                 try {
-                    // Run sphinx state to install sphinx-build needed in
-                    // upcomming orchestrate
-                    salt.enforceState(pepperEnv, 'I@sphinx:server', 'sphinx')
+                    saltMasterPort = SALT_MASTER_PORT
+                } catch (MissingPropertyException e) {
+                    saltMasterPort = 6969
+                }
+                saltMasterHost = openstack.getHeatStackOutputParam(openstackCloud, HEAT_STACK_NAME, 'salt_master_ip', openstackEnv)
+                currentBuild.description = "${HEAT_STACK_NAME}: ${saltMasterHost}"
+                saltMasterUrl = "http://${saltMasterHost}:${saltMasterPort}"
+                python.setupPepperVirtualenv(pepperEnv, saltMasterUrl, SALT_MASTER_CREDENTIALS)
+            }
+
+            //
+            // Install
+            //
+
+            stage('Install core infra') {
+                // salt.master, reclass
+                // refresh_pillar
+                // sync_all
+                // linux,openssh,salt.minion.ntp
+
+                orchestrate.installFoundationInfra(pepperEnv)
+                orchestrate.validateFoundationInfra(pepperEnv)
+            }
+
+            stage("Deploy GlusterFS") {
+                salt.enforceState(pepperEnv, 'I@glusterfs:server', 'glusterfs.server.service', true)
+                retry(2) {
+                    salt.enforceState(pepperEnv, 'ci01*', 'glusterfs.server.setup', true)
+                }
+                sleep(5)
+                salt.enforceState(pepperEnv, 'I@glusterfs:client', 'glusterfs.client', true)
+
+                timeout(5) {
+                    println "Waiting for GlusterFS volumes to get mounted.."
+                    salt.cmdRun(pepperEnv, 'I@glusterfs:client', 'while true; do systemctl -a|grep "GlusterFS File System"|grep -v mounted >/dev/null || break; done')
+                }
+                print common.prettyPrint(salt.cmdRun(pepperEnv, 'I@glusterfs:client', 'mount|grep fuse.glusterfs || echo "Command failed"'))
+            }
+
+            stage("Deploy GlusterFS") {
+                salt.enforceState(pepperEnv, 'I@haproxy:proxy', 'haproxy,keepalived')
+            }
+
+            stage("Setup Docker Swarm") {
+                salt.enforceState(pepperEnv, 'I@docker:host', 'docker.host', true)
+                salt.enforceState(pepperEnv, 'I@docker:swarm:role:master', 'docker.swarm', true)
+                salt.enforceState(pepperEnv, 'I@docker:swarm:role:master', 'salt', true)
+                salt.runSaltProcessStep(pepperEnv, 'I@docker:swarm:role:master', 'mine.flush')
+                salt.runSaltProcessStep(pepperEnv, 'I@docker:swarm:role:master', 'mine.update')
+                salt.enforceState(pepperEnv, 'I@docker:swarm', 'docker.swarm', true)
+                print common.prettyPrint(salt.cmdRun(pepperEnv, 'I@docker:swarm:role:master', 'docker node ls'))
+            }
+
+            stage("Configure OSS services") {
+                salt.enforceState(pepperEnv, 'I@devops_portal:config', 'devops_portal.config')
+                salt.enforceState(pepperEnv, 'I@rundeck:server', 'rundeck.server')
+            }
+
+            stage("Deploy Docker services") {
+                // We need /etc/aptly-publisher.yaml to be present before
+                // services are deployed
+                // XXX: for some unknown reason, refresh_pillar must be
+                // executed here
+                salt.runSaltProcessStep(pepperEnv, 'I@aptly:publisher', 'saltutil.refresh_pillar', [], null, true)
+                salt.enforceState(pepperEnv, 'I@aptly:publisher', 'aptly.publisher', true)
+                retry(3) {
+                    sleep(5)
+                    salt.enforceState(pepperEnv, 'I@docker:swarm:role:master', 'docker.client')
+                }
+                // XXX: Workaround to have `/var/lib/jenkins` on all
+                // nodes where jenkins_slave services are created.
+                salt.runSaltProcessStep(pepperEnv, 'I@docker:swarm', 'cmd.run', ['mkdir -p /var/lib/jenkins'])
+            }
+
+            stage("Configure CI/CD services") {
+                salt.syncAll(pepperEnv, '*')
+
+                // Aptly
+                timeout(10) {
+                    println "Waiting for Aptly to come up.."
+                    retry(2) {
+                        // XXX: retry to workaround magical VALUE_TRIMMED
+                        // response from salt master + to give slow cloud some
+                        // more time to settle down
+                        salt.cmdRun(pepperEnv, 'I@aptly:server', 'while true; do curl -sf http://172.16.10.254:8084/api/version >/dev/null && break; done')
+                    }
+                }
+                salt.enforceState(pepperEnv, 'I@aptly:server', 'aptly', true)
+
+                // OpenLDAP
+                timeout(10) {
+                    println "Waiting for OpenLDAP to come up.."
+                    salt.cmdRun(pepperEnv, 'I@openldap:client', 'while true; do curl -sf ldap://172.16.10.254 >/dev/null && break; done')
+                }
+                salt.enforceState(pepperEnv, 'I@openldap:client', 'openldap', true)
+
+                // Gerrit
+                timeout(10) {
+                    println "Waiting for Gerrit to come up.."
+                    salt.cmdRun(pepperEnv, 'I@gerrit:client', 'while true; do curl -sf 172.16.10.254:8080 >/dev/null && break; done')
+                }
+                salt.enforceState(pepperEnv, 'I@gerrit:client', 'gerrit', true)
+
+                // Jenkins
+                timeout(10) {
+                    println "Waiting for Jenkins to come up.."
+                    salt.cmdRun(pepperEnv, 'I@jenkins:client', 'while true; do curl -sf 172.16.10.254:8081 >/dev/null && break; done')
+                }
+                retry(2) {
+                    // XXX: needs retry as the first run installs python-jenkins,
+                    // making jenkins modules available for the second run
+                    salt.enforceState(pepperEnv, 'I@jenkins:client', 'jenkins', true)
+                }
+
+                // Postgres client - initialize OSS services databases
+                timeout(300){
+                    println "Waiting for postgresql database to come up.."
+                    salt.cmdRun(pepperEnv, 'I@postgresql:client', 'while true; do if docker service logs postgresql_postgresql-db | grep "ready to accept"; then break; else sleep 5; fi; done')
+                }
+                // XXX: first run usually fails on some inserts, but we need to create the databases first
+                salt.enforceState(pepperEnv, 'I@postgresql:client', 'postgresql.client', true, false)
+
+                // Setup postgres database with integration between
+                // Pushkin notification service and Security Monkey security audit service
+                timeout(10) {
+                    println "Waiting for Pushkin to come up.."
+                    salt.cmdRun(pepperEnv, 'I@postgresql:client', 'while true; do curl -sf 172.16.10.254:8887/apps >/dev/null && break; done')
+                }
+                salt.enforceState(pepperEnv, 'I@postgresql:client', 'postgresql.client', true)
+
+                // Rundeck
+                timeout(10) {
+                    println "Waiting for Rundeck to come up.."
+                    salt.cmdRun(pepperEnv, 'I@rundeck:client', 'while true; do curl -sf 172.16.10.254:4440 >/dev/null && break; done')
+                }
+                salt.enforceState(pepperEnv, 'I@rundeck:client', 'rundeck.client', true)
+
+                // Elasticsearch
+                timeout(10) {
+                    println 'Waiting for Elasticsearch to come up..'
+                    salt.cmdRun(pepperEnv, 'I@elasticsearch:client', 'while true; do curl -sf 172.16.10.254:9200 >/dev/null && break; done')
+                }
+                retry(3){
+                  sleep(10)
+                  // XXX: first run sometimes fails on update indexes, so we need to wait
+                  salt.enforceState(pepperEnv, 'I@elasticsearch:client', 'elasticsearch.client', true)
+                }
+            }
+
+            stage("Finalize") {
+                //
+                // Deploy user's ssh key
+                //
+                def adminUser
+                def authorizedKeysFile
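+                // pick the admin account: use 'ubuntu' if its home directory exists on the Salt master, otherwise fall back to root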
+                def adminUserCmdOut = salt.cmdRun(pepperEnv, 'I@salt:master', "[ ! -d /home/ubuntu ] || echo 'ubuntu user exists'")
+                if (adminUserCmdOut =~ /ubuntu user exists/) {
+                    adminUser = "ubuntu"
+                    authorizedKeysFile = "/home/ubuntu/.ssh/authorized_keys"
+                } else {
+                    adminUser = "root"
+                    authorizedKeysFile = "/root/.ssh/authorized_keys"
+                }
+
+                if (sshPubKey) {
+                    println "Deploying provided ssh key at ${authorizedKeysFile}"
+                    salt.cmdRun(pepperEnv, '*', "echo '${sshPubKey}' | tee -a ${authorizedKeysFile}")
+                }
+
+                //
+                // Generate docs
+                //
+                try {
+                    try {
+                        // Run sphinx state to install sphinx-build needed by the
+                        // upcoming orchestration
+                        salt.enforceState(pepperEnv, 'I@sphinx:server', 'sphinx')
+                    } catch (Throwable e) {
+                        true
+                    }
+                    retry(3) {
+                        // TODO: fix salt.orchestrateSystem
+                        // print salt.orchestrateSystem(pepperEnv, ['expression': '*', 'type': 'compound'], 'sphinx.orch.generate_doc')
+                        def out = salt.cmdRun(pepperEnv, 'I@salt:master', 'salt-run state.orchestrate sphinx.orch.generate_doc || echo "Command execution failed"')
+                        print common.prettyPrint(out)
+                        if (out =~ /Command execution failed/) {
+                            throw new Exception("Command execution failed")
+                        }
+                    }
                 } catch (Throwable e) {
+                    // We don't want sphinx docs to ruin the whole build, so possible
+                    // errors are just ignored here
                     true
                 }
-                retry(3) {
-                    // TODO: fix salt.orchestrateSystem
-                    // print salt.orchestrateSystem(pepperEnv, ['expression': '*', 'type': 'compound'], 'sphinx.orch.generate_doc')
-                    def out = salt.cmdRun(pepperEnv, 'I@salt:master', 'salt-run state.orchestrate sphinx.orch.generate_doc || echo "Command execution failed"')
-                    print common.prettyPrint(out)
-                    if (out =~ /Command execution failed/) {
-                        throw new Exception("Command execution failed")
-                    }
+                salt.enforceState(pepperEnv, 'I@nginx:server', 'nginx')
+
+                def failedSvc = salt.cmdRun(pepperEnv, '*', """systemctl --failed | grep -E 'loaded[ \t]+failed' && echo 'Command execution failed' || true""")
+                if (failedSvc =~ /Command execution failed/) {
+                    common.errorMsg("Some services are not running. Environment may not be fully functional!")
                 }
-            } catch (Throwable e) {
-                // We don't want sphinx docs to ruin whole build, so possible
-                // errors are just ignored here
-                true
+
+                common.successMsg("""
+    ============================================================
+    Your CI/CD lab has been deployed and you can enjoy it:
+    Use sshuttle to connect to your private subnet:
+
+        sshuttle -r ${adminUser}@${saltMasterHost} 172.16.10.0/24
+
+    And visit services running at 172.16.10.254 (vip address):
+
+        9600    HAProxy statistics
+        8080    Gerrit
+        8081    Jenkins
+        8089    LDAP administration
+        4440    Rundeck
+        8084    DevOps Portal
+        8091    Docker swarm visualizer
+        8090    Reclass-generated documentation
+
+    If you provided SSH_PUBLIC_KEY, you can use it to log in;
+    otherwise you need the private key associated with this
+    heat template.
+
+    DON'T FORGET TO TERMINATE YOUR STACK WHEN YOU DON'T NEED IT!
+    ============================================================""")
             }
-            salt.enforceState(pepperEnv, 'I@nginx:server', 'nginx')
-
-            def failedSvc = salt.cmdRun(pepperEnv, '*', """systemctl --failed | grep -E 'loaded[ \t]+failed' && echo 'Command execution failed' || true""")
-            if (failedSvc =~ /Command execution failed/) {
-                common.errorMsg("Some services are not running. Environment may not be fully functional!")
-            }
-
-            common.successMsg("""
-============================================================
-Your CI/CD lab has been deployed and you can enjoy it:
-Use sshuttle to connect to your private subnet:
-
-    sshuttle -r ${adminUser}@${saltMasterHost} 172.16.10.0/24
-
-And visit services running at 172.16.10.254 (vip address):
-
-    9600    HAProxy statistics
-    8080    Gerrit
-    8081    Jenkins
-    8089    LDAP administration
-    4440    Rundeck
-    8084    DevOps Portal
-    8091    Docker swarm visualizer
-    8090    Reclass-generated documentation
-
-If you provided SSH_PUBLIC_KEY, you can use it to login,
-otherwise you need to get private key connected to this
-heat template.
-
-DON'T FORGET TO TERMINATE YOUR STACK WHEN YOU DON'T NEED IT!
-============================================================""")
-        }
-    } catch (Throwable e) {
-        // If there was an error or exception thrown, the build failed
-        currentBuild.result = "FAILURE"
-        currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
-        throw e
-    } finally {
-        // Cleanup
-        if (HEAT_STACK_DELETE.toBoolean() == true) {
-            stage('Trigger cleanup job') {
-                build(job: 'deploy-stack-cleanup', parameters: [
-                    [$class: 'StringParameterValue', name: 'STACK_NAME', value: HEAT_STACK_NAME],
-                    [$class: 'StringParameterValue', name: 'OPENSTACK_API_PROJECT', value: OPENSTACK_API_PROJECT],
-                ])
+        } catch (Throwable e) {
+            // If there was an error or exception thrown, the build failed
+            currentBuild.result = "FAILURE"
+            currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+            throw e
+        } finally {
+            // Cleanup
+            if (HEAT_STACK_DELETE.toBoolean() == true) {
+                stage('Trigger cleanup job') {
+                    build(job: 'deploy-stack-cleanup', parameters: [
+                        [$class: 'StringParameterValue', name: 'STACK_NAME', value: HEAT_STACK_NAME],
+                        [$class: 'StringParameterValue', name: 'OPENSTACK_API_PROJECT', value: OPENSTACK_API_PROJECT],
+                    ])
+                }
             }
         }
     }
diff --git a/cleanup-pipeline.groovy b/cleanup-pipeline.groovy
index cd0a463..ec3e19d 100644
--- a/cleanup-pipeline.groovy
+++ b/cleanup-pipeline.groovy
@@ -26,53 +26,53 @@
 openstack = new com.mirantis.mk.Openstack()
 aws = new com.mirantis.mk.Aws()
 salt = new com.mirantis.mk.Salt()
+timeout(time: 12, unit: 'HOURS') {
+    node {
 
-node {
+        def venv_path = "${env.WORKSPACE}/venv"
+        def env_vars
 
-    def venv_path = "${env.WORKSPACE}/venv"
-    def env_vars
-
-    // default STACK_TYPE is heat
-    if (!common.validInputParam('STACK_TYPE')) {
-        STACK_TYPE = 'heat'
-    }
-
-    stage('Install environment') {
-        if (STACK_TYPE == 'heat') {
-
-            def openstackVersion = OPENSTACK_API_CLIENT ? OPENSTACK_API_CLIENT : 'liberty'
-            openstack.setupOpenstackVirtualenv(venv_path, openstackVersion)
-
-        } else if (STACK_TYPE == 'aws') {
-
-            env_vars = aws.getEnvVars(AWS_API_CREDENTIALS, AWS_DEFAULT_REGION)
-            aws.setupVirtualEnv(venv_path)
-
-        } else {
-            throw new Exception('Stack type is not supported')
+        // default STACK_TYPE is heat
+        if (!common.validInputParam('STACK_TYPE')) {
+            STACK_TYPE = 'heat'
         }
 
-    }
+        stage('Install environment') {
+            if (STACK_TYPE == 'heat') {
 
-    stage('Delete stack') {
-        if (STACK_TYPE == 'heat') {
-            def openstackCloud = openstack.createOpenstackEnv(
-                OPENSTACK_API_URL, OPENSTACK_API_CREDENTIALS,
-                OPENSTACK_API_PROJECT,OPENSTACK_API_PROJECT_DOMAIN,
-                OPENSTACK_API_PROJECT_ID, OPENSTACK_API_USER_DOMAIN,
-                OPENSTACK_API_VERSION)
-            openstack.getKeystoneToken(openstackCloud, venv_path)
+                def openstackVersion = OPENSTACK_API_CLIENT ? OPENSTACK_API_CLIENT : 'liberty'
+                openstack.setupOpenstackVirtualenv(venv_path, openstackVersion)
 
-            common.infoMsg("Deleting Heat Stack " + STACK_NAME)
-            openstack.deleteHeatStack(openstackCloud, STACK_NAME, venv_path)
-        } else if (STACK_TYPE == 'aws') {
+            } else if (STACK_TYPE == 'aws') {
 
-            aws.deleteStack(venv_path, env_vars, STACK_NAME)
+                env_vars = aws.getEnvVars(AWS_API_CREDENTIALS, AWS_DEFAULT_REGION)
+                aws.setupVirtualEnv(venv_path)
 
-        } else {
-            throw new Exception('Stack type is not supported')
+            } else {
+                throw new Exception('Stack type is not supported')
+            }
+
         }
 
-    }
+        stage('Delete stack') {
+            if (STACK_TYPE == 'heat') {
+                def openstackCloud = openstack.createOpenstackEnv(
+                    OPENSTACK_API_URL, OPENSTACK_API_CREDENTIALS,
+                    OPENSTACK_API_PROJECT,OPENSTACK_API_PROJECT_DOMAIN,
+                    OPENSTACK_API_PROJECT_ID, OPENSTACK_API_USER_DOMAIN,
+                    OPENSTACK_API_VERSION)
+                openstack.getKeystoneToken(openstackCloud, venv_path)
 
+                common.infoMsg("Deleting Heat Stack " + STACK_NAME)
+                openstack.deleteHeatStack(openstackCloud, STACK_NAME, venv_path)
+            } else if (STACK_TYPE == 'aws') {
+
+                aws.deleteStack(venv_path, env_vars, STACK_NAME)
+
+            } else {
+                throw new Exception('Stack type is not supported')
+            }
+
+        }
+    }
 }
diff --git a/cloud-deploy-pipeline.groovy b/cloud-deploy-pipeline.groovy
index 11ac571..cda022b 100644
--- a/cloud-deploy-pipeline.groovy
+++ b/cloud-deploy-pipeline.groovy
@@ -87,500 +87,501 @@
 if (common.validInputParam('SLAVE_NODE')) {
     slave_node = SLAVE_NODE
 }
+timeout(time: 12, unit: 'HOURS') {
+    node(slave_node) {
+        try {
+            // Set build-specific variables
+            venv = "${env.WORKSPACE}/venv"
+            venvPepper = "${env.WORKSPACE}/venvPepper"
 
-node(slave_node) {
-    try {
-        // Set build-specific variables
-        venv = "${env.WORKSPACE}/venv"
-        venvPepper = "${env.WORKSPACE}/venvPepper"
+            //
+            // Prepare machines
+            //
+            stage ('Create infrastructure') {
 
-        //
-        // Prepare machines
-        //
-        stage ('Create infrastructure') {
+                outputs.put('stack_type', STACK_TYPE)
 
-            outputs.put('stack_type', STACK_TYPE)
+                if (STACK_TYPE == 'heat') {
+                    // value defaults
+                    envParams = [
+                        'cluster_zone': HEAT_STACK_ZONE,
+                        'cluster_public_net': HEAT_STACK_PUBLIC_NET
+                    ]
 
-            if (STACK_TYPE == 'heat') {
-                // value defaults
-                envParams = [
-                    'cluster_zone': HEAT_STACK_ZONE,
-                    'cluster_public_net': HEAT_STACK_PUBLIC_NET
-                ]
-
-                if (STACK_REUSE.toBoolean() == true && STACK_NAME == '') {
-                    error("If you want to reuse existing stack you need to provide it's name")
-                }
-
-                if (STACK_REUSE.toBoolean() == false) {
-                    // Don't allow to set custom heat stack name
-                    wrap([$class: 'BuildUser']) {
-                        if (env.BUILD_USER_ID) {
-                            STACK_NAME = "${env.BUILD_USER_ID}-${JOB_NAME}-${BUILD_NUMBER}"
-                        } else {
-                            STACK_NAME = "jenkins-${JOB_NAME}-${BUILD_NUMBER}"
-                        }
-                        currentBuild.description = STACK_NAME
-                    }
-                }
-
-                // no underscore in STACK_NAME
-                STACK_NAME = STACK_NAME.replaceAll('_', '-')
-                outputs.put('stack_name', STACK_NAME)
-
-                // set description
-                currentBuild.description = "${STACK_NAME}"
-
-                // get templates
-                git.checkoutGitRepository('template', STACK_TEMPLATE_URL, STACK_TEMPLATE_BRANCH, STACK_TEMPLATE_CREDENTIALS)
-
-                // create openstack env
-                openstack.setupOpenstackVirtualenv(venv, OPENSTACK_API_CLIENT)
-                openstackCloud = openstack.createOpenstackEnv(
-                    OPENSTACK_API_URL, OPENSTACK_API_CREDENTIALS,
-                    OPENSTACK_API_PROJECT, OPENSTACK_API_PROJECT_DOMAIN,
-                    OPENSTACK_API_PROJECT_ID, OPENSTACK_API_USER_DOMAIN,
-                    OPENSTACK_API_VERSION)
-                openstack.getKeystoneToken(openstackCloud, venv)
-
-                //
-                // Verify possibility of create stack for given user and stack type
-                //
-                wrap([$class: 'BuildUser']) {
-                    if (env.BUILD_USER_ID && !env.BUILD_USER_ID.equals("jenkins") && !STACK_REUSE.toBoolean()) {
-                        def existingStacks = openstack.getStacksForNameContains(openstackCloud, "${env.BUILD_USER_ID}-${JOB_NAME}".replaceAll('_', '-'), venv)
-                        if (existingStacks.size() >= _MAX_PERMITTED_STACKS) {
-                            STACK_DELETE = "false"
-                            throw new Exception("You cannot create new stack, you already have ${_MAX_PERMITTED_STACKS} stacks of this type (${JOB_NAME}). \nStack names: ${existingStacks}")
-                        }
-                    }
-                }
-                // launch stack
-                if (STACK_REUSE.toBoolean() == false) {
-
-                    // set reclass repo in heat env
-                    try {
-                        envParams.put('cfg_reclass_branch', STACK_RECLASS_BRANCH)
-                        envParams.put('cfg_reclass_address', STACK_RECLASS_ADDRESS)
-                    } catch (MissingPropertyException e) {
-                        common.infoMsg("Property STACK_RECLASS_BRANCH or STACK_RECLASS_ADDRESS not found! Using default values from template.")
+                    if (STACK_REUSE.toBoolean() == true && STACK_NAME == '') {
+                        error("If you want to reuse existing stack you need to provide it's name")
                     }
 
-                    // put formulas revision - stable, testing or nightly
-                    if (common.validInputParam('FORMULA_PKG_REVISION')) {
-                        common.infoMsg("Setting formulas revision to ${FORMULA_PKG_REVISION}")
-                        envParams.put('cfg_formula_pkg_revision', FORMULA_PKG_REVISION)
-                    }
-
-                    // put extra repo definitions
-                    if (common.validInputParam('BOOTSTRAP_EXTRA_REPO_PARAMS')) {
-                        common.infoMsg("Setting additional repo during bootstrap to ${BOOTSTRAP_EXTRA_REPO_PARAMS}")
-                        envParams.put('cfg_bootstrap_extra_repo_params', BOOTSTRAP_EXTRA_REPO_PARAMS)
-                    }
-
-                    // put extra salt-formulas
-                    if (common.validInputParam('EXTRA_FORMULAS')) {
-                        common.infoMsg("Setting extra salt-formulas to ${EXTRA_FORMULAS}")
-                        envParams.put('cfg_extra_formulas', EXTRA_FORMULAS)
-                    }
-
-                    openstack.createHeatStack(openstackCloud, STACK_NAME, STACK_TEMPLATE, envParams, HEAT_STACK_ENVIRONMENT, venv)
-                }
-
-                // get SALT_MASTER_URL
-                saltMasterHost = openstack.getHeatStackOutputParam(openstackCloud, STACK_NAME, 'salt_master_ip', venv)
-                // check that saltMasterHost is valid
-                if (!saltMasterHost || !saltMasterHost.matches(ipRegex)) {
-                    common.errorMsg("saltMasterHost is not a valid ip, value is: ${saltMasterHost}")
-                    throw new Exception("saltMasterHost is not a valid ip")
-                }
-
-                currentBuild.description = "${STACK_NAME} ${saltMasterHost}"
-
-                SALT_MASTER_URL = "http://${saltMasterHost}:6969"
-
-            } else if (STACK_TYPE == 'aws') {
-
-                // setup environment
-                aws.setupVirtualEnv(venv)
-
-                // set aws_env_vars
-                aws_env_vars = aws.getEnvVars(AWS_API_CREDENTIALS, AWS_STACK_REGION)
-
-                if (STACK_REUSE.toBoolean() == true && STACK_NAME == '') {
-                    error("If you want to reuse existing stack you need to provide it's name")
-                }
-
-                if (STACK_REUSE.toBoolean() == false) {
-                    // Don't allow to set custom stack name
-                    wrap([$class: 'BuildUser']) {
-                        if (env.BUILD_USER_ID) {
-                            STACK_NAME = "${env.BUILD_USER_ID}-${JOB_NAME}-${BUILD_NUMBER}"
-                        } else {
-                            STACK_NAME = "jenkins-${JOB_NAME}-${BUILD_NUMBER}"
+                    if (STACK_REUSE.toBoolean() == false) {
+                        // Don't allow setting a custom heat stack name
+                        wrap([$class: 'BuildUser']) {
+                            if (env.BUILD_USER_ID) {
+                                STACK_NAME = "${env.BUILD_USER_ID}-${JOB_NAME}-${BUILD_NUMBER}"
+                            } else {
+                                STACK_NAME = "jenkins-${JOB_NAME}-${BUILD_NUMBER}"
+                            }
+                            currentBuild.description = STACK_NAME
                         }
                     }
 
                     // no underscore in STACK_NAME
                     STACK_NAME = STACK_NAME.replaceAll('_', '-')
-                }
+                    outputs.put('stack_name', STACK_NAME)
 
-                // set description
-                currentBuild.description = STACK_NAME
-                outputs.put('stack_name', STACK_NAME)
+                    // set description
+                    currentBuild.description = "${STACK_NAME}"
 
-                if (STACK_REUSE.toBoolean() == false) {
                     // get templates
                     git.checkoutGitRepository('template', STACK_TEMPLATE_URL, STACK_TEMPLATE_BRANCH, STACK_TEMPLATE_CREDENTIALS)
 
-                    // start stack
-                    def stack_params = [
-                        "ParameterKey=KeyName,ParameterValue=" + AWS_SSH_KEY,
-                        "ParameterKey=CmpNodeCount,ParameterValue=" + STACK_COMPUTE_COUNT
-                    ]
-                    def template_file = 'cfn/' + STACK_TEMPLATE + '.yml'
-                    aws.createStack(venv, aws_env_vars, template_file, STACK_NAME, stack_params)
-                }
+                    // create openstack env
+                    openstack.setupOpenstackVirtualenv(venv, OPENSTACK_API_CLIENT)
+                    openstackCloud = openstack.createOpenstackEnv(
+                        OPENSTACK_API_URL, OPENSTACK_API_CREDENTIALS,
+                        OPENSTACK_API_PROJECT, OPENSTACK_API_PROJECT_DOMAIN,
+                        OPENSTACK_API_PROJECT_ID, OPENSTACK_API_USER_DOMAIN,
+                        OPENSTACK_API_VERSION)
+                    openstack.getKeystoneToken(openstackCloud, venv)
 
-                // wait for stack to be ready
-                aws.waitForStatus(venv, aws_env_vars, STACK_NAME, 'CREATE_COMPLETE')
-
-                // get outputs
-                saltMasterHost = aws.getOutputs(venv, aws_env_vars, STACK_NAME, 'SaltMasterIP')
-
-                // check that saltMasterHost is valid
-                if (!saltMasterHost || !saltMasterHost.matches(ipRegex)) {
-                    common.errorMsg("saltMasterHost is not a valid ip, value is: ${saltMasterHost}")
-                    throw new Exception("saltMasterHost is not a valid ip")
-                }
-
-                currentBuild.description = "${STACK_NAME} ${saltMasterHost}"
-                SALT_MASTER_URL = "http://${saltMasterHost}:6969"
-
-            } else if (STACK_TYPE != 'physical') {
-                throw new Exception("STACK_TYPE ${STACK_TYPE} is not supported")
-            }
-
-            outputs.put('salt_api', SALT_MASTER_URL)
-
-            // Setup virtualenv for pepper
-            python.setupPepperVirtualenv(venvPepper, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
-        }
-
-
-        // Set up override params
-        if (common.validInputParam('SALT_OVERRIDES')) {
-            stage('Set Salt overrides') {
-                salt.setSaltOverrides(venvPepper,  SALT_OVERRIDES)
-            }
-        }
-
-        //
-        // Install
-        //
-
-        if (common.checkContains('STACK_INSTALL', 'core')) {
-            stage('Install core infrastructure') {
-                def staticMgmtNetwork = false
-                if (common.validInputParam('STATIC_MGMT_NETWORK')) {
-                    staticMgmtNetwork = STATIC_MGMT_NETWORK.toBoolean()
-                }
-                orchestrate.installFoundationInfra(venvPepper, staticMgmtNetwork)
-
-                if (common.checkContains('STACK_INSTALL', 'kvm')) {
-                    orchestrate.installInfraKvm(venvPepper)
-                    orchestrate.installFoundationInfra(venvPepper, staticMgmtNetwork)
-                }
-
-                orchestrate.validateFoundationInfra(venvPepper)
-            }
-        }
-
-        // install k8s
-        if (common.checkContains('STACK_INSTALL', 'k8s')) {
-
-            stage('Install Kubernetes infra') {
-                if (STACK_TYPE == 'aws') {
-                    // configure kubernetes_control_address - save loadbalancer
-                    def awsOutputs = aws.getOutputs(venv, aws_env_vars, STACK_NAME)
-                    common.prettyPrint(awsOutputs)
-                    if (awsOutputs.containsKey('ControlLoadBalancer')) {
-                        salt.runSaltProcessStep(venvPepper, 'I@salt:master', 'reclass.cluster_meta_set', ['kubernetes_control_address', awsOutputs['ControlLoadBalancer']], null, true)
-                        outputs.put('kubernetes_apiserver', 'https://' + awsOutputs['ControlLoadBalancer'])
+                    //
+                    // Verify the possibility of creating a stack for the given user and stack type
+                    //
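+                    // note: the BuildUser wrapper (Build User Vars plugin) exposes env.BUILD_USER_ID used in the check below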
+                    wrap([$class: 'BuildUser']) {
+                        if (env.BUILD_USER_ID && !env.BUILD_USER_ID.equals("jenkins") && !STACK_REUSE.toBoolean()) {
+                            def existingStacks = openstack.getStacksForNameContains(openstackCloud, "${env.BUILD_USER_ID}-${JOB_NAME}".replaceAll('_', '-'), venv)
+                            if (existingStacks.size() >= _MAX_PERMITTED_STACKS) {
+                                STACK_DELETE = "false"
+                                throw new Exception("You cannot create a new stack; you already have ${_MAX_PERMITTED_STACKS} stacks of this type (${JOB_NAME}). \nStack names: ${existingStacks}")
+                            }
+                        }
                     }
-                }
+                    // launch stack
+                    if (STACK_REUSE.toBoolean() == false) {
 
-                // ensure certificates are generated properly
-                salt.runSaltProcessStep(venvPepper, '*', 'saltutil.refresh_pillar', [], null, true)
-                salt.enforceState(venvPepper, '*', ['salt.minion.cert'], true)
-
-                orchestrate.installKubernetesInfra(venvPepper)
-            }
-
-            if (common.checkContains('STACK_INSTALL', 'contrail')) {
-                stage('Install Contrail for Kubernetes') {
-                    orchestrate.installContrailNetwork(venvPepper)
-                    orchestrate.installContrailCompute(venvPepper)
-                }
-            }
-
-            stage('Install Kubernetes control') {
-                orchestrate.installKubernetesControl(venvPepper)
-
-                // collect artifacts (kubeconfig)
-                writeFile(file: 'kubeconfig', text: salt.getFileContent(venvPepper, 'I@kubernetes:master and *01*', '/etc/kubernetes/admin-kube-config'))
-                archiveArtifacts(artifacts: 'kubeconfig')
-            }
-
-            stage('Install Kubernetes computes') {
-                if (common.validInputParam('STACK_COMPUTE_COUNT')) {
-                    if (STACK_COMPUTE_COUNT > 0) {
-                        if (STACK_TYPE == 'aws') {
-                            // get stack info
-                            def scaling_group = aws.getOutputs(venv, aws_env_vars, STACK_NAME, 'ComputesScalingGroup')
-
-                            //update autoscaling group
-                            aws.updateAutoscalingGroup(venv, aws_env_vars, scaling_group, ["--desired-capacity " + STACK_COMPUTE_COUNT])
-
-                            // wait for computes to boot up
-                            aws.waitForAutoscalingInstances(venv, aws_env_vars, scaling_group)
-                            sleep(60)
-
-                        } else if (STACK_TYPE == 'heat') {
-                            envParams.put('cluster_node_count', STACK_COMPUTE_COUNT)
-
-                            openstack.createHeatStack(openstackCloud, STACK_NAME, STACK_TEMPLATE, envParams, HEAT_STACK_ENVIRONMENT, venv, "update")
-                            sleep(60)
+                        // set reclass repo in heat env
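+                        // reading an undefined job parameter raises MissingPropertyException, treated below as "use template defaults"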
+                        try {
+                            envParams.put('cfg_reclass_branch', STACK_RECLASS_BRANCH)
+                            envParams.put('cfg_reclass_address', STACK_RECLASS_ADDRESS)
+                        } catch (MissingPropertyException e) {
+                            common.infoMsg("Property STACK_RECLASS_BRANCH or STACK_RECLASS_ADDRESS not found! Using default values from template.")
                         }
 
+                        // put formulas revision - stable, testing or nightly
+                        if (common.validInputParam('FORMULA_PKG_REVISION')) {
+                            common.infoMsg("Setting formulas revision to ${FORMULA_PKG_REVISION}")
+                            envParams.put('cfg_formula_pkg_revision', FORMULA_PKG_REVISION)
+                        }
+
+                        // put extra repo definitions
+                        if (common.validInputParam('BOOTSTRAP_EXTRA_REPO_PARAMS')) {
+                            common.infoMsg("Setting additional repo during bootstrap to ${BOOTSTRAP_EXTRA_REPO_PARAMS}")
+                            envParams.put('cfg_bootstrap_extra_repo_params', BOOTSTRAP_EXTRA_REPO_PARAMS)
+                        }
+
+                        // put extra salt-formulas
+                        if (common.validInputParam('EXTRA_FORMULAS')) {
+                            common.infoMsg("Setting extra salt-formulas to ${EXTRA_FORMULAS}")
+                            envParams.put('cfg_extra_formulas', EXTRA_FORMULAS)
+                        }
+
+                        openstack.createHeatStack(openstackCloud, STACK_NAME, STACK_TEMPLATE, envParams, HEAT_STACK_ENVIRONMENT, venv)
+                    }
+
+                    // get SALT_MASTER_URL
+                    saltMasterHost = openstack.getHeatStackOutputParam(openstackCloud, STACK_NAME, 'salt_master_ip', venv)
+                    // check that saltMasterHost is valid
+                    if (!saltMasterHost || !saltMasterHost.matches(ipRegex)) {
+                        common.errorMsg("saltMasterHost is not a valid IP address, value is: ${saltMasterHost}")
+                        throw new Exception("saltMasterHost is not a valid IP address")
+                    }
+
+                    currentBuild.description = "${STACK_NAME} ${saltMasterHost}"
+
+                    SALT_MASTER_URL = "http://${saltMasterHost}:6969"
+
+                } else if (STACK_TYPE == 'aws') {
+
+                    // setup environment
+                    aws.setupVirtualEnv(venv)
+
+                    // set aws_env_vars
+                    aws_env_vars = aws.getEnvVars(AWS_API_CREDENTIALS, AWS_STACK_REGION)
+
+                    if (STACK_REUSE.toBoolean() == true && STACK_NAME == '') {
+                        error("If you want to reuse an existing stack you need to provide its name")
+                    }
+
+                    if (STACK_REUSE.toBoolean() == false) {
+                        // Don't allow setting a custom stack name
+                        wrap([$class: 'BuildUser']) {
+                            if (env.BUILD_USER_ID) {
+                                STACK_NAME = "${env.BUILD_USER_ID}-${JOB_NAME}-${BUILD_NUMBER}"
+                            } else {
+                                STACK_NAME = "jenkins-${JOB_NAME}-${BUILD_NUMBER}"
+                            }
+                        }
+
+                        // no underscore in STACK_NAME
+                        STACK_NAME = STACK_NAME.replaceAll('_', '-')
+                    }
+
+                    // set description
+                    currentBuild.description = STACK_NAME
+                    outputs.put('stack_name', STACK_NAME)
+
+                    if (STACK_REUSE.toBoolean() == false) {
+                        // get templates
+                        git.checkoutGitRepository('template', STACK_TEMPLATE_URL, STACK_TEMPLATE_BRANCH, STACK_TEMPLATE_CREDENTIALS)
+
+                        // start stack
+                        def stack_params = [
+                            "ParameterKey=KeyName,ParameterValue=" + AWS_SSH_KEY,
+                            "ParameterKey=CmpNodeCount,ParameterValue=" + STACK_COMPUTE_COUNT
+                        ]
+                        def template_file = 'cfn/' + STACK_TEMPLATE + '.yml'
+                        aws.createStack(venv, aws_env_vars, template_file, STACK_NAME, stack_params)
+                    }
+
+                    // wait for stack to be ready
+                    aws.waitForStatus(venv, aws_env_vars, STACK_NAME, 'CREATE_COMPLETE')
+
+                    // get outputs
+                    saltMasterHost = aws.getOutputs(venv, aws_env_vars, STACK_NAME, 'SaltMasterIP')
+
+                    // check that saltMasterHost is valid
+                    if (!saltMasterHost || !saltMasterHost.matches(ipRegex)) {
+                        common.errorMsg("saltMasterHost is not a valid IP address, value is: ${saltMasterHost}")
+                        throw new Exception("saltMasterHost is not a valid IP address")
+                    }
+
+                    currentBuild.description = "${STACK_NAME} ${saltMasterHost}"
+                    SALT_MASTER_URL = "http://${saltMasterHost}:6969"
+
+                } else if (STACK_TYPE != 'physical') {
+                    throw new Exception("STACK_TYPE ${STACK_TYPE} is not supported")
+                }
+
+                outputs.put('salt_api', SALT_MASTER_URL)
+
+                // Setup virtualenv for pepper
+                python.setupPepperVirtualenv(venvPepper, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+            }
+
+
+            // Set up override params
+            if (common.validInputParam('SALT_OVERRIDES')) {
+                stage('Set Salt overrides') {
+                    salt.setSaltOverrides(venvPepper,  SALT_OVERRIDES)
+                }
+            }
+
+            //
+            // Install
+            //
+
+            if (common.checkContains('STACK_INSTALL', 'core')) {
+                stage('Install core infrastructure') {
+                    def staticMgmtNetwork = false
+                    if (common.validInputParam('STATIC_MGMT_NETWORK')) {
+                        staticMgmtNetwork = STATIC_MGMT_NETWORK.toBoolean()
+                    }
+                    orchestrate.installFoundationInfra(venvPepper, staticMgmtNetwork)
+
+                    if (common.checkContains('STACK_INSTALL', 'kvm')) {
+                        orchestrate.installInfraKvm(venvPepper)
+                        orchestrate.installFoundationInfra(venvPepper, staticMgmtNetwork)
+                    }
+
+                    orchestrate.validateFoundationInfra(venvPepper)
+                }
+            }
+
+            // install k8s
+            if (common.checkContains('STACK_INSTALL', 'k8s')) {
+
+                stage('Install Kubernetes infra') {
+                    if (STACK_TYPE == 'aws') {
+                        // configure kubernetes_control_address - save loadbalancer
+                        def awsOutputs = aws.getOutputs(venv, aws_env_vars, STACK_NAME)
+                        common.prettyPrint(awsOutputs)
+                        if (awsOutputs.containsKey('ControlLoadBalancer')) {
+                            salt.runSaltProcessStep(venvPepper, 'I@salt:master', 'reclass.cluster_meta_set', ['kubernetes_control_address', awsOutputs['ControlLoadBalancer']], null, true)
+                            outputs.put('kubernetes_apiserver', 'https://' + awsOutputs['ControlLoadBalancer'])
+                        }
+                    }
+
+                    // ensure certificates are generated properly
+                    salt.runSaltProcessStep(venvPepper, '*', 'saltutil.refresh_pillar', [], null, true)
+                    salt.enforceState(venvPepper, '*', ['salt.minion.cert'], true)
+
+                    orchestrate.installKubernetesInfra(venvPepper)
+                }
+
+                if (common.checkContains('STACK_INSTALL', 'contrail')) {
+                    stage('Install Contrail for Kubernetes') {
+                        orchestrate.installContrailNetwork(venvPepper)
+                        orchestrate.installContrailCompute(venvPepper)
                     }
                 }
 
-                orchestrate.installKubernetesCompute(venvPepper)
-            }
-        }
+                stage('Install Kubernetes control') {
+                    orchestrate.installKubernetesControl(venvPepper)
 
-        // install openstack
-        if (common.checkContains('STACK_INSTALL', 'openstack')) {
-            // install Infra and control, tests, ...
-
-            stage('Install OpenStack infra') {
-                orchestrate.installOpenstackInfra(venvPepper)
-            }
-
-            stage('Install OpenStack control') {
-                orchestrate.installOpenstackControl(venvPepper)
-            }
-
-            stage('Install OpenStack network') {
-
-                if (common.checkContains('STACK_INSTALL', 'contrail')) {
-                    orchestrate.installContrailNetwork(venvPepper)
-                } else if (common.checkContains('STACK_INSTALL', 'ovs')) {
-                    orchestrate.installOpenstackNetwork(venvPepper)
+                    // collect artifacts (kubeconfig)
+                    writeFile(file: 'kubeconfig', text: salt.getFileContent(venvPepper, 'I@kubernetes:master and *01*', '/etc/kubernetes/admin-kube-config'))
+                    archiveArtifacts(artifacts: 'kubeconfig')
                 }
 
-                salt.runSaltProcessStep(venvPepper, 'I@keystone:server', 'cmd.run', ['. /root/keystonerc; neutron net-list'])
-                salt.runSaltProcessStep(venvPepper, 'I@keystone:server', 'cmd.run', ['. /root/keystonerc; nova net-list'])
+                stage('Install Kubernetes computes') {
+                    if (common.validInputParam('STACK_COMPUTE_COUNT')) {
+                        if (STACK_COMPUTE_COUNT > 0) {
+                            if (STACK_TYPE == 'aws') {
+                                // get stack info
+                                def scaling_group = aws.getOutputs(venv, aws_env_vars, STACK_NAME, 'ComputesScalingGroup')
+
+                                // update autoscaling group
+                                aws.updateAutoscalingGroup(venv, aws_env_vars, scaling_group, ["--desired-capacity " + STACK_COMPUTE_COUNT])
+
+                                // wait for computes to boot up
+                                aws.waitForAutoscalingInstances(venv, aws_env_vars, scaling_group)
+                                sleep(60)
+
+                            } else if (STACK_TYPE == 'heat') {
+                                envParams.put('cluster_node_count', STACK_COMPUTE_COUNT)
+
+                                openstack.createHeatStack(openstackCloud, STACK_NAME, STACK_TEMPLATE, envParams, HEAT_STACK_ENVIRONMENT, venv, "update")
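+                                // as in the AWS branch, give the new compute nodes time to boot before proceeding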
+                                sleep(60)
+                            }
+
+                        }
+                    }
+
+                    orchestrate.installKubernetesCompute(venvPepper)
+                }
             }
 
-            if (salt.testTarget(venvPepper, 'I@ironic:conductor')){
-                stage('Install OpenStack Ironic conductor') {
-                    orchestrate.installIronicConductor(venvPepper)
+            // install openstack
+            if (common.checkContains('STACK_INSTALL', 'openstack')) {
+                // install Infra and control, tests, ...
+
+                stage('Install OpenStack infra') {
+                    orchestrate.installOpenstackInfra(venvPepper)
+                }
+
+                stage('Install OpenStack control') {
+                    orchestrate.installOpenstackControl(venvPepper)
+                }
+
+                stage('Install OpenStack network') {
+
+                    if (common.checkContains('STACK_INSTALL', 'contrail')) {
+                        orchestrate.installContrailNetwork(venvPepper)
+                    } else if (common.checkContains('STACK_INSTALL', 'ovs')) {
+                        orchestrate.installOpenstackNetwork(venvPepper)
+                    }
+
+                    salt.runSaltProcessStep(venvPepper, 'I@keystone:server', 'cmd.run', ['. /root/keystonerc; neutron net-list'])
+                    salt.runSaltProcessStep(venvPepper, 'I@keystone:server', 'cmd.run', ['. /root/keystonerc; nova net-list'])
+                }
+
+                if (salt.testTarget(venvPepper, 'I@ironic:conductor')){
+                    stage('Install OpenStack Ironic conductor') {
+                        orchestrate.installIronicConductor(venvPepper)
+                    }
+                }
+
+
+                stage('Install OpenStack compute') {
+                    orchestrate.installOpenstackCompute(venvPepper)
+
+                    if (common.checkContains('STACK_INSTALL', 'contrail')) {
+                        orchestrate.installContrailCompute(venvPepper)
+                    }
+                }
+
+            }
+
+            // install ceph
+            if (common.checkContains('STACK_INSTALL', 'ceph')) {
+                stage('Install Ceph MONs') {
+                    orchestrate.installCephMon(venvPepper)
+                }
+
+                stage('Install Ceph OSDs') {
+                    orchestrate.installCephOsd(venvPepper)
+                }
+
+
+                stage('Install Ceph clients') {
+                    orchestrate.installCephClient(venvPepper)
+                }
+
+                stage('Connect Ceph') {
+                    orchestrate.connectCeph(venvPepper)
+                }
+            }
+
+            if (common.checkContains('STACK_INSTALL', 'oss')) {
+              stage('Install Oss infra') {
+                orchestrate.installOssInfra(venvPepper)
+              }
+            }
+
+            if (common.checkContains('STACK_INSTALL', 'cicd')) {
+                stage('Install Cicd') {
+                    orchestrate.installInfra(venvPepper)
+                    orchestrate.installDockerSwarm(venvPepper)
+                    orchestrate.installCicd(venvPepper)
+                }
+            }
+
+            if (common.checkContains('STACK_INSTALL', 'sl-legacy')) {
+                stage('Install StackLight v1') {
+                    orchestrate.installStacklightv1Control(venvPepper)
+                    orchestrate.installStacklightv1Client(venvPepper)
+                }
+            }
+
+            if (common.checkContains('STACK_INSTALL', 'stacklight')) {
+                stage('Install StackLight') {
+                    orchestrate.installDockerSwarm(venvPepper)
+                    orchestrate.installStacklight(venvPepper)
+                }
+            }
+
+            if (common.checkContains('STACK_INSTALL', 'oss')) {
+              stage('Install OSS') {
+                if (!common.checkContains('STACK_INSTALL', 'stacklight')) {
+                  // when StackLight v2 is enabled the containers are already started, so bootstrap Docker Swarm here only if it is not
+                  orchestrate.installDockerSwarm(venvPepper)
+                  salt.enforceState(venvPepper, 'I@docker:swarm:role:master and I@devops_portal:config', 'docker.client', true)
+                }
+                orchestrate.installOss(venvPepper)
+              }
+            }
+
+            //
+            // Test
+            //
+            def artifacts_dir = '_artifacts/'
+
+            if (common.checkContains('STACK_TEST', 'k8s')) {
+                stage('Run k8s conformance e2e tests') {
+                    def image = TEST_K8S_CONFORMANCE_IMAGE
+                    def output_file = image.replaceAll('/', '-') + '.output'
+
+                    // run image
+                    test.runConformanceTests(venvPepper, 'ctl01*', TEST_K8S_API_SERVER, image)
+
+                    // collect output
+                    sh "mkdir -p ${artifacts_dir}"
+                    file_content = salt.getFileContent(venvPepper, 'ctl01*', '/tmp/' + output_file)
+                    writeFile file: "${artifacts_dir}${output_file}", text: file_content
+                    sh "cat ${artifacts_dir}${output_file}"
+
+                    // collect artifacts
+                    archiveArtifacts artifacts: "${artifacts_dir}${output_file}"
+                }
+            }
+
+            if (common.checkContains('STACK_TEST', 'openstack')) {
+                if (common.checkContains('TEST_DOCKER_INSTALL', 'true')) {
+                    test.install_docker(venvPepper, TEST_TEMPEST_TARGET)
+                }
+                stage('Run OpenStack tests') {
+                    test.runTempestTests(venvPepper, TEST_TEMPEST_IMAGE, TEST_TEMPEST_TARGET, TEST_TEMPEST_PATTERN)
+                }
+
+                stage('Copy Tempest results to config node') {
+                    test.copyTempestResults(venvPepper, TEST_TEMPEST_TARGET)
+                }
+
+                stage('Archive rally artifacts') {
+                    test.archiveRallyArtifacts(venvPepper, TEST_TEMPEST_TARGET)
                 }
             }
 
 
-            stage('Install OpenStack compute') {
-                orchestrate.installOpenstackCompute(venvPepper)
-
-                if (common.checkContains('STACK_INSTALL', 'contrail')) {
-                    orchestrate.installContrailCompute(venvPepper)
+            if (common.checkContains('STACK_TEST', 'ceph')) {
+                stage('Run infra tests') {
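+                    // pause to let Ceph services settle before the testinfra checks (presumed reason for the fixed sleep below)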
+                    sleep(120)
+                    def cmd = "apt-get install -y python-pip && pip install -r /usr/share/salt-formulas/env/ceph/files/testinfra/requirements.txt && python -m pytest --junitxml=/root/report.xml /usr/share/salt-formulas/env/ceph/files/testinfra/"
+                    salt.cmdRun(venvPepper, 'I@salt:master', cmd, false)
+                    writeFile(file: 'report.xml', text: salt.getFileContent(venvPepper, 'I@salt:master', '/root/report.xml'))
+                    junit(keepLongStdio: true, testResults: 'report.xml')
                 }
             }
 
-        }
+            if (common.checkContains('STACK_TEST', 'opencontrail')) {
+                stage('Run opencontrail tests') {
+                    def opencontrail_tests_dir = "/opt/opencontrail_test/fuel-plugin-contrail/plugin_test/vapor/"
+                    def report_dir = "/opt/opencontrail-test-report/"
+                    def cmd = ". ${opencontrail_tests_dir}exports.sh && " +
+                              "cd ${opencontrail_tests_dir} && " +
+                              "py.test --junit-xml=${report_dir}report.xml" +
+                              " --html=${report_dir}report.html -v vapor/tests/ -k 'not destructive' "
 
-        // install ceph
-        if (common.checkContains('STACK_INSTALL', 'ceph')) {
-            stage('Install Ceph MONs') {
-                orchestrate.installCephMon(venvPepper)
-            }
+                    salt.runSaltProcessStep(venvPepper, 'cfg*', 'saltutil.refresh_pillar', [], null, true)
+                    salt.enforceState(venvPepper, 'I@opencontrail:test' , 'opencontrail.test' , true)
 
-            stage('Install Ceph OSDs') {
-                orchestrate.installCephOsd(venvPepper)
+                    salt.cmdRun(venvPepper, 'I@opencontrail:test', cmd, false)
+
+                    writeFile(file: 'report.xml', text: salt.getFileContent(venvPepper,
+                              'I@opencontrail:test', "${report_dir}report.xml"))
+                    junit(keepLongStdio: true, testResults: 'report.xml')
+                }
             }
 
 
-            stage('Install Ceph clients') {
-                orchestrate.installCephClient(venvPepper)
+            stage('Finalize') {
+                if (common.checkContains('STACK_INSTALL', 'finalize')) {
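+                    // state.apply with no arguments applies the full highstate on all minions as a final pass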
+                    salt.runSaltProcessStep(venvPepper, '*', 'state.apply', [], null, true)
+                }
+
+                outputsPretty = common.prettify(outputs)
+                print(outputsPretty)
+                writeFile(file: 'outputs.json', text: outputsPretty)
+                archiveArtifacts(artifacts: 'outputs.json')
             }
 
-            stage('Connect Ceph') {
-                orchestrate.connectCeph(venvPepper)
-            }
-        }
+        } catch (Throwable e) {
+            currentBuild.result = 'FAILURE'
+            throw e
+        } finally {
 
-        if (common.checkContains('STACK_INSTALL', 'oss')) {
-          stage('Install Oss infra') {
-            orchestrate.installOssInfra(venvPepper)
-          }
-        }
 
-        if (common.checkContains('STACK_INSTALL', 'cicd')) {
-            stage('Install Cicd') {
-                orchestrate.installInfra(venvPepper)
-                orchestrate.installDockerSwarm(venvPepper)
-                orchestrate.installCicd(venvPepper)
-            }
-        }
+            //
+            // Clean
+            //
 
-        if (common.checkContains('STACK_INSTALL', 'sl-legacy')) {
-            stage('Install StackLight v1') {
-                orchestrate.installStacklightv1Control(venvPepper)
-                orchestrate.installStacklightv1Client(venvPepper)
-            }
-        }
-
-        if (common.checkContains('STACK_INSTALL', 'stacklight')) {
-            stage('Install StackLight') {
-                orchestrate.installDockerSwarm(venvPepper)
-                orchestrate.installStacklight(venvPepper)
-            }
-        }
-
-        if (common.checkContains('STACK_INSTALL', 'oss')) {
-          stage('Install OSS') {
-            if (!common.checkContains('STACK_INSTALL', 'stacklight')) {
-              // In case if StackLightv2 enabled containers already started
-              orchestrate.installDockerSwarm(venvPepper)
-              salt.enforceState(venvPepper, 'I@docker:swarm:role:master and I@devops_portal:config', 'docker.client', true)
-            }
-            orchestrate.installOss(venvPepper)
-          }
-        }
-
-        //
-        // Test
-        //
-        def artifacts_dir = '_artifacts/'
-
-        if (common.checkContains('STACK_TEST', 'k8s')) {
-            stage('Run k8s conformance e2e tests') {
-                def image = TEST_K8S_CONFORMANCE_IMAGE
-                def output_file = image.replaceAll('/', '-') + '.output'
-
-                // run image
-                test.runConformanceTests(venvPepper, 'ctl01*', TEST_K8S_API_SERVER, image)
-
-                // collect output
-                sh "mkdir -p ${artifacts_dir}"
-                file_content = salt.getFileContent(venvPepper, 'ctl01*', '/tmp/' + output_file)
-                writeFile file: "${artifacts_dir}${output_file}", text: file_content
-                sh "cat ${artifacts_dir}${output_file}"
-
-                // collect artifacts
-                archiveArtifacts artifacts: "${artifacts_dir}${output_file}"
-            }
-        }
-
-        if (common.checkContains('STACK_TEST', 'openstack')) {
-            if (common.checkContains('TEST_DOCKER_INSTALL', 'true')) {
-                test.install_docker(venvPepper, TEST_TEMPEST_TARGET)
-            }
-            stage('Run OpenStack tests') {
-                test.runTempestTests(venvPepper, TEST_TEMPEST_IMAGE, TEST_TEMPEST_TARGET, TEST_TEMPEST_PATTERN)
+            if (common.validInputParam('STACK_NAME')) {
+                // send notification
+                common.sendNotification(currentBuild.result, STACK_NAME, ["slack"])
             }
 
-            stage('Copy Tempest results to config node') {
-                test.copyTempestResults(venvPepper, TEST_TEMPEST_TARGET)
-            }
+            if (common.validInputParam('STACK_DELETE') && STACK_DELETE.toBoolean() == true) {
+                stage('Trigger cleanup job') {
+                    common.errorMsg('Stack cleanup job triggered')
+                    build(job: STACK_CLEANUP_JOB, parameters: [
+                        [$class: 'StringParameterValue', name: 'STACK_NAME', value: STACK_NAME],
+                        [$class: 'StringParameterValue', name: 'STACK_TYPE', value: STACK_TYPE],
+                        [$class: 'StringParameterValue', name: 'OPENSTACK_API_URL', value: OPENSTACK_API_URL],
+                        [$class: 'StringParameterValue', name: 'OPENSTACK_API_CREDENTIALS', value: OPENSTACK_API_CREDENTIALS],
+                        [$class: 'StringParameterValue', name: 'OPENSTACK_API_PROJECT', value: OPENSTACK_API_PROJECT],
+                        [$class: 'StringParameterValue', name: 'OPENSTACK_API_PROJECT_DOMAIN', value: OPENSTACK_API_PROJECT_DOMAIN],
+                        [$class: 'StringParameterValue', name: 'OPENSTACK_API_PROJECT_ID', value: OPENSTACK_API_PROJECT_ID],
+                        [$class: 'StringParameterValue', name: 'OPENSTACK_API_USER_DOMAIN', value: OPENSTACK_API_USER_DOMAIN],
+                        [$class: 'StringParameterValue', name: 'OPENSTACK_API_CLIENT', value: OPENSTACK_API_CLIENT],
+                        [$class: 'StringParameterValue', name: 'OPENSTACK_API_VERSION', value: OPENSTACK_API_VERSION]
+                    ])
+                }
+            } else {
+                if (currentBuild.result == 'FAILURE') {
+                    common.errorMsg("Deploy job FAILED and the stack was not deleted. Please fix the problem and delete the stack on your own.")
 
-            stage('Archive rally artifacts') {
-                test.archiveRallyArtifacts(venvPepper, TEST_TEMPEST_TARGET)
-            }
-        }
-
-
-        if (common.checkContains('STACK_TEST', 'ceph')) {
-            stage('Run infra tests') {
-                sleep(120)
-                def cmd = "apt-get install -y python-pip && pip install -r /usr/share/salt-formulas/env/ceph/files/testinfra/requirements.txt && python -m pytest --junitxml=/root/report.xml /usr/share/salt-formulas/env/ceph/files/testinfra/"
-                salt.cmdRun(venvPepper, 'I@salt:master', cmd, false)
-                writeFile(file: 'report.xml', text: salt.getFileContent(venvPepper, 'I@salt:master', '/root/report.xml'))
-                junit(keepLongStdio: true, testResults: 'report.xml')
-            }
-        }
-
-        if (common.checkContains('STACK_TEST', 'opencontrail')) {
-            stage('Run opencontrail tests') {
-                def opencontrail_tests_dir = "/opt/opencontrail_test/fuel-plugin-contrail/plugin_test/vapor/"
-                def report_dir = "/opt/opencontrail-test-report/"
-                def cmd = ". ${opencontrail_tests_dir}exports.sh && " +
-                          "cd ${opencontrail_tests_dir} && " +
-                          "py.test --junit-xml=${report_dir}report.xml" +
-                          " --html=${report_dir}report.html -v vapor/tests/ -k 'not destructive' "
-
-                salt.runSaltProcessStep(venvPepper, 'cfg*', 'saltutil.refresh_pillar', [], null, true)
-                salt.enforceState(venvPepper, 'I@opencontrail:test' , 'opencontrail.test' , true)
-
-                salt.cmdRun(venvPepper, 'I@opencontrail:test', cmd, false)
-
-                writeFile(file: 'report.xml', text: salt.getFileContent(venvPepper,
-                          'I@opencontrail:test', "${report_dir}report.xml"))
-                junit(keepLongStdio: true, testResults: 'report.xml')
-            }
-        }
-
-
-        stage('Finalize') {
-            if (common.checkContains('STACK_INSTALL', 'finalize')) {
-                salt.runSaltProcessStep(venvPepper, '*', 'state.apply', [], null, true)
-            }
-
-            outputsPretty = common.prettify(outputs)
-            print(outputsPretty)
-            writeFile(file: 'outputs.json', text: outputsPretty)
-            archiveArtifacts(artifacts: 'outputs.json')
-        }
-
-    } catch (Throwable e) {
-        currentBuild.result = 'FAILURE'
-        throw e
-    } finally {
-
-
-        //
-        // Clean
-        //
-
-        if (common.validInputParam('STACK_NAME')) {
-            // send notification
-            common.sendNotification(currentBuild.result, STACK_NAME, ["slack"])
-        }
-
-        if (common.validInputParam('STACK_DELETE') && STACK_DELETE.toBoolean() == true) {
-            stage('Trigger cleanup job') {
-                common.errorMsg('Stack cleanup job triggered')
-                build(job: STACK_CLEANUP_JOB, parameters: [
-                    [$class: 'StringParameterValue', name: 'STACK_NAME', value: STACK_NAME],
-                    [$class: 'StringParameterValue', name: 'STACK_TYPE', value: STACK_TYPE],
-                    [$class: 'StringParameterValue', name: 'OPENSTACK_API_URL', value: OPENSTACK_API_URL],
-                    [$class: 'StringParameterValue', name: 'OPENSTACK_API_CREDENTIALS', value: OPENSTACK_API_CREDENTIALS],
-                    [$class: 'StringParameterValue', name: 'OPENSTACK_API_PROJECT', value: OPENSTACK_API_PROJECT],
-                    [$class: 'StringParameterValue', name: 'OPENSTACK_API_PROJECT_DOMAIN', value: OPENSTACK_API_PROJECT_DOMAIN],
-                    [$class: 'StringParameterValue', name: 'OPENSTACK_API_PROJECT_ID', value: OPENSTACK_API_PROJECT_ID],
-                    [$class: 'StringParameterValue', name: 'OPENSTACK_API_USER_DOMAIN', value: OPENSTACK_API_USER_DOMAIN],
-                    [$class: 'StringParameterValue', name: 'OPENSTACK_API_CLIENT', value: OPENSTACK_API_CLIENT],
-                    [$class: 'StringParameterValue', name: 'OPENSTACK_API_VERSION', value: OPENSTACK_API_VERSION]
-                ])
-            }
-        } else {
-            if (currentBuild.result == 'FAILURE') {
-                common.errorMsg("Deploy job FAILED and was not deleted. Please fix the problem and delete stack on you own.")
-
-                if (common.validInputParam('SALT_MASTER_URL')) {
-                    common.errorMsg("Salt master URL: ${SALT_MASTER_URL}")
+                    if (common.validInputParam('SALT_MASTER_URL')) {
+                        common.errorMsg("Salt master URL: ${SALT_MASTER_URL}")
+                    }
                 }
             }
         }
diff --git a/cvp-ha.groovy b/cvp-ha.groovy
index 781f5b4..4952502 100644
--- a/cvp-ha.groovy
+++ b/cvp-ha.groovy
@@ -30,150 +30,152 @@
 def remote_artifacts_dir = '/root/qa_results/'
 def current_target_node = ''
 def tempest_result = ''
-node() {
-    def num_retries = Integer.parseInt(RETRY_CHECK_STATUS)
-    try {
-        stage('Initialization') {
-            saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
-            validate.runBasicContainer(saltMaster, TEMPEST_TARGET_NODE, TEST_IMAGE)
-            sh "rm -rf ${artifacts_dir}"
-            salt.cmdRun(saltMaster, TEMPEST_TARGET_NODE, "mkdir -p ${remote_artifacts_dir}")
-            validate.configureContainer(saltMaster, TEMPEST_TARGET_NODE, PROXY, TOOLS_REPO, TEMPEST_REPO)
-        }
-
-        stage('Initial env check') {
-            sh "mkdir -p ${artifacts_dir}"
-            tempest_result = validate.runCVPtempest(saltMaster, TEMPEST_TARGET_NODE, TEMPEST_TEST_PATTERN, SKIP_LIST_PATH, remote_artifacts_dir, "docker_tempest_initial")
-            validate.openstack_cleanup(saltMaster, TEMPEST_TARGET_NODE)
-            if (tempest_result != "finished") {
-                currentBuild.result = "FAILURE"
-                throw new Exception("Tempest tests failed")
+timeout(time: 12, unit: 'HOURS') {
+    node() {
+        def num_retries = Integer.parseInt(RETRY_CHECK_STATUS)
+        try {
+            stage('Initialization') {
+                saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+                validate.runBasicContainer(saltMaster, TEMPEST_TARGET_NODE, TEST_IMAGE)
+                sh "rm -rf ${artifacts_dir}"
+                salt.cmdRun(saltMaster, TEMPEST_TARGET_NODE, "mkdir -p ${remote_artifacts_dir}")
+                validate.configureContainer(saltMaster, TEMPEST_TARGET_NODE, PROXY, TOOLS_REPO, TEMPEST_REPO)
             }
-        }
 
-        stage('Soft Shutdown') {
-            if (MANUAL_CONFIRMATION.toBoolean() == true) {
-                stage('Ask for manual confirmation') {
-                    input message: "Are you sure you want to shutdown current vip node?"
+            stage('Initial env check') {
+                sh "mkdir -p ${artifacts_dir}"
+                tempest_result = validate.runCVPtempest(saltMaster, TEMPEST_TARGET_NODE, TEMPEST_TEST_PATTERN, SKIP_LIST_PATH, remote_artifacts_dir, "docker_tempest_initial")
+                validate.openstack_cleanup(saltMaster, TEMPEST_TARGET_NODE)
+                if (tempest_result != "finished") {
+                    currentBuild.result = "FAILURE"
+                    throw new Exception("Tempest tests failed")
                 }
             }
-            current_target_node = validate.get_vip_node(saltMaster, TARGET_NODES)
-            common.warningMsg("Shutdown current vip node ${current_target_node}")
-            validate.shutdown_vm_node(saltMaster, current_target_node, 'soft_shutdown')
-        }
-        stage('Check during shutdown') {
-            tempest_result = validate.runCVPtempest(saltMaster, TEMPEST_TARGET_NODE, TEMPEST_TEST_PATTERN, SKIP_LIST_PATH, remote_artifacts_dir, "docker_tempest_during_shutdown")
-            validate.openstack_cleanup(saltMaster, TEMPEST_TARGET_NODE)
-            if (tempest_result != "finished") {
-                currentBuild.result = "FAILURE"
-                throw new Exception("Tempest tests failed")
-            }
-        }
-        stage('Power on') {
-            common.infoMsg('Powering on node')
-            kvm = validate.locate_node_on_kvm(saltMaster, current_target_node)
-            salt.cmdRun(saltMaster, kvm, "virsh start ${current_target_node}")
-            common.infoMsg("Checking that node is UP")
-            status = salt.minionsReachable(saltMaster, 'I@salt:master', current_target_node, null, 10, num_retries)
-            if (status == null) {
-                throw new Exception("Node ${current_target_node} cannot start")
-            }
-        }
-        stage('Check after shutdown') {
-            tempest_result = validate.runCVPtempest(saltMaster, TEMPEST_TARGET_NODE, TEMPEST_TEST_PATTERN, SKIP_LIST_PATH, remote_artifacts_dir, "docker_tempest_after_shutdown")
-            validate.openstack_cleanup(saltMaster, TEMPEST_TARGET_NODE)
-            if (tempest_result != "finished") {
-                currentBuild.result = "FAILURE"
-                throw new Exception("Tempest tests failed")
-            }
-            sleep 15
-        }
 
-        stage('Hard Shutdown') {
-            if (MANUAL_CONFIRMATION.toBoolean() == true) {
-                stage('Ask for manual confirmation') {
-                    input message: "Are you sure you want to hard shutdown current vip node?"
+            stage('Soft Shutdown') {
+                if (MANUAL_CONFIRMATION.toBoolean() == true) {
+                    stage('Ask for manual confirmation') {
+                        input message: "Are you sure you want to shut down the current VIP node?"
+                    }
+                }
+                current_target_node = validate.get_vip_node(saltMaster, TARGET_NODES)
+                common.warningMsg("Shutting down current VIP node ${current_target_node}")
+                validate.shutdown_vm_node(saltMaster, current_target_node, 'soft_shutdown')
+            }
+            stage('Check during shutdown') {
+                tempest_result = validate.runCVPtempest(saltMaster, TEMPEST_TARGET_NODE, TEMPEST_TEST_PATTERN, SKIP_LIST_PATH, remote_artifacts_dir, "docker_tempest_during_shutdown")
+                validate.openstack_cleanup(saltMaster, TEMPEST_TARGET_NODE)
+                if (tempest_result != "finished") {
+                    currentBuild.result = "FAILURE"
+                    throw new Exception("Tempest tests failed")
                 }
             }
-            salt.cmdRun(saltMaster, current_target_node, "service keepalived stop")
-            current_target_node = validate.get_vip_node(saltMaster, TARGET_NODES)
-            common.warningMsg("Shutdown current vip node ${current_target_node}")
-            validate.shutdown_vm_node(saltMaster, current_target_node, 'hard_shutdown')
-        }
-        stage('Check during hard shutdown') {
-            tempest_result = validate.runCVPtempest(saltMaster, TEMPEST_TARGET_NODE, TEMPEST_TEST_PATTERN, SKIP_LIST_PATH, remote_artifacts_dir, "docker_tempest_during_hard_shutdown")
-            validate.openstack_cleanup(saltMaster, TEMPEST_TARGET_NODE)
-            if (tempest_result != "finished") {
-                currentBuild.result = "FAILURE"
-                throw new Exception("Tempest tests failed")
-            }
-        }
-        stage('Power on') {
-            common.infoMsg('Powering on node')
-            kvm = validate.locate_node_on_kvm(saltMaster, current_target_node)
-            salt.cmdRun(saltMaster, kvm, "virsh start ${current_target_node}")
-            common.infoMsg("Checking that node is UP")
-            status = salt.minionsReachable(saltMaster, 'I@salt:master', current_target_node, null, 10, num_retries)
-            if (status == null) {
-                throw new Exception("Command execution failed")
-            }
-            salt.cmdRun(saltMaster, TARGET_NODES, "service keepalived start")
-        }
-        stage('Check after hard shutdown') {
-            tempest_result = validate.runCVPtempest(saltMaster, TEMPEST_TARGET_NODE, TEMPEST_TEST_PATTERN, SKIP_LIST_PATH, remote_artifacts_dir, "docker_tempest_after_hard_shutdown")
-            validate.openstack_cleanup(saltMaster, TEMPEST_TARGET_NODE)
-            if (tempest_result != "finished") {
-                currentBuild.result = "FAILURE"
-                throw new Exception("Tempest tests failed")
-            }
-            sleep 15
-        }
-
-        stage('Reboot') {
-            if (MANUAL_CONFIRMATION.toBoolean() == true) {
-                stage('Ask for manual confirmation') {
-                    input message: "Are you sure you want to reboot current vip node?"
+            stage('Power on') {
+                common.infoMsg('Powering on node')
+                kvm = validate.locate_node_on_kvm(saltMaster, current_target_node)
+                salt.cmdRun(saltMaster, kvm, "virsh start ${current_target_node}")
+                common.infoMsg("Checking that node is UP")
+                status = salt.minionsReachable(saltMaster, 'I@salt:master', current_target_node, null, 10, num_retries)
+                if (status == null) {
+                    throw new Exception("Node ${current_target_node} cannot start")
                 }
             }
-            current_target_node = validate.get_vip_node(saltMaster, TARGET_NODES)
-            common.warningMsg("Rebooting current vip node ${current_target_node}")
-            validate.shutdown_vm_node(saltMaster, current_target_node, 'reboot')
-            sleep 5
-        }
-        stage('Check during reboot') {
-            tempest_result = validate.runCVPtempest(saltMaster, TEMPEST_TARGET_NODE, TEMPEST_TEST_PATTERN, SKIP_LIST_PATH, remote_artifacts_dir, "docker_tempest_during_reboot")
-            validate.openstack_cleanup(saltMaster, TEMPEST_TARGET_NODE)
-            if (tempest_result != "finished") {
-                currentBuild.result = "FAILURE"
-                throw new Exception("Tempest tests failed")
+            stage('Check after shutdown') {
+                tempest_result = validate.runCVPtempest(saltMaster, TEMPEST_TARGET_NODE, TEMPEST_TEST_PATTERN, SKIP_LIST_PATH, remote_artifacts_dir, "docker_tempest_after_shutdown")
+                validate.openstack_cleanup(saltMaster, TEMPEST_TARGET_NODE)
+                if (tempest_result != "finished") {
+                    currentBuild.result = "FAILURE"
+                    throw new Exception("Tempest tests failed")
+                }
+                sleep 15
             }
-        }
-        stage('Check after reboot') {
-            common.warningMsg("Checking that node is UP")
-            status = salt.minionsReachable(saltMaster, 'I@salt:master', current_target_node, null, 10, num_retries)
-            if (status == null) {
-                throw new Exception("Node ${current_target_node} cannot start")
-            }
-            tempest_result = validate.runCVPtempest(saltMaster, TEMPEST_TARGET_NODE, TEMPEST_TEST_PATTERN, SKIP_LIST_PATH, remote_artifacts_dir, "docker_tempest_after")
-            validate.openstack_cleanup(saltMaster, TEMPEST_TARGET_NODE)
-            if (tempest_result != "finished") {
-                currentBuild.result = "FAILURE"
-                throw new Exception("Tempest tests failed")
-            }
-        }
 
-        stage('Collect results') {
-            validate.addFiles(saltMaster, TEMPEST_TARGET_NODE, remote_artifacts_dir, artifacts_dir)
-            archiveArtifacts artifacts: "${artifacts_dir}/*"
+            stage('Hard Shutdown') {
+                if (MANUAL_CONFIRMATION.toBoolean() == true) {
+                    stage('Ask for manual confirmation') {
+                        input message: "Are you sure you want to perform a hard shutdown of the current VIP node?"
+                    }
+                }
+                salt.cmdRun(saltMaster, current_target_node, "service keepalived stop")
+                current_target_node = validate.get_vip_node(saltMaster, TARGET_NODES)
+                common.warningMsg("Shutting down current VIP node ${current_target_node}")
+                validate.shutdown_vm_node(saltMaster, current_target_node, 'hard_shutdown')
+            }
+            stage('Check during hard shutdown') {
+                tempest_result = validate.runCVPtempest(saltMaster, TEMPEST_TARGET_NODE, TEMPEST_TEST_PATTERN, SKIP_LIST_PATH, remote_artifacts_dir, "docker_tempest_during_hard_shutdown")
+                validate.openstack_cleanup(saltMaster, TEMPEST_TARGET_NODE)
+                if (tempest_result != "finished") {
+                    currentBuild.result = "FAILURE"
+                    throw new Exception("Tempest tests failed")
+                }
+            }
+            stage('Power on') {
+                common.infoMsg('Powering on node')
+                kvm = validate.locate_node_on_kvm(saltMaster, current_target_node)
+                salt.cmdRun(saltMaster, kvm, "virsh start ${current_target_node}")
+                common.infoMsg("Checking that node is UP")
+                status = salt.minionsReachable(saltMaster, 'I@salt:master', current_target_node, null, 10, num_retries)
+                if (status == null) {
+                    throw new Exception("Command execution failed")
+                }
+                salt.cmdRun(saltMaster, TARGET_NODES, "service keepalived start")
+            }
+            stage('Check after hard shutdown') {
+                tempest_result = validate.runCVPtempest(saltMaster, TEMPEST_TARGET_NODE, TEMPEST_TEST_PATTERN, SKIP_LIST_PATH, remote_artifacts_dir, "docker_tempest_after_hard_shutdown")
+                validate.openstack_cleanup(saltMaster, TEMPEST_TARGET_NODE)
+                if (tempest_result != "finished") {
+                    currentBuild.result = "FAILURE"
+                    throw new Exception("Tempest tests failed")
+                }
+                sleep 15
+            }
+
+            stage('Reboot') {
+                if (MANUAL_CONFIRMATION.toBoolean() == true) {
+                    stage('Ask for manual confirmation') {
+                        input message: "Are you sure you want to reboot the current VIP node?"
+                    }
+                }
+                current_target_node = validate.get_vip_node(saltMaster, TARGET_NODES)
+                common.warningMsg("Rebooting current VIP node ${current_target_node}")
+                validate.shutdown_vm_node(saltMaster, current_target_node, 'reboot')
+                sleep 5
+            }
+            stage('Check during reboot') {
+                tempest_result = validate.runCVPtempest(saltMaster, TEMPEST_TARGET_NODE, TEMPEST_TEST_PATTERN, SKIP_LIST_PATH, remote_artifacts_dir, "docker_tempest_during_reboot")
+                validate.openstack_cleanup(saltMaster, TEMPEST_TARGET_NODE)
+                if (tempest_result != "finished") {
+                    currentBuild.result = "FAILURE"
+                    throw new Exception("Tempest tests failed")
+                }
+            }
+            stage('Check after reboot') {
+                common.warningMsg("Checking that node is UP")
+                status = salt.minionsReachable(saltMaster, 'I@salt:master', current_target_node, null, 10, num_retries)
+                if (status == null) {
+                    throw new Exception("Node ${current_target_node} cannot start")
+                }
+                tempest_result = validate.runCVPtempest(saltMaster, TEMPEST_TARGET_NODE, TEMPEST_TEST_PATTERN, SKIP_LIST_PATH, remote_artifacts_dir, "docker_tempest_after")
+                validate.openstack_cleanup(saltMaster, TEMPEST_TARGET_NODE)
+                if (tempest_result != "finished") {
+                    currentBuild.result = "FAILURE"
+                    throw new Exception("Tempest tests failed")
+                }
+            }
+
+            stage('Collect results') {
+                validate.addFiles(saltMaster, TEMPEST_TARGET_NODE, remote_artifacts_dir, artifacts_dir)
+                archiveArtifacts artifacts: "${artifacts_dir}/*"
+                if (DEBUG_MODE == 'false') {
+                    validate.runCleanup(saltMaster, TEMPEST_TARGET_NODE)
+                    salt.cmdRun(saltMaster, TEMPEST_TARGET_NODE, "rm -rf ${remote_artifacts_dir}")
+                }
+            }
+        } finally {
             if (DEBUG_MODE == 'false') {
-                validate.runCleanup(saltMaster, TEMPEST_TARGET_NODE)
                 salt.cmdRun(saltMaster, TEMPEST_TARGET_NODE, "rm -rf ${remote_artifacts_dir}")
+                validate.runCleanup(saltMaster, TEMPEST_TARGET_NODE)
             }
         }
-    } finally {
-        if (DEBUG_MODE == 'false') {
-            salt.cmdRun(saltMaster, TEMPEST_TARGET_NODE, "rm -rf ${remote_artifacts_dir}")
-            validate.runCleanup(saltMaster, TEMPEST_TARGET_NODE)
-        }
     }
 }
diff --git a/cvp-sanity.groovy b/cvp-sanity.groovy
index 53c044a..ed87cd6 100644
--- a/cvp-sanity.groovy
+++ b/cvp-sanity.groovy
@@ -15,24 +15,25 @@
 validate = new com.mirantis.mcp.Validate()
 
 def artifacts_dir = 'validation_artifacts/'
+timeout(time: 12, unit: 'HOURS') {
+    node() {
+        try{
+            stage('Initialization') {
+                validate.prepareVenv(SANITY_TESTS_REPO, PROXY)
+            }
 
-node() {
-    try{
-        stage('Initialization') {
-            validate.prepareVenv(SANITY_TESTS_REPO, PROXY)
+            stage('Run Infra tests') {
+                sh "mkdir -p ${artifacts_dir}"
+                validate.runSanityTests(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS, SANITY_TESTS_SET, artifacts_dir)
+            }
+            stage ('Publish results') {
+                archiveArtifacts artifacts: "${artifacts_dir}/*"
+                junit "${artifacts_dir}/*.xml"
+            }
+        } catch (Throwable e) {
+            // If there was an error or exception thrown, the build failed
+            currentBuild.result = "FAILURE"
+            throw e
         }
-
-        stage('Run Infra tests') {
-            sh "mkdir -p ${artifacts_dir}"
-            validate.runSanityTests(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS, SANITY_TESTS_SET, artifacts_dir)
-        }
-        stage ('Publish results') {
-            archiveArtifacts artifacts: "${artifacts_dir}/*"
-            junit "${artifacts_dir}/*.xml"
-        }
-    } catch (Throwable e) {
-        // If there was an error or exception thrown, the build failed
-        currentBuild.result = "FAILURE"
-        throw e
     }
 }
diff --git a/delete-broken-stacks-pipeline.groovy b/delete-broken-stacks-pipeline.groovy
index 85d7898..c68fe9e 100644
--- a/delete-broken-stacks-pipeline.groovy
+++ b/delete-broken-stacks-pipeline.groovy
@@ -15,33 +15,34 @@
 git = new com.mirantis.mk.Git()
 openstack = new com.mirantis.mk.Openstack()
 salt = new com.mirantis.mk.Salt()
+timeout(time: 12, unit: 'HOURS') {
+    node {
 
-node {
+        // connection objects
+        def openstackCloud
+        // value defaults
+        def openstackVersion = OPENSTACK_API_CLIENT ? OPENSTACK_API_CLIENT : 'liberty'
+        def openstackEnv = "${env.WORKSPACE}/venv"
 
-    // connection objects
-    def openstackCloud
-    // value defaults
-    def openstackVersion = OPENSTACK_API_CLIENT ? OPENSTACK_API_CLIENT : 'liberty'
-    def openstackEnv = "${env.WORKSPACE}/venv"
-
-    stage('Install OpenStack env') {
-        openstack.setupOpenstackVirtualenv(openstackEnv, openstackVersion)
-    }
-
-    stage('Connect to OpenStack cloud') {
-        openstackCloud = openstack.createOpenstackEnv(OPENSTACK_API_URL, OPENSTACK_API_CREDENTIALS, OPENSTACK_API_PROJECT)
-        openstack.getKeystoneToken(openstackCloud, openstackEnv)
-    }
-
-    stage('Delete broken Heat stacks') {
-        // get failed stacks
-        def brokenStacks = []
-        brokenStacks.addAll(openstack.getStacksWithStatus(openstackCloud, "CREATE_FAILED", openstackEnv))
-        brokenStacks.addAll(openstack.getStacksWithStatus(openstackCloud, "DELETE_FAILED", openstackEnv))
-        for(int i=0;i<brokenStacks.size();i++){
-            common.infoMsg("Deleting Heat stack " + brokenStacks[i])
-            openstack.deleteHeatStack(openstackCloud, brokenStacks[i], openstackEnv)
+        stage('Install OpenStack env') {
+            openstack.setupOpenstackVirtualenv(openstackEnv, openstackVersion)
         }
-    }
 
+        stage('Connect to OpenStack cloud') {
+            openstackCloud = openstack.createOpenstackEnv(OPENSTACK_API_URL, OPENSTACK_API_CREDENTIALS, OPENSTACK_API_PROJECT)
+            openstack.getKeystoneToken(openstackCloud, openstackEnv)
+        }
+
+        stage('Delete broken Heat stacks') {
+            // get failed stacks
+            def brokenStacks = []
+            brokenStacks.addAll(openstack.getStacksWithStatus(openstackCloud, "CREATE_FAILED", openstackEnv))
+            brokenStacks.addAll(openstack.getStacksWithStatus(openstackCloud, "DELETE_FAILED", openstackEnv))
+            for(int i=0;i<brokenStacks.size();i++){
+                common.infoMsg("Deleting Heat stack " + brokenStacks[i])
+                openstack.deleteHeatStack(openstackCloud, brokenStacks[i], openstackEnv)
+            }
+        }
+
+    }
 }
diff --git a/deploy-k8s-deployments.groovy b/deploy-k8s-deployments.groovy
index 5989dea..968e4c9 100644
--- a/deploy-k8s-deployments.groovy
+++ b/deploy-k8s-deployments.groovy
@@ -4,46 +4,47 @@
 def pepperEnv = "pepperEnv"
 
 targetExpression = TARGET_MINIONS ? TARGET_MINIONS : "E@kvm01.*"
+timeout(time: 12, unit: 'HOURS') {
+    node() {
+        python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
 
-node() {
-    python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
-
-    common.infoMsg("Enforcing kubernetes state..")
-    stage("Update k8s control") {
-        salt.enforceState(
-            pepperEnv,
-            targetExpression,
-            'kubernetes.control',
-            true
-        )
-    }
-    stage("Update components") {
-        common.infoMsg("Setting up components..")
-        def extraCommand
-        try {
-            extraCommand = EXTRA_COMMAND
-        } catch (Throwable e) {
-            extraCommand = null
-        }
-
-        if (extraCommand) {
-            salt.cmdRun(
+        common.infoMsg("Enforcing kubernetes state..")
+        stage("Update k8s control") {
+            salt.enforceState(
                 pepperEnv,
                 targetExpression,
-                extraCommand
+                'kubernetes.control',
+                true
             )
         }
-        out = salt.cmdRun(
-            pepperEnv,
-            targetExpression,
-            '/bin/bash -c \'find /srv/kubernetes/ -type d | grep -v jobs | while read i; do ls $i/*.yml &>/dev/null && (set -x; hyperkube kubectl apply -f $i || echo Command failed; set +x); done; jobs=$(hyperkube kubectl get jobs -o name); find /srv/kubernetes/jobs -type f -name "*.yml" | while read i; do name=$(grep "name:" $i | head -1 | awk "{print $NF}"); echo $jobs|grep $name >/dev/null || (set -x; hyperkube kubectl apply -f $i || echo Command failed; set +x);done\''
-        )
-        for (entry in out['return']) {
-            for (node in entry) {
-                if (node.value =~ /Command failed/) {
-                    error("$node.key: $node.value")
-                } else {
-                    println "$node.key: $node.value"
+        stage("Update components") {
+            common.infoMsg("Setting up components..")
+            def extraCommand
+            try {
+                extraCommand = EXTRA_COMMAND
+            } catch (Throwable e) {
+                extraCommand = null
+            }
+
+            if (extraCommand) {
+                salt.cmdRun(
+                    pepperEnv,
+                    targetExpression,
+                    extraCommand
+                )
+            }
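+            // The one-liner below applies every *.yml under /srv/kubernetes (except jobs/), then creates each job manifest whose name is not already listed by 'kubectl get jobs'; any failure is tagged "Command failed" and caught by the loop that follows.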
+            out = salt.cmdRun(
+                pepperEnv,
+                targetExpression,
+                '/bin/bash -c \'find /srv/kubernetes/ -type d | grep -v jobs | while read i; do ls $i/*.yml &>/dev/null && (set -x; hyperkube kubectl apply -f $i || echo Command failed; set +x); done; jobs=$(hyperkube kubectl get jobs -o name); find /srv/kubernetes/jobs -type f -name "*.yml" | while read i; do name=$(grep "name:" $i | head -1 | awk "{print $NF}"); echo $jobs|grep $name >/dev/null || (set -x; hyperkube kubectl apply -f $i || echo Command failed; set +x);done\''
+            )
+            for (entry in out['return']) {
+                for (node in entry) {
+                    if (node.value =~ /Command failed/) {
+                        error("$node.key: $node.value")
+                    } else {
+                        println "$node.key: $node.value"
+                    }
                 }
             }
         }
diff --git a/docker-build-image-pipeline.groovy b/docker-build-image-pipeline.groovy
index cb0e3f9..2ec0ab6 100644
--- a/docker-build-image-pipeline.groovy
+++ b/docker-build-image-pipeline.groovy
@@ -15,60 +15,62 @@
 def gerrit = new com.mirantis.mk.Gerrit()
 def git = new com.mirantis.mk.Git()
 def dockerLib = new com.mirantis.mk.Docker()
-node("docker") {
-  def workspace = common.getWorkspace()
-  def imageTagsList = IMAGE_TAGS.tokenize(" ")
-  try{
+timeout(time: 12, unit: 'HOURS') {
+  node("docker") {
+    def workspace = common.getWorkspace()
+    def imageTagsList = IMAGE_TAGS.tokenize(" ")
+    try{
 
-    def buildArgs = []
-    try {
-      buildArgs = IMAGE_BUILD_PARAMS.tokenize(' ')
+      def buildArgs = []
+      try {
+        buildArgs = IMAGE_BUILD_PARAMS.tokenize(' ')
+      } catch (Throwable e) {
+        buildArgs = []
+      }
+      def dockerApp
+      docker.withRegistry(REGISTRY_URL, REGISTRY_CREDENTIALS_ID) {
+        stage("checkout") {
+           git.checkoutGitRepository('.', IMAGE_GIT_URL, IMAGE_BRANCH, IMAGE_CREDENTIALS_ID)
+        }
+
+        if (IMAGE_BRANCH == "master") {
+          try {
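+            // Derive additional image tags from git: the most recent tag, "<tag>-<commits-since-tag>" when the branch is ahead of it, and "latest".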
+            def tag = sh(script: "git describe --tags --abbrev=0", returnStdout: true).trim()
+            def revision = sh(script: "git describe --tags --abbrev=4 | grep -oP \"^${tag}-\\K.*\" | awk -F\\- '{print \$1}'", returnStdout: true).trim()
+            imageTagsList << tag
+            revision = revision ? revision : "0"
+            if(Integer.valueOf(revision) > 0){
+              imageTagsList << "${tag}-${revision}"
+            }
+            if (!imageTagsList.contains("latest")) {
+              imageTagsList << "latest"
+            }
+          } catch (Exception e) {
+            common.infoMsg("Impossible to find any tag")
+          }
+        }
+
+        stage("build") {
+          common.infoMsg("Building docker image ${IMAGE_NAME}")
+          dockerApp = dockerLib.buildDockerImage(IMAGE_NAME, "", "${workspace}/${DOCKERFILE_PATH}", imageTagsList[0], buildArgs)
+          if(!dockerApp){
+            throw new Exception("Docker build image failed")
+          }
+        }
+        stage("upload to docker hub"){
+          for(int i=0;i<imageTagsList.size();i++){
+            common.infoMsg("Uploading image ${IMAGE_NAME} with tag ${imageTagsList[i]}")
+            dockerApp.push(imageTagsList[i])
+          }
+        }
+      }
     } catch (Throwable e) {
-      buildArgs = []
+       // If there was an error or exception thrown, the build failed
+       currentBuild.result = "FAILURE"
+       currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+       throw e
+    } finally {
+       common.sendNotification(currentBuild.result,"",["slack"])
     }
-    def dockerApp
-    docker.withRegistry(REGISTRY_URL, REGISTRY_CREDENTIALS_ID) {
-      stage("checkout") {
-         git.checkoutGitRepository('.', IMAGE_GIT_URL, IMAGE_BRANCH, IMAGE_CREDENTIALS_ID)
-      }
-
-      if (IMAGE_BRANCH == "master") {
-        try {
-          def tag = sh(script: "git describe --tags --abbrev=0", returnStdout: true).trim()
-          def revision = sh(script: "git describe --tags --abbrev=4 | grep -oP \"^${tag}-\\K.*\" | awk -F\\- '{print \$1}'", returnStdout: true).trim()
-          imageTagsList << tag
-          revision = revision ? revision : "0"
-          if(Integer.valueOf(revision) > 0){
-            imageTagsList << "${tag}-${revision}"
-          }
-          if (!imageTagsList.contains("latest")) {
-            imageTagsList << "latest"
-          }
-        } catch (Exception e) {
-          common.infoMsg("Impossible to find any tag")
-        }
-      }
-
-      stage("build") {
-        common.infoMsg("Building docker image ${IMAGE_NAME}")
-        dockerApp = dockerLib.buildDockerImage(IMAGE_NAME, "", "${workspace}/${DOCKERFILE_PATH}", imageTagsList[0], buildArgs)
-        if(!dockerApp){
-          throw new Exception("Docker build image failed")
-        }
-      }
-      stage("upload to docker hub"){
-        for(int i=0;i<imageTagsList.size();i++){
-          common.infoMsg("Uploading image ${IMAGE_NAME} with tag ${imageTagsList[i]}")
-          dockerApp.push(imageTagsList[i])
-        }
-      }
-    }
-  } catch (Throwable e) {
-     // If there was an error or exception thrown, the build failed
-     currentBuild.result = "FAILURE"
-     currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
-     throw e
-  } finally {
-     common.sendNotification(currentBuild.result,"",["slack"])
   }
 }
diff --git a/docker-cleanup-pipeline.groovy b/docker-cleanup-pipeline.groovy
index 677efdf..710b1bb 100644
--- a/docker-cleanup-pipeline.groovy
+++ b/docker-cleanup-pipeline.groovy
@@ -12,22 +12,23 @@
 def python = new com.mirantis.mk.Python()
 
 def pepperEnv = "pepperEnv"
-
-node{
-  def saltMaster;
-  stage('Setup virtualenv for Pepper') {
-    python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
-  }
-  stage("Clean old containers"){
-    salt.cmdRun(pepperEnv, 'I@jenkins:slave', """
-        docker ps --format='{{.ID}}' | xargs -n 1 -r docker inspect \\
-        -f '{{.ID}} {{.State.Running}} {{.State.StartedAt}}' \\
-        | awk '\$2 == "true" && \$3 <= "'\$(date -d '${TEST_DATE_STRING}' -Ins --utc \\
-        | sed 's/+0000/Z/')'" { print \$1 }' \\
-        | xargs -r docker rm -f
-        """, false)
-  }
-  stage("Run docker system prune"){
-    salt.cmdRun(pepperEnv, 'I@jenkins:slave', "docker system prune -f")
+timeout(time: 12, unit: 'HOURS') {
+  node{
+    def saltMaster;
+    stage('Setup virtualenv for Pepper') {
+      python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+    }
+    stage("Clean old containers"){
+      salt.cmdRun(pepperEnv, 'I@jenkins:slave', """
+          docker ps --format='{{.ID}}' | xargs -n 1 -r docker inspect \\
+          -f '{{.ID}} {{.State.Running}} {{.State.StartedAt}}' \\
+          | awk '\$2 == "true" && \$3 <= "'\$(date -d '${TEST_DATE_STRING}' -Ins --utc \\
+          | sed 's/+0000/Z/')'" { print \$1 }' \\
+          | xargs -r docker rm -f
+          """, false)
+    }
+    stage("Run docker system prune"){
+      salt.cmdRun(pepperEnv, 'I@jenkins:slave', "docker system prune -f")
+    }
   }
 }
diff --git a/docker-mirror-images.groovy b/docker-mirror-images.groovy
index 8f0373c..08ac439 100644
--- a/docker-mirror-images.groovy
+++ b/docker-mirror-images.groovy
@@ -25,30 +25,31 @@
         throw new IllegalArgumentException("Wrong format of image name.")
     }
 }
-
-node("docker") {
-    try {
-        stage("Mirror Docker Images"){
-            def creds = common.getPasswordCredentials(TARGET_REGISTRY_CREDENTIALS_ID)
-            sh "docker login --username=${creds.username} --password=${creds.password.toString()} ${REGISTRY_URL}"
-            def images = IMAGE_LIST.tokenize('\n')
-            def imageName, imagePath, targetRegistry, imageArray
-            for (image in images){
-                if(image.trim().indexOf(' ') == -1){
-                    throw new IllegalArgumentException("Wrong format of image and target repository input")
+timeout(time: 12, unit: 'HOURS') {
+    node("docker") {
+        try {
+            stage("Mirror Docker Images"){
+                def creds = common.getPasswordCredentials(TARGET_REGISTRY_CREDENTIALS_ID)
+                sh "docker login --username=${creds.username} --password=${creds.password.toString()} ${REGISTRY_URL}"
+                def images = IMAGE_LIST.tokenize('\n')
+                def imageName, imagePath, targetRegistry, imageArray
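+                // Each IMAGE_LIST line is expected as "<source-image-path> <target-registry>"; the image is pulled, retagged as <target-registry>/<image-name>:<IMAGE_TAG> and pushed.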
+                for (image in images){
+                    if(image.trim().indexOf(' ') == -1){
+                        throw new IllegalArgumentException("Wrong format of image and target repository input")
+                    }
+                    imageArray = image.trim().tokenize(' ')
+                    imagePath = imageArray[0]
+                    targetRegistry = imageArray[1]
+                    imageName = getImageName(imagePath)
+                    sh """docker pull ${imagePath}
+                          docker tag ${imagePath} ${targetRegistry}/${imageName}:${IMAGE_TAG}
+                          docker push ${targetRegistry}/${imageName}:${IMAGE_TAG}"""
                 }
-                imageArray = image.trim().tokenize(' ')
-                imagePath = imageArray[0]
-                targetRegistry = imageArray[1]
-                imageName = getImageName(imagePath)
-                sh """docker pull ${imagePath}
-                      docker tag ${imagePath} ${targetRegistry}/${imageName}:${IMAGE_TAG}
-                      docker push ${targetRegistry}/${imageName}:${IMAGE_TAG}"""
             }
+        } catch (Throwable e) {
+            // If there was an error or exception thrown, the build failed
+            currentBuild.result = "FAILURE"
+            throw e
         }
-    } catch (Throwable e) {
-        // If there was an error or exception thrown, the build failed
-        currentBuild.result = "FAILURE"
-        throw e
     }
 }
\ No newline at end of file
diff --git a/gating-pipeline.groovy b/gating-pipeline.groovy
index 1b62b53..e42524b 100644
--- a/gating-pipeline.groovy
+++ b/gating-pipeline.groovy
@@ -8,68 +8,70 @@
 def common = new com.mirantis.mk.Common()
 def gerrit = new com.mirantis.mk.Gerrit()
 def ssh = new com.mirantis.mk.Ssh()
-node("python") {
-  try{
-    // test if change is not already merged
-    ssh.prepareSshAgentKey(CREDENTIALS_ID)
-    ssh.ensureKnownHosts(GERRIT_HOST)
-    def gerritChange = gerrit.getGerritChange(GERRIT_NAME, GERRIT_HOST, GERRIT_CHANGE_NUMBER, CREDENTIALS_ID, true)
-    def doSubmit = false
-    def giveVerify = false
-    stage("test") {
-      if (gerritChange.status != "MERGED" && !SKIP_TEST.equals("true")){
-        // test max CodeReview
-        if(gerrit.patchsetHasApproval(gerritChange.currentPatchSet,"Code-Review", "+")){
-          doSubmit = true
-          def gerritProjectArray = GERRIT_PROJECT.tokenize("/")
-          def gerritProject = gerritProjectArray[gerritProjectArray.size() - 1]
-          def jobsNamespace = JOBS_NAMESPACE
-          def plural_namespaces = ['salt-formulas', 'salt-models']
-          // remove plural s on the end of job namespace
-          if (JOBS_NAMESPACE in plural_namespaces){
-            jobsNamespace = JOBS_NAMESPACE.substring(0, JOBS_NAMESPACE.length() - 1)
-          }
-          // salt-formulas tests have -latest on end of the name
-          if(JOBS_NAMESPACE.equals("salt-formulas")){
-            gerritProject=gerritProject+"-latest"
-          }
-          def testJob = String.format("test-%s-%s", jobsNamespace, gerritProject)
-          if (_jobExists(testJob)) {
-            common.infoMsg("Test job ${testJob} found, running")
-            def patchsetVerified =  gerrit.patchsetHasApproval(gerritChange.currentPatchSet,"Verified", "+")
-            build job: testJob, parameters: [
-              [$class: 'StringParameterValue', name: 'DEFAULT_GIT_URL', value: "${GERRIT_SCHEME}://${GERRIT_NAME}@${GERRIT_HOST}:${GERRIT_PORT}/${GERRIT_PROJECT}"],
-              [$class: 'StringParameterValue', name: 'DEFAULT_GIT_REF', value: GERRIT_REFSPEC]
-            ]
-            giveVerify = true
+timeout(time: 12, unit: 'HOURS') {
+  node("python") {
+    try{
+      // test if change is not already merged
+      ssh.prepareSshAgentKey(CREDENTIALS_ID)
+      ssh.ensureKnownHosts(GERRIT_HOST)
+      def gerritChange = gerrit.getGerritChange(GERRIT_NAME, GERRIT_HOST, GERRIT_CHANGE_NUMBER, CREDENTIALS_ID, true)
+      def doSubmit = false
+      def giveVerify = false
+      stage("test") {
+        if (gerritChange.status != "MERGED" && !SKIP_TEST.equals("true")){
+          // test max CodeReview
+          if(gerrit.patchsetHasApproval(gerritChange.currentPatchSet,"Code-Review", "+")){
+            doSubmit = true
+            def gerritProjectArray = GERRIT_PROJECT.tokenize("/")
+            def gerritProject = gerritProjectArray[gerritProjectArray.size() - 1]
+            def jobsNamespace = JOBS_NAMESPACE
+            def plural_namespaces = ['salt-formulas', 'salt-models']
+            // remove the trailing plural 's' from the job namespace
+            if (JOBS_NAMESPACE in plural_namespaces){
+              jobsNamespace = JOBS_NAMESPACE.substring(0, JOBS_NAMESPACE.length() - 1)
+            }
+            // salt-formulas test jobs have '-latest' appended to the name
+            if(JOBS_NAMESPACE.equals("salt-formulas")){
+              gerritProject=gerritProject+"-latest"
+            }
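+            // Test jobs are expected to follow the "test-<jobsNamespace>-<gerritProject>" naming convention (with the adjustments above).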
+            def testJob = String.format("test-%s-%s", jobsNamespace, gerritProject)
+            if (_jobExists(testJob)) {
+              common.infoMsg("Test job ${testJob} found, running")
+              def patchsetVerified =  gerrit.patchsetHasApproval(gerritChange.currentPatchSet,"Verified", "+")
+              build job: testJob, parameters: [
+                [$class: 'StringParameterValue', name: 'DEFAULT_GIT_URL', value: "${GERRIT_SCHEME}://${GERRIT_NAME}@${GERRIT_HOST}:${GERRIT_PORT}/${GERRIT_PROJECT}"],
+                [$class: 'StringParameterValue', name: 'DEFAULT_GIT_REF', value: GERRIT_REFSPEC]
+              ]
+              giveVerify = true
+            } else {
+              common.infoMsg("Test job ${testJob} not found")
+            }
           } else {
-            common.infoMsg("Test job ${testJob} not found")
+            common.errorMsg("Change don't have a CodeReview, skipping gate")
           }
         } else {
-          common.errorMsg("Change don't have a CodeReview, skipping gate")
+          common.infoMsg("Test job skipped")
         }
-      } else {
-        common.infoMsg("Test job skipped")
       }
-    }
-    stage("submit review"){
-      if(gerritChange.status == "MERGED"){
-        common.successMsg("Change ${GERRIT_CHANGE_NUMBER} is already merged, no need to gate them")
-      }else if(doSubmit){
-        if(giveVerify){
-          common.warningMsg("Change ${GERRIT_CHANGE_NUMBER} don't have a Verified, but tests were successful, so adding Verified and submitting")
-          ssh.agentSh(String.format("ssh -p 29418 %s@%s gerrit review --verified +1 --submit %s,%s", GERRIT_NAME, GERRIT_HOST, GERRIT_CHANGE_NUMBER, GERRIT_PATCHSET_NUMBER))
-        }else{
-          ssh.agentSh(String.format("ssh -p 29418 %s@%s gerrit review --submit %s,%s", GERRIT_NAME, GERRIT_HOST, GERRIT_CHANGE_NUMBER, GERRIT_PATCHSET_NUMBER))
+      stage("submit review"){
+        if(gerritChange.status == "MERGED"){
+          common.successMsg("Change ${GERRIT_CHANGE_NUMBER} is already merged, no need to gate them")
+        }else if(doSubmit){
+          if(giveVerify){
+            common.warningMsg("Change ${GERRIT_CHANGE_NUMBER} don't have a Verified, but tests were successful, so adding Verified and submitting")
+            ssh.agentSh(String.format("ssh -p 29418 %s@%s gerrit review --verified +1 --submit %s,%s", GERRIT_NAME, GERRIT_HOST, GERRIT_CHANGE_NUMBER, GERRIT_PATCHSET_NUMBER))
+          }else{
+            ssh.agentSh(String.format("ssh -p 29418 %s@%s gerrit review --submit %s,%s", GERRIT_NAME, GERRIT_HOST, GERRIT_CHANGE_NUMBER, GERRIT_PATCHSET_NUMBER))
+          }
+          common.infoMsg(String.format("Gerrit review %s,%s submitted", GERRIT_CHANGE_NUMBER, GERRIT_PATCHSET_NUMBER))
         }
-        common.infoMsg(String.format("Gerrit review %s,%s submitted", GERRIT_CHANGE_NUMBER, GERRIT_PATCHSET_NUMBER))
       }
+    } catch (Throwable e) {
+       // If there was an error or exception thrown, the build failed
+       currentBuild.result = "FAILURE"
+       currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+       throw e
     }
-  } catch (Throwable e) {
-     // If there was an error or exception thrown, the build failed
-     currentBuild.result = "FAILURE"
-     currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
-     throw e
   }
 }
 
diff --git a/generate-cookiecutter-products.groovy b/generate-cookiecutter-products.groovy
index 5dbbed4..6b76088 100644
--- a/generate-cookiecutter-products.groovy
+++ b/generate-cookiecutter-products.groovy
@@ -17,231 +17,232 @@
 saltModelTesting = new com.mirantis.mk.SaltModelTesting()
 ssh = new com.mirantis.mk.Ssh()
 
+timeout(time: 12, unit: 'HOURS') {
+    node("python&&docker") {
+        def templateEnv = "${env.WORKSPACE}/template"
+        def modelEnv = "${env.WORKSPACE}/model"
+        def testEnv = "${env.WORKSPACE}/test"
+        def pipelineEnv = "${env.WORKSPACE}/pipelines"
 
-node("python&&docker") {
-    def templateEnv = "${env.WORKSPACE}/template"
-    def modelEnv = "${env.WORKSPACE}/model"
-    def testEnv = "${env.WORKSPACE}/test"
-    def pipelineEnv = "${env.WORKSPACE}/pipelines"
-
-    try {
-        def templateContext = readYaml text: COOKIECUTTER_TEMPLATE_CONTEXT
-        def clusterDomain = templateContext.default_context.cluster_domain
-        def clusterName = templateContext.default_context.cluster_name
-        def saltMaster = templateContext.default_context.salt_master_hostname
-        def cutterEnv = "${env.WORKSPACE}/cutter"
-        def jinjaEnv = "${env.WORKSPACE}/jinja"
-        def outputDestination = "${modelEnv}/classes/cluster/${clusterName}"
-        def targetBranch = "feature/${clusterName}"
-        def templateBaseDir = "${env.WORKSPACE}/template"
-        def templateDir = "${templateEnv}/template/dir"
-        def templateOutputDir = templateBaseDir
-        def user
-        wrap([$class: 'BuildUser']) {
-            user = env.BUILD_USER_ID
-        }
-
-        currentBuild.description = clusterName
-        print("Using context:\n" + COOKIECUTTER_TEMPLATE_CONTEXT)
-
-        stage ('Download Cookiecutter template') {
-            if (COOKIECUTTER_TEMPLATE_BRANCH.startsWith('refs/changes/')) {
-                git.checkoutGitRepository(templateEnv, COOKIECUTTER_TEMPLATE_URL, 'master', COOKIECUTTER_TEMPLATE_CREDENTIALS)
-
-                dir(templateEnv) {
-                    ssh.agentSh("git fetch ${COOKIECUTTER_TEMPLATE_URL} ${COOKIECUTTER_TEMPLATE_BRANCH} && git checkout FETCH_HEAD")
-                }
-            } else {
-                git.checkoutGitRepository(templateEnv, COOKIECUTTER_TEMPLATE_URL, COOKIECUTTER_TEMPLATE_BRANCH, COOKIECUTTER_TEMPLATE_CREDENTIALS)
+        try {
+            def templateContext = readYaml text: COOKIECUTTER_TEMPLATE_CONTEXT
+            def clusterDomain = templateContext.default_context.cluster_domain
+            def clusterName = templateContext.default_context.cluster_name
+            def saltMaster = templateContext.default_context.salt_master_hostname
+            def cutterEnv = "${env.WORKSPACE}/cutter"
+            def jinjaEnv = "${env.WORKSPACE}/jinja"
+            def outputDestination = "${modelEnv}/classes/cluster/${clusterName}"
+            def targetBranch = "feature/${clusterName}"
+            def templateBaseDir = "${env.WORKSPACE}/template"
+            def templateDir = "${templateEnv}/template/dir"
+            def templateOutputDir = templateBaseDir
+            def user
+            wrap([$class: 'BuildUser']) {
+                user = env.BUILD_USER_ID
             }
 
-        }
+            currentBuild.description = clusterName
+            print("Using context:\n" + COOKIECUTTER_TEMPLATE_CONTEXT)
 
-        stage ('Create empty reclass model') {
-            dir(path: modelEnv) {
-                sh "rm -rfv .git"
-                sh "git init"
+            stage ('Download Cookiecutter template') {
+                if (COOKIECUTTER_TEMPLATE_BRANCH.startsWith('refs/changes/')) {
+                    git.checkoutGitRepository(templateEnv, COOKIECUTTER_TEMPLATE_URL, 'master', COOKIECUTTER_TEMPLATE_CREDENTIALS)
 
-                if (SHARED_RECLASS_URL != '') {
-                    ssh.agentSh "git submodule add \"${SHARED_RECLASS_URL}\" \"classes/system\""
+                    dir(templateEnv) {
+                        ssh.agentSh("git fetch ${COOKIECUTTER_TEMPLATE_URL} ${COOKIECUTTER_TEMPLATE_BRANCH} && git checkout FETCH_HEAD")
+                    }
+                } else {
+                    git.checkoutGitRepository(templateEnv, COOKIECUTTER_TEMPLATE_URL, COOKIECUTTER_TEMPLATE_BRANCH, COOKIECUTTER_TEMPLATE_CREDENTIALS)
+                }
 
-                    def mcpVersion = templateContext['default_context']['mcp_version']
-                    if(mcpVersion != "stable" && mcpVersion != "nightly" && mcpVersion != "testing"){
-                        ssh.agentSh "cd \"classes/system\";git fetch --tags;git checkout ${mcpVersion}"
+            }
+
+            stage ('Create empty reclass model') {
+                dir(path: modelEnv) {
+                    sh "rm -rfv .git"
+                    sh "git init"
+
+                    if (SHARED_RECLASS_URL != '') {
+                        ssh.agentSh "git submodule add \"${SHARED_RECLASS_URL}\" \"classes/system\""
+
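+                        // Pin the shared system model to the requested mcp_version unless it is one of the rolling channels (stable/nightly/testing).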
+                        def mcpVersion = templateContext['default_context']['mcp_version']
+                        if(mcpVersion != "stable" && mcpVersion != "nightly" && mcpVersion != "testing"){
+                            ssh.agentSh "cd \"classes/system\";git fetch --tags;git checkout ${mcpVersion}"
+                        }
+
+                        git.commitGitChanges(modelEnv, "Added new shared reclass submodule", "${user}@localhost", "${user}")
+                    }
+                }
+            }
+
+            def productList = ["infra", "cicd", "opencontrail", "kubernetes", "openstack", "oss", "stacklight", "ceph"]
+            for (product in productList) {
+
+                // get templateOutputDir and productDir
+                if (product.startsWith("stacklight")) {
+                    templateOutputDir = "${env.WORKSPACE}/output/stacklight"
+
+                    def stacklightVersion
+                    try {
+                        stacklightVersion = templateContext.default_context['stacklight_version']
+                    } catch (Throwable e) {
+                        common.warningMsg('Stacklight version loading failed')
                     }
 
-                    git.commitGitChanges(modelEnv, "Added new shared reclass submodule", "${user}@localhost", "${user}")
-                }
-            }
-        }
+                    if (stacklightVersion) {
+                        productDir = "stacklight" + stacklightVersion
+                    } else {
+                        productDir = "stacklight1"
+                    }
 
-        def productList = ["infra", "cicd", "opencontrail", "kubernetes", "openstack", "oss", "stacklight", "ceph"]
-        for (product in productList) {
-
-            // get templateOutputDir and productDir
-            if (product.startsWith("stacklight")) {
-                templateOutputDir = "${env.WORKSPACE}/output/stacklight"
-
-                def stacklightVersion
-                try {
-                    stacklightVersion = templateContext.default_context['stacklight_version']
-                } catch (Throwable e) {
-                    common.warningMsg('Stacklight version loading failed')
-                }
-
-                if (stacklightVersion) {
-                    productDir = "stacklight" + stacklightVersion
                 } else {
-                    productDir = "stacklight1"
+                    templateOutputDir = "${env.WORKSPACE}/output/${product}"
+                    productDir = product
                 }
 
-            } else {
-                templateOutputDir = "${env.WORKSPACE}/output/${product}"
-                productDir = product
-            }
+                if (product == "infra" || (templateContext.default_context["${product}_enabled"]
+                    && templateContext.default_context["${product}_enabled"].toBoolean())) {
 
-            if (product == "infra" || (templateContext.default_context["${product}_enabled"]
-                && templateContext.default_context["${product}_enabled"].toBoolean())) {
+                    templateDir = "${templateEnv}/cluster_product/${productDir}"
+                    common.infoMsg("Generating product " + product + " from " + templateDir + " to " + templateOutputDir)
 
-                templateDir = "${templateEnv}/cluster_product/${productDir}"
-                common.infoMsg("Generating product " + product + " from " + templateDir + " to " + templateOutputDir)
+                    sh "rm -rf ${templateOutputDir} || true"
+                    sh "mkdir -p ${templateOutputDir}"
+                    sh "mkdir -p ${outputDestination}"
 
-                sh "rm -rf ${templateOutputDir} || true"
-                sh "mkdir -p ${templateOutputDir}"
-                sh "mkdir -p ${outputDestination}"
-
-                python.setupCookiecutterVirtualenv(cutterEnv)
-                python.buildCookiecutterTemplate(templateDir, COOKIECUTTER_TEMPLATE_CONTEXT, templateOutputDir, cutterEnv, templateBaseDir)
-                sh "mv -v ${templateOutputDir}/${clusterName}/* ${outputDestination}"
-            } else {
-                common.warningMsg("Product " + product + " is disabled")
-            }
-        }
-
-        stage('Generate new SaltMaster node') {
-            def nodeFile = "${modelEnv}/nodes/${saltMaster}.${clusterDomain}.yml"
-            def nodeString = """classes:
-- cluster.${clusterName}.infra.config
-parameters:
-  _param:
-    linux_system_codename: xenial
-    reclass_data_revision: master
-  linux:
-    system:
-      name: ${saltMaster}
-      domain: ${clusterDomain}
-"""
-            sh "mkdir -p ${modelEnv}/nodes/"
-            writeFile(file: nodeFile, text: nodeString)
-
-            git.commitGitChanges(modelEnv, "Create model ${clusterName}", "${user}@localhost", "${user}")
-        }
-
-        stage("Test") {
-            if (SHARED_RECLASS_URL != "" && TEST_MODEL && TEST_MODEL.toBoolean()) {
-                sh("cp -r ${modelEnv} ${testEnv}")
-                saltModelTesting.setupAndTestNode("${saltMaster}.${clusterDomain}", "", testEnv)
-            }
-        }
-
-        stage("Generate config drives") {
-            // apt package genisoimage is required for this stage
-
-            // download create-config-drive
-            // FIXME: that should be refactored, to use git clone - to be able download it from custom repo.
-            def config_drive_script_url = "https://raw.githubusercontent.com/Mirantis/mcp-common-scripts/master/config-drive/create_config_drive.sh"
-            def user_data_script_url = "https://raw.githubusercontent.com/Mirantis/mcp-common-scripts/master/config-drive/master_config.sh"
-
-            sh "wget -O create-config-drive ${config_drive_script_url} && chmod +x create-config-drive"
-            sh "wget -O user_data.sh ${user_data_script_url}"
-
-            sh "git clone --mirror https://github.com/Mirantis/mk-pipelines.git ${pipelineEnv}/mk-pipelines"
-            sh "git clone --mirror https://github.com/Mirantis/pipeline-library.git ${pipelineEnv}/pipeline-library"
-            args = "--user-data user_data.sh --hostname ${saltMaster} --model ${modelEnv} --mk-pipelines ${pipelineEnv}/mk-pipelines/ --pipeline-library ${pipelineEnv}/pipeline-library/ ${saltMaster}.${clusterDomain}-config.iso"
-
-            // load data from model
-            def smc = [:]
-            smc['SALT_MASTER_MINION_ID'] = "${saltMaster}.${clusterDomain}"
-            smc['SALT_MASTER_DEPLOY_IP'] = templateContext['default_context']['salt_master_management_address']
-            smc['DEPLOY_NETWORK_GW'] = templateContext['default_context']['deploy_network_gateway']
-            smc['DEPLOY_NETWORK_NETMASK'] = templateContext['default_context']['deploy_network_netmask']
-            smc['DNS_SERVERS'] = templateContext['default_context']['dns_server01']
-            if (templateContext['default_context']['local_repositories'] == 'True'){
-                smc['PIPELINES_FROM_ISO'] = 'false'
-                smc['PIPELINE_REPO_URL'] = 'http://' + templateContext['default_context']['aptly_server_deploy_address'] + ':8088'
-            }
-            if (templateContext['default_context']['upstream_proxy_enabled'] == 'True'){
-                if (templateContext['default_context']['upstream_proxy_auth_enabled'] == 'True'){
-                    smc['http_proxy'] = 'http://' + templateContext['default_context']['upstream_proxy_user'] + ':' + templateContext['default_context']['upstream_proxy_password'] + '@' + templateContext['default_context']['upstream_proxy_address'] + ':' + templateContext['default_context']['upstream_proxy_port']
-                    smc['https_proxy'] = 'http://' + templateContext['default_context']['upstream_proxy_user'] + ':' + templateContext['default_context']['upstream_proxy_password'] + '@' + templateContext['default_context']['upstream_proxy_address'] + ':' + templateContext['default_context']['upstream_proxy_port']
+                    python.setupCookiecutterVirtualenv(cutterEnv)
+                    python.buildCookiecutterTemplate(templateDir, COOKIECUTTER_TEMPLATE_CONTEXT, templateOutputDir, cutterEnv, templateBaseDir)
+                    sh "mv -v ${templateOutputDir}/${clusterName}/* ${outputDestination}"
                 } else {
-                    smc['http_proxy'] = 'http://' + templateContext['default_context']['upstream_proxy_address'] + ':' + templateContext['default_context']['upstream_proxy_port']
-                    smc['https_proxy'] = 'http://' + templateContext['default_context']['upstream_proxy_address'] + ':' + templateContext['default_context']['upstream_proxy_port']
+                    common.warningMsg("Product " + product + " is disabled")
                 }
             }
 
-            for (i in common.entries(smc)) {
-                sh "sed -i \"s,export ${i[0]}=.*,export ${i[0]}=${i[1]},\" user_data.sh"
+            stage('Generate new SaltMaster node') {
+                def nodeFile = "${modelEnv}/nodes/${saltMaster}.${clusterDomain}.yml"
+                def nodeString = """classes:
+- cluster.${clusterName}.infra.config
+parameters:
+  _param:
+    linux_system_codename: xenial
+    reclass_data_revision: master
+  linux:
+    system:
+      name: ${saltMaster}
+      domain: ${clusterDomain}
+"""
+                sh "mkdir -p ${modelEnv}/nodes/"
+                writeFile(file: nodeFile, text: nodeString)
+
+                git.commitGitChanges(modelEnv, "Create model ${clusterName}", "${user}@localhost", "${user}")
             }
 
-            // create cfg config-drive
-            sh "./create-config-drive ${args}"
-            sh("mkdir output-${clusterName} && mv ${saltMaster}.${clusterDomain}-config.iso output-${clusterName}/")
+            stage("Test") {
+                if (SHARED_RECLASS_URL != "" && TEST_MODEL && TEST_MODEL.toBoolean()) {
+                    sh("cp -r ${modelEnv} ${testEnv}")
+                    saltModelTesting.setupAndTestNode("${saltMaster}.${clusterDomain}", "", testEnv)
+                }
+            }
 
-            // save cfg iso to artifacts
-            archiveArtifacts artifacts: "output-${clusterName}/${saltMaster}.${clusterDomain}-config.iso"
+            stage("Generate config drives") {
+                // apt package genisoimage is required for this stage
 
-            if (templateContext['default_context']['local_repositories'] == 'True'){
-                def aptlyServerHostname = templateContext.default_context.aptly_server_hostname
-                def user_data_script_apt_url = "https://raw.githubusercontent.com/Mirantis/mcp-common-scripts/master/config-drive/mirror_config.sh"
-                sh "wget -O mirror_config.sh ${user_data_script_apt_url}"
+                // download create-config-drive
+                // FIXME: this should be refactored to use git clone, so the script can be downloaded from a custom repo.
+                def config_drive_script_url = "https://raw.githubusercontent.com/Mirantis/mcp-common-scripts/master/config-drive/create_config_drive.sh"
+                def user_data_script_url = "https://raw.githubusercontent.com/Mirantis/mcp-common-scripts/master/config-drive/master_config.sh"
 
-                def smc_apt = [:]
-                smc_apt['SALT_MASTER_DEPLOY_IP'] = templateContext['default_context']['salt_master_management_address']
-                smc_apt['APTLY_DEPLOY_IP'] = templateContext['default_context']['aptly_server_deploy_address']
-                smc_apt['APTLY_DEPLOY_NETMASK'] = templateContext['default_context']['deploy_network_netmask']
-                smc_apt['APTLY_MINION_ID'] = "${aptlyServerHostname}.${clusterDomain}"
+                sh "wget -O create-config-drive ${config_drive_script_url} && chmod +x create-config-drive"
+                sh "wget -O user_data.sh ${user_data_script_url}"
 
-                for (i in common.entries(smc_apt)) {
-                    sh "sed -i \"s,export ${i[0]}=.*,export ${i[0]}=${i[1]},\" mirror_config.sh"
+                sh "git clone --mirror https://github.com/Mirantis/mk-pipelines.git ${pipelineEnv}/mk-pipelines"
+                sh "git clone --mirror https://github.com/Mirantis/pipeline-library.git ${pipelineEnv}/pipeline-library"
+                args = "--user-data user_data.sh --hostname ${saltMaster} --model ${modelEnv} --mk-pipelines ${pipelineEnv}/mk-pipelines/ --pipeline-library ${pipelineEnv}/pipeline-library/ ${saltMaster}.${clusterDomain}-config.iso"
+
+                // load data from model
+                def smc = [:]
+                smc['SALT_MASTER_MINION_ID'] = "${saltMaster}.${clusterDomain}"
+                smc['SALT_MASTER_DEPLOY_IP'] = templateContext['default_context']['salt_master_management_address']
+                smc['DEPLOY_NETWORK_GW'] = templateContext['default_context']['deploy_network_gateway']
+                smc['DEPLOY_NETWORK_NETMASK'] = templateContext['default_context']['deploy_network_netmask']
+                smc['DNS_SERVERS'] = templateContext['default_context']['dns_server01']
+                if (templateContext['default_context']['local_repositories'] == 'True'){
+                    smc['PIPELINES_FROM_ISO'] = 'false'
+                    smc['PIPELINE_REPO_URL'] = 'http://' + templateContext['default_context']['aptly_server_deploy_address'] + ':8088'
+                }
+                if (templateContext['default_context']['upstream_proxy_enabled'] == 'True'){
+                    if (templateContext['default_context']['upstream_proxy_auth_enabled'] == 'True'){
+                        smc['http_proxy'] = 'http://' + templateContext['default_context']['upstream_proxy_user'] + ':' + templateContext['default_context']['upstream_proxy_password'] + '@' + templateContext['default_context']['upstream_proxy_address'] + ':' + templateContext['default_context']['upstream_proxy_port']
+                        smc['https_proxy'] = 'http://' + templateContext['default_context']['upstream_proxy_user'] + ':' + templateContext['default_context']['upstream_proxy_password'] + '@' + templateContext['default_context']['upstream_proxy_address'] + ':' + templateContext['default_context']['upstream_proxy_port']
+                    } else {
+                        smc['http_proxy'] = 'http://' + templateContext['default_context']['upstream_proxy_address'] + ':' + templateContext['default_context']['upstream_proxy_port']
+                        smc['https_proxy'] = 'http://' + templateContext['default_context']['upstream_proxy_address'] + ':' + templateContext['default_context']['upstream_proxy_port']
+                    }
                 }
 
-                // create apt config-drive
-                sh "./create-config-drive --user-data mirror_config.sh --hostname ${aptlyServerHostname} ${aptlyServerHostname}.${clusterDomain}-config.iso"
-                sh("mv ${aptlyServerHostname}.${clusterDomain}-config.iso output-${clusterName}/")
+                for (i in common.entries(smc)) {
+                    sh "sed -i \"s,export ${i[0]}=.*,export ${i[0]}=${i[1]},\" user_data.sh"
+                }
 
-                // save apt iso to artifacts
-                archiveArtifacts artifacts: "output-${clusterName}/${aptlyServerHostname}.${clusterDomain}-config.iso"
+                // create cfg config-drive
+                sh "./create-config-drive ${args}"
+                sh("mkdir output-${clusterName} && mv ${saltMaster}.${clusterDomain}-config.iso output-${clusterName}/")
+
+                // save cfg iso to artifacts
+                archiveArtifacts artifacts: "output-${clusterName}/${saltMaster}.${clusterDomain}-config.iso"
+
+                if (templateContext['default_context']['local_repositories'] == 'True'){
+                    def aptlyServerHostname = templateContext.default_context.aptly_server_hostname
+                    def user_data_script_apt_url = "https://raw.githubusercontent.com/Mirantis/mcp-common-scripts/master/config-drive/mirror_config.sh"
+                    sh "wget -O mirror_config.sh ${user_data_script_apt_url}"
+
+                    def smc_apt = [:]
+                    smc_apt['SALT_MASTER_DEPLOY_IP'] = templateContext['default_context']['salt_master_management_address']
+                    smc_apt['APTLY_DEPLOY_IP'] = templateContext['default_context']['aptly_server_deploy_address']
+                    smc_apt['APTLY_DEPLOY_NETMASK'] = templateContext['default_context']['deploy_network_netmask']
+                    smc_apt['APTLY_MINION_ID'] = "${aptlyServerHostname}.${clusterDomain}"
+
+                    for (i in common.entries(smc_apt)) {
+                        sh "sed -i \"s,export ${i[0]}=.*,export ${i[0]}=${i[1]},\" mirror_config.sh"
+                    }
+
+                    // create apt config-drive
+                    sh "./create-config-drive --user-data mirror_config.sh --hostname ${aptlyServerHostname} ${aptlyServerHostname}.${clusterDomain}-config.iso"
+                    sh("mv ${aptlyServerHostname}.${clusterDomain}-config.iso output-${clusterName}/")
+
+                    // save apt iso to artifacts
+                    archiveArtifacts artifacts: "output-${clusterName}/${aptlyServerHostname}.${clusterDomain}-config.iso"
+                }
             }
-        }
 
-        stage ('Save changes reclass model') {
-            sh(returnStatus: true, script: "tar -zcf output-${clusterName}/${clusterName}.tar.gz -C ${modelEnv} .")
-            archiveArtifacts artifacts: "output-${clusterName}/${clusterName}.tar.gz"
+            stage ('Save changes reclass model') {
+                sh(returnStatus: true, script: "tar -zcf output-${clusterName}/${clusterName}.tar.gz -C ${modelEnv} .")
+                archiveArtifacts artifacts: "output-${clusterName}/${clusterName}.tar.gz"
 
 
-            if (EMAIL_ADDRESS != null && EMAIL_ADDRESS != "") {
-                 emailext(to: EMAIL_ADDRESS,
-                          attachmentsPattern: "output-${clusterName}/*",
-                          body: "Mirantis Jenkins\n\nRequested reclass model ${clusterName} has been created and attached to this email.\nEnjoy!\n\nMirantis",
-                          subject: "Your Salt model ${clusterName}")
+                if (EMAIL_ADDRESS != null && EMAIL_ADDRESS != "") {
+                     emailext(to: EMAIL_ADDRESS,
+                              attachmentsPattern: "output-${clusterName}/*",
+                              body: "Mirantis Jenkins\n\nRequested reclass model ${clusterName} has been created and attached to this email.\nEnjoy!\n\nMirantis",
+                              subject: "Your Salt model ${clusterName}")
+                }
+                dir("output-${clusterName}"){
+                    deleteDir()
+                }
             }
-            dir("output-${clusterName}"){
-                deleteDir()
-            }
-        }
 
-    } catch (Throwable e) {
-         // If there was an error or exception thrown, the build failed
-         currentBuild.result = "FAILURE"
-         currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
-         throw e
-    } finally {
-        stage ('Clean workspace directories') {
-            sh(returnStatus: true, script: "rm -rf ${templateEnv}")
-            sh(returnStatus: true, script: "rm -rf ${modelEnv}")
-            sh(returnStatus: true, script: "rm -rf ${pipelineEnv}")
+        } catch (Throwable e) {
+             // If there was an error or exception thrown, the build failed
+             currentBuild.result = "FAILURE"
+             currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+             throw e
+        } finally {
+            stage ('Clean workspace directories') {
+                sh(returnStatus: true, script: "rm -rf ${templateEnv}")
+                sh(returnStatus: true, script: "rm -rf ${modelEnv}")
+                sh(returnStatus: true, script: "rm -rf ${pipelineEnv}")
+            }
+             // common.sendNotification(currentBuild.result,"",["slack"])
         }
-         // common.sendNotification(currentBuild.result,"",["slack"])
     }
 }
diff --git a/git-merge-branches-pipeline.groovy b/git-merge-branches-pipeline.groovy
index 8293f87..d1c3ee2 100644
--- a/git-merge-branches-pipeline.groovy
+++ b/git-merge-branches-pipeline.groovy
@@ -9,21 +9,23 @@
 
 def common = new com.mirantis.mk.Common()
 def git = new com.mirantis.mk.Git()
-node {
-  try{
-    stage("checkout") {
-      git.checkoutGitRepository('repo', REPO_URL, TARGET_BRANCH, IMAGE_CREDENTIALS_ID)
-    }
-    stage("merge") {
-      dir("repo"){
-        sh("git fetch origin/${SOURCE_BRANCH} && git merge ${SOURCE_BRANCH} && git push origin ${TARGET_BRANCH}")
+timeout(time: 12, unit: 'HOURS') {
+  node {
+    try{
+      stage("checkout") {
+        git.checkoutGitRepository('repo', REPO_URL, TARGET_BRANCH, IMAGE_CREDENTIALS_ID)
       }
+      stage("merge") {
+        dir("repo"){
+          sh("git fetch origin/${SOURCE_BRANCH} && git merge ${SOURCE_BRANCH} && git push origin ${TARGET_BRANCH}")
+        }
+      }
+    } catch (Throwable e) {
+       // If there was an error or exception thrown, the build failed
+       currentBuild.result = "FAILURE"
+       currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+       throw e
     }
-  } catch (Throwable e) {
-     // If there was an error or exception thrown, the build failed
-     currentBuild.result = "FAILURE"
-     currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
-     throw e
   }
 }
 
diff --git a/git-mirror-2way-pipeline.groovy b/git-mirror-2way-pipeline.groovy
index c20af8f..5dfb4d1 100644
--- a/git-mirror-2way-pipeline.groovy
+++ b/git-mirror-2way-pipeline.groovy
@@ -1,47 +1,49 @@
 def common = new com.mirantis.mk.Common()
 def git = new com.mirantis.mk.Git()
 stage("Mirror") {
-  node() {
-    try{
-      def branches = BRANCHES.tokenize(',')
-      def pollBranches = []
-      for (i=0; i < branches.size; i++) {
-          pollBranches.add([name:branches[i]])
-      }
-      dir("target") {
-        try{
-            checkout changelog: true, poll: true,
-              scm: [$class: 'GitSCM', branches: pollBranches, doGenerateSubmoduleConfigurations: false,
-              extensions: [[$class: 'CleanCheckout']], submoduleCfg: [], userRemoteConfigs: [[credentialsId: CREDENTIALS_ID, url: TARGET_URL]]]
-          } catch(hudson.AbortException e){
-            if(e.message.trim().equals("Couldn't find any revision to build. Verify the repository and branch configuration for this job.")){
-                common.warningMsg("Warning: Cannot checkout target repo source repo is empty")
-            } else {
-                throw e
-            }
-          }
-      }
-      dir("source") {
-        try{
-          checkout changelog: true, poll: true,
-            scm: [$class: 'GitSCM', branches: pollBranches, doGenerateSubmoduleConfigurations: false,
-            extensions: [[$class: 'CleanCheckout']],  submoduleCfg: [], userRemoteConfigs: [[credentialsId: CREDENTIALS_ID, url: SOURCE_URL]]]
-          } catch(hudson.AbortException e){
+  timeout(time: 12, unit: 'HOURS') {
+    node() {
+      try{
+        def branches = BRANCHES.tokenize(',')
+        def pollBranches = []
+        for (i=0; i < branches.size; i++) {
+            pollBranches.add([name:branches[i]])
+        }
+        dir("target") {
+          try{
+              checkout changelog: true, poll: true,
+                scm: [$class: 'GitSCM', branches: pollBranches, doGenerateSubmoduleConfigurations: false,
+                extensions: [[$class: 'CleanCheckout']], submoduleCfg: [], userRemoteConfigs: [[credentialsId: CREDENTIALS_ID, url: TARGET_URL]]]
+            } catch(hudson.AbortException e){
               if(e.message.trim().equals("Couldn't find any revision to build. Verify the repository and branch configuration for this job.")){
-                common.warningMsg("Warning: Cannot checkout source repo source repo is empty")
+                  common.warningMsg("Warning: Cannot checkout target repo source repo is empty")
               } else {
                   throw e
               }
-          }
-        git.mirrorGit(SOURCE_URL, TARGET_URL, CREDENTIALS_ID, BRANCHES, true, true, false)
+            }
+        }
+        dir("source") {
+          try{
+            checkout changelog: true, poll: true,
+              scm: [$class: 'GitSCM', branches: pollBranches, doGenerateSubmoduleConfigurations: false,
+              extensions: [[$class: 'CleanCheckout']],  submoduleCfg: [], userRemoteConfigs: [[credentialsId: CREDENTIALS_ID, url: SOURCE_URL]]]
+            } catch(hudson.AbortException e){
+                if(e.message.trim().equals("Couldn't find any revision to build. Verify the repository and branch configuration for this job.")){
+                  common.warningMsg("Warning: Cannot checkout source repo source repo is empty")
+                } else {
+                    throw e
+                }
+            }
+          git.mirrorGit(SOURCE_URL, TARGET_URL, CREDENTIALS_ID, BRANCHES, true, true, false)
+        }
+      } catch (Throwable e) {
+         // If there was an error or exception thrown, the build failed
+         currentBuild.result = "FAILURE"
+         currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+         throw e
+      } finally {
+         common.sendNotification(currentBuild.result,"",["slack"])
       }
-    } catch (Throwable e) {
-       // If there was an error or exception thrown, the build failed
-       currentBuild.result = "FAILURE"
-       currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
-       throw e
-    } finally {
-       common.sendNotification(currentBuild.result,"",["slack"])
     }
   }
 }
diff --git a/git-mirror-pipeline.groovy b/git-mirror-pipeline.groovy
index 5035fe6..b58358b 100644
--- a/git-mirror-pipeline.groovy
+++ b/git-mirror-pipeline.groovy
@@ -1,26 +1,28 @@
 def common = new com.mirantis.mk.Common()
 def git = new com.mirantis.mk.Git()
 stage("Mirror") {
-  node() {
-    try{
-      def branches = BRANCHES.tokenize(',')
-      def pollBranches = []
-      for (i=0; i < branches.size; i++) {
-          pollBranches.add([name:branches[i]])
+  timeout(time: 12, unit: 'HOURS') {
+    node() {
+      try{
+        def branches = BRANCHES.tokenize(',')
+        def pollBranches = []
+        for (i=0; i < branches.size; i++) {
+            pollBranches.add([name:branches[i]])
+        }
+        dir("source") {
+          checkout changelog: true, poll: true,
+            scm: [$class: 'GitSCM', branches: pollBranches, doGenerateSubmoduleConfigurations: false,
+            extensions: [[$class: 'CleanCheckout']], submoduleCfg: [], userRemoteConfigs: [[credentialsId: CREDENTIALS_ID, url: SOURCE_URL]]]
+          git.mirrorGit(SOURCE_URL, TARGET_URL, CREDENTIALS_ID, BRANCHES)
+        }
+      } catch (Throwable e) {
+         // If there was an error or exception thrown, the build failed
+         currentBuild.result = "FAILURE"
+         currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+         throw e
+      } finally {
+         common.sendNotification(currentBuild.result,"",["slack"])
       }
-      dir("source") {
-        checkout changelog: true, poll: true,
-          scm: [$class: 'GitSCM', branches: pollBranches, doGenerateSubmoduleConfigurations: false,
-          extensions: [[$class: 'CleanCheckout']], submoduleCfg: [], userRemoteConfigs: [[credentialsId: CREDENTIALS_ID, url: SOURCE_URL]]]
-        git.mirrorGit(SOURCE_URL, TARGET_URL, CREDENTIALS_ID, BRANCHES)
-      }
-    } catch (Throwable e) {
-       // If there was an error or exception thrown, the build failed
-       currentBuild.result = "FAILURE"
-       currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
-       throw e
-    } finally {
-       common.sendNotification(currentBuild.result,"",["slack"])
     }
   }
 }
diff --git a/ironic-node-provision-pipeline.groovy b/ironic-node-provision-pipeline.groovy
index 05e5313..1826100 100644
--- a/ironic-node-provision-pipeline.groovy
+++ b/ironic-node-provision-pipeline.groovy
@@ -72,135 +72,136 @@
     return failed_nodes
 }
 
+timeout(time: 12, unit: 'HOURS') {
+    node("python") {
+        try {
+            // Set build-specific variables
+            venv = "${env.WORKSPACE}/venv"
 
-node("python") {
-    try {
-        // Set build-specific variables
-        venv = "${env.WORKSPACE}/venv"
-
-        def required_params = ['IRONIC_AUTHORIZATION_PROFILE', 'IRONIC_DEPLOY_NODES']
-        def missed_params = []
-        for (param in required_params) {
-            if (env[param] == '' ) {
-                missed_params.add(param)
-            }
-        }
-        if (missed_params){
-            common.errorMsg(missed_params.join(', ') + " should be set.")
-        }
-
-        if (IRONIC_DEPLOY_PROFILE == '' && IRONIC_DEPLOY_NODES != 'all'){
-            common.errorMsg("IRONIC_DEPLOY_PROFILE should be set when deploying specific nodes.")
-        }
-
-        if (SALT_MASTER_URL == '' && STACK_NAME == ''){
-            common.errorMsg("Any of SALT_MASTER_URL or STACK_NAME should be defined.")
-        }
-
-        if (SALT_MASTER_URL == '' && STACK_NAME != '') {
-            // Get SALT_MASTER_URL machines
-            stage ('Getting SALT_MASTER_URL') {
-
-                outputs.put('stack_type', STACK_TYPE)
-
-                if (STACK_TYPE == 'heat') {
-                    // value defaults
-                    envParams = [
-                        'cluster_zone': HEAT_STACK_ZONE,
-                        'cluster_public_net': HEAT_STACK_PUBLIC_NET
-                    ]
-
-                    // create openstack env
-                    openstack.setupOpenstackVirtualenv(venv, OPENSTACK_API_CLIENT)
-                    openstackCloud = openstack.createOpenstackEnv(
-                        OPENSTACK_API_URL, OPENSTACK_API_CREDENTIALS,
-                        OPENSTACK_API_PROJECT, OPENSTACK_API_PROJECT_DOMAIN,
-                        OPENSTACK_API_PROJECT_ID, OPENSTACK_API_USER_DOMAIN,
-                        OPENSTACK_API_VERSION)
-                    openstack.getKeystoneToken(openstackCloud, venv)
-
-
-                    // get SALT_MASTER_URL
-                    saltMasterHost = openstack.getHeatStackOutputParam(openstackCloud, STACK_NAME, 'salt_master_ip', venv)
-
-                } else if (STACK_TYPE == 'aws') {
-
-                    // setup environment
-                    aws.setupVirtualEnv(venv)
-
-                    // set aws_env_vars
-                    aws_env_vars = aws.getEnvVars(AWS_API_CREDENTIALS, AWS_STACK_REGION)
-
-                    // get outputs
-                    saltMasterHost = aws.getOutputs(venv, aws_env_vars, STACK_NAME, 'SaltMasterIP')
+            def required_params = ['IRONIC_AUTHORIZATION_PROFILE', 'IRONIC_DEPLOY_NODES']
+            def missed_params = []
+            for (param in required_params) {
+                if (env[param] == '' ) {
+                    missed_params.add(param)
                 }
+            }
+            if (missed_params){
+                common.errorMsg(missed_params.join(', ') + " should be set.")
+            }
 
-                if (SALT_MASTER_URL == ''){
-                    // check that saltMasterHost is valid
-                    if (!saltMasterHost || !saltMasterHost.matches(ipRegex)) {
-                        common.errorMsg("saltMasterHost is not a valid ip, value is: ${saltMasterHost}")
-                        throw new Exception("saltMasterHost is not a valid ip")
+            if (IRONIC_DEPLOY_PROFILE == '' && IRONIC_DEPLOY_NODES != 'all'){
+                common.errorMsg("IRONIC_DEPLOY_PROFILE should be set when deploying specific nodes.")
+            }
+
+            if (SALT_MASTER_URL == '' && STACK_NAME == ''){
+                common.errorMsg("Any of SALT_MASTER_URL or STACK_NAME should be defined.")
+            }
+
+            if (SALT_MASTER_URL == '' && STACK_NAME != '') {
+                // Get SALT_MASTER_URL machines
+                stage ('Getting SALT_MASTER_URL') {
+
+                    outputs.put('stack_type', STACK_TYPE)
+
+                    if (STACK_TYPE == 'heat') {
+                        // value defaults
+                        envParams = [
+                            'cluster_zone': HEAT_STACK_ZONE,
+                            'cluster_public_net': HEAT_STACK_PUBLIC_NET
+                        ]
+
+                        // create openstack env
+                        openstack.setupOpenstackVirtualenv(venv, OPENSTACK_API_CLIENT)
+                        openstackCloud = openstack.createOpenstackEnv(
+                            OPENSTACK_API_URL, OPENSTACK_API_CREDENTIALS,
+                            OPENSTACK_API_PROJECT, OPENSTACK_API_PROJECT_DOMAIN,
+                            OPENSTACK_API_PROJECT_ID, OPENSTACK_API_USER_DOMAIN,
+                            OPENSTACK_API_VERSION)
+                        openstack.getKeystoneToken(openstackCloud, venv)
+
+
+                        // get SALT_MASTER_URL
+                        saltMasterHost = openstack.getHeatStackOutputParam(openstackCloud, STACK_NAME, 'salt_master_ip', venv)
+
+                    } else if (STACK_TYPE == 'aws') {
+
+                        // setup environment
+                        aws.setupVirtualEnv(venv)
+
+                        // set aws_env_vars
+                        aws_env_vars = aws.getEnvVars(AWS_API_CREDENTIALS, AWS_STACK_REGION)
+
+                        // get outputs
+                        saltMasterHost = aws.getOutputs(venv, aws_env_vars, STACK_NAME, 'SaltMasterIP')
                     }
-                    currentBuild.description = "${STACK_NAME} ${saltMasterHost}"
-                    SALT_MASTER_URL = "http://${saltMasterHost}:6969"
+
+                    if (SALT_MASTER_URL == ''){
+                        // check that saltMasterHost is valid
+                        if (!saltMasterHost || !saltMasterHost.matches(ipRegex)) {
+                            common.errorMsg("saltMasterHost is not a valid ip, value is: ${saltMasterHost}")
+                            throw new Exception("saltMasterHost is not a valid ip")
+                        }
+                        currentBuild.description = "${STACK_NAME} ${saltMasterHost}"
+                        SALT_MASTER_URL = "http://${saltMasterHost}:6969"
+                    } else {
+                        currentBuild.description = "${STACK_NAME}"
+                    }
+                }
+            }
+
+            outputs.put('salt_api', SALT_MASTER_URL)
+
+            python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+
+
+
+            def nodes_to_deploy=[]
+
+            stage('Trigger deployment on nodes') {
+                if (IRONIC_DEPLOY_PARTITION_PROFILE == '' && IRONIC_DEPLOY_PROFILE == '' && IRONIC_DEPLOY_NODES == 'all'){
+                    common.infoMsg("Trigger ironic.deploy")
+                    salt.enforceState(pepperEnv, RUN_TARGET, ['ironic.deploy'], true)
                 } else {
-                    currentBuild.description = "${STACK_NAME}"
+                    if (IRONIC_DEPLOY_NODES == 'all'){
+                         res = salt.runSaltProcessStep(pepperEnv, RUN_TARGET, 'ironicng.list_nodes', ["profile=${IRONIC_AUTHORIZATION_PROFILE}"], null, true)
+                         // We trigger deployment on single salt minion
+                         for (n in res['return'][0].values()[0]['nodes']){
+                            nodes_to_deploy.add(n['name'])
+                         }
+                    } else {
+                        nodes_to_deploy = IRONIC_DEPLOY_NODES.tokenize(',')
+                    }
+
+                    def cmd_params = ["profile=${IRONIC_AUTHORIZATION_PROFILE}", "deployment_profile=${IRONIC_DEPLOY_PROFILE}"]
+
+                    if (IRONIC_DEPLOY_PARTITION_PROFILE){
+                        cmd_params.add("partition_profile=${IRONIC_DEPLOY_PARTITION_PROFILE}")
+                    }
+
+                    for (n in nodes_to_deploy){
+                        common.infoMsg("Trigger deployment of ${n}")
+                        salt.runSaltProcessStep(pepperEnv, RUN_TARGET, 'ironicng.deploy_node', ["${n}"] + cmd_params, null, true)
+                    }
                 }
             }
-        }
 
-        outputs.put('salt_api', SALT_MASTER_URL)
-
-        python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
-
-
-
-        def nodes_to_deploy=[]
-
-        stage('Trigger deployment on nodes') {
-            if (IRONIC_DEPLOY_PARTITION_PROFILE == '' && IRONIC_DEPLOY_PROFILE == '' && IRONIC_DEPLOY_NODES == 'all'){
-                common.infoMsg("Trigger ironic.deploy")
-                salt.enforceState(pepperEnv, RUN_TARGET, ['ironic.deploy'], true)
-            } else {
-                if (IRONIC_DEPLOY_NODES == 'all'){
-                     res = salt.runSaltProcessStep(pepperEnv, RUN_TARGET, 'ironicng.list_nodes', ["profile=${IRONIC_AUTHORIZATION_PROFILE}"], null, true)
-                     // We trigger deployment on single salt minion
-                     for (n in res['return'][0].values()[0]['nodes']){
-                        nodes_to_deploy.add(n['name'])
-                     }
+            stage('Wait for deployment to finish') {
+                def failed_nodes = waitIronicDeployment(pepperEnv, nodes_to_deploy, RUN_TARGET, IRONIC_AUTHORIZATION_PROFILE, IRONIC_DEPLOY_TIMEOUT)
+                if (failed_nodes){
+                    common.errorMsg("Some nodes failed to deploy: " + failed_nodes.join(", "))
+                    currentBuild.result = 'FAILURE'
                 } else {
-                    nodes_to_deploy = IRONIC_DEPLOY_NODES.tokenize(',')
-                }
-
-                def cmd_params = ["profile=${IRONIC_AUTHORIZATION_PROFILE}", "deployment_profile=${IRONIC_DEPLOY_PROFILE}"]
-
-                if (IRONIC_DEPLOY_PARTITION_PROFILE){
-                    cmd_params.add("partition_profile=${IRONIC_DEPLOY_PARTITION_PROFILE}")
-                }
-
-                for (n in nodes_to_deploy){
-                    common.infoMsg("Trigger deployment of ${n}")
-                  salt.runSaltProcessStep(pepperEnv, RUN_TARGET, 'ironicng.deploy_node', ["${n}"] + cmd_params, null, true)
+                    common.successMsg("All nodes are deployed successfully.")
                 }
             }
-        }
 
-        stage('Waiting for deployment is done.') {
-            def failed_nodes = waitIronicDeployment(pepperEnv, nodes_to_deploy, RUN_TARGET, IRONIC_AUTHORIZATION_PROFILE, IRONIC_DEPLOY_TIMEOUT)
-            if (failed_nodes){
-                common.errorMsg("Some nodes: " + failed_nodes.join(", ") + " are failed to deploy")
-                currentBuild.result = 'FAILURE'
-            } else {
-                common.successMsg("All nodes are deployed successfully.")
-            }
+            outputsPretty = common.prettify(outputs)
+            print(outputsPretty)
+            writeFile(file: 'outputs.json', text: outputsPretty)
+            archiveArtifacts(artifacts: 'outputs.json')
+        } catch (Throwable e) {
+            currentBuild.result = 'FAILURE'
+            throw e
         }
-
-        outputsPretty = common.prettify(outputs)
-        print(outputsPretty)
-        writeFile(file: 'outputs.json', text: outputsPretty)
-        archiveArtifacts(artifacts: 'outputs.json')
-    } catch (Throwable e) {
-        currentBuild.result = 'FAILURE'
-        throw e
     }
 }
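
The new outer wrapper also interacts with this pipeline's own wait logic: waitIronicDeployment already takes IRONIC_DEPLOY_TIMEOUT, but any wait that would effectively run past 12 hours is now cut short by the enclosing timeout. If a tighter bound on just the wait step is ever needed, a nested timeout works; a hedged sketch (the MINUTES unit and the Integer conversion are assumptions for illustration, not taken from this pipeline):

    // Hedged sketch: a nested timeout bounds only the deployment wait,
    // independently of the new outer 12h cap. Assumes IRONIC_DEPLOY_TIMEOUT
    // is expressed in minutes -- adjust the unit to match the pipeline.
    timeout(time: Integer.valueOf(IRONIC_DEPLOY_TIMEOUT), unit: 'MINUTES') {
        def failed_nodes = waitIronicDeployment(pepperEnv, nodes_to_deploy, RUN_TARGET,
                                                IRONIC_AUTHORIZATION_PROFILE, IRONIC_DEPLOY_TIMEOUT)
        if (failed_nodes) {
            error("Some nodes failed to deploy: " + failed_nodes.join(", "))
        }
    }
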
diff --git a/kafka-demo.groovy b/kafka-demo.groovy
index 50349a8..d884713 100644
--- a/kafka-demo.groovy
+++ b/kafka-demo.groovy
@@ -10,38 +10,39 @@
 common = new com.mirantis.mk.Common()
 salt = new com.mirantis.mk.Salt()
 orchestrate = new com.mirantis.mk.Orchestrate()
+timeout(time: 12, unit: 'HOURS') {
+    node {
 
-node {
+        // connection objects
+        def master
 
-    // connection objects
-    def master
-
-    stage("Connect to Salt master") {
-        master = salt.connection(SALT_URL, SALT_MASTER_CREDENTIALS)
-    }
+        stage("Connect to Salt master") {
+            master = salt.connection(SALT_URL, SALT_MASTER_CREDENTIALS)
+        }
 
 
-    stage("Enforce kubernetes.control") {
-        common.infoMsg('Enforcing kubernetes.control on I@kubernetes:master')
+        stage("Enforce kubernetes.control") {
+            common.infoMsg('Enforcing kubernetes.control on I@kubernetes:master')
 
-        salt.runSaltProcessStep(
-            master,
-            'I@kubernetes:master',
-            'state.sls',
-            ['kubernetes.control'],
-        )
-    }
+            salt.runSaltProcessStep(
+                master,
+                'I@kubernetes:master',
+                'state.sls',
+                ['kubernetes.control'],
+            )
+        }
 
-    stage("setup-components") {
-        common.infoMsg('Setting up components')
+        stage("setup-components") {
+            common.infoMsg('Setting up components')
 
-        salt.runSaltProcessStep(
-            master,
-            'I@kubernetes:master',
-            'cmd.run',
-            ['/bin/bash -c \'find /srv/kubernetes/ -type d | grep -v jobs | while read i; do ls $i/*.yml &>/dev/null && (set -x; hyperkube kubectl apply -f $i || echo Command failed; set +x); done;\'']
-        )
+            salt.runSaltProcessStep(
+                master,
+                'I@kubernetes:master',
+                'cmd.run',
+                ['/bin/bash -c \'find /srv/kubernetes/ -type d | grep -v jobs | while read i; do ls $i/*.yml &>/dev/null && (set -x; hyperkube kubectl apply -f $i || echo Command failed; set +x); done;\'']
+            )
+
+        }
 
     }
-
 }
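
The setup-components stage above passes a fairly dense one-liner to cmd.run. For readability, the same logic in multi-line form, shown here as a local sh step purely for formatting (in the pipeline it runs on the target minions via cmd.run; hyperkube kubectl and the /srv/kubernetes layout are taken from the command above):

    // Same logic as the cmd.run one-liner above, reformatted for readability:
    // apply every directory under /srv/kubernetes/ that contains *.yml files,
    // skipping the jobs directories.
    sh '''#!/bin/bash
    find /srv/kubernetes/ -type d | grep -v jobs | while read i; do
        ls $i/*.yml &>/dev/null && (
            set -x
            hyperkube kubectl apply -f $i || echo Command failed
            set +x
        )
    done
    '''
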
diff --git a/mk-k8s-cleanup-pipeline.groovy b/mk-k8s-cleanup-pipeline.groovy
index cf7e77d..db5aa8a 100644
--- a/mk-k8s-cleanup-pipeline.groovy
+++ b/mk-k8s-cleanup-pipeline.groovy
@@ -16,28 +16,29 @@
 git = new com.mirantis.mk.Git()
 openstack = new com.mirantis.mk.Openstack()
 salt = new com.mirantis.mk.Salt()
+timeout(time: 12, unit: 'HOURS') {
+    node {
 
-node {
+        // connection objects
+        def openstackCloud
+        def saltMaster
 
-    // connection objects
-    def openstackCloud
-    def saltMaster
+        // value defaults
+        def openstackVersion = OPENSTACK_API_CLIENT ? OPENSTACK_API_CLIENT : 'liberty'
+        def openstackEnv = "${env.WORKSPACE}/venv"
 
-    // value defaults
-    def openstackVersion = OPENSTACK_API_CLIENT ? OPENSTACK_API_CLIENT : 'liberty'
-    def openstackEnv = "${env.WORKSPACE}/venv"
+        stage('Install OpenStack env') {
+            openstack.setupOpenstackVirtualenv(openstackEnv, openstackVersion)
+        }
 
-    stage('Install OpenStack env') {
-        openstack.setupOpenstackVirtualenv(openstackEnv, openstackVersion)
+        stage('Connect to OpenStack cloud') {
+            openstackCloud = openstack.createOpenstackEnv(OPENSTACK_API_URL, OPENSTACK_API_CREDENTIALS, OPENSTACK_API_PROJECT)
+            openstack.getKeystoneToken(openstackCloud, openstackEnv)
+        }
+
+        stage('Delete Heat stack') {
+            openstack.deleteHeatStack(openstackCloud, HEAT_STACK_NAME, openstackEnv)
+        }
+
     }
-
-    stage('Connect to OpenStack cloud') {
-        openstackCloud = openstack.createOpenstackEnv(OPENSTACK_API_URL, OPENSTACK_API_CREDENTIALS, OPENSTACK_API_PROJECT)
-        openstack.getKeystoneToken(openstackCloud, openstackEnv)
-    }
-
-    stage('Delete Heat stack') {
-        openstack.deleteHeatStack(openstackCloud, HEAT_STACK_NAME, openstackEnv)
-    }
-
 }
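
A small Groovy note on the defaulting pattern used in this and the following pipelines: the ternary OPENSTACK_API_CLIENT ? OPENSTACK_API_CLIENT : 'liberty' can be written more concisely with the Elvis operator, with the same truthiness behaviour:

    // Equivalent default using Groovy's Elvis operator.
    def openstackVersion = OPENSTACK_API_CLIENT ?: 'liberty'
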
diff --git a/mk-k8s-simple-deploy-pipeline.groovy b/mk-k8s-simple-deploy-pipeline.groovy
index e88d482..39ddc9c 100644
--- a/mk-k8s-simple-deploy-pipeline.groovy
+++ b/mk-k8s-simple-deploy-pipeline.groovy
@@ -33,96 +33,97 @@
 
 def pepperEnv = "pepperEnv"
 artifacts_dir = "_artifacts"
+timeout(time: 12, unit: 'HOURS') {
+    node {
 
-node {
+        // connection objects
+        def openstackCloud
 
-    // connection objects
-    def openstackCloud
+        // value defaults
+        def openstackVersion = OPENSTACK_API_CLIENT ? OPENSTACK_API_CLIENT : 'liberty'
+        def openstackEnv = "${env.WORKSPACE}/venv"
 
-    // value defaults
-    def openstackVersion = OPENSTACK_API_CLIENT ? OPENSTACK_API_CLIENT : 'liberty'
-    def openstackEnv = "${env.WORKSPACE}/venv"
-
-    if (HEAT_STACK_NAME == "") {
-        HEAT_STACK_NAME = BUILD_TAG
-    }
-
-    stage ('Download Heat templates') {
-        git.checkoutGitRepository('template', HEAT_TEMPLATE_URL, HEAT_TEMPLATE_BRANCH, HEAT_TEMPLATE_CREDENTIALS)
-    }
-
-    stage('Install OpenStack env') {
-        openstack.setupOpenstackVirtualenv(openstackEnv, openstackVersion)
-    }
-
-    stage('Connect to OpenStack cloud') {
-        openstackCloud = openstack.createOpenstackEnv(OPENSTACK_API_URL, OPENSTACK_API_CREDENTIALS, OPENSTACK_API_PROJECT,
-        "", OPENSTACK_API_PROJECT_DOMAIN_ID, OPENSTACK_API_USER_DOMAIN_ID, OPENSTACK_API_VERSION)
-        openstack.getKeystoneToken(openstackCloud, openstackEnv)
-    }
-
-    stage('Launch new Heat stack') {
-        envParams = [
-                'instance_zone': HEAT_STACK_ZONE,
-                'public_net': HEAT_STACK_PUBLIC_NET
-        ]
-        openstack.createHeatStack(openstackCloud, HEAT_STACK_NAME, HEAT_STACK_TEMPLATE, envParams, HEAT_STACK_ENVIRONMENT, openstackEnv)
-    }
-
-    stage("Connect to Salt master") {
-        saltMasterHost = openstack.getHeatStackOutputParam(openstackCloud, HEAT_STACK_NAME, 'salt_master_ip', openstackEnv)
-        saltMasterUrl = "http://${saltMasterHost}:8088"
-        python.setupPepperVirtualenv(pepperEnv, saltMasterUrl, SALT_MASTER_CREDENTIALS)
-    }
-
-    stage("Install core infra") {
-        orchestrate.installFoundationInfra(pepperEnv)
-        orchestrate.validateFoundationInfra(pepperEnv)
-    }
-
-    stage("Install Kubernetes infra") {
-        orchestrate.installOpenstackMcpInfra(pepperEnv)
-    }
-
-    stage("Install Kubernetes control") {
-        orchestrate.installOpenstackMcpControl(pepperEnv)
-    }
-
-    if (RUN_TESTS == "1") {
-        sleep(30)
-        stage('Run k8s bootstrap tests') {
-            test.runConformanceTests(pepperEnv, 'ctl01*', K8S_API_SERVER, 'tomkukral/k8s-scripts')
+        if (HEAT_STACK_NAME == "") {
+            HEAT_STACK_NAME = BUILD_TAG
         }
 
-        stage("Run k8s conformance e2e tests") {
-            test.runConformanceTests(pepperEnv, 'ctl01*', K8S_API_SERVER, CONFORMANCE_IMAGE)
+        stage ('Download Heat templates') {
+            git.checkoutGitRepository('template', HEAT_TEMPLATE_URL, HEAT_TEMPLATE_BRANCH, HEAT_TEMPLATE_CREDENTIALS)
         }
 
-        stage("Copy k8s e2e test output to config node ") {
-            test.copyTestsOutput(pepperEnv,CONFORMANCE_IMAGE)
+        stage('Install OpenStack env') {
+            openstack.setupOpenstackVirtualenv(openstackEnv, openstackVersion)
         }
 
-        stage("Copy k8s e2e test output to host ") {
-            sh '''
-                mkdir ${env.WORKSPACE}/${artifacts_dir}
-               '''
-            try {
-                test.catTestsOutput(pepperEnv,CONFORMANCE_IMAGE) >> ${env.WORKSPACE}/${artifacts_dir}/$CONFORMANCE_IMAGE
-            } catch (InterruptedException x) {
-                echo "The job was aborted"
-            } finally {
-                archiveArtifacts allowEmptyArchive: true, artifacts: '_artifacts/*', excludes: null
-                junit keepLongStdio: true, testResults: '_artifacts/**.xml'
-                sh "sudo chown -R jenkins:jenkins ${env.WORKSPACE}"
+        stage('Connect to OpenStack cloud') {
+            openstackCloud = openstack.createOpenstackEnv(OPENSTACK_API_URL, OPENSTACK_API_CREDENTIALS, OPENSTACK_API_PROJECT,
+            "", OPENSTACK_API_PROJECT_DOMAIN_ID, OPENSTACK_API_USER_DOMAIN_ID, OPENSTACK_API_VERSION)
+            openstack.getKeystoneToken(openstackCloud, openstackEnv)
+        }
+
+        stage('Launch new Heat stack') {
+            envParams = [
+                    'instance_zone': HEAT_STACK_ZONE,
+                    'public_net': HEAT_STACK_PUBLIC_NET
+            ]
+            openstack.createHeatStack(openstackCloud, HEAT_STACK_NAME, HEAT_STACK_TEMPLATE, envParams, HEAT_STACK_ENVIRONMENT, openstackEnv)
+        }
+
+        stage("Connect to Salt master") {
+            saltMasterHost = openstack.getHeatStackOutputParam(openstackCloud, HEAT_STACK_NAME, 'salt_master_ip', openstackEnv)
+            saltMasterUrl = "http://${saltMasterHost}:8088"
+            python.setupPepperVirtualenv(pepperEnv, saltMasterUrl, SALT_MASTER_CREDENTIALS)
+        }
+
+        stage("Install core infra") {
+            orchestrate.installFoundationInfra(pepperEnv)
+            orchestrate.validateFoundationInfra(pepperEnv)
+        }
+
+        stage("Install Kubernetes infra") {
+            orchestrate.installOpenstackMcpInfra(pepperEnv)
+        }
+
+        stage("Install Kubernetes control") {
+            orchestrate.installOpenstackMcpControl(pepperEnv)
+        }
+
+        if (RUN_TESTS == "1") {
+            sleep(30)
+            stage('Run k8s bootstrap tests') {
+                test.runConformanceTests(pepperEnv, 'ctl01*', K8S_API_SERVER, 'tomkukral/k8s-scripts')
             }
 
-        }
-    }
+            stage("Run k8s conformance e2e tests") {
+                test.runConformanceTests(pepperEnv, 'ctl01*', K8S_API_SERVER, CONFORMANCE_IMAGE)
+            }
 
-    if (HEAT_STACK_DELETE == "1") {
-        stage('Trigger cleanup job') {
-            build job: 'mk-k8s-cleanup', parameters: [[$class: 'StringParameterValue', name: 'HEAT_STACK_NAME', value: HEAT_STACK_NAME]]
-        }
-    }
+            stage("Copy k8s e2e test output to config node ") {
+                test.copyTestsOutput(pepperEnv,CONFORMANCE_IMAGE)
+            }
 
+            stage("Copy k8s e2e test output to host ") {
+                sh '''
+                    mkdir ${env.WORKSPACE}/${artifacts_dir}
+                   '''
+                try {
+                    test.catTestsOutput(pepperEnv,CONFORMANCE_IMAGE) >> ${env.WORKSPACE}/${artifacts_dir}/$CONFORMANCE_IMAGE
+                } catch (InterruptedException x) {
+                    echo "The job was aborted"
+                } finally {
+                    archiveArtifacts allowEmptyArchive: true, artifacts: '_artifacts/*', excludes: null
+                    junit keepLongStdio: true, testResults: '_artifacts/**.xml'
+                    sh "sudo chown -R jenkins:jenkins ${env.WORKSPACE}"
+                }
+
+            }
+        }
+
+        if (HEAT_STACK_DELETE == "1") {
+            stage('Trigger cleanup job') {
+                build job: 'mk-k8s-cleanup', parameters: [[$class: 'StringParameterValue', name: 'HEAT_STACK_NAME', value: HEAT_STACK_NAME]]
+            }
+        }
+
+    }
 }
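
The "Copy k8s e2e test output to host" stage is carried over unchanged, but as written it appears unlikely to work: env.WORKSPACE and artifacts_dir sit inside a single-quoted sh block, so Groovy never interpolates them, and the bare >> ${env.WORKSPACE}/... expression is not valid Groovy outside a GString. A hedged sketch of a working variant, assuming test.catTestsOutput() returns the conformance output as a String (an assumption; verify against the com.mirantis.mk shared library before relying on it):

    // Hedged sketch of the artifact-copy step with the interpolation fixed:
    // env.WORKSPACE and artifacts_dir are expanded inside double-quoted
    // GStrings, and the output is written via writeFile.
    stage("Copy k8s e2e test output to host ") {
        sh "mkdir -p ${env.WORKSPACE}/${artifacts_dir}"
        try {
            // assumption: catTestsOutput() returns the output as a String
            def conformanceOutput = test.catTestsOutput(pepperEnv, CONFORMANCE_IMAGE)
            // image names contain '/', so sanitize before using one as a file name
            writeFile(file: "${artifacts_dir}/${CONFORMANCE_IMAGE.replaceAll('[/:]', '_')}",
                      text: "${conformanceOutput}")
        } catch (InterruptedException x) {
            echo "The job was aborted"
        } finally {
            archiveArtifacts allowEmptyArchive: true, artifacts: '_artifacts/*', excludes: null
            junit keepLongStdio: true, testResults: '_artifacts/**.xml'
            sh "sudo chown -R jenkins:jenkins ${env.WORKSPACE}"
        }
    }
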
diff --git a/mk-maaas-deploy-pipeline.groovy b/mk-maaas-deploy-pipeline.groovy
index 4142cec..924019e 100644
--- a/mk-maaas-deploy-pipeline.groovy
+++ b/mk-maaas-deploy-pipeline.groovy
@@ -24,50 +24,51 @@
 openstack = new com.mirantis.mk.openstack()
 salt = new com.mirantis.mk.salt()
 
-node {
+timeout(time: 12, unit: 'HOURS') {
+    node {
 
-    // connection objects
-    def openstackCloud
-    def saltMaster
+        // connection objects
+        def openstackCloud
+        def saltMaster
 
-    // value defaults
-    def openstackVersion = OPENSTACK_API_CLIENT ? OPENSTACK_API_CLIENT : "liberty"
-    def openstackEnv = "${env.WORKSPACE}/venv"
+        // value defaults
+        def openstackVersion = OPENSTACK_API_CLIENT ? OPENSTACK_API_CLIENT : "liberty"
+        def openstackEnv = "${env.WORKSPACE}/venv"
 
-    stage ('Download Heat templates') {
-        git.checkoutGitRepository('template', HEAT_TEMPLATE_URL, HEAT_TEMPLATE_BRANCH, HEAT_TEMPLATE_CREDENTIALS)
+        stage ('Download Heat templates') {
+            git.checkoutGitRepository('template', HEAT_TEMPLATE_URL, HEAT_TEMPLATE_BRANCH, HEAT_TEMPLATE_CREDENTIALS)
+        }
+
+        stage('Install OpenStack env') {
+            openstack.setupOpenstackVirtualenv(openstackEnv, openstackVersion)
+        }
+
+        stage('Connect to OpenStack cloud') {
+            openstackCloud = openstack.createOpenstackEnv(OPENSTACK_API_URL, OPENSTACK_API_CREDENTIALS, OPENSTACK_API_PROJECT)
+            openstack.getKeystoneToken(openstackCloud, openstackEnv)
+        }
+
+        stage('Launch new Heat stack') {
+            envParams = [
+                'availability_zone': HEAT_STACK_ZONE,
+                'public_net': HEAT_STACK_PUBLIC_NET
+            ]
+            openstack.createHeatStack(openstackCloud, HEAT_STACK_NAME, HEAT_STACK_TEMPLATE, envParams, HEAT_STACK_ENVIRONMENT, openstackEnv)
+        }
+
+        stage("Connect to Salt master") {
+            saltMasterHost = openstack.getHeatStackOutputParam(openstackCloud, HEAT_STACK_NAME, 'salt_master_ip', openstackEnv)
+            saltMasterUrl = "http://${saltMasterHost}:8000"
+            saltMaster = salt.createSaltConnection(saltMasterUrl, SALT_MASTER_CREDENTIALS)
+        }
+
+        stage("Install core infra") {
+            salt.installFoundationInfra(saltMaster)
+            salt.validateFoundationInfra(saltMaster)
+        }
+
+        //stage('Delete Heat stack') {
+        //    openstack.deleteHeatStack(openstackCloud, HEAT_STACK_NAME, openstackEnv)
+        //}
     }
-
-    stage('Install OpenStack env') {
-        openstack.setupOpenstackVirtualenv(openstackEnv, openstackVersion)
-    }
-
-    stage('Connect to OpenStack cloud') {
-        openstackCloud = openstack.createOpenstackEnv(OPENSTACK_API_URL, OPENSTACK_API_CREDENTIALS, OPENSTACK_API_PROJECT)
-        openstack.getKeystoneToken(openstackCloud, openstackEnv)
-    }
-
-    stage('Launch new Heat stack') {
-        envParams = [
-            'availability_zone': HEAT_STACK_ZONE,
-            'public_net': HEAT_STACK_PUBLIC_NET
-        ]
-        openstack.createHeatStack(openstackCloud, HEAT_STACK_NAME, HEAT_STACK_TEMPLATE, envParams, HEAT_STACK_ENVIRONMENT, openstackEnv)
-    }
-
-    stage("Connect to Salt master") {
-        saltMasterHost = openstack.getHeatStackOutputParam(openstackCloud, HEAT_STACK_NAME, 'salt_master_ip', openstackEnv)
-        saltMasterUrl = "http://${saltMasterHost}:8000"
-        saltMaster = salt.createSaltConnection(saltMasterUrl, SALT_MASTER_CREDENTIALS)
-    }
-
-    stage("Install core infra") {
-        salt.installFoundationInfra(saltMaster)
-        salt.validateFoundationInfra(saltMaster)
-    }
-
-    //stage('Delete Heat stack') {
-    //    openstack.deleteHeatStack(openstackCloud, HEAT_STACK_NAME, openstackEnv)
-    //}
-
 }
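
One pre-existing oddity in this file: the context lines near the top instantiate com.mirantis.mk.openstack() and com.mirantis.mk.salt() with lowercase class names, while every other pipeline touched by this change uses the capitalized classes. If the lowercase forms do not resolve in the shared library (not verified here), the instantiation used elsewhere in this change looks like:

    // Capitalized class names, matching the other pipelines in this change.
    openstack = new com.mirantis.mk.Openstack()
    salt = new com.mirantis.mk.Salt()
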
diff --git a/opencontrail-upgrade.groovy b/opencontrail-upgrade.groovy
index 83f17ee..af96600 100644
--- a/opencontrail-upgrade.groovy
+++ b/opencontrail-upgrade.groovy
@@ -54,448 +54,449 @@
     //salt.printSaltCommandResult(out)
     //input message: "Please check the output of \'${check}\' and continue if it is correct."
 }
+timeout(time: 12, unit: 'HOURS') {
+    node() {
 
-node() {
-
-    stage('Setup virtualenv for Pepper') {
-        python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
-    }
-
-    if (STAGE_CONTROLLERS_UPGRADE.toBoolean() == true && !errorOccured) {
-
-        stage('Opencontrail controllers upgrade') {
-
-            oc_component_repo = salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control and *01*', 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
-
-            oc_component_repo = oc_component_repo['return'][0].values()[0]
-
-            try {
-                salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'cmd.shell', ["rm ${oc_component_repo}"], null, true)
-                salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'saltutil.refresh_pillar', [], null, true)
-                salt.enforceState(pepperEnv, 'I@opencontrail:control', 'linux.system.repo')
-            } catch (Exception er) {
-                errorOccured = true
-                common.errorMsg("Opencontrail component on I@opencontrail:control probably failed to be replaced. Please check it in ${oc_component_repo} before continuing.")
-                return
-            }
-
-            salt.enforceState(pepperEnv, 'I@zookeeper:backup:server', 'zookeeper.backup')
-            salt.enforceState(pepperEnv, 'I@zookeeper:backup:client', 'zookeeper.backup')
-
-            try {
-                salt.cmdRun(pepperEnv, 'I@opencontrail:control', "su root -c '/usr/local/bin/zookeeper-backup-runner.sh'")
-            } catch (Exception er) {
-                throw new Exception('Zookeeper failed to backup. Please fix it before continuing.')
-            }
-
-            salt.enforceState(pepperEnv, 'I@cassandra:backup:server', 'cassandra.backup')
-            salt.enforceState(pepperEnv, 'I@cassandra:backup:client', 'cassandra.backup')
-
-            try {
-                salt.cmdRun(pepperEnv, 'I@cassandra:backup:client', "su root -c '/usr/local/bin/cassandra-backup-runner-call.sh'")
-            } catch (Exception er) {
-                throw new Exception('Cassandra failed to backup. Please fix it before continuing.')
-            }
-
-            args = 'apt install contrail-database -y;'
-            check = 'nodetool status'
-
-            // ntw01
-            runCommonCommands('I@opencontrail:control and *01*', command, args, check, salt, pepperEnv, common)
-            // ntw02
-            runCommonCommands('I@opencontrail:control and *02*', command, args, check, salt, pepperEnv, common)
-            // ntw03
-            runCommonCommands('I@opencontrail:control and *03*', command, args, check, salt, pepperEnv, common)
-
-            args = "apt install -o Dpkg::Options::=\"--force-confold\" ${CONTROL_PKGS} -y --force-yes;"
-            check = 'contrail-status'
-
-            // ntw01
-            runCommonCommands('I@opencontrail:control and *01*', command, args, check, salt, pepperEnv, common)
-            // ntw02
-            runCommonCommands('I@opencontrail:control and *02*', command, args, check, salt, pepperEnv, common)
-            // ntw03
-            runCommonCommands('I@opencontrail:control and *03*', command, args, check, salt, pepperEnv, common)
-
-            try {
-                salt.enforceState(pepperEnv, 'I@opencontrail:control', 'opencontrail')
-            } catch (Exception er) {
-                common.errorMsg('Opencontrail state was executed on I@opencontrail:control and failed please fix it manually.')
-            }
-
-            out = salt.runSaltCommand(pepperEnv, 'local', ['expression': 'I@opencontrail:control', 'type': 'compound'], command, null, check, null)
-            salt.printSaltCommandResult(out)
-
-            common.warningMsg('Please check \'show bgp summary\' on your bgp router if all bgp peers are in healthy state.')
-        }
-    }
-
-    if (STAGE_ANALYTICS_UPGRADE.toBoolean() == true && !errorOccured) {
-
-        stage('Ask for manual confirmation') {
-            input message: "Do you want to continue with the Opencontrail analytic nodes upgrade?"
+        stage('Setup virtualenv for Pepper') {
+            python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
         }
 
-        stage('Opencontrail analytics upgrade') {
+        if (STAGE_CONTROLLERS_UPGRADE.toBoolean() == true && !errorOccured) {
 
-            oc_component_repo = salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector and *01*', 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
+            stage('Opencontrail controllers upgrade') {
 
-            oc_component_repo = oc_component_repo['return'][0].values()[0]
+                oc_component_repo = salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control and *01*', 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
 
-            try {
-                salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'cmd.shell', ["rm ${oc_component_repo}"], null, true)
-                salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'saltutil.refresh_pillar', [], null, true)
-                salt.enforceState(pepperEnv, 'I@opencontrail:collector', 'linux.system.repo')
-            } catch (Exception er) {
-                errorOccured = true
-                common.errorMsg("Opencontrail component on I@opencontrail:collector probably failed to be replaced. Please check it in ${oc_component_repo} before continuing.")
-                return
-            }
-
-            args = 'apt install contrail-database -y;'
-            check = 'nodetool status'
-
-            // nal01
-            runCommonCommands('I@opencontrail:collector and *01*', command, args, check, salt, pepperEnv, common)
-            // nal02
-            runCommonCommands('I@opencontrail:collector and *02*', command, args, check, salt, pepperEnv, common)
-            // nal03
-            runCommonCommands('I@opencontrail:collector and *03*', command, args, check, salt, pepperEnv, common)
-
-            args = "apt install -o Dpkg::Options::=\"--force-confold\" ${ANALYTIC_PKGS} -y --force-yes;"
-            check = 'contrail-status'
-
-            // nal01
-            runCommonCommands('I@opencontrail:collector and *01*', command, args, check, salt, pepperEnv, common)
-            // nal02
-            runCommonCommands('I@opencontrail:collector and *02*', command, args, check, salt, pepperEnv, common)
-            // nal03
-            runCommonCommands('I@opencontrail:collector and *03*', command, args, check, salt, pepperEnv, common)
-
-            try {
-                salt.enforceState(pepperEnv, 'I@opencontrail:collector', 'opencontrail')
-            } catch (Exception er) {
-                common.errorMsg('Opencontrail state was executed on I@opencontrail:collector and failed please fix it manually.')
-            }
-
-            out = salt.runSaltCommand(pepperEnv, 'local', ['expression': 'I@opencontrail:collector', 'type': 'compound'], command, null, check, null)
-            salt.printSaltCommandResult(out)
-        }
-    }
-
-    if (STAGE_COMPUTES_UPGRADE.toBoolean() == true && !errorOccured) {
-
-        try {
-
-            stage('List targeted compute servers') {
-                minions = salt.getMinions(pepperEnv, COMPUTE_TARGET_SERVERS)
-
-                if (minions.isEmpty()) {
-                    throw new Exception("No minion was targeted")
-                }
-
-                targetLiveSubset = minions.subList(0, Integer.valueOf(COMPUTE_TARGET_SUBSET_LIVE)).join(' or ')
-                targetLiveSubsetProbe = minions.subList(0, probe).join(' or ')
-
-                targetLiveAll = minions.join(' or ')
-                common.infoMsg("Found nodes: ${targetLiveAll}")
-                common.infoMsg("Selected sample nodes: ${targetLiveSubset}")
-            }
-
-            stage('Confirm upgrade on sample nodes') {
-                input message: "Do you want to continue with the Opencontrail compute upgrade on the following sample nodes? ${targetLiveSubset}"
-            }
-
-            stage("Opencontrail compute upgrade on sample nodes") {
-
-                oc_component_repo = salt.runSaltProcessStep(pepperEnv, targetLiveSubset, 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
                 oc_component_repo = oc_component_repo['return'][0].values()[0]
 
                 try {
-                    salt.runSaltProcessStep(pepperEnv, targetLiveSubset, 'cmd.shell', ["rm ${oc_component_repo}"], null, true)
-                    salt.runSaltProcessStep(pepperEnv, targetLiveSubset, 'saltutil.refresh_pillar', [], null, true)
-                    salt.enforceState(pepperEnv, targetLiveSubset, 'linux.system.repo')
+                    salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'cmd.shell', ["rm ${oc_component_repo}"], null, true)
+                    salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'saltutil.refresh_pillar', [], null, true)
+                    salt.enforceState(pepperEnv, 'I@opencontrail:control', 'linux.system.repo')
                 } catch (Exception er) {
                     errorOccured = true
-                    common.errorMsg("Opencontrail component on ${targetLiveSubset} probably failed to be replaced. Please check it in ${oc_component_repo} before continuing.")
+                    common.errorMsg("Opencontrail component on I@opencontrail:control probably failed to be replaced. Please check it in ${oc_component_repo} before continuing.")
                     return
                 }
 
-                args = "export DEBIAN_FRONTEND=noninteractive; apt install -o Dpkg::Options::=\"--force-confold\" -o Dpkg::Options::=\"--force-confdef\" ${CMP_PKGS}  -y;"
-                check = 'contrail-status'
-
-                out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, args, null)
-                salt.printSaltCommandResult(out)
+                salt.enforceState(pepperEnv, 'I@zookeeper:backup:server', 'zookeeper.backup')
+                salt.enforceState(pepperEnv, 'I@zookeeper:backup:client', 'zookeeper.backup')
 
                 try {
-                    salt.enforceState(pepperEnv, targetLiveSubset, 'opencontrail')
+                    salt.cmdRun(pepperEnv, 'I@opencontrail:control', "su root -c '/usr/local/bin/zookeeper-backup-runner.sh'")
                 } catch (Exception er) {
-                    common.errorMsg("Opencontrail state was executed on ${targetLiveSubset} and failed please fix it manually.")
+                    throw new Exception('Zookeeper failed to backup. Please fix it before continuing.')
                 }
 
-                salt.runSaltProcessStep(pepperEnv, targetLiveSubset, 'cmd.shell', ["${KERNEL_MODULE_RELOAD}"], null, true)
+                salt.enforceState(pepperEnv, 'I@cassandra:backup:server', 'cassandra.backup')
+                salt.enforceState(pepperEnv, 'I@cassandra:backup:client', 'cassandra.backup')
 
-                //sleep(10)
-                salt.commandStatus(pepperEnv, targetLiveSubset, "${check} | grep -v == | grep -v active | grep -v -F /var/crashes/", null, false)
+                try {
+                    salt.cmdRun(pepperEnv, 'I@cassandra:backup:client', "su root -c '/usr/local/bin/cassandra-backup-runner-call.sh'")
+                } catch (Exception er) {
+                    throw new Exception('Cassandra failed to backup. Please fix it before continuing.')
+                }
 
-                out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, check, null)
+                args = 'apt install contrail-database -y;'
+                check = 'nodetool status'
+
+                // ntw01
+                runCommonCommands('I@opencontrail:control and *01*', command, args, check, salt, pepperEnv, common)
+                // ntw02
+                runCommonCommands('I@opencontrail:control and *02*', command, args, check, salt, pepperEnv, common)
+                // ntw03
+                runCommonCommands('I@opencontrail:control and *03*', command, args, check, salt, pepperEnv, common)
+
+                args = "apt install -o Dpkg::Options::=\"--force-confold\" ${CONTROL_PKGS} -y --force-yes;"
+                check = 'contrail-status'
+
+                // ntw01
+                runCommonCommands('I@opencontrail:control and *01*', command, args, check, salt, pepperEnv, common)
+                // ntw02
+                runCommonCommands('I@opencontrail:control and *02*', command, args, check, salt, pepperEnv, common)
+                // ntw03
+                runCommonCommands('I@opencontrail:control and *03*', command, args, check, salt, pepperEnv, common)
+
+                try {
+                    salt.enforceState(pepperEnv, 'I@opencontrail:control', 'opencontrail')
+                } catch (Exception er) {
+                    common.errorMsg('Opencontrail state was executed on I@opencontrail:control and failed please fix it manually.')
+                }
+
+                out = salt.runSaltCommand(pepperEnv, 'local', ['expression': 'I@opencontrail:control', 'type': 'compound'], command, null, check, null)
                 salt.printSaltCommandResult(out)
+
+                common.warningMsg('Please check \'show bgp summary\' on your bgp router if all bgp peers are in healthy state.')
+            }
+        }
+
+        if (STAGE_ANALYTICS_UPGRADE.toBoolean() == true && !errorOccured) {
+
+            stage('Ask for manual confirmation') {
+                input message: "Do you want to continue with the Opencontrail analytic nodes upgrade?"
             }
 
-            stage('Confirm upgrade on all targeted nodes') {
-                input message: "Do you want to continue with the Opencontrail compute upgrade on all the targeted nodes? ${targetLiveAll} nodes?"
-            }
-            stage("Opencontrail compute upgrade on all targeted nodes") {
+            stage('Opencontrail analytics upgrade') {
 
-                oc_component_repo = salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
+                oc_component_repo = salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector and *01*', 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
+
                 oc_component_repo = oc_component_repo['return'][0].values()[0]
 
                 try {
-                    salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'cmd.shell', ["rm ${oc_component_repo}"], null, true)
-                    salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'saltutil.refresh_pillar', [], null, true)
-                    salt.enforceState(pepperEnv, targetLiveAll, 'linux.system.repo')
-                } catch (Exception er) {
-                    common.errorMsg("Opencontrail component on ${targetLiveAll} probably failed to be replaced. Please check it in ${oc_component_repo} before continuing.")
-                    return
-                }
-
-                args = "export DEBIAN_FRONTEND=noninteractive; apt install -o Dpkg::Options::=\"--force-confold\" -o Dpkg::Options::=\"--force-confdef\" ${CMP_PKGS}  -y;"
-                check = 'contrail-status'
-
-                out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, args, null)
-                salt.printSaltCommandResult(out)
-
-                try {
-                    salt.enforceState(pepperEnv, targetLiveAll, 'opencontrail')
-                } catch (Exception er) {
-                    common.errorMsg("Opencontrail state was executed on ${targetLiveAll} and failed please fix it manually.")
-                }
-
-                salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'cmd.shell', ["${KERNEL_MODULE_RELOAD}"], null, true)
-                //sleep(10)
-                salt.commandStatus(pepperEnv, targetLiveAll, "${check} | grep -v == | grep -v active | grep -v -F /var/crashes/", null, false)
-
-                out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, check, null)
-                salt.printSaltCommandResult(out)
-            }
-
-        } catch (Throwable e) {
-            // If there was an error or exception thrown, the build failed
-            currentBuild.result = "FAILURE"
-            currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
-            throw e
-        }
-    }
-
-
-    if (STAGE_CONTROLLERS_ROLLBACK.toBoolean() == true && !errorOccured) {
-
-        stage('Ask for manual confirmation') {
-            input message: "Do you want to continue with the Opencontrail control nodes rollback?"
-        }
-
-       stage('Opencontrail controllers rollback') {
-
-            oc_component_repo = salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control and *01*', 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
-            oc_component_repo = oc_component_repo['return'][0].values()[0]
-
-            try {
-                salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'cmd.shell', ["rm ${oc_component_repo}"], null, true)
-                salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'saltutil.refresh_pillar', [], null, true)
-                salt.enforceState(pepperEnv, 'I@opencontrail:control', 'linux.system.repo')
-            } catch (Exception er) {
-                errorOccured = true
-                common.errorMsg("Opencontrail component on I@opencontrail:control probably failed to be replaced. Please check it in ${oc_component_repo} before continuing.")
-                return
-            }
-
-            args = 'apt install contrail-database -y --force-yes;'
-            check = 'nodetool status'
-
-            // ntw01
-            runCommonCommands('I@opencontrail:control and *01*', command, args, check, salt, pepperEnv, common)
-            // ntw02
-            runCommonCommands('I@opencontrail:control and *02*', command, args, check, salt, pepperEnv, common)
-            // ntw03
-            runCommonCommands('I@opencontrail:control and *03*', command, args, check, salt, pepperEnv, common)
-
-            args = "apt install -o Dpkg::Options::=\"--force-confold\" ${CONTROL_PKGS} -y --force-yes;"
-            check = 'contrail-status'
-
-            // ntw01
-            runCommonCommands('I@opencontrail:control and *01*', command, args, check, salt, pepperEnv, common)
-            // ntw02
-            runCommonCommands('I@opencontrail:control and *02*', command, args, check, salt, pepperEnv, common)
-            // ntw03
-            runCommonCommands('I@opencontrail:control and *03*', command, args, check, salt, pepperEnv, common)
-
-            try {
-                salt.enforceState(pepperEnv, 'I@opencontrail:control', 'opencontrail')
-            } catch (Exception er) {
-                common.errorMsg('Opencontrail state was executed on I@opencontrail:control and failed please fix it manually.')
-            }
-
-            out = salt.runSaltCommand(pepperEnv, 'local', ['expression': 'I@opencontrail:control', 'type': 'compound'], command, null, check, null)
-            salt.printSaltCommandResult(out)
-
-            common.warningMsg('Please check \'show bgp summary\' on your bgp router if all bgp peers are in healthy state.')
-        }
-    }
-
-    if (STAGE_ANALYTICS_ROLLBACK.toBoolean() == true && !errorOccured) {
-
-        stage('Ask for manual confirmation') {
-            input message: "Do you want to continue with the Opencontrail analytic nodes rollback?"
-        }
-
-        stage('Opencontrail analytics rollback') {
-
-            oc_component_repo = salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector and *01*', 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
-            oc_component_repo = oc_component_repo['return'][0].values()[0]
-
-            try {
-                salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'cmd.shell', ["rm ${oc_component_repo}"], null, true)
-                salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'saltutil.refresh_pillar', [], null, true)
-                salt.enforceState(pepperEnv, 'I@opencontrail:collector', 'linux.system.repo')
-            } catch (Exception er) {
-                errorOccured = true
-                common.errorMsg("Opencontrail component on I@opencontrail:collector probably failed to be replaced. Please check it in ${oc_component_repo} before continuing.")
-                return
-            }
-
-            args = 'apt install contrail-database -y --force-yes;'
-            check = 'nodetool status'
-
-            // nal01
-            runCommonCommands('I@opencontrail:collector and *01*', command, args, check, salt, pepperEnv, common)
-            // nal02
-            runCommonCommands('I@opencontrail:collector and *02*', command, args, check, salt, pepperEnv, common)
-            // nal03
-            runCommonCommands('I@opencontrail:collector and *03*', command, args, check, salt, pepperEnv, common)
-
-            args = "apt install -o Dpkg::Options::=\"--force-confold\" ${ANALYTIC_PKGS} -y --force-yes;"
-            check = 'contrail-status'
-
-            // nal01
-            runCommonCommands('I@opencontrail:collector and *01*', command, args, check, salt, pepperEnv, common)
-            // nal02
-            runCommonCommands('I@opencontrail:collector and *02*', command, args, check, salt, pepperEnv, common)
-            // nal03
-            runCommonCommands('I@opencontrail:collector and *03*', command, args, check, salt, pepperEnv, common)
-
-            try {
-                salt.enforceState(pepperEnv, 'I@opencontrail:collector', 'opencontrail')
-            } catch (Exception er) {
-                common.errorMsg('Opencontrail state was executed on I@opencontrail:collector and failed please fix it manually.')
-            }
-
-            out = salt.runSaltCommand(pepperEnv, 'local', ['expression': 'I@opencontrail:collector', 'type': 'compound'], command, null, check, null)
-            salt.printSaltCommandResult(out)
-        }
-    }
-
-    if (STAGE_COMPUTES_ROLLBACK.toBoolean() == true && !errorOccured) {
-
-        try {
-
-            stage('List targeted compute servers') {
-                minions = salt.getMinions(pepperEnv, COMPUTE_TARGET_SERVERS)
-
-                if (minions.isEmpty()) {
-                    throw new Exception("No minion was targeted")
-                }
-
-                targetLiveSubset = minions.subList(0, Integer.valueOf(COMPUTE_TARGET_SUBSET_LIVE)).join(' or ')
-                targetLiveSubsetProbe = minions.subList(0, probe).join(' or ')
-
-                targetLiveAll = minions.join(' or ')
-                common.infoMsg("Found nodes: ${targetLiveAll}")
-                common.infoMsg("Selected sample nodes: ${targetLiveSubset}")
-            }
-
-            stage('Confirm rollback on sample nodes') {
-                input message: "Do you want to continue with the Opencontrail compute rollback on the following sample nodes? ${targetLiveSubset}"
-            }
-
-            stage("Opencontrail compute rollback on sample nodes") {
-
-                oc_component_repo = salt.runSaltProcessStep(pepperEnv, targetLiveSubset, 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
-                oc_component_repo = oc_component_repo['return'][0].values()[0]
-
-                try {
-                    salt.runSaltProcessStep(pepperEnv, targetLiveSubset, 'cmd.shell', ["rm ${oc_component_repo}"], null, true)
-                    salt.runSaltProcessStep(pepperEnv, targetLiveSubset, 'saltutil.refresh_pillar', [], null, true)
-                    salt.enforceState(pepperEnv, targetLiveSubset, 'linux.system.repo')
+                    salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'cmd.shell', ["rm ${oc_component_repo}"], null, true)
+                    salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'saltutil.refresh_pillar', [], null, true)
+                    salt.enforceState(pepperEnv, 'I@opencontrail:collector', 'linux.system.repo')
                 } catch (Exception er) {
                     errorOccured = true
-                    common.errorMsg("Opencontrail component on ${targetLiveSubset} probably failed to be replaced. Please check it in ${oc_component_repo} before continuing.")
+                    common.errorMsg("Opencontrail component on I@opencontrail:collector probably failed to be replaced. Please check it in ${oc_component_repo} before continuing.")
                     return
                 }
 
-                args = "export DEBIAN_FRONTEND=noninteractive; apt install --allow-downgrades -o Dpkg::Options::=\"--force-confold\" -o Dpkg::Options::=\"--force-confdef\" ${CMP_PKGS}  -y;"
+                args = 'apt install contrail-database -y;'
+                check = 'nodetool status'
+
+                // nal01
+                runCommonCommands('I@opencontrail:collector and *01*', command, args, check, salt, pepperEnv, common)
+                // nal02
+                runCommonCommands('I@opencontrail:collector and *02*', command, args, check, salt, pepperEnv, common)
+                // nal03
+                runCommonCommands('I@opencontrail:collector and *03*', command, args, check, salt, pepperEnv, common)
+
+                args = "apt install -o Dpkg::Options::=\"--force-confold\" ${ANALYTIC_PKGS} -y --force-yes;"
                 check = 'contrail-status'
 
-                out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, args, null)
-                salt.printSaltCommandResult(out)
+                // nal01
+                runCommonCommands('I@opencontrail:collector and *01*', command, args, check, salt, pepperEnv, common)
+                // nal02
+                runCommonCommands('I@opencontrail:collector and *02*', command, args, check, salt, pepperEnv, common)
+                // nal03
+                runCommonCommands('I@opencontrail:collector and *03*', command, args, check, salt, pepperEnv, common)
 
                 try {
-                    salt.enforceState(pepperEnv, targetLiveSubset, 'opencontrail')
+                    salt.enforceState(pepperEnv, 'I@opencontrail:collector', 'opencontrail')
                 } catch (Exception er) {
-                    common.errorMsg("Opencontrail state was executed on ${targetLiveSubset} and failed please fix it manually.")
+                    common.errorMsg('Opencontrail state was executed on I@opencontrail:collector and failed please fix it manually.')
                 }
 
-                salt.runSaltProcessStep(pepperEnv, targetLiveSubset, 'cmd.shell', ["${KERNEL_MODULE_RELOAD}"], null, true)
-                //sleep(10)
-                salt.commandStatus(pepperEnv, targetLiveSubset, "${check} | grep -v == | grep -v active | grep -v -F /var/crashes/", null, false)
-
-                out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, check, null)
+                out = salt.runSaltCommand(pepperEnv, 'local', ['expression': 'I@opencontrail:collector', 'type': 'compound'], command, null, check, null)
                 salt.printSaltCommandResult(out)
             }
+        }
 
-            stage('Confirm rollback on all targeted nodes') {
-                input message: "Do you want to continue with the Opencontrail compute upgrade on all the targeted nodes? ${targetLiveAll} nodes?"
+        if (STAGE_COMPUTES_UPGRADE.toBoolean() == true && !errorOccured) {
+
+            try {
+
+                stage('List targeted compute servers') {
+                    minions = salt.getMinions(pepperEnv, COMPUTE_TARGET_SERVERS)
+
+                    if (minions.isEmpty()) {
+                        throw new Exception("No minion was targeted")
+                    }
+
+                    targetLiveSubset = minions.subList(0, Integer.valueOf(COMPUTE_TARGET_SUBSET_LIVE)).join(' or ')
+                    targetLiveSubsetProbe = minions.subList(0, probe).join(' or ')
+
+                    targetLiveAll = minions.join(' or ')
+                    common.infoMsg("Found nodes: ${targetLiveAll}")
+                    common.infoMsg("Selected sample nodes: ${targetLiveSubset}")
+                }
+
+                stage('Confirm upgrade on sample nodes') {
+                    input message: "Do you want to continue with the Opencontrail compute upgrade on the following sample nodes? ${targetLiveSubset}"
+                }
+
+                stage("Opencontrail compute upgrade on sample nodes") {
+
+                    oc_component_repo = salt.runSaltProcessStep(pepperEnv, targetLiveSubset, 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
+                    oc_component_repo = oc_component_repo['return'][0].values()[0]
+
+                    try {
+                        salt.runSaltProcessStep(pepperEnv, targetLiveSubset, 'cmd.shell', ["rm ${oc_component_repo}"], null, true)
+                        salt.runSaltProcessStep(pepperEnv, targetLiveSubset, 'saltutil.refresh_pillar', [], null, true)
+                        salt.enforceState(pepperEnv, targetLiveSubset, 'linux.system.repo')
+                    } catch (Exception er) {
+                        errorOccured = true
+                        common.errorMsg("Opencontrail component on ${targetLiveSubset} probably failed to be replaced. Please check it in ${oc_component_repo} before continuing.")
+                        return
+                    }
+
+                    args = "export DEBIAN_FRONTEND=noninteractive; apt install -o Dpkg::Options::=\"--force-confold\" -o Dpkg::Options::=\"--force-confdef\" ${CMP_PKGS}  -y;"
+                    check = 'contrail-status'
+
+                    out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, args, null)
+                    salt.printSaltCommandResult(out)
+
+                    try {
+                        salt.enforceState(pepperEnv, targetLiveSubset, 'opencontrail')
+                    } catch (Exception er) {
+                        common.errorMsg("Opencontrail state was executed on ${targetLiveSubset} and failed. Please fix it manually.")
+                    }
+
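+                    // Run the configured kernel module reload command (typically a vrouter reload) and wait until contrail-status reports the services as active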
+                    salt.runSaltProcessStep(pepperEnv, targetLiveSubset, 'cmd.shell', ["${KERNEL_MODULE_RELOAD}"], null, true)
+
+                    //sleep(10)
+                    salt.commandStatus(pepperEnv, targetLiveSubset, "${check} | grep -v == | grep -v active | grep -v -F /var/crashes/", null, false)
+
+                    out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, check, null)
+                    salt.printSaltCommandResult(out)
+                }
+
+                stage('Confirm upgrade on all targeted nodes') {
+                    input message: "Do you want to continue with the Opencontrail compute upgrade on all targeted nodes (${targetLiveAll})?"
+                }
+                stage("Opencontrail compute upgrade on all targeted nodes") {
+
+                    oc_component_repo = salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
+                    oc_component_repo = oc_component_repo['return'][0].values()[0]
+
+                    try {
+                        salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'cmd.shell', ["rm ${oc_component_repo}"], null, true)
+                        salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'saltutil.refresh_pillar', [], null, true)
+                        salt.enforceState(pepperEnv, targetLiveAll, 'linux.system.repo')
+                    } catch (Exception er) {
+                        common.errorMsg("Opencontrail component on ${targetLiveAll} probably failed to be replaced. Please check it in ${oc_component_repo} before continuing.")
+                        return
+                    }
+
+                    args = "export DEBIAN_FRONTEND=noninteractive; apt install -o Dpkg::Options::=\"--force-confold\" -o Dpkg::Options::=\"--force-confdef\" ${CMP_PKGS}  -y;"
+                    check = 'contrail-status'
+
+                    out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, args, null)
+                    salt.printSaltCommandResult(out)
+
+                    try {
+                        salt.enforceState(pepperEnv, targetLiveAll, 'opencontrail')
+                    } catch (Exception er) {
+                        common.errorMsg("Opencontrail state was executed on ${targetLiveAll} and failed. Please fix it manually.")
+                    }
+
+                    salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'cmd.shell', ["${KERNEL_MODULE_RELOAD}"], null, true)
+                    //sleep(10)
+                    salt.commandStatus(pepperEnv, targetLiveAll, "${check} | grep -v == | grep -v active | grep -v -F /var/crashes/", null, false)
+
+                    out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, check, null)
+                    salt.printSaltCommandResult(out)
+                }
+
+            } catch (Throwable e) {
+                // If there was an error or exception thrown, the build failed
+                currentBuild.result = "FAILURE"
+                currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+                throw e
+            }
+        }
+
+
+        if (STAGE_CONTROLLERS_ROLLBACK.toBoolean() == true && !errorOccured) {
+
+            stage('Ask for manual confirmation') {
+                input message: "Do you want to continue with the Opencontrail control nodes rollback?"
             }
 
-            stage("Opencontrail compute upgrade on all targeted nodes") {
+            stage('Opencontrail controllers rollback') {
 
-                oc_component_repo = salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
+                oc_component_repo = salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control and *01*', 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
                 oc_component_repo = oc_component_repo['return'][0].values()[0]
 
                 try {
-                    salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'cmd.shell', ["rm ${oc_component_repo}"], null, true)
-                    salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'saltutil.refresh_pillar', [], null, true)
-                    salt.enforceState(pepperEnv, targetLiveAll, 'linux.system.repo')
+                    salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'cmd.shell', ["rm ${oc_component_repo}"], null, true)
+                    salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'saltutil.refresh_pillar', [], null, true)
+                    salt.enforceState(pepperEnv, 'I@opencontrail:control', 'linux.system.repo')
                 } catch (Exception er) {
-                    common.errorMsg("Opencontrail component on ${targetLiveAll} probably failed to be replaced. Please check it in ${oc_component_repo} before continuing.")
+                    errorOccured = true
+                    common.errorMsg("Opencontrail component on I@opencontrail:control probably failed to be replaced. Please check it in ${oc_component_repo} before continuing.")
                     return
                 }
 
-                args = "export DEBIAN_FRONTEND=noninteractive; apt install --allow-downgrades -o Dpkg::Options::=\"--force-confold\" -o Dpkg::Options::=\"--force-confdef\" ${CMP_PKGS} -y;"
+                args = 'apt install contrail-database -y --force-yes;'
+                check = 'nodetool status'
+
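+                // Reinstall the contrail-database (Cassandra) package from the rolled-back repository on each control node in turn, checking cluster health with 'nodetool status'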
+                // ntw01
+                runCommonCommands('I@opencontrail:control and *01*', command, args, check, salt, pepperEnv, common)
+                // ntw02
+                runCommonCommands('I@opencontrail:control and *02*', command, args, check, salt, pepperEnv, common)
+                // ntw03
+                runCommonCommands('I@opencontrail:control and *03*', command, args, check, salt, pepperEnv, common)
+
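+                // Reinstall the OpenContrail control packages node by node, verifying contrail-status on each before moving on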
+                args = "apt install -o Dpkg::Options::=\"--force-confold\" ${CONTROL_PKGS} -y --force-yes;"
                 check = 'contrail-status'
 
-                out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, args, null)
-                salt.printSaltCommandResult(out)
+                // ntw01
+                runCommonCommands('I@opencontrail:control and *01*', command, args, check, salt, pepperEnv, common)
+                // ntw02
+                runCommonCommands('I@opencontrail:control and *02*', command, args, check, salt, pepperEnv, common)
+                // ntw03
+                runCommonCommands('I@opencontrail:control and *03*', command, args, check, salt, pepperEnv, common)
 
                 try {
-                    salt.enforceState(pepperEnv, targetLiveAll, 'opencontrail')
+                    salt.enforceState(pepperEnv, 'I@opencontrail:control', 'opencontrail')
                 } catch (Exception er) {
-                    common.errorMsg("Opencontrail state was executed on ${targetLiveAll} and failed please fix it manually.")
+                    common.errorMsg('Opencontrail state was executed on I@opencontrail:control and failed. Please fix it manually.')
                 }
 
-                salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'cmd.shell', ["${KERNEL_MODULE_RELOAD}"], null, true)
-
-                //sleep(10)
-                salt.commandStatus(pepperEnv, targetLiveAll, "${check} | grep -v == | grep -v active | grep -v -F /var/crashes/", null, false)
-
-                out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, check, null)
+                out = salt.runSaltCommand(pepperEnv, 'local', ['expression': 'I@opencontrail:control', 'type': 'compound'], command, null, check, null)
                 salt.printSaltCommandResult(out)
+
+                common.warningMsg('Please check \'show bgp summary\' on your BGP router and make sure all BGP peers are in a healthy state.')
+            }
+        }
+
+        if (STAGE_ANALYTICS_ROLLBACK.toBoolean() == true && !errorOccured) {
+
+            stage('Ask for manual confirmation') {
+                input message: "Do you want to continue with the Opencontrail analytics nodes rollback?"
             }
 
-        } catch (Throwable e) {
-            // If there was an error or exception thrown, the build failed
-            currentBuild.result = "FAILURE"
-            currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
-            throw e
+            stage('Opencontrail analytics rollback') {
+
+                oc_component_repo = salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector and *01*', 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
+                oc_component_repo = oc_component_repo['return'][0].values()[0]
+
+                try {
+                    salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'cmd.shell', ["rm ${oc_component_repo}"], null, true)
+                    salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'saltutil.refresh_pillar', [], null, true)
+                    salt.enforceState(pepperEnv, 'I@opencontrail:collector', 'linux.system.repo')
+                } catch (Exception er) {
+                    errorOccured = true
+                    common.errorMsg("Opencontrail component on I@opencontrail:collector probably failed to be replaced. Please check it in ${oc_component_repo} before continuing.")
+                    return
+                }
+
+                args = 'apt install contrail-database -y --force-yes;'
+                check = 'nodetool status'
+
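+                // Reinstall the contrail-database (Cassandra) package on each analytics (collector) node, checking 'nodetool status' after each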
+                // nal01
+                runCommonCommands('I@opencontrail:collector and *01*', command, args, check, salt, pepperEnv, common)
+                // nal02
+                runCommonCommands('I@opencontrail:collector and *02*', command, args, check, salt, pepperEnv, common)
+                // nal03
+                runCommonCommands('I@opencontrail:collector and *03*', command, args, check, salt, pepperEnv, common)
+
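+                // Reinstall the analytics packages node by node and verify contrail-status on each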
+                args = "apt install -o Dpkg::Options::=\"--force-confold\" ${ANALYTIC_PKGS} -y --force-yes;"
+                check = 'contrail-status'
+
+                // nal01
+                runCommonCommands('I@opencontrail:collector and *01*', command, args, check, salt, pepperEnv, common)
+                // nal02
+                runCommonCommands('I@opencontrail:collector and *02*', command, args, check, salt, pepperEnv, common)
+                // nal03
+                runCommonCommands('I@opencontrail:collector and *03*', command, args, check, salt, pepperEnv, common)
+
+                try {
+                    salt.enforceState(pepperEnv, 'I@opencontrail:collector', 'opencontrail')
+                } catch (Exception er) {
+                    common.errorMsg('Opencontrail state was executed on I@opencontrail:collector and failed. Please fix it manually.')
+                }
+
+                out = salt.runSaltCommand(pepperEnv, 'local', ['expression': 'I@opencontrail:collector', 'type': 'compound'], command, null, check, null)
+                salt.printSaltCommandResult(out)
+            }
+        }
+
+        if (STAGE_COMPUTES_ROLLBACK.toBoolean() == true && !errorOccured) {
+
+            try {
+
+                stage('List targeted compute servers') {
+                    minions = salt.getMinions(pepperEnv, COMPUTE_TARGET_SERVERS)
+
+                    if (minions.isEmpty()) {
+                        throw new Exception("No minion was targeted")
+                    }
+
+                    targetLiveSubset = minions.subList(0, Integer.valueOf(COMPUTE_TARGET_SUBSET_LIVE)).join(' or ')
+                    targetLiveSubsetProbe = minions.subList(0, probe).join(' or ')
+
+                    targetLiveAll = minions.join(' or ')
+                    common.infoMsg("Found nodes: ${targetLiveAll}")
+                    common.infoMsg("Selected sample nodes: ${targetLiveSubset}")
+                }
+
+                stage('Confirm rollback on sample nodes') {
+                    input message: "Do you want to continue with the Opencontrail compute rollback on the following sample nodes? ${targetLiveSubset}"
+                }
+
+                stage("Opencontrail compute rollback on sample nodes") {
+
+                    oc_component_repo = salt.runSaltProcessStep(pepperEnv, targetLiveSubset, 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
+                    oc_component_repo = oc_component_repo['return'][0].values()[0]
+
+                    try {
+                        salt.runSaltProcessStep(pepperEnv, targetLiveSubset, 'cmd.shell', ["rm ${oc_component_repo}"], null, true)
+                        salt.runSaltProcessStep(pepperEnv, targetLiveSubset, 'saltutil.refresh_pillar', [], null, true)
+                        salt.enforceState(pepperEnv, targetLiveSubset, 'linux.system.repo')
+                    } catch (Exception er) {
+                        errorOccured = true
+                        common.errorMsg("Opencontrail component on ${targetLiveSubset} probably failed to be replaced. Please check it in ${oc_component_repo} before continuing.")
+                        return
+                    }
+
+                    args = "export DEBIAN_FRONTEND=noninteractive; apt install --allow-downgrades -o Dpkg::Options::=\"--force-confold\" -o Dpkg::Options::=\"--force-confdef\" ${CMP_PKGS}  -y;"
+                    check = 'contrail-status'
+
+                    out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, args, null)
+                    salt.printSaltCommandResult(out)
+
+                    try {
+                        salt.enforceState(pepperEnv, targetLiveSubset, 'opencontrail')
+                    } catch (Exception er) {
+                        common.errorMsg("Opencontrail state was executed on ${targetLiveSubset} and failed. Please fix it manually.")
+                    }
+
+                    salt.runSaltProcessStep(pepperEnv, targetLiveSubset, 'cmd.shell', ["${KERNEL_MODULE_RELOAD}"], null, true)
+                    //sleep(10)
+                    salt.commandStatus(pepperEnv, targetLiveSubset, "${check} | grep -v == | grep -v active | grep -v -F /var/crashes/", null, false)
+
+                    out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, check, null)
+                    salt.printSaltCommandResult(out)
+                }
+
+                stage('Confirm rollback on all targeted nodes') {
+                    input message: "Do you want to continue with the Opencontrail compute rollback on all targeted nodes (${targetLiveAll})?"
+                }
+
+                stage("Opencontrail compute rollback on all targeted nodes") {
+
+                    oc_component_repo = salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
+                    oc_component_repo = oc_component_repo['return'][0].values()[0]
+
+                    try {
+                        salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'cmd.shell', ["rm ${oc_component_repo}"], null, true)
+                        salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'saltutil.refresh_pillar', [], null, true)
+                        salt.enforceState(pepperEnv, targetLiveAll, 'linux.system.repo')
+                    } catch (Exception er) {
+                        common.errorMsg("Opencontrail component on ${targetLiveAll} probably failed to be replaced. Please check it in ${oc_component_repo} before continuing.")
+                        return
+                    }
+
+                    args = "export DEBIAN_FRONTEND=noninteractive; apt install --allow-downgrades -o Dpkg::Options::=\"--force-confold\" -o Dpkg::Options::=\"--force-confdef\" ${CMP_PKGS} -y;"
+                    check = 'contrail-status'
+
+                    out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, args, null)
+                    salt.printSaltCommandResult(out)
+
+                    try {
+                        salt.enforceState(pepperEnv, targetLiveAll, 'opencontrail')
+                    } catch (Exception er) {
+                        common.errorMsg("Opencontrail state was executed on ${targetLiveAll} and failed. Please fix it manually.")
+                    }
+
+                    salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'cmd.shell', ["${KERNEL_MODULE_RELOAD}"], null, true)
+
+                    //sleep(10)
+                    salt.commandStatus(pepperEnv, targetLiveAll, "${check} | grep -v == | grep -v active | grep -v -F /var/crashes/", null, false)
+
+                    out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, check, null)
+                    salt.printSaltCommandResult(out)
+                }
+
+            } catch (Throwable e) {
+                // If there was an error or exception thrown, the build failed
+                currentBuild.result = "FAILURE"
+                currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+                throw e
+            }
         }
     }
-}
+}
\ No newline at end of file
diff --git a/opencontrail40-upgrade.groovy b/opencontrail40-upgrade.groovy
index 8019128..76243e5 100644
--- a/opencontrail40-upgrade.groovy
+++ b/opencontrail40-upgrade.groovy
@@ -51,357 +51,358 @@
     //salt.printSaltCommandResult(out)
     //input message: "Please check the output of \'${check}\' and continue if it is correct."
 }
+timeout(time: 12, unit: 'HOURS') {
+    node() {
 
-node() {
+        stage('Setup virtualenv for Pepper') {
+            python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+        }
 
-    stage('Setup virtualenv for Pepper') {
-        python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
-    }
+        if (STAGE_CONTROLLERS_UPGRADE.toBoolean() == true) {
 
-    if (STAGE_CONTROLLERS_UPGRADE.toBoolean() == true) {
+            stage('Opencontrail controllers upgrade') {
+                try {
+                    salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:database or I@neutron:server', 'saltutil.refresh_pillar', [], null, true)
+                    salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:database or I@neutron:server', 'saltutil.sync_all', [], null, true)
+                    salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:database', 'file.remove', ["/etc/apt/sources.list.d/mcp_opencontrail.list"], null, true)
+                    salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:database', 'file.remove', ["/etc/apt/sources.list.d/cassandra.list"], null, true)
+                    salt.enforceState(pepperEnv, 'I@opencontrail:database or I@neutron:server', 'linux.system.repo')
 
-        stage('Opencontrail controllers upgrade') {
-            try {
-                salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:database or I@neutron:server', 'saltutil.refresh_pillar', [], null, true)
-                salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:database or I@neutron:server', 'saltutil.sync_all', [], null, true)
-                salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:database', 'file.remove', ["/etc/apt/sources.list.d/mcp_opencontrail.list"], null, true)
-                salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:database', 'file.remove', ["/etc/apt/sources.list.d/cassandra.list"], null, true)
-                salt.enforceState(pepperEnv, 'I@opencontrail:database or I@neutron:server', 'linux.system.repo')
-
-            } catch (Exception er) {
-                common.errorMsg("Opencontrail component on I@opencontrail:control or I@opencontrail:collector or I@neutron:server probably failed to be replaced.")
-                throw er
-            }
-
-            try {
-                controllerImage = salt.getPillar(pepperEnv, "I@opencontrail:control and *01*", "docker:client:compose:opencontrail_api:service:controller:image")
-                analyticsImage = salt.getPillar(pepperEnv, "I@opencontrail:collector and *01*", "docker:client:compose:opencontrail_api:service:analytics:image")
-                analyticsdbImage = salt.getPillar(pepperEnv, "I@opencontrail:collector and *01*", "docker:client:compose:opencontrail_api:service:analyticsdb:image")
-                salt.enforceState(pepperEnv, 'I@opencontrail:database', 'docker.host')
-                salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:database', 'state.sls', ['opencontrail', 'exclude=opencontrail.client'])
-                salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'dockerng.pull', [controllerImage])
-                salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'dockerng.pull', [analyticsImage])
-                salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'dockerng.pull', [analyticsdbImage])
-
-            } catch (Exception er) {
-                common.errorMsg("Docker images on I@opencontrail:control or I@opencontrail:collector probably failed to be downloaded.")
-                throw er
-            }
-
-            salt.enforceState(pepperEnv, 'I@zookeeper:backup:server', 'zookeeper.backup')
-            salt.enforceState(pepperEnv, 'I@zookeeper:backup:client', 'zookeeper.backup')
-
-            try {
-                salt.cmdRun(pepperEnv, 'I@opencontrail:control', "su root -c '/usr/local/bin/zookeeper-backup-runner.sh'")
-            } catch (Exception er) {
-                common.errorMsg('Zookeeper failed to backup. Please fix it before continuing.')
-                throw er
-            }
-
-            salt.enforceState(pepperEnv, 'I@cassandra:backup:server', 'cassandra.backup')
-            salt.enforceState(pepperEnv, 'I@cassandra:backup:client', 'cassandra.backup')
-
-            try {
-                salt.cmdRun(pepperEnv, 'I@cassandra:backup:client', "su root -c '/usr/local/bin/cassandra-backup-runner-call.sh'")
-            } catch (Exception er) {
-                common.errorMsg('Cassandra failed to backup. Please fix it before continuing.')
-                throw er
-            }
-            
-            salt.runSaltProcessStep(pepperEnv, 'I@neutron:server', 'service.stop', ['neutron-server'])
-
-            try {
-                for (service in analyticsServices) {
-                    salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'service.stop', [service])
-                }
-                result = salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'file.directory_exists', ['/var/lib/analyticsdb/data'])['return'][0].values()[0]
-                if (result == false) {
-                    salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'file.move', ['/var/lib/cassandra', '/var/lib/analyticsdb'])
-                    salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'file.copy', ['/var/lib/zookeeper', '/var/lib/analyticsdb_zookeeper_data','recurse=True'])
-                }
-                check = 'doctrail all contrail-status'
-                salt.enforceState(pepperEnv, 'I@opencontrail:collector', 'docker.client')
-                runCommonCommands('I@opencontrail:collector and *01*', command, args, check, salt, pepperEnv, common)
-            } catch (Exception er) {
-                common.errorMsg("Opencontrail Analytics failed to be upgraded.")
-                throw er
-            }
-            try {
-                check = 'doctrail all contrail-status'
-
-                for (service in configServices) {
-                    salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.stop', [service])
+                } catch (Exception er) {
+                    common.errorMsg("Opencontrail component on I@opencontrail:control or I@opencontrail:collector or I@neutron:server probably failed to be replaced.")
+                    throw er
                 }
 
-                result = salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'file.directory_exists', ['/var/lib/configdb/data'])['return'][0].values()[0]
-                if (result == false) {
-                    salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'file.copy', ['/var/lib/cassandra', '/var/lib/configdb', 'recurse=True'])
-                    salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'file.copy', ['/var/lib/zookeeper', '/var/lib/config_zookeeper_data', 'recurse=True'])
+                try {
+                    controllerImage = salt.getPillar(pepperEnv, "I@opencontrail:control and *01*", "docker:client:compose:opencontrail_api:service:controller:image")
+                    analyticsImage = salt.getPillar(pepperEnv, "I@opencontrail:collector and *01*", "docker:client:compose:opencontrail_api:service:analytics:image")
+                    analyticsdbImage = salt.getPillar(pepperEnv, "I@opencontrail:collector and *01*", "docker:client:compose:opencontrail_api:service:analyticsdb:image")
+                    salt.enforceState(pepperEnv, 'I@opencontrail:database', 'docker.host')
+                    salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:database', 'state.sls', ['opencontrail', 'exclude=opencontrail.client'])
+                    salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'dockerng.pull', [controllerImage])
+                    salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'dockerng.pull', [analyticsImage])
+                    salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'dockerng.pull', [analyticsdbImage])
+
+                } catch (Exception er) {
+                    common.errorMsg("Docker images on I@opencontrail:control or I@opencontrail:collector probably failed to be downloaded.")
+                    throw er
                 }
 
-                for (service in controlServices) {
-                    salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control and *0[23]*', 'service.stop', [service])
+                salt.enforceState(pepperEnv, 'I@zookeeper:backup:server', 'zookeeper.backup')
+                salt.enforceState(pepperEnv, 'I@zookeeper:backup:client', 'zookeeper.backup')
+
+                try {
+                    salt.cmdRun(pepperEnv, 'I@opencontrail:control', "su root -c '/usr/local/bin/zookeeper-backup-runner.sh'")
+                } catch (Exception er) {
+                    common.errorMsg('Zookeeper failed to back up. Please fix it before continuing.')
+                    throw er
                 }
 
-                salt.enforceState(pepperEnv, 'I@opencontrail:control and *0[23]*', 'docker.client')
+                salt.enforceState(pepperEnv, 'I@cassandra:backup:server', 'cassandra.backup')
+                salt.enforceState(pepperEnv, 'I@cassandra:backup:client', 'cassandra.backup')
 
+                try {
+                    salt.cmdRun(pepperEnv, 'I@cassandra:backup:client', "su root -c '/usr/local/bin/cassandra-backup-runner-call.sh'")
+                } catch (Exception er) {
+                    common.errorMsg('Cassandra failed to back up. Please fix it before continuing.')
+                    throw er
+                }
+                
+                salt.runSaltProcessStep(pepperEnv, 'I@neutron:server', 'service.stop', ['neutron-server'])
+
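+                // Upgrade the analytics (collector) nodes first: stop the legacy services, move the Cassandra data to /var/lib/analyticsdb and copy the Zookeeper data to /var/lib/analyticsdb_zookeeper_data if not already present, then apply docker.client to start the containerised services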
+                try {
+                    for (service in analyticsServices) {
+                        salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'service.stop', [service])
+                    }
+                    result = salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'file.directory_exists', ['/var/lib/analyticsdb/data'])['return'][0].values()[0]
+                    if (result == false) {
+                        salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'file.move', ['/var/lib/cassandra', '/var/lib/analyticsdb'])
+                        salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'file.copy', ['/var/lib/zookeeper', '/var/lib/analyticsdb_zookeeper_data','recurse=True'])
+                    }
+                    check = 'doctrail all contrail-status'
+                    salt.enforceState(pepperEnv, 'I@opencontrail:collector', 'docker.client')
+                    runCommonCommands('I@opencontrail:collector and *01*', command, args, check, salt, pepperEnv, common)
+                } catch (Exception er) {
+                    common.errorMsg("Opencontrail Analytics failed to be upgraded.")
+                    throw er
+                }
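+                // Upgrade the config/control nodes: stop the legacy services, copy the Cassandra/Zookeeper data into the configdb paths, containerise ntw02/ntw03 first and ntw01 last, then update and restart the neutron contrail plugin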
+                try {
+                    check = 'doctrail all contrail-status'
+
+                    for (service in configServices) {
+                        salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.stop', [service])
+                    }
+
+                    result = salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'file.directory_exists', ['/var/lib/configdb/data'])['return'][0].values()[0]
+                    if (result == false) {
+                        salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'file.copy', ['/var/lib/cassandra', '/var/lib/configdb', 'recurse=True'])
+                        salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'file.copy', ['/var/lib/zookeeper', '/var/lib/config_zookeeper_data', 'recurse=True'])
+                    }
+
+                    for (service in controlServices) {
+                        salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control and *0[23]*', 'service.stop', [service])
+                    }
+
+                    salt.enforceState(pepperEnv, 'I@opencontrail:control and *0[23]*', 'docker.client')
+
+                    runCommonCommands('I@opencontrail:control and *02*', command, args, check, salt, pepperEnv, common)
+
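+                    // The pause presumably gives the already containerised control nodes time to stabilise before ntw01 is taken down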
+                    sleep(120)
+
+                    for (service in controlServices) {
+                        salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control and *01*', 'service.stop', [service])
+                    }
+
+                    salt.enforceState(pepperEnv, 'I@opencontrail:control and *01*', 'docker.client')
+
+                    salt.runSaltProcessStep(pepperEnv, 'I@neutron:server', 'pkg.install', ['neutron-plugin-contrail,contrail-heat,python-contrail'])
+                    salt.runSaltProcessStep(pepperEnv, 'I@neutron:server', 'service.start', ['neutron-server'])
+                } catch (Exception er) {
+                    common.errorMsg("Opencontrail Controller failed to be upgraded.")
+                    throw er
+                }
+
+            }
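+            // Archive the old Cassandra and Zookeeper data on the control and analytics nodes, then disable the legacy (non-containerised) services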
+            salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'archive.tar', ['zcvf', '/root/contrail-database.tgz', '/var/lib/cassandra'])
+            salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'archive.tar', ['zcvf', '/root/contrail-zookeeper.tgz', '/var/lib/zookeeper'])
+            salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'archive.tar', ['zcvf', '/root/contrail-analytics-database.tgz', '/var/lib/cassandra'])
+            salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'archive.tar', ['zcvf', '/root/contrail-analytics-zookeeper.tgz', '/var/lib/zookeeper'])
+            //salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'pkg.remove', [controlPkgs])
+            //salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'pkg.remove', [analyticsPkgs])
+            for (service in controlServices) {
+                salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.disable', [service])
+            }
+            for (service in analyticsServices) {
+                salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'service.disable', [service])
+            }
+        }
+
+
+        if (STAGE_COMPUTES_UPGRADE.toBoolean() == true) {
+
+            try {
+
+                stage('List targeted compute servers') {
+                    minions = salt.getMinions(pepperEnv, COMPUTE_TARGET_SERVERS)
+
+                    if (minions.isEmpty()) {
+                        throw new Exception("No minion was targeted")
+                    }
+
+                    targetLiveSubset = minions.subList(0, Integer.valueOf(COMPUTE_TARGET_SUBSET_LIVE)).join(' or ')
+                    targetLiveSubsetProbe = minions.subList(0, probe).join(' or ')
+
+                    targetLiveAll = minions.join(' or ')
+                    common.infoMsg("Found nodes: ${targetLiveAll}")
+                    common.infoMsg("Selected sample nodes: ${targetLiveSubset}")
+                }
+
+                stage('Confirm upgrade on sample nodes') {
+                    input message: "Do you want to continue with the Opencontrail compute upgrade on the following sample nodes? ${targetLiveSubset}"
+                }
+
+                stage("Opencontrail compute upgrade on sample nodes") {
+
+                    try {
+                        salt.runSaltProcessStep(pepperEnv, targetLiveSubset, 'saltutil.refresh_pillar', [], null, true)
+                        salt.runSaltProcessStep(pepperEnv, targetLiveSubset, 'saltutil.sync_all', [], null, true)
+                        salt.runSaltProcessStep(pepperEnv, targetLiveSubset, 'file.remove', ["/etc/apt/sources.list.d/mcp_opencontrail.list"], null, true)
+                        salt.enforceState(pepperEnv, targetLiveSubset, 'linux.system.repo')
+                    } catch (Exception er) {
+                        common.errorMsg("Opencontrail component on ${targetLiveSubset} probably failed to be replaced. Please check it in ${oc_component_repo} before continuing.")
+                        throw er
+                    }
+
+                    args = "export DEBIAN_FRONTEND=noninteractive; apt install -o Dpkg::Options::=\"--force-confold\" -o Dpkg::Options::=\"--force-confdef\" ${CMP_PKGS}  -y;"
+                    check = 'contrail-status'
+
+                    out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, args, null)
+                    salt.printSaltCommandResult(out)
+
+                    try {
+                        salt.enforceState(pepperEnv, targetLiveSubset, 'opencontrail')
+                    } catch (Exception er) {
+                        common.errorMsg("Opencontrail state was executed on ${targetLiveSubset} and failed. Please fix it manually.")
+                    }
+
+                    salt.runSaltProcessStep(pepperEnv, targetLiveSubset, 'cmd.shell', ["${KERNEL_MODULE_RELOAD}"], null, true)
+
+                    //sleep(10)
+                    salt.commandStatus(pepperEnv, targetLiveSubset, "${check} | grep -v == | grep -v active | grep -v -F /var/crashes/", null, false)
+
+                    out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, check, null)
+                    salt.printSaltCommandResult(out)
+                }
+
+                stage('Confirm upgrade on all targeted nodes') {
+                    input message: "Do you want to continue with the Opencontrail compute upgrade on all targeted nodes (${targetLiveAll})?"
+                }
+                stage("Opencontrail compute upgrade on all targeted nodes") {
+
+                    try {
+                        salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'saltutil.refresh_pillar', [], null, true)
+                        salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'saltutil.sync_all', [], null, true)
+                        salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'file.remove', ["/etc/apt/sources.list.d/mcp_opencontrail.list"], null, true)
+                        salt.enforceState(pepperEnv, targetLiveAll, 'linux.system.repo')
+                    } catch (Exception er) {
+                        common.errorMsg("Opencontrail component on ${targetLiveAll} probably failed to be replaced. Please check it in ${oc_component_repo} before continuing.")
+                        throw er
+                    }
+
+                    args = "export DEBIAN_FRONTEND=noninteractive; apt install -o Dpkg::Options::=\"--force-confold\" -o Dpkg::Options::=\"--force-confdef\" ${CMP_PKGS}  -y;"
+                    check = 'contrail-status'
+
+                    out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, args, null)
+                    salt.printSaltCommandResult(out)
+
+                    try {
+                        salt.enforceState(pepperEnv, targetLiveAll, 'opencontrail')
+                    } catch (Exception er) {
+                        common.errorMsg("Opencontrail state was executed on ${targetLiveAll} and failed. Please fix it manually.")
+                    }
+
+                    salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'cmd.shell', ["${KERNEL_MODULE_RELOAD}"], null, true)
+                    //sleep(10)
+                    salt.commandStatus(pepperEnv, targetLiveAll, "${check} | grep -v == | grep -v active | grep -v -F /var/crashes/", null, false)
+
+                    out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, check, null)
+                    salt.printSaltCommandResult(out)
+                }
+
+            } catch (Throwable e) {
+                // If there was an error or exception thrown, the build failed
+                currentBuild.result = "FAILURE"
+                currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+                throw e
+            }
+        }
+
+
+        if (STAGE_CONTROLLERS_ROLLBACK.toBoolean() == true) {
+
+            stage('Ask for manual confirmation') {
+                input message: "Do you want to continue with the Opencontrail nodes rollback?"
+            }
+
+            stage('Opencontrail controllers rollback') {
+
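+                // Roll back the controllers: restore the old repo on the database nodes, take down the docker-compose services and re-apply the opencontrail state (collectors first, then ntw02/ntw03, and ntw01 last)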
+                salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:database', 'saltutil.refresh_pillar', [], null, true)
+                salt.enforceState(pepperEnv, 'I@opencontrail:database', 'linux.system.repo')
+                salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'cmd.shell', ['cd /etc/docker/compose/opencontrail/; docker-compose down'], null, true)
+
+                salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'state.sls', ['opencontrail', 'exclude=opencontrail.client'])
+
+                salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control and *0[23]*', 'cmd.shell', ['cd /etc/docker/compose/opencontrail/; docker-compose down'], null, true)
+                for (service in config4Services) {
+                    salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control and *01*', 'cmd.shell', ["doctrail controller systemctl stop ${service}"], null, true)
+                }
+                salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control and *0[23]*', 'state.sls', ['opencontrail', 'exclude=opencontrail.client'])
+
+                check = 'contrail-status'
                 runCommonCommands('I@opencontrail:control and *02*', command, args, check, salt, pepperEnv, common)
 
                 sleep(120)
 
-                for (service in controlServices) {
-                    salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control and *01*', 'service.stop', [service])
-                }
-
-                salt.enforceState(pepperEnv, 'I@opencontrail:control and *01*', 'docker.client')
-
-                salt.runSaltProcessStep(pepperEnv, 'I@neutron:server', 'pkg.install', ['neutron-plugin-contrail,contrail-heat,python-contrail'])
-                salt.runSaltProcessStep(pepperEnv, 'I@neutron:server', 'service.start', ['neutron-server'])
-            } catch (Exception er) {
-                common.errorMsg("Opencontrail Controller failed to be upgraded.")
-                throw er
+                salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control and *01*', 'cmd.shell', ['cd /etc/docker/compose/opencontrail/; docker-compose down'], null, true)
+                salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control and *01*', 'state.sls', ['opencontrail', 'exclude=opencontrail.client'])
             }
-
-        }
-        salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'archive.tar', ['zcvf', '/root/contrail-database.tgz', '/var/lib/cassandra'])
-        salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'archive.tar', ['zcvf', '/root/contrail-zookeeper.tgz', '/var/lib/zoopeeker'])
-        salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'archive.tar', ['zcvf', '/root/contrail-analytics-database.tgz', '/var/lib/cassandra'])
-        salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'archive.tar', ['zcvf', '/root/contrail-analytics-zookeeper.tgz', '/var/lib/zookeeper'])
-        //salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'pkg.remove', [controlPkgs])
-        //salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'pkg.remove', [analyticsPkgs])
-        for (service in controlServices) {
-            salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.disable', [service])
-        }
-        for (service in analyticsServices) {
-            salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'service.disable', [service])
-            }
-    }
-
-
-    if (STAGE_COMPUTES_UPGRADE.toBoolean() == true) {
-
-        try {
-
-            stage('List targeted compute servers') {
-                minions = salt.getMinions(pepperEnv, COMPUTE_TARGET_SERVERS)
-
-                if (minions.isEmpty()) {
-                    throw new Exception("No minion was targeted")
-                }
-
-                targetLiveSubset = minions.subList(0, Integer.valueOf(COMPUTE_TARGET_SUBSET_LIVE)).join(' or ')
-                targetLiveSubsetProbe = minions.subList(0, probe).join(' or ')
-
-                targetLiveAll = minions.join(' or ')
-                common.infoMsg("Found nodes: ${targetLiveAll}")
-                common.infoMsg("Selected sample nodes: ${targetLiveSubset}")
-            }
-
-            stage('Confirm upgrade on sample nodes') {
-                input message: "Do you want to continue with the Opencontrail compute upgrade on the following sample nodes? ${targetLiveSubset}"
-            }
-
-            stage("Opencontrail compute upgrade on sample nodes") {
-
-                try {
-                    salt.runSaltProcessStep(pepperEnv, targetLiveSubset, 'saltutil.refresh_pillar', [], null, true)
-                    salt.runSaltProcessStep(pepperEnv, targetLiveSubset, 'saltutil.sync_all', [], null, true)
-                    salt.runSaltProcessStep(pepperEnv, targetLiveSubset, 'file.remove', ["/etc/apt/sources.list.d/mcp_opencontrail.list"], null, true)
-                    salt.enforceState(pepperEnv, targetLiveSubset, 'linux.system.repo')
-                } catch (Exception er) {
-                    common.errorMsg("Opencontrail component on ${targetLiveSubset} probably failed to be replaced. Please check it in ${oc_component_repo} before continuing.")
-                    throw er
-                }
-
-                args = "export DEBIAN_FRONTEND=noninteractive; apt install -o Dpkg::Options::=\"--force-confold\" -o Dpkg::Options::=\"--force-confdef\" ${CMP_PKGS}  -y;"
-                check = 'contrail-status'
-
-                out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, args, null)
-                salt.printSaltCommandResult(out)
-
-                try {
-                    salt.enforceState(pepperEnv, targetLiveSubset, 'opencontrail')
-                } catch (Exception er) {
-                    common.errorMsg("Opencontrail state was executed on ${targetLiveSubset} and failed please fix it manually.")
-                }
-
-                salt.runSaltProcessStep(pepperEnv, targetLiveSubset, 'cmd.shell', ["${KERNEL_MODULE_RELOAD}"], null, true)
-
-                //sleep(10)
-                salt.commandStatus(pepperEnv, targetLiveSubset, "${check} | grep -v == | grep -v active | grep -v -F /var/crashes/", null, false)
-
-                out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, check, null)
-                salt.printSaltCommandResult(out)
-            }
-
-            stage('Confirm upgrade on all targeted nodes') {
-                input message: "Do you want to continue with the Opencontrail compute upgrade on all the targeted nodes? ${targetLiveAll} nodes?"
-            }
-            stage("Opencontrail compute upgrade on all targeted nodes") {
-
-                try {
-                    salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'saltutil.refresh_pillar', [], null, true)
-                    salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'saltutil.sync_all', [], null, true)
-                    salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'file.remove', ["/etc/apt/sources.list.d/mcp_opencontrail.list"], null, true)
-                    salt.enforceState(pepperEnv, targetLiveAll, 'linux.system.repo')
-                } catch (Exception er) {
-                    common.errorMsg("Opencontrail component on ${targetLiveAll} probably failed to be replaced. Please check it in ${oc_component_repo} before continuing.")
-                    throw er
-                }
-
-                args = "export DEBIAN_FRONTEND=noninteractive; apt install -o Dpkg::Options::=\"--force-confold\" -o Dpkg::Options::=\"--force-confdef\" ${CMP_PKGS}  -y;"
-                check = 'contrail-status'
-
-                out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, args, null)
-                salt.printSaltCommandResult(out)
-
-                try {
-                    salt.enforceState(pepperEnv, targetLiveAll, 'opencontrail')
-                } catch (Exception er) {
-                    common.errorMsg("Opencontrail state was executed on ${targetLiveAll} and failed please fix it manually.")
-                }
-
-                salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'cmd.shell', ["${KERNEL_MODULE_RELOAD}"], null, true)
-                //sleep(10)
-                salt.commandStatus(pepperEnv, targetLiveAll, "${check} | grep -v == | grep -v active | grep -v -F /var/crashes/", null, false)
-
-                out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, check, null)
-                salt.printSaltCommandResult(out)
-            }
-
-        } catch (Throwable e) {
-            // If there was an error or exception thrown, the build failed
-            currentBuild.result = "FAILURE"
-            currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
-            throw e
-        }
-    }
-
-
-    if (STAGE_CONTROLLERS_ROLLBACK.toBoolean() == true) {
-
-        stage('Ask for manual confirmation') {
-            input message: "Do you want to continue with the Opencontrail nodes rollback?"
         }
 
-       stage('Opencontrail controllers rollback') {
+        if (STAGE_COMPUTES_ROLLBACK.toBoolean() == true) {
 
-            salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:database', 'saltutil.refresh_pillar', [], null, true)
-            salt.enforceState(pepperEnv, 'I@opencontrail:database', 'linux.system.repo')
-            salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'cmd.shell', ['cd /etc/docker/compose/opencontrail/; docker-compose down'], null, true)
+            try {
 
-            salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'state.sls', ['opencontrail', 'exclude=opencontrail.client'])
+                stage('List targeted compute servers') {
+                    minions = salt.getMinions(pepperEnv, COMPUTE_TARGET_SERVERS)
 
-            salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control and *0[23]*', 'cmd.shell', ['cd /etc/docker/compose/opencontrail/; docker-compose down'], null, true)
-            for (service in config4Services) {
-                salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control and *01*', 'cmd.shell', ["doctrail controller systemctl stop ${service}"], null, true)
-            }
-            salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control and *0[23]*', 'state.sls', ['opencontrail', 'exclude=opencontrail.client'])
+                    if (minions.isEmpty()) {
+                        throw new Exception("No minion was targeted")
+                    }
 
-            check = 'contrail-status'
-            runCommonCommands('I@opencontrail:control and *02*', command, args, check, salt, pepperEnv, common)
+                    targetLiveSubset = minions.subList(0, Integer.valueOf(COMPUTE_TARGET_SUBSET_LIVE)).join(' or ')
+                    targetLiveSubsetProbe = minions.subList(0, probe).join(' or ')
 
-            sleep(120)
-
-            salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control and *01*', 'cmd.shell', ['cd /etc/docker/compose/opencontrail/; docker-compose down'], null, true)
-            salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control and *01*', 'state.sls', ['opencontrail', 'exclude=opencontrail.client'])
-        }
-    }
-
-    if (STAGE_COMPUTES_ROLLBACK.toBoolean() == true) {
-
-        try {
-
-            stage('List targeted compute servers') {
-                minions = salt.getMinions(pepperEnv, COMPUTE_TARGET_SERVERS)
-
-                if (minions.isEmpty()) {
-                    throw new Exception("No minion was targeted")
+                    targetLiveAll = minions.join(' or ')
+                    common.infoMsg("Found nodes: ${targetLiveAll}")
+                    common.infoMsg("Selected sample nodes: ${targetLiveSubset}")
                 }
 
-                targetLiveSubset = minions.subList(0, Integer.valueOf(COMPUTE_TARGET_SUBSET_LIVE)).join(' or ')
-                targetLiveSubsetProbe = minions.subList(0, probe).join(' or ')
-
-                targetLiveAll = minions.join(' or ')
-                common.infoMsg("Found nodes: ${targetLiveAll}")
-                common.infoMsg("Selected sample nodes: ${targetLiveSubset}")
-            }
-
-            stage('Confirm rollback on sample nodes') {
-                input message: "Do you want to continue with the Opencontrail compute rollback on the following sample nodes? ${targetLiveSubset}"
-            }
-
-            stage("Opencontrail compute rollback on sample nodes") {
-
-                try {
-                    salt.runSaltProcessStep(pepperEnv, targetLiveSubset, 'file.remove', ["/etc/apt/sources.list.d/mcp_opencontrail.list"], null, true)
-                    salt.runSaltProcessStep(pepperEnv, targetLiveSubset, 'saltutil.refresh_pillar', [], null, true)
-                    salt.enforceState(pepperEnv, targetLiveSubset, 'linux.system.repo')
-                } catch (Exception er) {
-                    common.errorMsg("Opencontrail component on ${targetLiveSubset} probably failed to be replaced. Please check it in ${oc_component_repo} before continuing.")
-                    throw er
+                stage('Confirm rollback on sample nodes') {
+                    input message: "Do you want to continue with the Opencontrail compute rollback on the following sample nodes? ${targetLiveSubset}"
                 }
 
-                args = "export DEBIAN_FRONTEND=noninteractive; apt install --allow-downgrades -o Dpkg::Options::=\"--force-confold\" -o Dpkg::Options::=\"--force-confdef\" ${CMP_PKGS}  -y;"
-                check = 'contrail-status'
+                stage("Opencontrail compute rollback on sample nodes") {
 
-                out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, args, null)
-                salt.printSaltCommandResult(out)
+                    try {
+                        salt.runSaltProcessStep(pepperEnv, targetLiveSubset, 'file.remove', ["/etc/apt/sources.list.d/mcp_opencontrail.list"], null, true)
+                        salt.runSaltProcessStep(pepperEnv, targetLiveSubset, 'saltutil.refresh_pillar', [], null, true)
+                        salt.enforceState(pepperEnv, targetLiveSubset, 'linux.system.repo')
+                    } catch (Exception er) {
+                        common.errorMsg("Opencontrail component on ${targetLiveSubset} probably failed to be replaced. Please check it in ${oc_component_repo} before continuing.")
+                        throw er
+                    }
 
-                try {
-                    salt.enforceState(pepperEnv, targetLiveSubset, 'opencontrail')
-                } catch (Exception er) {
-                    common.errorMsg("Opencontrail state was executed on ${targetLiveSubset} and failed please fix it manually.")
+                    args = "export DEBIAN_FRONTEND=noninteractive; apt install --allow-downgrades -o Dpkg::Options::=\"--force-confold\" -o Dpkg::Options::=\"--force-confdef\" ${CMP_PKGS}  -y;"
+                    check = 'contrail-status'
+
+                    out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, args, null)
+                    salt.printSaltCommandResult(out)
+
+                    try {
+                        salt.enforceState(pepperEnv, targetLiveSubset, 'opencontrail')
+                    } catch (Exception er) {
+                        common.errorMsg("Opencontrail state was executed on ${targetLiveSubset} and failed. Please fix it manually.")
+                    }
+
+                    salt.runSaltProcessStep(pepperEnv, targetLiveSubset, 'cmd.shell', ["${KERNEL_MODULE_RELOAD}"], null, true)
+                    //sleep(10)
+                    salt.commandStatus(pepperEnv, targetLiveSubset, "${check} | grep -v == | grep -v active | grep -v -F /var/crashes/", null, false)
+
+                    out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, check, null)
+                    salt.printSaltCommandResult(out)
                 }
 
-                salt.runSaltProcessStep(pepperEnv, targetLiveSubset, 'cmd.shell', ["${KERNEL_MODULE_RELOAD}"], null, true)
-                //sleep(10)
-                salt.commandStatus(pepperEnv, targetLiveSubset, "${check} | grep -v == | grep -v active | grep -v -F /var/crashes/", null, false)
-
-                out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, check, null)
-                salt.printSaltCommandResult(out)
-            }
-
-            stage('Confirm rollback on all targeted nodes') {
-                input message: "Do you want to continue with the Opencontrail compute upgrade on all the targeted nodes? ${targetLiveAll} nodes?"
-            }
-
-            stage("Opencontrail compute upgrade on all targeted nodes") {
-
-                try {
-                    salt.runSaltProcessStep(pepperEnv, targetLiveSubset, 'file.remove', ["/etc/apt/sources.list.d/mcp_opencontrail.list"], null, true)
-                    salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'saltutil.refresh_pillar', [], null, true)
-                    salt.enforceState(pepperEnv, targetLiveAll, 'linux.system.repo')
-                } catch (Exception er) {
-                    common.errorMsg("Opencontrail component on ${targetLiveAll} probably failed to be replaced. Please check it in ${oc_component_repo} before continuing.")
-                    throw er
+                stage('Confirm rollback on all targeted nodes') {
+                    input message: "Do you want to continue with the Opencontrail compute upgrade on all the targeted nodes (${targetLiveAll})?"
                 }
 
-                args = "export DEBIAN_FRONTEND=noninteractive; apt install --allow-downgrades -o Dpkg::Options::=\"--force-confold\" -o Dpkg::Options::=\"--force-confdef\" ${CMP_PKGS}  -y;"
-                check = 'contrail-status'
+                stage("Opencontrail compute upgrade on all targeted nodes") {
 
-                out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, args, null)
-                salt.printSaltCommandResult(out)
+                    try {
+                        salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'file.remove', ["/etc/apt/sources.list.d/mcp_opencontrail.list"], null, true)
+                        salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'saltutil.refresh_pillar', [], null, true)
+                        salt.enforceState(pepperEnv, targetLiveAll, 'linux.system.repo')
+                    } catch (Exception er) {
+                        common.errorMsg("Opencontrail component on ${targetLiveAll} probably failed to be replaced. Please check it in ${oc_component_repo} before continuing.")
+                        throw er
+                    }
 
-                try {
-                    salt.enforceState(pepperEnv, targetLiveAll, 'opencontrail')
-                } catch (Exception er) {
-                    common.errorMsg("Opencontrail state was executed on ${targetLiveAll} and failed please fix it manually.")
+                    args = "export DEBIAN_FRONTEND=noninteractive; apt install --allow-downgrades -o Dpkg::Options::=\"--force-confold\" -o Dpkg::Options::=\"--force-confdef\" ${CMP_PKGS}  -y;"
+                    check = 'contrail-status'
+
+                    out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, args, null)
+                    salt.printSaltCommandResult(out)
+
+                    try {
+                        salt.enforceState(pepperEnv, targetLiveAll, 'opencontrail')
+                    } catch (Exception er) {
+                        common.errorMsg("Opencontrail state was executed on ${targetLiveAll} and failed. Please fix it manually.")
+                    }
+
+                    salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'cmd.shell', ["${KERNEL_MODULE_RELOAD}"], null, true)
+                    //sleep(10)
+                    salt.commandStatus(pepperEnv, targetLiveAll, "${check} | grep -v == | grep -v active | grep -v -F /var/crashes/", null, false)
+
+                    out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, check, null)
+                    salt.printSaltCommandResult(out)
                 }
 
-                salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'cmd.shell', ["${KERNEL_MODULE_RELOAD}"], null, true)
-                //sleep(10)
-                salt.commandStatus(pepperEnv, targetLiveAll, "${check} | grep -v == | grep -v active | grep -v -F /var/crashes/", null, false)
-
-                out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, check, null)
-                salt.printSaltCommandResult(out)
+            } catch (Throwable e) {
+                // If there was an error or exception thrown, the build failed
+                currentBuild.result = "FAILURE"
+                currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+                throw e
             }
-
-        } catch (Throwable e) {
-            // If there was an error or exception thrown, the build failed
-            currentBuild.result = "FAILURE"
-            currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
-            throw e
         }
     }
 }
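
Each pipeline in this change wraps its body in the same guard: a 12-hour timeout, a node allocation, and a trailing catch that marks the build FAILURE and prepends the exception message to the build description before rethrowing. Below is a minimal sketch of that shared pattern as a reusable helper, assuming Jenkins scripted pipeline; the helper name runWithGuard is hypothetical and not part of this change, while the timeout/node steps and the currentBuild handling are taken from the pipelines above.

    // Sketch only: runWithGuard is a hypothetical helper, not part of this change.
    def runWithGuard(Closure body) {
        // Abort hung runs after 12 hours and execute the body on any available agent.
        timeout(time: 12, unit: 'HOURS') {
            node() {
                try {
                    body()
                } catch (Throwable e) {
                    // If there was an error or exception thrown, the build failed.
                    currentBuild.result = "FAILURE"
                    currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
                    throw e
                }
            }
        }
    }

    // Illustrative use: runWithGuard { stage('upgrade') { /* ... */ } }
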
diff --git a/openstack-compute-install.groovy b/openstack-compute-install.groovy
index 3cf6aaa..3c28552 100644
--- a/openstack-compute-install.groovy
+++ b/openstack-compute-install.groovy
@@ -18,87 +18,88 @@
 def command
 def commandKwargs
 
+timeout(time: 12, unit: 'HOURS') {
+    node() {
+        try {
 
-node() {
-    try {
-
-        stage('Setup virtualenv for Pepper') {
-            python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
-        }
-
-        stage('List target servers') {
-            minions = salt.getMinions(pepperEnv, TARGET_SERVERS)
-
-            if (minions.isEmpty()) {
-                throw new Exception("No minion was targeted")
+            stage('Setup virtualenv for Pepper') {
+                python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
             }
 
-            targetLiveAll = minions.join(' or ')
-            common.infoMsg("Found nodes: ${targetLiveAll}")
-            common.infoMsg("Selected nodes: ${targetLiveAll}")
-        }
+            stage('List target servers') {
+                minions = salt.getMinions(pepperEnv, TARGET_SERVERS)
 
-        stage("Trusty workaround") {
-            if(salt.getGrain(pepperEnv, minions[0], "oscodename")['return'][0].values()[0]["oscodename"] == "trusty") {
-                common.infoMsg("First node %nodename% has trusty")
-                common.infoMsg("Assuming trusty on all cluster, running extra network states...")
-                common.infoMsg("Network iteration #1. Bonding")
-                salt.enforceState(pepperEnv, targetLiveAll, 'linux.network', true)
-                common.infoMsg("Network iteration #2. Vlan tagging and bridging")
-                salt.enforceState(pepperEnv, targetLiveAll, 'linux.network', true)
+                if (minions.isEmpty()) {
+                    throw new Exception("No minion was targeted")
+                }
+
+                targetLiveAll = minions.join(' or ')
+                common.infoMsg("Found nodes: ${targetLiveAll}")
+                common.infoMsg("Selected nodes: ${targetLiveAll}")
             }
+
+            stage("Trusty workaround") {
+                if(salt.getGrain(pepperEnv, minions[0], "oscodename")['return'][0].values()[0]["oscodename"] == "trusty") {
+                    common.infoMsg("First node ${minions[0]} has trusty")
+                    common.infoMsg("Assuming trusty on all cluster, running extra network states...")
+                    common.infoMsg("Network iteration #1. Bonding")
+                    salt.enforceState(pepperEnv, targetLiveAll, 'linux.network', true)
+                    common.infoMsg("Network iteration #2. Vlan tagging and bridging")
+                    salt.enforceState(pepperEnv, targetLiveAll, 'linux.network', true)
+                }
+            }
+
+            stage("Setup repositories") {
+                salt.enforceState(pepperEnv, targetLiveAll, 'linux.system.repo', true)
+            }
+
+            stage("Upgrade packages") {
+                salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'pkg.upgrade', [], null, true)
+            }
+
+            stage("Setup networking") {
+                // Sync all of the modules from the salt master.
+                salt.syncAll(pepperEnv, targetLiveAll)
+
+                // Apply state 'salt' to install python-psutil for network configuration without restarting salt-minion to avoid losing connection.
+                salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'state.apply',  ['salt', 'exclude=[{\'id\': \'salt_minion_service\'}, {\'id\': \'salt_minion_service_restart\'}, {\'id\': \'salt_minion_sync_all\'}]'], null, true)
+
+                // Restart salt-minion to take effect.
+                salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'service.restart', ['salt-minion'], null, true, 10)
+
+                // Configure networking excluding vhost0 interface.
+                salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'state.apply',  ['linux.network', 'exclude=[{\'id\': \'linux_interface_vhost0\'}]'], null, true)
+
+                // Kill unnecessary processes ifup/ifdown which is stuck from previous state linux.network.
+                salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'ps.pkill', ['ifup'], null, false)
+                salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'ps.pkill', ['ifdown'], null, false)
+
+                // Restart networking to bring UP all interfaces.
+                salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'service.restart', ['networking'], null, true, 300)
+            }
+
+            stage("Highstate compute") {
+                // Execute highstate without state opencontrail.client.
+                salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'state.highstate', ['exclude=opencontrail.client'], null, true)
+
+                // Apply nova state to remove libvirt default bridge virbr0.
+                salt.enforceState(pepperEnv, targetLiveAll, 'nova', true)
+
+                // Execute highstate.
+                salt.enforceHighstate(pepperEnv, targetLiveAll, true)
+
+                // Restart supervisor-vrouter.
+                salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'service.restart', ['supervisor-vrouter'], null, true, 300)
+
+                // Apply salt,collectd to update information about current network interfaces.
+                salt.enforceState(pepperEnv, targetLiveAll, 'salt,collectd', true)
+            }
+
+        } catch (Throwable e) {
+            // If there was an error or exception thrown, the build failed
+            currentBuild.result = "FAILURE"
+            currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+            throw e
         }
-
-        stage("Setup repositories") {
-            salt.enforceState(pepperEnv, targetLiveAll, 'linux.system.repo', true)
-        }
-
-        stage("Upgrade packages") {
-            salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'pkg.upgrade', [], null, true)
-        }
-
-        stage("Setup networking") {
-            // Sync all of the modules from the salt master.
-            salt.syncAll(pepperEnv, targetLiveAll)
-
-            // Apply state 'salt' to install python-psutil for network configuration without restarting salt-minion to avoid losing connection.
-            salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'state.apply',  ['salt', 'exclude=[{\'id\': \'salt_minion_service\'}, {\'id\': \'salt_minion_service_restart\'}, {\'id\': \'salt_minion_sync_all\'}]'], null, true)
-
-            // Restart salt-minion to take effect.
-            salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'service.restart', ['salt-minion'], null, true, 10)
-
-            // Configure networking excluding vhost0 interface.
-            salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'state.apply',  ['linux.network', 'exclude=[{\'id\': \'linux_interface_vhost0\'}]'], null, true)
-
-            // Kill unnecessary processes ifup/ifdown which is stuck from previous state linux.network.
-            salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'ps.pkill', ['ifup'], null, false)
-            salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'ps.pkill', ['ifdown'], null, false)
-
-            // Restart networking to bring UP all interfaces.
-            salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'service.restart', ['networking'], null, true, 300)
-        }
-
-        stage("Highstate compute") {
-            // Execute highstate without state opencontrail.client.
-            salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'state.highstate', ['exclude=opencontrail.client'], null, true)
-
-            // Apply nova state to remove libvirt default bridge virbr0.
-            salt.enforceState(pepperEnv, targetLiveAll, 'nova', true)
-
-            // Execute highstate.
-            salt.enforceHighstate(pepperEnv, targetLiveAll, true)
-
-            // Restart supervisor-vrouter.
-            salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'service.restart', ['supervisor-vrouter'], null, true, 300)
-
-            // Apply salt,collectd to update information about current network interfaces.
-            salt.enforceState(pepperEnv, targetLiveAll, 'salt,collectd', true)
-        }
-
-    } catch (Throwable e) {
-        // If there was an error or exception thrown, the build failed
-        currentBuild.result = "FAILURE"
-        currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
-        throw e
     }
 }
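
openstack-compute-install.groovy resolves TARGET_SERVERS into a compound Salt matcher before doing any work: getMinions expands the expression into minion ids, an empty result aborts the run, and the ids are joined with ' or ' so later calls address the whole set. A minimal sketch of that step, assuming the com.mirantis.mk.Salt library used throughout these pipelines; the helper name resolveLiveTarget is hypothetical.

    // Sketch only: resolveLiveTarget is a hypothetical helper, not part of this change.
    def resolveLiveTarget(pepperEnv, String targetServers) {
        def salt = new com.mirantis.mk.Salt()
        // Expand the target expression into concrete minion ids.
        def minions = salt.getMinions(pepperEnv, targetServers)
        if (minions.isEmpty()) {
            // Fail early rather than running states against an empty target.
            throw new Exception("No minion was targeted")
        }
        // Compound matcher accepted by the 'compound' expression type used in these pipelines.
        return minions.join(' or ')
    }
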
diff --git a/openstack-compute-upgrade.groovy b/openstack-compute-upgrade.groovy
index 6c8ffdf..a59fc08 100644
--- a/openstack-compute-upgrade.groovy
+++ b/openstack-compute-upgrade.groovy
@@ -25,228 +25,230 @@
 def commandKwargs
 def probe = 1
 
-node() {
-    try {
-
-        stage('Setup virtualenv for Pepper') {
-            python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
-        }
-
-        stage('List target servers') {
-            minions = salt.getMinions(pepperEnv, TARGET_SERVERS)
-
-            if (minions.isEmpty()) {
-                throw new Exception("No minion was targeted")
-            }
-
-            if (TARGET_SUBSET_TEST != "") {
-                targetTestSubset = minions.subList(0, Integer.valueOf(TARGET_SUBSET_TEST)).join(' or ')
-            } else {
-                targetTestSubset = minions.join(' or ')
-            }
-            targetLiveSubset = minions.subList(0, Integer.valueOf(TARGET_SUBSET_LIVE)).join(' or ')
-            targetTestSubsetProbe = minions.subList(0, probe).join(' or ')
-            targetLiveSubsetProbe = minions.subList(0, probe).join(' or ')
-
-            targetLiveAll = minions.join(' or ')
-            common.infoMsg("Found nodes: ${targetLiveAll}")
-            common.infoMsg("Selected test nodes: ${targetTestSubset}")
-            common.infoMsg("Selected sample nodes: ${targetLiveSubset}")
-        }
-
-
-        stage("Add new repos on test nodes") {
-            salt.enforceState(pepperEnv, targetTestSubset, 'linux.system.repo')
-        }
-
-
-        opencontrail = null
-
+timeout(time: 12, unit: 'HOURS') {
+    node() {
         try {
-            opencontrail = salt.cmdRun(pepperEnv, targetTestSubsetProbe, "salt-call grains.item roles | grep opencontrail.compute")
-            print(opencontrail)
-        } catch (Exception er) {
-            common.infoMsg("opencontrail is not used")
-        }
 
-        if(opencontrail != null) {
-            stage('Remove OC component from repos on test nodes') {
-                def contrail_repo_file1 = ''
-                def contrail_repo_file2 = ''
-                try {
-                    contrail_repo_file1 = salt.cmdRun(pepperEnv, targetTestSubset, "grep -Eorl \\ oc\\([0-9]*\$\\) /etc/apt/sources.list*")['return'][0].values()[0].split("\n")[0]
-                    contrail_repo_file2 = salt.cmdRun(pepperEnv, targetTestSubset, "grep -Eorl \\ oc\\([0-9]*\\ \\) /etc/apt/sources.list*")['return'][0].values()[0].split("\n")[0]
-                } catch (Exception er) {
-                    common.warningMsg(er)
-                }
-                salt.cmdRun(pepperEnv, targetTestSubset, "find /etc/apt/sources.list* -type f -print0 | xargs -0 sed -i -r -e 's/ oc([0-9]*) / /g;s/ oc([0-9]*\$)//g'")
-                try {
-                    salt.cmdRun(pepperEnv, targetTestSubset, "salt-call pkg.refresh_db")
-                } catch (Exception er) {
-                    common.warningMsg(er)
-                    // remove the malformed repo entry
-                    salt.cmdRun(pepperEnv, targetTestSubset, "rm ${contrail_repo_file1} ${contrail_repo_file2}")
-                    salt.runSaltProcessStep(pepperEnv, targetTestSubset, 'pkg.refresh_db', [], null, true)
-                }
+            stage('Setup virtualenv for Pepper') {
+                python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
             }
-        }
 
-        stage("List package upgrades") {
-            salt.runSaltProcessStep(pepperEnv, targetTestSubset, 'pkg.list_upgrades', [], null, true)
-        }
+            stage('List target servers') {
+                minions = salt.getMinions(pepperEnv, TARGET_SERVERS)
 
-        stage('Confirm upgrade on sample nodes') {
-            input message: "Please verify the list of packages that you want to be upgraded. Do you want to continue with upgrade?"
-        }
-
-        stage("Add new repos on sample nodes") {
-            salt.enforceState(pepperEnv, targetLiveSubset, 'linux.system.repo')
-        }
-
-        if(opencontrail != null) {
-            stage('Remove OC component from repos on sample nodes') {
-                def contrail_repo_file1 = ''
-                def contrail_repo_file2 = ''
-                try {
-                    contrail_repo_file1 = salt.cmdRun(pepperEnv, targetLiveSubset, "grep -Eorl \\ oc\\([0-9]*\$\\) /etc/apt/sources.list*")['return'][0].values()[0].split("\n")[0]
-                    contrail_repo_file2 = salt.cmdRun(pepperEnv, targetLiveSubset, "grep -Eorl \\ oc\\([0-9]*\\ \\) /etc/apt/sources.list*")['return'][0].values()[0].split("\n")[0]
-                } catch (Exception er) {
-                    common.warningMsg(er)
+                if (minions.isEmpty()) {
+                    throw new Exception("No minion was targeted")
                 }
-                salt.cmdRun(pepperEnv, targetLiveSubset, "find /etc/apt/sources.list* -type f -print0 | xargs -0 sed -i -r -e 's/ oc([0-9]*) / /g;s/ oc([0-9]*\$)//g'")
-                try {
-                    salt.cmdRun(pepperEnv, targetLiveSubset, "salt-call pkg.refresh_db")
-                } catch (Exception er) {
-                    common.warningMsg(er)
-                    // remove the malformed repo entry
-                    salt.cmdRun(pepperEnv, targetLiveSubset, "rm ${contrail_repo_file1} ${contrail_repo_file2}")
-                    salt.runSaltProcessStep(pepperEnv, targetLiveSubset, 'pkg.refresh_db', [], null, true)
+
+                if (TARGET_SUBSET_TEST != "") {
+                    targetTestSubset = minions.subList(0, Integer.valueOf(TARGET_SUBSET_TEST)).join(' or ')
+                } else {
+                    targetTestSubset = minions.join(' or ')
                 }
+                targetLiveSubset = minions.subList(0, Integer.valueOf(TARGET_SUBSET_LIVE)).join(' or ')
+                targetTestSubsetProbe = minions.subList(0, probe).join(' or ')
+                targetLiveSubsetProbe = minions.subList(0, probe).join(' or ')
+
+                targetLiveAll = minions.join(' or ')
+                common.infoMsg("Found nodes: ${targetLiveAll}")
+                common.infoMsg("Selected test nodes: ${targetTestSubset}")
+                common.infoMsg("Selected sample nodes: ${targetLiveSubset}")
             }
-        }
 
-        args = "apt-get -y -s -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confold\" dist-upgrade"
 
-        stage('Test upgrade on sample') {
+            stage("Add new repos on test nodes") {
+                salt.enforceState(pepperEnv, targetTestSubset, 'linux.system.repo')
+            }
+
+
+            opencontrail = null
+
             try {
-                salt.cmdRun(pepperEnv, targetLiveSubset, args)
+                opencontrail = salt.cmdRun(pepperEnv, targetTestSubsetProbe, "salt-call grains.item roles | grep opencontrail.compute")
+                print(opencontrail)
             } catch (Exception er) {
-                print(er)
+                common.infoMsg("opencontrail is not used")
             }
-        }
 
-        stage('Confirm upgrade on sample') {
-            input message: "Please verify if there are packages that it wants to downgrade. If so, execute apt-cache policy on them and verify if everything is fine. Do you want to continue with upgrade?"
-        }
+            if(opencontrail != null) {
+                stage('Remove OC component from repos on test nodes') {
+                    def contrail_repo_file1 = ''
+                    def contrail_repo_file2 = ''
+                    try {
+                        contrail_repo_file1 = salt.cmdRun(pepperEnv, targetTestSubset, "grep -Eorl \\ oc\\([0-9]*\$\\) /etc/apt/sources.list*")['return'][0].values()[0].split("\n")[0]
+                        contrail_repo_file2 = salt.cmdRun(pepperEnv, targetTestSubset, "grep -Eorl \\ oc\\([0-9]*\\ \\) /etc/apt/sources.list*")['return'][0].values()[0].split("\n")[0]
+                    } catch (Exception er) {
+                        common.warningMsg(er)
+                    }
+                    salt.cmdRun(pepperEnv, targetTestSubset, "find /etc/apt/sources.list* -type f -print0 | xargs -0 sed -i -r -e 's/ oc([0-9]*) / /g;s/ oc([0-9]*\$)//g'")
+                    try {
+                        salt.cmdRun(pepperEnv, targetTestSubset, "salt-call pkg.refresh_db")
+                    } catch (Exception er) {
+                        common.warningMsg(er)
+                        // remove the malformed repo entry
+                        salt.cmdRun(pepperEnv, targetTestSubset, "rm ${contrail_repo_file1} ${contrail_repo_file2}")
+                        salt.runSaltProcessStep(pepperEnv, targetTestSubset, 'pkg.refresh_db', [], null, true)
+                    }
+                }
+            }
 
-        command = "cmd.run"
-        args = 'export DEBIAN_FRONTEND=noninteractive; apt-get -y -q --allow-downgrades -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confold\" dist-upgrade;'
+            stage("List package upgrades") {
+                salt.runSaltProcessStep(pepperEnv, targetTestSubset, 'pkg.list_upgrades', [], null, true)
+            }
 
-        stage('Apply package upgrades on sample') {
-            out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, args, commandKwargs)
-            salt.printSaltCommandResult(out)
-        }
+            stage('Confirm upgrade on sample nodes') {
+                input message: "Please verify the list of packages to be upgraded. Do you want to continue with the upgrade?"
+            }
 
-        openvswitch = null
+            stage("Add new repos on sample nodes") {
+                salt.enforceState(pepperEnv, targetLiveSubset, 'linux.system.repo')
+            }
 
-        try {
-            openvswitch = salt.cmdRun(pepperEnv, targetLiveSubsetProbe, "salt-call grains.item roles | grep neutron.compute")
-        } catch (Exception er) {
-            common.infoMsg("openvswitch is not used")
-        }
+            if(opencontrail != null) {
+                stage('Remove OC component from repos on sample nodes') {
+                    def contrail_repo_file1 = ''
+                    def contrail_repo_file2 = ''
+                    try {
+                        contrail_repo_file1 = salt.cmdRun(pepperEnv, targetLiveSubset, "grep -Eorl \\ oc\\([0-9]*\$\\) /etc/apt/sources.list*")['return'][0].values()[0].split("\n")[0]
+                        contrail_repo_file2 = salt.cmdRun(pepperEnv, targetLiveSubset, "grep -Eorl \\ oc\\([0-9]*\\ \\) /etc/apt/sources.list*")['return'][0].values()[0].split("\n")[0]
+                    } catch (Exception er) {
+                        common.warningMsg(er)
+                    }
+                    salt.cmdRun(pepperEnv, targetLiveSubset, "find /etc/apt/sources.list* -type f -print0 | xargs -0 sed -i -r -e 's/ oc([0-9]*) / /g;s/ oc([0-9]*\$)//g'")
+                    try {
+                        salt.cmdRun(pepperEnv, targetLiveSubset, "salt-call pkg.refresh_db")
+                    } catch (Exception er) {
+                        common.warningMsg(er)
+                        // remove the malformed repo entry
+                        salt.cmdRun(pepperEnv, targetLiveSubset, "rm ${contrail_repo_file1} ${contrail_repo_file2}")
+                        salt.runSaltProcessStep(pepperEnv, targetLiveSubset, 'pkg.refresh_db', [], null, true)
+                    }
+                }
+            }
 
-        if(openvswitch != null) {
-            args = "sudo /usr/share/openvswitch/scripts/ovs-ctl start"
+            args = "apt-get -y -s -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confold\" dist-upgrade"
 
-            stage('Start ovs on sample nodes') {
+            stage('Test upgrade on sample') {
+                try {
+                    salt.cmdRun(pepperEnv, targetLiveSubset, args)
+                } catch (Exception er) {
+                    print(er)
+                }
+            }
+
+            stage('Confirm upgrade on sample') {
+                input message: "Please check whether any packages would be downgraded. If so, run apt-cache policy on them and confirm the result is acceptable. Do you want to continue with the upgrade?"
+            }
+
+            command = "cmd.run"
+            args = 'export DEBIAN_FRONTEND=noninteractive; apt-get -y -q --allow-downgrades -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confold\" dist-upgrade;'
+
+            stage('Apply package upgrades on sample') {
                 out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, args, commandKwargs)
                 salt.printSaltCommandResult(out)
             }
-            stage("Run salt states on sample nodes") {
-                salt.enforceState(pepperEnv, targetLiveSubset, ['nova', 'neutron'])
-            }
-        } else {
-            stage("Run salt states on sample nodes") {
-                salt.enforceState(pepperEnv, targetLiveSubset, ['nova', 'linux.system.repo'])
-            }
-        }
 
-        stage("Run Highstate on sample nodes") {
+            openvswitch = null
+
             try {
-                salt.enforceHighstate(pepperEnv, targetLiveSubset)
+                openvswitch = salt.cmdRun(pepperEnv, targetLiveSubsetProbe, "salt-call grains.item roles | grep neutron.compute")
             } catch (Exception er) {
-                common.errorMsg("Highstate was executed on ${targetLiveSubset} but something failed. Please check it and fix it accordingly.")
+                common.infoMsg("openvswitch is not used")
             }
-        }
 
-        stage('Confirm upgrade on all targeted nodes') {
-            timeout(time: 2, unit: 'HOURS') {
-               input message: "Verify that the upgraded sample nodes are working correctly. If so, do you want to approve live upgrade on ${targetLiveAll} nodes?"
-            }
-        }
+            if(openvswitch != null) {
+                args = "sudo /usr/share/openvswitch/scripts/ovs-ctl start"
 
-        stage("Add new repos on all targeted nodes") {
-            salt.enforceState(pepperEnv, targetLiveAll, 'linux.system.repo')
-        }
-
-        if(opencontrail != null) { 
-            stage('Remove OC component from repos on all targeted nodes') {
-                def contrail_repo_file1 = ''
-                def contrail_repo_file2 = ''
-                try {
-                    contrail_repo_file1 = salt.cmdRun(pepperEnv, targetLiveAll, "grep -Eorl \\ oc\\([0-9]*\$\\) /etc/apt/sources.list*")['return'][0].values()[0].split("\n")[0]
-                    contrail_repo_file2 = salt.cmdRun(pepperEnv, targetLiveAll, "grep -Eorl \\ oc\\([0-9]*\\ \\) /etc/apt/sources.list*")['return'][0].values()[0].split("\n")[0]
-                } catch (Exception er) {
-                    common.warningMsg(er)
+                stage('Start ovs on sample nodes') {
+                    out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, args, commandKwargs)
+                    salt.printSaltCommandResult(out)
                 }
-                salt.cmdRun(pepperEnv, targetLiveAll, "find /etc/apt/sources.list* -type f -print0 | xargs -0 sed -i -r -e 's/ oc([0-9]*) / /g;s/ oc([0-9]*\$)//g'")
-                try {
-                    salt.cmdRun(pepperEnv, targetLiveAll, "salt-call pkg.refresh_db")
-                } catch (Exception er) {
-                    common.warningMsg(er)
-                    // remove the malformed repo entry
-                    salt.cmdRun(pepperEnv, targetLiveAll, "rm ${contrail_repo_file1} ${contrail_repo_file2}")
-                    salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'pkg.refresh_db', [], null, true)
+                stage("Run salt states on sample nodes") {
+                    salt.enforceState(pepperEnv, targetLiveSubset, ['nova', 'neutron'])
+                }
+            } else {
+                stage("Run salt states on sample nodes") {
+                    salt.enforceState(pepperEnv, targetLiveSubset, ['nova', 'linux.system.repo'])
                 }
             }
-        }
 
-        args = 'export DEBIAN_FRONTEND=noninteractive; apt-get -y -q --allow-downgrades -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confold\" dist-upgrade;'
+            stage("Run Highstate on sample nodes") {
+                try {
+                    salt.enforceHighstate(pepperEnv, targetLiveSubset)
+                } catch (Exception er) {
+                    common.errorMsg("Highstate was executed on ${targetLiveSubset} but something failed. Please check it and fix it accordingly.")
+                }
+            }
 
-        stage('Apply package upgrades on all targeted nodes') {
-            out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, args, commandKwargs)
-            salt.printSaltCommandResult(out)
-        }
+            stage('Confirm upgrade on all targeted nodes') {
+                timeout(time: 2, unit: 'HOURS') {
+                   input message: "Verify that the upgraded sample nodes are working correctly. If so, do you want to approve live upgrade on ${targetLiveAll} nodes?"
+                }
+            }
 
-        if(openvswitch != null) {
-            args = "sudo /usr/share/openvswitch/scripts/ovs-ctl start"
+            stage("Add new repos on all targeted nodes") {
+                salt.enforceState(pepperEnv, targetLiveAll, 'linux.system.repo')
+            }
 
-            stage('Start ovs on all targeted nodes') {
+            if(opencontrail != null) { 
+                stage('Remove OC component from repos on all targeted nodes') {
+                    def contrail_repo_file1 = ''
+                    def contrail_repo_file2 = ''
+                    try {
+                        contrail_repo_file1 = salt.cmdRun(pepperEnv, targetLiveAll, "grep -Eorl \\ oc\\([0-9]*\$\\) /etc/apt/sources.list*")['return'][0].values()[0].split("\n")[0]
+                        contrail_repo_file2 = salt.cmdRun(pepperEnv, targetLiveAll, "grep -Eorl \\ oc\\([0-9]*\\ \\) /etc/apt/sources.list*")['return'][0].values()[0].split("\n")[0]
+                    } catch (Exception er) {
+                        common.warningMsg(er)
+                    }
+                    salt.cmdRun(pepperEnv, targetLiveAll, "find /etc/apt/sources.list* -type f -print0 | xargs -0 sed -i -r -e 's/ oc([0-9]*) / /g;s/ oc([0-9]*\$)//g'")
+                    try {
+                        salt.cmdRun(pepperEnv, targetLiveAll, "salt-call pkg.refresh_db")
+                    } catch (Exception er) {
+                        common.warningMsg(er)
+                        // remove the malformed repo entry
+                        salt.cmdRun(pepperEnv, targetLiveAll, "rm ${contrail_repo_file1} ${contrail_repo_file2}")
+                        salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'pkg.refresh_db', [], null, true)
+                    }
+                }
+            }
+
+            args = 'export DEBIAN_FRONTEND=noninteractive; apt-get -y -q --allow-downgrades -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confold\" dist-upgrade;'
+
+            stage('Apply package upgrades on all targeted nodes') {
                 out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, args, commandKwargs)
                 salt.printSaltCommandResult(out)
             }
-            stage("Run salt states on all targeted nodes") {
-                salt.enforceState(pepperEnv, targetLiveAll, ['nova', 'neutron'])
-            }
-        } else {
-            stage("Run salt states on all targeted nodes") {
-                salt.enforceState(pepperEnv, targetLiveAll, ['nova', 'linux.system.repo'])
-            }
-        }
 
-        stage("Run Highstate on all targeted nodes") {
-            try {
-                salt.enforceHighstate(pepperEnv, targetLiveAll)
-            } catch (Exception er) {
-                common.errorMsg("Highstate was executed ${targetLiveAll} but something failed. Please check it and fix it accordingly.")
-            }
-        }
+            if(openvswitch != null) {
+                args = "sudo /usr/share/openvswitch/scripts/ovs-ctl start"
 
-    } catch (Throwable e) {
-        // If there was an error or exception thrown, the build failed
-        currentBuild.result = "FAILURE"
-        currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
-        throw e
+                stage('Start ovs on all targeted nodes') {
+                    out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, args, commandKwargs)
+                    salt.printSaltCommandResult(out)
+                }
+                stage("Run salt states on all targeted nodes") {
+                    salt.enforceState(pepperEnv, targetLiveAll, ['nova', 'neutron'])
+                }
+            } else {
+                stage("Run salt states on all targeted nodes") {
+                    salt.enforceState(pepperEnv, targetLiveAll, ['nova', 'linux.system.repo'])
+                }
+            }
+
+            stage("Run Highstate on all targeted nodes") {
+                try {
+                    salt.enforceHighstate(pepperEnv, targetLiveAll)
+                } catch (Exception er) {
+                    common.errorMsg("Highstate was executed on ${targetLiveAll} but something failed. Please check it and fix it accordingly.")
+                }
+            }
+
+        } catch (Throwable e) {
+            // If there was an error or exception thrown, the build failed
+            currentBuild.result = "FAILURE"
+            currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+            throw e
+        }
     }
 }
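
openstack-compute-upgrade.groovy rolls the upgrade out in rings: the dist-upgrade is simulated on a test subset, applied to a small live sample, and only pushed to all targeted nodes after a manual input gate that is itself limited to two hours so a pending approval cannot consume the 12-hour pipeline budget. A short sketch of the subset selection under the same assumptions; pickSubset is a hypothetical name, the subList/join logic mirrors the stages above, and the empty-parameter fallback follows the test-subset selection.

    // Sketch only: pickSubset is a hypothetical helper, not part of this change.
    def pickSubset(List minions, String sizeParam) {
        // An empty size parameter means "use every discovered minion".
        def subset = sizeParam ? minions.subList(0, Integer.valueOf(sizeParam)) : minions
        return subset.join(' or ')
    }

    // Illustrative use, mirroring the stages above (TARGET_SUBSET_LIVE is a job parameter):
    //   def targetLiveSubset = pickSubset(minions, TARGET_SUBSET_LIVE)
    //   def targetLiveAll    = minions.join(' or ')
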
 
diff --git a/openstack-control-upgrade.groovy b/openstack-control-upgrade.groovy
index 0c13b05..31e214d 100644
--- a/openstack-control-upgrade.groovy
+++ b/openstack-control-upgrade.groovy
@@ -16,407 +16,569 @@
 def python = new com.mirantis.mk.Python()
 
 def pepperEnv = "pepperEnv"
+timeout(time: 12, unit: 'HOURS') {
+    node() {
 
-node() {
+        stage('Setup virtualenv for Pepper') {
+                python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+        }
 
-    stage('Setup virtualenv for Pepper') {
-            python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
-    }
-
-    if (STAGE_TEST_UPGRADE.toBoolean() == true) {
-        stage('Test upgrade') {
-
-            try {
-                salt.enforceState(pepperEnv, 'I@salt:master', 'reclass')
-            } catch (Exception e) {
-                common.warningMsg("Some parts of Reclass state failed. The most probable reasons were uncommited changes. We should continue to run")
-            }
-
-            try {
-                salt.runSaltProcessStep(pepperEnv, '*', 'saltutil.refresh_pillar', [], null, true)
-            } catch (Exception e) {
-                common.warningMsg("No response from some minions. We should continue to run")
-            }
-
-            try {
-                salt.runSaltProcessStep(pepperEnv, '*', 'saltutil.sync_all', [], null, true)
-            } catch (Exception e) {
-                common.warningMsg("No response from some minions. We should continue to run")
-            }
-
-            def domain = salt.getPillar(pepperEnv, 'I@salt:master', '_param:cluster_domain')
-            domain = domain['return'][0].values()[0]
-
-            // read backupninja variable
-            _pillar = salt.getPillar(pepperEnv, 'I@backupninja:client', '_param:backupninja_backup_host')
-            def backupninja_backup_host = _pillar['return'][0].values()[0]
-
-            if (SKIP_VM_RELAUNCH.toBoolean() == false) {
-
-                _pillar = salt.getGrain(pepperEnv, 'I@salt:control', 'id')
-                def kvm01 = _pillar['return'][0].values()[0].values()[0]
-                print(_pillar)
-                print(kvm01)
-
-                _pillar = salt.getPillar(pepperEnv, "${kvm01}", 'salt:control:cluster:internal:node:upg01:provider')
-                def upgNodeProvider = _pillar['return'][0].values()[0]
-                print(_pillar)
-                print(upgNodeProvider)
-
-                salt.runSaltProcessStep(pepperEnv, "${upgNodeProvider}", 'virt.destroy', ["upg01.${domain}"], null, true)
-                salt.runSaltProcessStep(pepperEnv, "${upgNodeProvider}", 'virt.undefine', ["upg01.${domain}"], null, true)
+        if (STAGE_TEST_UPGRADE.toBoolean() == true) {
+            stage('Test upgrade') {
 
                 try {
-                    salt.cmdRun(pepperEnv, 'I@salt:master', "salt-key -d upg01.${domain} -y")
+                    salt.enforceState(pepperEnv, 'I@salt:master', 'reclass')
                 } catch (Exception e) {
-                    common.warningMsg("upg01.${domain} does not match any accepted, unaccepted or rejected keys. The key did not exist yet or was already removed. We should continue to run")
+                    common.warningMsg("Some parts of Reclass state failed. The most probable reason is uncommitted changes. We should continue to run")
                 }
 
-                // salt 'kvm02*' state.sls salt.control
-                salt.enforceState(pepperEnv, "${upgNodeProvider}", 'salt.control')
-                // wait until upg node is registered in salt-key
-                salt.minionPresent(pepperEnv, 'I@salt:master', 'upg01')
-                // salt '*' saltutil.refresh_pillar
-                salt.runSaltProcessStep(pepperEnv, 'upg*', 'saltutil.refresh_pillar', [], null, true)
-                // salt '*' saltutil.sync_all
-                salt.runSaltProcessStep(pepperEnv, 'upg*', 'saltutil.sync_all', [], null, true)
-            }
+                try {
+                    salt.runSaltProcessStep(pepperEnv, '*', 'saltutil.refresh_pillar', [], null, true)
+                } catch (Exception e) {
+                    common.warningMsg("No response from some minions. We should continue to run")
+                }
 
-            // salt "upg*" state.sls linux,openssh,salt.minion,ntp,rsyslog
-            try {
-                salt.enforceState(pepperEnv, 'upg*', ['linux', 'openssh'])
-            } catch (Exception e) {
-                common.warningMsg(e)
-            }
-            try {
-                salt.runSaltProcessStep(master, 'upg*', 'state.sls', ["salt.minion"], null, true, 60)
-            } catch (Exception e) {
-                common.warningMsg(e)
-            }
-            try {
-                salt.enforceState(pepperEnv, 'upg*', ['ntp', 'rsyslog'])
-            } catch (Exception e) {
-                common.warningMsg(e)
-            }
-            salt.enforceState(pepperEnv, 'upg*', ['linux', 'openssh', 'salt.minion', 'ntp', 'rsyslog'])
+                try {
+                    salt.runSaltProcessStep(pepperEnv, '*', 'saltutil.sync_all', [], null, true)
+                } catch (Exception e) {
+                    common.warningMsg("No response from some minions. We should continue to run")
+                }
 
-            // salt "upg*" state.sls rabbitmq
-            salt.enforceState(pepperEnv, 'upg*', ['rabbitmq', 'memcached'])
-            try {
-                salt.enforceState(pepperEnv, 'I@backupninja:client', ['openssh.client', 'salt.minion'])
-            } catch (Exception e) {
-                common.warningMsg('salt-minion was restarted. We should continue to run')
-            }
-            try {
-                salt.enforceState(pepperEnv, 'I@backupninja:server', ['salt.minion'])
-            } catch (Exception e) {
-                common.warningMsg('salt-minion was restarted. We should continue to run')
-            }
-            // salt '*' state.apply salt.minion.grains
-            //salt.enforceState(pepperEnv, '*', 'salt.minion.grains')
-            // salt -C 'I@backupninja:server' state.sls backupninja
-            salt.enforceState(pepperEnv, 'I@backupninja:server', 'backupninja')
-            // salt -C 'I@backupninja:client' state.sls backupninja
-            salt.enforceState(pepperEnv, 'I@backupninja:client', 'backupninja')
-            salt.runSaltProcessStep(pepperEnv, 'I@backupninja:client', 'ssh.rm_known_host', ["root", "${backupninja_backup_host}"], null, true)
-            try {
-                salt.cmdRun(pepperEnv, 'I@backupninja:client', "arp -d ${backupninja_backup_host}")
-            } catch (Exception e) {
-                common.warningMsg('The ARP entry does not exist. We should continue to run.')
-            }
-            salt.runSaltProcessStep(pepperEnv, 'I@backupninja:client', 'ssh.set_known_host', ["root", "${backupninja_backup_host}"], null, true)
-            salt.cmdRun(pepperEnv, 'I@backupninja:client', 'backupninja -n --run /etc/backup.d/101.mysql')
-            salt.cmdRun(pepperEnv, 'I@backupninja:client', 'backupninja -n --run /etc/backup.d/200.backup.rsync > /tmp/backupninjalog')
+                def domain = salt.getPillar(pepperEnv, 'I@salt:master', '_param:cluster_domain')
+                domain = domain['return'][0].values()[0]
 
-            salt.enforceState(pepperEnv, 'I@xtrabackup:server', 'xtrabackup')
-            salt.enforceState(pepperEnv, 'I@xtrabackup:client', 'openssh.client')
-            salt.cmdRun(pepperEnv, 'I@xtrabackup:client', "su root -c 'salt-call state.sls xtrabackup'")
-            salt.cmdRun(pepperEnv, 'I@xtrabackup:client', "su root -c '/usr/local/bin/innobackupex-runner.sh'")
+                // read backupninja variable
+                _pillar = salt.getPillar(pepperEnv, 'I@backupninja:client', '_param:backupninja_backup_host')
+                def backupninja_backup_host = _pillar['return'][0].values()[0]
 
-            def databases = salt.cmdRun(pepperEnv, 'I@mysql:client','salt-call mysql.db_list | grep upgrade | awk \'/-/ {print \$2}\'')
-            if(databases && databases != ""){
-                def databasesList = databases['return'][0].values()[0].trim().tokenize("\n")
-                for( i = 0; i < databasesList.size(); i++){
-                    if(databasesList[i].toLowerCase().contains('upgrade')){
-                        salt.runSaltProcessStep(pepperEnv, 'I@mysql:client', 'mysql.db_remove', ["${databasesList[i]}"], null, true)
-                        common.warningMsg("removing database ${databasesList[i]}")
-                        salt.runSaltProcessStep(pepperEnv, 'I@mysql:client', 'file.remove', ["/root/mysql/flags/${databasesList[i]}-installed"], null, true)
+                if (SKIP_VM_RELAUNCH.toBoolean() == false) {
+
+                    _pillar = salt.getGrain(pepperEnv, 'I@salt:control', 'id')
+                    def kvm01 = _pillar['return'][0].values()[0].values()[0]
+                    print(_pillar)
+                    print(kvm01)
+
+                    _pillar = salt.getPillar(pepperEnv, "${kvm01}", 'salt:control:cluster:internal:node:upg01:provider')
+                    def upgNodeProvider = _pillar['return'][0].values()[0]
+                    print(_pillar)
+                    print(upgNodeProvider)
+
+                    salt.runSaltProcessStep(pepperEnv, "${upgNodeProvider}", 'virt.destroy', ["upg01.${domain}"], null, true)
+                    salt.runSaltProcessStep(pepperEnv, "${upgNodeProvider}", 'virt.undefine', ["upg01.${domain}"], null, true)
+
+                    try {
+                        salt.cmdRun(pepperEnv, 'I@salt:master', "salt-key -d upg01.${domain} -y")
+                    } catch (Exception e) {
+                        common.warningMsg("upg01.${domain} does not match any accepted, unaccepted or rejected keys. The key did not exist yet or was already removed. We should continue to run")
                     }
+
+                    // salt 'kvm02*' state.sls salt.control
+                    salt.enforceState(pepperEnv, "${upgNodeProvider}", 'salt.control')
+                    // wait until upg node is registered in salt-key
+                    salt.minionPresent(pepperEnv, 'I@salt:master', 'upg01')
+                    // salt '*' saltutil.refresh_pillar
+                    salt.runSaltProcessStep(pepperEnv, 'upg*', 'saltutil.refresh_pillar', [], null, true)
+                    // salt '*' saltutil.sync_all
+                    salt.runSaltProcessStep(pepperEnv, 'upg*', 'saltutil.sync_all', [], null, true)
                 }
-                salt.enforceState(pepperEnv, 'I@mysql:client', 'mysql.client')
-            }else{
-                common.errorMsg("No _upgrade databases were returned")
-            }
 
-            try {
+                // salt "upg*" state.sls linux,openssh,salt.minion,ntp,rsyslog
+                try {
+                    salt.enforceState(pepperEnv, 'upg*', ['linux', 'openssh'])
+                } catch (Exception e) {
+                    common.warningMsg(e)
+                }
+                try {
+                    salt.runSaltProcessStep(pepperEnv, 'upg*', 'state.sls', ["salt.minion"], null, true, 60)
+                } catch (Exception e) {
+                    common.warningMsg(e)
+                }
+                try {
+                    salt.enforceState(pepperEnv, 'upg*', ['ntp', 'rsyslog'])
+                } catch (Exception e) {
+                    common.warningMsg(e)
+                }
+                salt.enforceState(pepperEnv, 'upg*', ['linux', 'openssh', 'salt.minion', 'ntp', 'rsyslog'])
+
+                // salt "upg*" state.sls rabbitmq
+                salt.enforceState(pepperEnv, 'upg*', ['rabbitmq', 'memcached'])
+                try {
+                    salt.enforceState(pepperEnv, 'I@backupninja:client', ['openssh.client', 'salt.minion'])
+                } catch (Exception e) {
+                    common.warningMsg('salt-minion was restarted. We should continue to run')
+                }
+                try {
+                    salt.enforceState(pepperEnv, 'I@backupninja:server', ['salt.minion'])
+                } catch (Exception e) {
+                    common.warningMsg('salt-minion was restarted. We should continue to run')
+                }
+                // salt '*' state.apply salt.minion.grains
+                //salt.enforceState(pepperEnv, '*', 'salt.minion.grains')
+                // salt -C 'I@backupninja:server' state.sls backupninja
+                salt.enforceState(pepperEnv, 'I@backupninja:server', 'backupninja')
+                // salt -C 'I@backupninja:client' state.sls backupninja
+                salt.enforceState(pepperEnv, 'I@backupninja:client', 'backupninja')
+                salt.runSaltProcessStep(pepperEnv, 'I@backupninja:client', 'ssh.rm_known_host', ["root", "${backupninja_backup_host}"], null, true)
+                try {
+                    salt.cmdRun(pepperEnv, 'I@backupninja:client', "arp -d ${backupninja_backup_host}")
+                } catch (Exception e) {
+                    common.warningMsg('The ARP entry does not exist. We should continue to run.')
+                }
+                salt.runSaltProcessStep(pepperEnv, 'I@backupninja:client', 'ssh.set_known_host', ["root", "${backupninja_backup_host}"], null, true)
+                salt.cmdRun(pepperEnv, 'I@backupninja:client', 'backupninja -n --run /etc/backup.d/101.mysql')
+                salt.cmdRun(pepperEnv, 'I@backupninja:client', 'backupninja -n --run /etc/backup.d/200.backup.rsync > /tmp/backupninjalog')
+
+                salt.enforceState(pepperEnv, 'I@xtrabackup:server', 'xtrabackup')
+                salt.enforceState(pepperEnv, 'I@xtrabackup:client', 'openssh.client')
+                salt.cmdRun(pepperEnv, 'I@xtrabackup:client', "su root -c 'salt-call state.sls xtrabackup'")
+                salt.cmdRun(pepperEnv, 'I@xtrabackup:client', "su root -c '/usr/local/bin/innobackupex-runner.sh'")
+
+                def databases = salt.cmdRun(pepperEnv, 'I@mysql:client','salt-call mysql.db_list | grep upgrade | awk \'/-/ {print \$2}\'')
+                if(databases && databases != ""){
+                    def databasesList = databases['return'][0].values()[0].trim().tokenize("\n")
+                    for( i = 0; i < databasesList.size(); i++){
+                        if(databasesList[i].toLowerCase().contains('upgrade')){
+                            salt.runSaltProcessStep(pepperEnv, 'I@mysql:client', 'mysql.db_remove', ["${databasesList[i]}"], null, true)
+                            common.warningMsg("removing database ${databasesList[i]}")
+                            salt.runSaltProcessStep(pepperEnv, 'I@mysql:client', 'file.remove', ["/root/mysql/flags/${databasesList[i]}-installed"], null, true)
+                        }
+                    }
+                    salt.enforceState(pepperEnv, 'I@mysql:client', 'mysql.client')
+                }else{
+                    common.errorMsg("No _upgrade databases were returned")
+                }
+
+                try {
+                    salt.enforceState(pepperEnv, 'upg*', 'keystone.server')
+                    salt.runSaltProcessStep(pepperEnv, 'upg*', 'service.restart', ['apache2'], null, true)
+                } catch (Exception e) {
+                    common.warningMsg('Restarting Apache2')
+                    salt.runSaltProcessStep(pepperEnv, 'upg*', 'service.restart', ['apache2'], null, true)
+                }
+                try {
+                    salt.enforceState(pepperEnv, 'upg*', 'keystone.client')
+                } catch (Exception e) {
+                    common.warningMsg('running keystone.client state again')
+                    salt.enforceState(pepperEnv, 'upg*', 'keystone.client')
+                }
+                try {
+                    salt.enforceState(pepperEnv, 'upg*', 'glance')
+                } catch (Exception e) {
+                    common.warningMsg('running glance state again')
+                    salt.enforceState(pepperEnv, 'upg*', 'glance')
+                }
                 salt.enforceState(pepperEnv, 'upg*', 'keystone.server')
-                salt.runSaltProcessStep(pepperEnv, 'upg*', 'service.restart', ['apache2'], null, true)
-            } catch (Exception e) {
-                common.warningMsg('Restarting Apache2')
-                salt.runSaltProcessStep(pepperEnv, 'upg*', 'service.restart', ['apache2'], null, true)
-            }
-            try {
-                salt.enforceState(pepperEnv, 'upg*', 'keystone.client')
-            } catch (Exception e) {
-                common.warningMsg('running keystone.client state again')
-                salt.enforceState(pepperEnv, 'upg*', 'keystone.client')
-            }
-            try {
-                salt.enforceState(pepperEnv, 'upg*', 'glance')
-            } catch (Exception e) {
-                common.warningMsg('running glance state again')
-                salt.enforceState(pepperEnv, 'upg*', 'glance')
-            }
-            salt.enforceState(pepperEnv, 'upg*', 'keystone.server')
-            try {
-                salt.enforceState(pepperEnv, 'upg*', 'nova')
-            } catch (Exception e) {
-                common.warningMsg('running nova state again')
-                salt.enforceState(pepperEnv, 'upg*', 'nova')
-            }
-            // run nova state again as sometimes nova does not enforce itself for some reason
-            try {
-                salt.enforceState(pepperEnv, 'upg*', 'nova')
-            } catch (Exception e) {
-                common.warningMsg('running nova state again')
-                salt.enforceState(pepperEnv, 'upg*', 'nova')
-            }
-            try {
-                salt.enforceState(pepperEnv, 'upg*', 'cinder')
-            } catch (Exception e) {
-                common.warningMsg('running cinder state again')
-                salt.enforceState(pepperEnv, 'upg*', 'cinder')
-            }
-            try {
-                salt.enforceState(pepperEnv, 'upg*', 'neutron')
-            } catch (Exception e) {
-                common.warningMsg('running neutron state again')
-                salt.enforceState(pepperEnv, 'upg*', 'neutron')
-            }
-            try {
-                salt.enforceState(pepperEnv, 'upg*', 'heat')
-            } catch (Exception e) {
-                common.warningMsg('running heat state again')
-                salt.enforceState(pepperEnv, 'upg*', 'heat')
-            }
-            salt.cmdRun(pepperEnv, 'upg01*', '. /root/keystonercv3; openstack service list; openstack image list; openstack flavor list; openstack compute service list; openstack server list; openstack network list; openstack volume list; openstack orchestration service list')
+                try {
+                    salt.enforceState(pepperEnv, 'upg*', 'nova')
+                } catch (Exception e) {
+                    common.warningMsg('running nova state again')
+                    salt.enforceState(pepperEnv, 'upg*', 'nova')
+                }
+                // run nova state again as sometimes nova does not enforce itself for some reason
+                try {
+                    salt.enforceState(pepperEnv, 'upg*', 'nova')
+                } catch (Exception e) {
+                    common.warningMsg('running nova state again')
+                    salt.enforceState(pepperEnv, 'upg*', 'nova')
+                }
+                try {
+                    salt.enforceState(pepperEnv, 'upg*', 'cinder')
+                } catch (Exception e) {
+                    common.warningMsg('running cinder state again')
+                    salt.enforceState(pepperEnv, 'upg*', 'cinder')
+                }
+                try {
+                    salt.enforceState(pepperEnv, 'upg*', 'neutron')
+                } catch (Exception e) {
+                    common.warningMsg('running neutron state again')
+                    salt.enforceState(pepperEnv, 'upg*', 'neutron')
+                }
+                try {
+                    salt.enforceState(pepperEnv, 'upg*', 'heat')
+                } catch (Exception e) {
+                    common.warningMsg('running heat state again')
+                    salt.enforceState(pepperEnv, 'upg*', 'heat')
+                }
+                salt.cmdRun(pepperEnv, 'upg01*', '. /root/keystonercv3; openstack service list; openstack image list; openstack flavor list; openstack compute service list; openstack server list; openstack network list; openstack volume list; openstack orchestration service list')
 
-            if (STAGE_TEST_UPGRADE.toBoolean() == true && STAGE_REAL_UPGRADE.toBoolean() == true) {
-                stage('Ask for manual confirmation') {
-                    input message: "Do you want to continue with upgrade?"
+                if (STAGE_TEST_UPGRADE.toBoolean() == true && STAGE_REAL_UPGRADE.toBoolean() == true) {
+                    stage('Ask for manual confirmation') {
+                        input message: "Do you want to continue with the upgrade?"
+                    }
                 }
             }
         }
-    }
 
-    if (STAGE_REAL_UPGRADE.toBoolean() == true) {
-        stage('Real upgrade') {
-            // # actual upgrade
+        if (STAGE_REAL_UPGRADE.toBoolean() == true) {
+            stage('Real upgrade') {
+                // # actual upgrade
 
-            def domain = salt.getPillar(pepperEnv, 'I@salt:master', '_param:cluster_domain')
-            domain = domain['return'][0].values()[0]
+                def domain = salt.getPillar(pepperEnv, 'I@salt:master', '_param:cluster_domain')
+                domain = domain['return'][0].values()[0]
 
-            _pillar = salt.getGrain(pepperEnv, 'I@salt:control', 'id')
-            kvm01 = _pillar['return'][0].values()[0].values()[0]
-            print(_pillar)
-            print(kvm01)
+                _pillar = salt.getGrain(pepperEnv, 'I@salt:control', 'id')
+                kvm01 = _pillar['return'][0].values()[0].values()[0]
+                print(_pillar)
+                print(kvm01)
 
-            def errorOccured = false
+                def errorOccured = false
 
-            def proxy_general_target = ""
-            def proxy_target_hosts = salt.getMinions(pepperEnv, 'I@horizon:server').sort()
-            def node_count = 1
+                def proxy_general_target = ""
+                def proxy_target_hosts = salt.getMinions(pepperEnv, 'I@horizon:server').sort()
+                def node_count = 1
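+                // relaunch the proxy (prx) VMs: destroy each domain, back up its disk image, undefine it and remove its salt-key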
 
-            for (t in proxy_target_hosts) {
-                def target = t.split("\\.")[0]
-                proxy_general_target = target.replaceAll('\\d+$', "")
-                if (SKIP_VM_RELAUNCH.toBoolean() == true) {
-                    break
+                for (t in proxy_target_hosts) {
+                    def target = t.split("\\.")[0]
+                    proxy_general_target = target.replaceAll('\\d+$', "")
+                    if (SKIP_VM_RELAUNCH.toBoolean() == true) {
+                        break
+                    }
+                    _pillar = salt.getPillar(pepperEnv, "${kvm01}", "salt:control:cluster:internal:node:prx0${node_count}:provider")
+                    def nodeProvider = _pillar['return'][0].values()[0]
+                    salt.runSaltProcessStep(pepperEnv, "${nodeProvider}", 'virt.destroy', ["${target}.${domain}"], null, true)
+                    sleep(2)
+                    try {
+                        salt.cmdRun(pepperEnv, "${nodeProvider}", "[ ! -f /root/${target}.${domain}.qcow2.bak ] && cp /var/lib/libvirt/images/${target}.${domain}/system.qcow2 ./${target}.${domain}.qcow2.bak")
+                    } catch (Exception e) {
+                        common.warningMsg('File already exists')
+                    }
+                    salt.runSaltProcessStep(pepperEnv, "${nodeProvider}", 'virt.undefine', ["${target}.${domain}"], null, true)
+                    try {
+                        salt.cmdRun(pepperEnv, 'I@salt:master', "salt-key -d ${target}.${domain} -y")
+                    } catch (Exception e) {
+                        common.warningMsg('The minion key does not match any accepted, unaccepted or rejected keys. It was probably already removed, continuing.')
+                    }
+                    node_count++
                 }
-                _pillar = salt.getPillar(pepperEnv, "${kvm01}", "salt:control:cluster:internal:node:prx0${node_count}:provider")
-                def nodeProvider = _pillar['return'][0].values()[0]
-                salt.runSaltProcessStep(pepperEnv, "${nodeProvider}", 'virt.destroy', ["${target}.${domain}"], null, true)
-                sleep(2)
-                try {
-                    salt.cmdRun(pepperEnv, "${nodeProvider}", "[ ! -f /root/${target}.${domain}.qcow2.bak ] && cp /var/lib/libvirt/images/${target}.${domain}/system.qcow2 ./${target}.${domain}.qcow2.bak")
-                } catch (Exception e) {
-                    common.warningMsg('File already exists')
-                }
-                salt.runSaltProcessStep(pepperEnv, "${nodeProvider}", 'virt.undefine', ["${target}.${domain}"], null, true)
-                try {
-                    salt.cmdRun(pepperEnv, 'I@salt:master', "salt-key -d ${target}.${domain} -y")
-                } catch (Exception e) {
-                    common.warningMsg('does not match any accepted, unaccepted or rejected keys. They were probably already removed. We should continue to run')
-                }
-                node_count++
-            }
-            def control_general_target = ""
-            def control_target_hosts = salt.getMinions(pepperEnv, 'I@keystone:server and not upg*').sort()
-            node_count = 1
-
-            for (t in control_target_hosts) {
-                def target = t.split("\\.")[0]
-                control_general_target = target.replaceAll('\\d+$', "")
-                if (SKIP_VM_RELAUNCH.toBoolean() == true) {
-                    break
-                }
-                _pillar = salt.getPillar(pepperEnv, "${kvm01}", "salt:control:cluster:internal:node:ctl0${node_count}:provider")
-                def nodeProvider = _pillar['return'][0].values()[0]
-                salt.runSaltProcessStep(pepperEnv, "${nodeProvider}", 'virt.destroy', ["${target}.${domain}"], null, true)
-                sleep(2)
-                try {
-                    salt.cmdRun(pepperEnv, "${nodeProvider}", "[ ! -f /root/${target}.${domain}.qcow2.bak ] && cp /var/lib/libvirt/images/${target}.${domain}/system.qcow2 ./${target}.${domain}.qcow2.bak")
-                } catch (Exception e) {
-                    common.warningMsg('File already exists')
-                }
-                salt.runSaltProcessStep(pepperEnv, "${nodeProvider}", 'virt.undefine', ["${target}.${domain}"], null, true)
-                try {
-                    salt.cmdRun(pepperEnv, 'I@salt:master', "salt-key -d ${target}.${domain} -y")
-                } catch (Exception e) {
-                    common.warningMsg('does not match any accepted, unaccepted or rejected keys. They were probably already removed. We should continue to run')
-                }
-                node_count++
-            }
-
-            if (SKIP_VM_RELAUNCH.toBoolean() == false) {
-                salt.cmdRun(pepperEnv, 'I@xtrabackup:client', "su root -c '/usr/local/bin/innobackupex-runner.sh'")
-
-                // salt 'kvm*' state.sls salt.control
-                salt.enforceState(pepperEnv, 'I@salt:control', 'salt.control')
+                def control_general_target = ""
+                def control_target_hosts = salt.getMinions(pepperEnv, 'I@keystone:server and not upg*').sort()
+                node_count = 1
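+                // apply the same relaunch procedure to the control plane (ctl) VMs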
 
                 for (t in control_target_hosts) {
                     def target = t.split("\\.")[0]
-                    // wait until ctl and prx nodes are registered in salt-key
-                    salt.minionPresent(pepperEnv, 'I@salt:master', "${target}")
+                    control_general_target = target.replaceAll('\\d+$', "")
+                    if (SKIP_VM_RELAUNCH.toBoolean() == true) {
+                        break
+                    }
+                    _pillar = salt.getPillar(pepperEnv, "${kvm01}", "salt:control:cluster:internal:node:ctl0${node_count}:provider")
+                    def nodeProvider = _pillar['return'][0].values()[0]
+                    salt.runSaltProcessStep(pepperEnv, "${nodeProvider}", 'virt.destroy', ["${target}.${domain}"], null, true)
+                    sleep(2)
+                    try {
+                        salt.cmdRun(pepperEnv, "${nodeProvider}", "[ ! -f /root/${target}.${domain}.qcow2.bak ] && cp /var/lib/libvirt/images/${target}.${domain}/system.qcow2 ./${target}.${domain}.qcow2.bak")
+                    } catch (Exception e) {
+                        common.warningMsg('File already exists')
+                    }
+                    salt.runSaltProcessStep(pepperEnv, "${nodeProvider}", 'virt.undefine', ["${target}.${domain}"], null, true)
+                    try {
+                        salt.cmdRun(pepperEnv, 'I@salt:master', "salt-key -d ${target}.${domain} -y")
+                    } catch (Exception e) {
+                        common.warningMsg('The minion key does not match any accepted, unaccepted or rejected keys. It was probably already removed, continuing.')
+                    }
+                    node_count++
                 }
+
+                if (SKIP_VM_RELAUNCH.toBoolean() == false) {
+                    salt.cmdRun(pepperEnv, 'I@xtrabackup:client', "su root -c '/usr/local/bin/innobackupex-runner.sh'")
+
+                    // salt 'kvm*' state.sls salt.control
+                    salt.enforceState(pepperEnv, 'I@salt:control', 'salt.control')
+
+                    for (t in control_target_hosts) {
+                        def target = t.split("\\.")[0]
+                        // wait until ctl and prx nodes are registered in salt-key
+                        salt.minionPresent(pepperEnv, 'I@salt:master', "${target}")
+                    }
+                    for (t in proxy_target_hosts) {
+                        def target = t.split("\\.")[0]
+                        // wait until ctl and prx nodes are registered in salt-key
+                        salt.minionPresent(pepperEnv, 'I@salt:master', "${target}")
+                    }
+
+                    // salt '*' saltutil.refresh_pillar
+                    salt.runSaltProcessStep(pepperEnv, '*', 'saltutil.refresh_pillar', [], null, true)
+                    // salt '*' saltutil.sync_all
+                    salt.runSaltProcessStep(pepperEnv, '*', 'saltutil.sync_all', [], null, true)
+                }
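+                // apply the base states one by one first (warnings only), then enforce them together below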
+                try {
+                    salt.enforceState(pepperEnv, "${proxy_general_target}* or ${control_general_target}*", ['linux', 'openssh'])
+                } catch (Exception e) {
+                    common.warningMsg(e)
+                }
+                try {
+                    salt.runSaltProcessStep(pepperEnv, "${proxy_general_target}* or ${control_general_target}*", 'state.sls', ["salt.minion"], null, true, 60)
+                } catch (Exception e) {
+                    common.warningMsg(e)
+                }
+                try {
+                    salt.enforceState(pepperEnv, "${proxy_general_target}* or ${control_general_target}*", ['ntp', 'rsyslog'])
+                } catch (Exception e) {
+                    common.warningMsg(e)
+                }
+
+                salt.enforceState(pepperEnv, "${proxy_general_target}* or ${control_general_target}*", ['linux', 'openssh', 'salt.minion', 'ntp', 'rsyslog'])
+
+                // salt 'ctl*' state.sls keepalived
+                // salt 'ctl*' state.sls haproxy
+                salt.enforceState(pepperEnv, "${control_general_target}*", ['keepalived', 'haproxy'])
+                // salt 'ctl*' service.restart rsyslog
+                salt.runSaltProcessStep(pepperEnv, "${control_general_target}*", 'service.restart', ['rsyslog'], null, true)
+                // salt "ctl*" state.sls memcached
+                // salt "ctl*" state.sls keystone.server
+                try {
+                    try {
+                        salt.enforceState(pepperEnv, "${control_general_target}*", ['memcached', 'keystone.server'])
+                        salt.runSaltProcessStep(pepperEnv, "${control_general_target}*", 'service.restart', ['apache2'], null, true)
+                    } catch (Exception e) {
+                        common.warningMsg('Restarting Apache2 and enforcing keystone.server state again')
+                        salt.runSaltProcessStep(pepperEnv, "${control_general_target}*", 'service.restart', ['apache2'], null, true)
+                        salt.enforceState(pepperEnv, "${control_general_target}*", 'keystone.server')
+                    }
+                    // salt 'ctl01*' state.sls keystone.client
+                    try {
+                        salt.enforceState(pepperEnv, "I@keystone:client and ${control_general_target}*", 'keystone.client')
+                    } catch (Exception e) {
+                        common.warningMsg('running keystone.client state again')
+                        salt.enforceState(pepperEnv, "I@keystone:client and ${control_general_target}*", 'keystone.client')
+                    }
+                    try {
+                        salt.enforceState(pepperEnv, "${control_general_target}*", 'glance')
+                    } catch (Exception e) {
+                        common.warningMsg('running glance state again')
+                        salt.enforceState(pepperEnv, "${control_general_target}*", 'glance')
+                    }
+                    // salt 'ctl*' state.sls glusterfs.client
+                    salt.enforceState(pepperEnv, "${control_general_target}*", 'glusterfs.client')
+                    // salt 'ctl*' state.sls keystone.server
+                    salt.enforceState(pepperEnv, "${control_general_target}*", 'keystone.server')
+                    // salt 'ctl*' state.sls nova
+                    try {
+                        salt.enforceState(pepperEnv, "${control_general_target}*", 'nova')
+                    } catch (Exception e) {
+                        common.warningMsg('running nova state again')
+                        salt.enforceState(pepperEnv, "${control_general_target}*", 'nova')
+                    }
+                    // salt 'ctl*' state.sls cinder
+                    try {
+                        salt.enforceState(pepperEnv, "${control_general_target}*", 'cinder')
+                    } catch (Exception e) {
+                        common.warningMsg('running cinder state again')
+                        salt.enforceState(pepperEnv, "${control_general_target}*", 'cinder')
+                    }
+                    try {
+                        salt.enforceState(pepperEnv, "${control_general_target}*", 'neutron')
+                    } catch (Exception e) {
+                        common.warningMsg('running neutron state again')
+                        salt.enforceState(pepperEnv, "${control_general_target}*", 'neutron')
+                    }
+                    // salt 'ctl*' state.sls heat
+                    try {
+                        salt.enforceState(pepperEnv, "${control_general_target}*", 'heat')
+                    } catch (Exception e) {
+                        common.warningMsg('running heat state again')
+                        salt.enforceState(pepperEnv, "${control_general_target}*", 'heat')
+                    }
+
+                } catch (Exception e) {
+                    errorOccured = true
+                    input message: "Some states that require syncdb failed. Please check the reason and click proceed only if you want to restore database into it's pre-upgrade state. Otherwise, click abort."
+                    common.warningMsg('Some states that require syncdb failed. Restoring production databases')
+
+                    // database restore section
+                    try {
+                        salt.runSaltProcessStep(pepperEnv, 'I@galera:slave', 'service.stop', ['mysql'], null, true)
+                    } catch (Exception er) {
+                        common.warningMsg('Mysql service already stopped')
+                    }
+                    try {
+                        salt.runSaltProcessStep(pepperEnv, 'I@galera:master', 'service.stop', ['mysql'], null, true)
+                    } catch (Exception er) {
+                        common.warningMsg('Mysql service already stopped')
+                    }
+                    try {
+                        salt.cmdRun(pepperEnv, 'I@galera:slave', "rm /var/lib/mysql/ib_logfile*")
+                    } catch (Exception er) {
+                        common.warningMsg('Files are not present')
+                    }
+                    try {
+                        salt.cmdRun(pepperEnv, 'I@galera:master', "mkdir /root/mysql/mysql.bak")
+                    } catch (Exception er) {
+                        common.warningMsg('Directory already exists')
+                    }
+                    try {
+                        salt.cmdRun(pepperEnv, 'I@galera:master', "rm -rf /root/mysql/mysql.bak/*")
+                    } catch (Exception er) {
+                        common.warningMsg('Directory already empty')
+                    }
+                    try {
+                        salt.cmdRun(pepperEnv, 'I@galera:master', "mv /var/lib/mysql/* /root/mysql/mysql.bak")
+                    } catch (Exception er) {
+                        common.warningMsg('Files were already moved')
+                    }
+                    try {
+                        salt.runSaltProcessStep(pepperEnv, 'I@galera:master', 'file.remove', ["/var/lib/mysql/.galera_bootstrap"], null, true)
+                    } catch (Exception er) {
+                        common.warningMsg('File is not present')
+                    }
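+                    // bootstrap a fresh galera cluster on the master by pointing wsrep_cluster_address at an empty gcomm:// list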
+                    salt.cmdRun(pepperEnv, 'I@galera:master', "sed -i '/gcomm/c\\wsrep_cluster_address=\"gcomm://\"' /etc/mysql/my.cnf")
+                    _pillar = salt.getPillar(pepperEnv, "I@galera:master", 'xtrabackup:client:backup_dir')
+                    backup_dir = _pillar['return'][0].values()[0]
+                    if(backup_dir == null || backup_dir.isEmpty()) { backup_dir='/var/backups/mysql/xtrabackup' }
+                    print(backup_dir)
+                    salt.runSaltProcessStep(pepperEnv, 'I@galera:master', 'file.remove', ["${backup_dir}/dbrestored"], null, true)
+                    salt.cmdRun(pepperEnv, 'I@xtrabackup:client', "su root -c 'salt-call state.sls xtrabackup'")
+                    salt.runSaltProcessStep(pepperEnv, 'I@galera:master', 'service.start', ['mysql'], null, true)
+
+                    // wait until mysql service on galera master is up
+                    salt.commandStatus(pepperEnv, 'I@galera:master', 'service mysql status', 'running')
+
+                    salt.runSaltProcessStep(pepperEnv, 'I@galera:slave', 'service.start', ['mysql'], null, true)
+                    //
+
+                    common.errorMsg("Stage Real control upgrade failed")
+                }
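+                // the remaining post-upgrade steps run only if the control plane states above succeeded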
+                if(!errorOccured){
+
+                    ceph = null
+
+                    try {
+                        ceph = salt.cmdRun(pepperEnv, "${control_general_target}*", "salt-call grains.item roles | grep ceph.client")
+
+                    } catch (Exception er) {
+                        common.infoMsg("Ceph is not used")
+                    }
+
+                    if(ceph != null) {
+                        try {
+                            salt.enforceState(pepperEnv, "${control_general_target}*", 'ceph.client')
+                        } catch (Exception er) {
+                            common.warningMsg("Ceph client state on controllers failed. Please fix it manually")
+                        }
+                    }
+
+                    // salt 'cmp*' cmd.run 'service nova-compute restart'
+                    salt.runSaltProcessStep(pepperEnv, 'I@nova:compute', 'service.restart', ['nova-compute'], null, true)
+                    salt.runSaltProcessStep(pepperEnv, "${control_general_target}*", 'service.restart', ['nova-conductor'], null, true)
+                    salt.runSaltProcessStep(pepperEnv, "${control_general_target}*", 'service.restart', ['nova-scheduler'], null, true)
+
+
+                    // salt 'prx*' state.sls linux,openssh,salt.minion,ntp,rsyslog
+                    // salt 'ctl*' state.sls keepalived
+                    // salt 'prx*' state.sls keepalived
+                    salt.enforceState(pepperEnv, "${proxy_general_target}*", 'keepalived')
+                    // salt 'prx*' state.sls horizon
+                    salt.enforceState(pepperEnv, "${proxy_general_target}*", 'horizon')
+                    // salt 'prx*' state.sls nginx
+                    salt.enforceState(pepperEnv, "${proxy_general_target}*", 'nginx')
+                    // salt "prx*" state.sls memcached
+                    salt.enforceState(pepperEnv, "${proxy_general_target}*", 'memcached')
+
+                    try {
+                        salt.enforceHighstate(pepperEnv, "${control_general_target}*")
+                    } catch (Exception er) {
+                        common.errorMsg("Highstate was executed on controller nodes but something failed. Please check it and fix it accordingly.")
+                    }
+
+                    try {
+                        salt.enforceHighstate(pepperEnv, "${proxy_general_target}*")
+                    } catch (Exception er) {
+                        common.errorMsg("Highstate was executed on proxy nodes but something failed. Please check it and fix it accordingly.")
+                    }
+
+                    salt.cmdRun(pepperEnv, "${control_general_target}01*", '. /root/keystonercv3; openstack service list; openstack image list; openstack flavor list; openstack compute service list; openstack server list; openstack network list; openstack volume list; openstack orchestration service list')
+                }
+            }
+
+            if (STAGE_REAL_UPGRADE.toBoolean() == true && STAGE_ROLLBACK_UPGRADE.toBoolean() == true) {
+                stage('Ask for manual confirmation') {
+                    input message: "Please verify if the control upgrade was successful. If it did not succeed, in the worst scenario, you can click YES to continue with control-upgrade-rollback. Do you want to continue with the rollback?"
+                }
+            }
+        }
+
+        if (STAGE_ROLLBACK_UPGRADE.toBoolean() == true) {
+            stage('Rollback upgrade') {
+
+                stage('Ask for manual confirmation') {
+                    input message: "Do you really want to continue with the rollback?"
+                }
+
+                def domain = salt.getPillar(pepperEnv, 'I@salt:master', '_param:cluster_domain')
+                domain = domain['return'][0].values()[0]
+
+                _pillar = salt.getGrain(pepperEnv, 'I@salt:control', 'id')
+                kvm01 = _pillar['return'][0].values()[0].values()[0]
+                print(_pillar)
+                print(kvm01)
+
+                def proxy_general_target = ""
+                def proxy_target_hosts = salt.getMinions(pepperEnv, 'I@horizon:server')
+                def node_count = 1
+
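+                // rollback: destroy each proxy VM and copy its pre-upgrade qcow2 backup back over the system disk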
                 for (t in proxy_target_hosts) {
                     def target = t.split("\\.")[0]
-                    // wait until ctl and prx nodes are registered in salt-key
-                    salt.minionPresent(pepperEnv, 'I@salt:master', "${target}")
+                    proxy_general_target = target.replaceAll('\\d+$', "")
+                    if (SKIP_VM_RELAUNCH.toBoolean() == true) {
+                        break
+                    }
+                    _pillar = salt.getPillar(pepperEnv, "${kvm01}", "salt:control:cluster:internal:node:prx0${node_count}:provider")
+                    def nodeProvider = _pillar['return'][0].values()[0]
+                    salt.runSaltProcessStep(pepperEnv, "${nodeProvider}", 'virt.destroy', ["${target}.${domain}"], null, true)
+                    sleep(2)
+                    salt.runSaltProcessStep(pepperEnv, "${nodeProvider}", 'file.copy', ["/root/${target}.${domain}.qcow2.bak", "/var/lib/libvirt/images/${target}.${domain}/system.qcow2"], null, true)
+                    try {
+                        salt.cmdRun(pepperEnv, 'I@salt:master', "salt-key -d ${target}.${domain} -y")
+                    } catch (Exception e) {
+                        common.warningMsg('The minion key does not match any accepted, unaccepted or rejected keys. It was probably already removed, continuing.')
+                    }
+                    node_count++
                 }
+                def control_general_target = ""
+                def control_target_hosts = salt.getMinions(pepperEnv, 'I@keystone:server').sort()
+                node_count = 1
 
-                // salt '*' saltutil.refresh_pillar
-                salt.runSaltProcessStep(pepperEnv, '*', 'saltutil.refresh_pillar', [], null, true)
-                // salt '*' saltutil.sync_all
-                salt.runSaltProcessStep(pepperEnv, '*', 'saltutil.sync_all', [], null, true)
-            }
-            try {
-                salt.enforceState(pepperEnv, "${proxy_general_target}* or ${control_general_target}*", ['linux', 'openssh'])
-            } catch (Exception e) {
-                common.warningMsg(e)
-            }
-            try {
-                salt.runSaltProcessStep(master, "${proxy_general_target}* or ${control_general_target}*", 'state.sls', ["salt.minion"], null, true, 60)
-            } catch (Exception e) {
-                common.warningMsg(e)
-            }
-            try {
-                salt.enforceState(pepperEnv, "${proxy_general_target}* or ${control_general_target}*", ['ntp', 'rsyslog'])
-            } catch (Exception e) {
-                common.warningMsg(e)
-            }
-
-            salt.enforceState(pepperEnv, "${proxy_general_target}* or ${control_general_target}*", ['linux', 'openssh', 'salt.minion', 'ntp', 'rsyslog'])
-
-            // salt 'ctl*' state.sls keepalived
-            // salt 'ctl*' state.sls haproxy
-            salt.enforceState(pepperEnv, "${control_general_target}*", ['keepalived', 'haproxy'])
-            // salt 'ctl*' service.restart rsyslog
-            salt.runSaltProcessStep(pepperEnv, "${control_general_target}*", 'service.restart', ['rsyslog'], null, true)
-            // salt "ctl*" state.sls memcached
-            // salt "ctl*" state.sls keystone.server
-            try {
-                try {
-                    salt.enforceState(pepperEnv, "${control_general_target}*", ['memcached', 'keystone.server'])
-                    salt.runSaltProcessStep(pepperEnv, "${control_general_target}*", 'service.restart', ['apache2'], null, true)
-                } catch (Exception e) {
-                    common.warningMsg('Restarting Apache2 and enforcing keystone.server state again')
-                    salt.runSaltProcessStep(pepperEnv, "${control_general_target}*", 'service.restart', ['apache2'], null, true)
-                    salt.enforceState(pepperEnv, "${control_general_target}*", 'keystone.server')
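+                // restore the control plane (ctl) VMs from their pre-upgrade backups in the same way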
+                for (t in control_target_hosts) {
+                    def target = t.split("\\.")[0]
+                    control_general_target = target.replaceAll('\\d+$', "")
+                    if (SKIP_VM_RELAUNCH.toBoolean() == true) {
+                        break
+                    }
+                    _pillar = salt.getPillar(pepperEnv, "${kvm01}", "salt:control:cluster:internal:node:ctl0${node_count}:provider")
+                    def nodeProvider = _pillar['return'][0].values()[0]
+                    salt.runSaltProcessStep(pepperEnv, "${nodeProvider}", 'virt.destroy', ["${target}.${domain}"], null, true)
+                    sleep(2)
+                    salt.runSaltProcessStep(pepperEnv, "${nodeProvider}", 'file.copy', ["/root/${target}.${domain}.qcow2.bak", "/var/lib/libvirt/images/${target}.${domain}/system.qcow2"], null, true)
+                    try {
+                        salt.cmdRun(pepperEnv, 'I@salt:master', "salt-key -d ${target}.${domain} -y")
+                    } catch (Exception e) {
+                        common.warningMsg('The minion key does not match any accepted, unaccepted or rejected keys. It was probably already removed, continuing.')
+                    }
+                    node_count++
                 }
-                // salt 'ctl01*' state.sls keystone.client
-                try {
-                    salt.enforceState(pepperEnv, "I@keystone:client and ${control_general_target}*", 'keystone.client')
-                } catch (Exception e) {
-                    common.warningMsg('running keystone.client state again')
-                    salt.enforceState(pepperEnv, "I@keystone:client and ${control_general_target}*", 'keystone.client')
-                }
-                try {
-                    salt.enforceState(pepperEnv, "${control_general_target}*", 'glance')
-                } catch (Exception e) {
-                    common.warningMsg('running glance state again')
-                    salt.enforceState(pepperEnv, "${control_general_target}*", 'glance')
-                }                // salt 'ctl*' state.sls glusterfs.client
-                salt.enforceState(pepperEnv, "${control_general_target}*", 'glusterfs.client')
-                // salt 'ctl*' state.sls keystone.server
-                salt.enforceState(pepperEnv, "${control_general_target}*", 'keystone.server')
-                // salt 'ctl*' state.sls nova
-                try {
-                    salt.enforceState(pepperEnv, "${control_general_target}*", 'nova')
-                } catch (Exception e) {
-                    common.warningMsg('running nova state again')
-                    salt.enforceState(pepperEnv, "${control_general_target}*", 'nova')
-                }
-                // salt 'ctl*' state.sls cinder
-                try {
-                    salt.enforceState(pepperEnv, "${control_general_target}*", 'cinder')
-                } catch (Exception e) {
-                    common.warningMsg('running cinder state again')
-                    salt.enforceState(pepperEnv, "${control_general_target}*", 'cinder')
-                }
-                try {
-                    salt.enforceState(pepperEnv, "${control_general_target}*", 'neutron')
-                } catch (Exception e) {
-                    common.warningMsg('running neutron state again')
-                    salt.enforceState(pepperEnv, "${control_general_target}*", 'neutron')
-                }
-                // salt 'ctl*' state.sls heat
-                try {
-                    salt.enforceState(pepperEnv, "${control_general_target}*", 'heat')
-                } catch (Exception e) {
-                    common.warningMsg('running heat state again')
-                    salt.enforceState(pepperEnv, "${control_general_target}*", 'heat')
-                }
-
-            } catch (Exception e) {
-                errorOccured = true
-                input message: "Some states that require syncdb failed. Please check the reason and click proceed only if you want to restore database into it's pre-upgrade state. Otherwise, click abort."
-                common.warningMsg('Some states that require syncdb failed. Restoring production databases')
 
                 // database restore section
                 try {
                     salt.runSaltProcessStep(pepperEnv, 'I@galera:slave', 'service.stop', ['mysql'], null, true)
-                } catch (Exception er) {
+                } catch (Exception e) {
                     common.warningMsg('Mysql service already stopped')
                 }
                 try {
                     salt.runSaltProcessStep(pepperEnv, 'I@galera:master', 'service.stop', ['mysql'], null, true)
-                } catch (Exception er) {
+                } catch (Exception e) {
                     common.warningMsg('Mysql service already stopped')
                 }
                 try {
                     salt.cmdRun(pepperEnv, 'I@galera:slave', "rm /var/lib/mysql/ib_logfile*")
-                } catch (Exception er) {
+                } catch (Exception e) {
                     common.warningMsg('Files are not present')
                 }
                 try {
-                    salt.cmdRun(pepperEnv, 'I@galera:master', "mkdir /root/mysql/mysql.bak")
-                } catch (Exception er) {
-                    common.warningMsg('Directory already exists')
-                }
-                try {
-                    salt.cmdRun(pepperEnv, 'I@galera:master', "rm -rf /root/mysql/mysql.bak/*")
-                } catch (Exception er) {
+                    salt.cmdRun(pepperEnv, 'I@galera:master', "rm -rf /var/lib/mysql/*")
+                } catch (Exception e) {
                     common.warningMsg('Directory already empty')
                 }
                 try {
-                    salt.cmdRun(pepperEnv, 'I@galera:master', "mv /var/lib/mysql/* /root/mysql/mysql.bak")
-                } catch (Exception er) {
-                    common.warningMsg('Files were already moved')
-                }
-                try {
                     salt.runSaltProcessStep(pepperEnv, 'I@galera:master', 'file.remove', ["/var/lib/mysql/.galera_bootstrap"], null, true)
-                } catch (Exception er) {
+                } catch (Exception e) {
                     common.warningMsg('File is not present')
                 }
                 salt.cmdRun(pepperEnv, 'I@galera:master', "sed -i '/gcomm/c\\wsrep_cluster_address=\"gcomm://\"' /etc/mysql/my.cnf")
@@ -434,201 +596,40 @@
                 salt.runSaltProcessStep(pepperEnv, 'I@galera:slave', 'service.start', ['mysql'], null, true)
                 //
 
-                common.errorMsg("Stage Real control upgrade failed")
-            }
-            if(!errorOccured){
-
-                ceph = null
-
-                try {
-                    ceph = salt.cmdRun(pepperEnv, "${control_general_target}*", "salt-call grains.item roles | grep ceph.client")
-
-                } catch (Exception er) {
-                    common.infoMsg("Ceph is not used")
+                node_count = 1
+                for (t in control_target_hosts) {
+                    def target = t.split("\\.")[0]
+                    _pillar = salt.getPillar(pepperEnv, "${kvm01}", "salt:control:cluster:internal:node:ctl0${node_count}:provider")
+                    def nodeProvider = _pillar['return'][0].values()[0]
+                    salt.runSaltProcessStep(pepperEnv, "${nodeProvider}", 'virt.start', ["${target}.${domain}"], null, true)
+                    node_count++
                 }
-
-                if(ceph != null) {
-                    try {
-                        salt.enforceState(pepperEnv, "${control_general_target}*", 'ceph.client')
-                    } catch (Exception er) {
-                        common.warningMsg("Ceph client state on controllers failed. Please fix it manually")
-                    }
+                node_count = 1
+                for (t in proxy_target_hosts) {
+                    def target = t.split("\\.")[0]
+                    _pillar = salt.getPillar(pepperEnv, "${kvm01}", "salt:control:cluster:internal:node:prx0${node_count}:provider")
+                    def nodeProvider = _pillar['return'][0].values()[0]
+                    salt.runSaltProcessStep(pepperEnv, "${nodeProvider}", 'virt.start', ["${target}.${domain}"], null, true)
+                    node_count++
                 }
 
                 // salt 'cmp*' cmd.run 'service nova-compute restart'
                 salt.runSaltProcessStep(pepperEnv, 'I@nova:compute', 'service.restart', ['nova-compute'], null, true)
+
+                for (t in control_target_hosts) {
+                    def target = t.split("\\.")[0]
+                    salt.minionPresent(pepperEnv, 'I@salt:master', "${target}")
+                }
+                for (t in proxy_target_hosts) {
+                    def target = t.split("\\.")[0]
+                    salt.minionPresent(pepperEnv, 'I@salt:master', "${target}")
+                }
+
                 salt.runSaltProcessStep(pepperEnv, "${control_general_target}*", 'service.restart', ['nova-conductor'], null, true)
                 salt.runSaltProcessStep(pepperEnv, "${control_general_target}*", 'service.restart', ['nova-scheduler'], null, true)
 
-
-                // salt 'prx*' state.sls linux,openssh,salt.minion,ntp,rsyslog
-                // salt 'ctl*' state.sls keepalived
-                // salt 'prx*' state.sls keepalived
-                salt.enforceState(pepperEnv, "${proxy_general_target}*", 'keepalived')
-                // salt 'prx*' state.sls horizon
-                salt.enforceState(pepperEnv, "${proxy_general_target}*", 'horizon')
-                // salt 'prx*' state.sls nginx
-                salt.enforceState(pepperEnv, "${proxy_general_target}*", 'nginx')
-                // salt "prx*" state.sls memcached
-                salt.enforceState(pepperEnv, "${proxy_general_target}*", 'memcached')
-
-                try {
-                    salt.enforceHighstate(pepperEnv, "${control_general_target}*")
-                } catch (Exception er) {
-                    common.errorMsg("Highstate was executed on controller nodes but something failed. Please check it and fix it accordingly.")
-                }
-
-                try {
-                    salt.enforceHighstate(pepperEnv, "${proxy_general_target}*")
-                } catch (Exception er) {
-                    common.errorMsg("Highstate was executed on proxy nodes but something failed. Please check it and fix it accordingly.")
-                }
-
-                salt.cmdRun(pepperEnv, "${control_general_target}01*", '. /root/keystonercv3; openstack service list; openstack image list; openstack flavor list; openstack compute service list; openstack server list; openstack network list; openstack volume list; openstack orchestration service list')
-            }
-        }
-
-        if (STAGE_REAL_UPGRADE.toBoolean() == true && STAGE_ROLLBACK_UPGRADE.toBoolean() == true) {
-            stage('Ask for manual confirmation') {
-                input message: "Please verify if the control upgrade was successful. If it did not succeed, in the worst scenario, you can click YES to continue with control-upgrade-rollback. Do you want to continue with the rollback?"
+                salt.cmdRun(pepperEnv, "${control_general_target}01*", '. /root/keystonerc; nova service-list; glance image-list; nova flavor-list; nova hypervisor-list; nova list; neutron net-list; cinder list; heat service-list')
             }
         }
     }
-
-    if (STAGE_ROLLBACK_UPGRADE.toBoolean() == true) {
-        stage('Rollback upgrade') {
-
-            stage('Ask for manual confirmation') {
-                input message: "Do you really want to continue with the rollback?"
-            }
-
-            def domain = salt.getPillar(pepperEnv, 'I@salt:master', '_param:cluster_domain')
-            domain = domain['return'][0].values()[0]
-
-            _pillar = salt.getGrain(pepperEnv, 'I@salt:control', 'id')
-            kvm01 = _pillar['return'][0].values()[0].values()[0]
-            print(_pillar)
-            print(kvm01)
-
-            def proxy_general_target = ""
-            def proxy_target_hosts = salt.getMinions(pepperEnv, 'I@horizon:server')
-            def node_count = 1
-
-            for (t in proxy_target_hosts) {
-                def target = t.split("\\.")[0]
-                proxy_general_target = target.replaceAll('\\d+$', "")
-                if (SKIP_VM_RELAUNCH.toBoolean() == true) {
-                    break
-                }
-                _pillar = salt.getPillar(pepperEnv, "${kvm01}", "salt:control:cluster:internal:node:prx0${node_count}:provider")
-                def nodeProvider = _pillar['return'][0].values()[0]
-                salt.runSaltProcessStep(pepperEnv, "${nodeProvider}", 'virt.destroy', ["${target}.${domain}"], null, true)
-                sleep(2)
-                salt.runSaltProcessStep(pepperEnv, "${nodeProvider}", 'file.copy', ["/root/${target}.${domain}.qcow2.bak", "/var/lib/libvirt/images/${target}.${domain}/system.qcow2"], null, true)
-                try {
-                    salt.cmdRun(pepperEnv, 'I@salt:master', "salt-key -d ${target}.${domain} -y")
-                } catch (Exception e) {
-                    common.warningMsg('does not match any accepted, unaccepted or rejected keys. They were probably already removed. We should continue to run')
-                }
-                node_count++
-            }
-            def control_general_target = ""
-            def control_target_hosts = salt.getMinions(pepperEnv, 'I@keystone:server').sort()
-            node_count = 1
-
-            for (t in control_target_hosts) {
-                def target = t.split("\\.")[0]
-                control_general_target = target.replaceAll('\\d+$', "")
-                if (SKIP_VM_RELAUNCH.toBoolean() == true) {
-                    break
-                }
-                _pillar = salt.getPillar(pepperEnv, "${kvm01}", "salt:control:cluster:internal:node:ctl0${node_count}:provider")
-                def nodeProvider = _pillar['return'][0].values()[0]
-                salt.runSaltProcessStep(pepperEnv, "${nodeProvider}", 'virt.destroy', ["${target}.${domain}"], null, true)
-                sleep(2)
-                salt.runSaltProcessStep(pepperEnv, "${nodeProvider}", 'file.copy', ["/root/${target}.${domain}.qcow2.bak", "/var/lib/libvirt/images/${target}.${domain}/system.qcow2"], null, true)
-                try {
-                    salt.cmdRun(pepperEnv, 'I@salt:master', "salt-key -d ${target}.${domain} -y")
-                } catch (Exception e) {
-                    common.warningMsg('does not match any accepted, unaccepted or rejected keys. They were probably already removed. We should continue to run')
-                }
-                node_count++
-            }
-
-            // database restore section
-            try {
-                salt.runSaltProcessStep(pepperEnv, 'I@galera:slave', 'service.stop', ['mysql'], null, true)
-            } catch (Exception e) {
-                common.warningMsg('Mysql service already stopped')
-            }
-            try {
-                salt.runSaltProcessStep(pepperEnv, 'I@galera:master', 'service.stop', ['mysql'], null, true)
-            } catch (Exception e) {
-                common.warningMsg('Mysql service already stopped')
-            }
-            try {
-                salt.cmdRun(pepperEnv, 'I@galera:slave', "rm /var/lib/mysql/ib_logfile*")
-            } catch (Exception e) {
-                common.warningMsg('Files are not present')
-            }
-            try {
-                salt.cmdRun(pepperEnv, 'I@galera:master', "rm -rf /var/lib/mysql/*")
-            } catch (Exception e) {
-                common.warningMsg('Directory already empty')
-            }
-            try {
-                salt.runSaltProcessStep(pepperEnv, 'I@galera:master', 'file.remove', ["/var/lib/mysql/.galera_bootstrap"], null, true)
-            } catch (Exception e) {
-                common.warningMsg('File is not present')
-            }
-            salt.cmdRun(pepperEnv, 'I@galera:master', "sed -i '/gcomm/c\\wsrep_cluster_address=\"gcomm://\"' /etc/mysql/my.cnf")
-            _pillar = salt.getPillar(pepperEnv, "I@galera:master", 'xtrabackup:client:backup_dir')
-            backup_dir = _pillar['return'][0].values()[0]
-            if(backup_dir == null || backup_dir.isEmpty()) { backup_dir='/var/backups/mysql/xtrabackup' }
-            print(backup_dir)
-            salt.runSaltProcessStep(pepperEnv, 'I@galera:master', 'file.remove', ["${backup_dir}/dbrestored"], null, true)
-            salt.cmdRun(pepperEnv, 'I@xtrabackup:client', "su root -c 'salt-call state.sls xtrabackup'")
-            salt.runSaltProcessStep(pepperEnv, 'I@galera:master', 'service.start', ['mysql'], null, true)
-
-            // wait until mysql service on galera master is up
-            salt.commandStatus(pepperEnv, 'I@galera:master', 'service mysql status', 'running')
-
-            salt.runSaltProcessStep(pepperEnv, 'I@galera:slave', 'service.start', ['mysql'], null, true)
-            //
-
-            node_count = 1
-            for (t in control_target_hosts) {
-                def target = t.split("\\.")[0]
-                _pillar = salt.getPillar(pepperEnv, "${kvm01}", "salt:control:cluster:internal:node:ctl0${node_count}:provider")
-                def nodeProvider = _pillar['return'][0].values()[0]
-                salt.runSaltProcessStep(pepperEnv, "${nodeProvider}", 'virt.start', ["${target}.${domain}"], null, true)
-                node_count++
-            }
-            node_count = 1
-            for (t in proxy_target_hosts) {
-                def target = t.split("\\.")[0]
-                _pillar = salt.getPillar(pepperEnv, "${kvm01}", "salt:control:cluster:internal:node:prx0${node_count}:provider")
-                def nodeProvider = _pillar['return'][0].values()[0]
-                salt.runSaltProcessStep(pepperEnv, "${nodeProvider}", 'virt.start', ["${target}.${domain}"], null, true)
-                node_count++
-            }
-
-            // salt 'cmp*' cmd.run 'service nova-compute restart'
-            salt.runSaltProcessStep(pepperEnv, 'I@nova:compute', 'service.restart', ['nova-compute'], null, true)
-
-            for (t in control_target_hosts) {
-                def target = t.split("\\.")[0]
-                salt.minionPresent(pepperEnv, 'I@salt:master', "${target}")
-            }
-            for (t in proxy_target_hosts) {
-                def target = t.split("\\.")[0]
-                salt.minionPresent(pepperEnv, 'I@salt:master', "${target}")
-            }
-
-            salt.runSaltProcessStep(pepperEnv, "${control_general_target}*", 'service.restart', ['nova-conductor'], null, true)
-            salt.runSaltProcessStep(pepperEnv, "${control_general_target}*", 'service.restart', ['nova-scheduler'], null, true)
-
-            salt.cmdRun(pepperEnv, "${control_general_target}01*", '. /root/keystonerc; nova service-list; glance image-list; nova flavor-list; nova hypervisor-list; nova list; neutron net-list; cinder list; heat service-list')
-        }
-    }
-}
+}
\ No newline at end of file
diff --git a/ovs-gateway-upgrade.groovy b/ovs-gateway-upgrade.groovy
index 1b21618..8d131c8 100644
--- a/ovs-gateway-upgrade.groovy
+++ b/ovs-gateway-upgrade.groovy
@@ -24,133 +24,133 @@
 def command
 def commandKwargs
 def probe = 1
+timeout(time: 12, unit: 'HOURS') {
+    node() {
+        try {
 
-node() {
-    try {
-
-        stage('Setup virtualenv for Pepper') {
-            python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
-        }
-
-        stage('List target servers') {
-            minions = salt.getMinions(pepperEnv, TARGET_SERVERS)
-
-            if (minions.isEmpty()) {
-                throw new Exception("No minion was targeted")
+            stage('Setup virtualenv for Pepper') {
+                python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
             }
 
-            if (TARGET_SUBSET_TEST != "") {
-                targetTestSubset = minions.subList(0, Integer.valueOf(TARGET_SUBSET_TEST)).join(' or ')
-            } else {
-                targetTestSubset = minions.join(' or ')
+            stage('List target servers') {
+                minions = salt.getMinions(pepperEnv, TARGET_SERVERS)
+
+                if (minions.isEmpty()) {
+                    throw new Exception("No minion was targeted")
+                }
+
+                if (TARGET_SUBSET_TEST != "") {
+                    targetTestSubset = minions.subList(0, Integer.valueOf(TARGET_SUBSET_TEST)).join(' or ')
+                } else {
+                    targetTestSubset = minions.join(' or ')
+                }
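+                // derive the targeting expressions: a test subset for the dry run, a live sample for the first real upgrade and the full node list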
+                targetLiveSubset = minions.subList(0, Integer.valueOf(TARGET_SUBSET_LIVE)).join(' or ')
+                targetTestSubsetProbe = minions.subList(0, probe).join(' or ')
+                targetLiveSubsetProbe = minions.subList(0, probe).join(' or ')
+
+                targetLiveAll = minions.join(' or ')
+                common.infoMsg("Found nodes: ${targetLiveAll}")
+                common.infoMsg("Selected test nodes: ${targetTestSubset}")
+                common.infoMsg("Selected sample nodes: ${targetLiveSubset}")
             }
-            targetLiveSubset = minions.subList(0, Integer.valueOf(TARGET_SUBSET_LIVE)).join(' or ')
-            targetTestSubsetProbe = minions.subList(0, probe).join(' or ')
-            targetLiveSubsetProbe = minions.subList(0, probe).join(' or ')
-
-            targetLiveAll = minions.join(' or ')
-            common.infoMsg("Found nodes: ${targetLiveAll}")
-            common.infoMsg("Selected test nodes: ${targetTestSubset}")
-            common.infoMsg("Selected sample nodes: ${targetLiveSubset}")
-        }
 
 
-        stage("Add new repos on test nodes") {
-            salt.enforceState(pepperEnv, targetTestSubset, 'linux.system.repo')
-        }
-
-        stage("List package upgrades") {
-            salt.runSaltProcessStep(pepperEnv, targetTestSubset, 'pkg.list_upgrades', [], null, true)
-        }
-
-        stage('Confirm upgrade on sample nodes') {
-            input message: "Please verify the list of packages that you want to be upgraded. Do you want to continue with upgrade?"
-        }
-
-        stage("Add new repos on sample nodes") {
-            salt.enforceState(pepperEnv, targetLiveSubset, 'linux.system.repo')
-        }
-
-        args = "apt-get -y -s -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confold\" dist-upgrade"
-
-        stage('Test upgrade on sample') {
-            try {
-                salt.cmdRun(pepperEnv, targetLiveSubset, args)
-            } catch (Exception er) {
-                print(er)
+            stage("Add new repos on test nodes") {
+                salt.enforceState(pepperEnv, targetTestSubset, 'linux.system.repo')
             }
-        }
 
-        stage('Confirm upgrade on sample') {
-            input message: "Please verify if there are packages that it wants to downgrade. If so, execute apt-cache policy on them and verify if everything is fine. Do you want to continue with upgrade?"
-        }
-
-        command = "cmd.run"
-        args = 'export DEBIAN_FRONTEND=noninteractive; apt-get -y -q --allow-downgrades -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confold\" dist-upgrade;'
-
-        stage('Apply package upgrades on sample') {
-            out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, args, commandKwargs)
-            salt.printSaltCommandResult(out)
-        }
-
-        args = "sudo /usr/share/openvswitch/scripts/ovs-ctl start"
-
-        stage('Start ovs on sample nodes') {
-            out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, args, commandKwargs)
-            salt.printSaltCommandResult(out)
-        }
-        stage("Run Neutron state on sample nodes") {
-            salt.enforceState(pepperEnv, targetLiveSubset, ['neutron'])
-        }
-
-        stage("Run Highstate on sample nodes") {
-            try {
-                salt.enforceHighstate(pepperEnv, targetLiveSubset)
-            } catch (Exception er) {
-                common.errorMsg("Highstate was executed on ${targetLiveSubset} but something failed. Please check it and fix it accordingly.")
+            stage("List package upgrades") {
+                salt.runSaltProcessStep(pepperEnv, targetTestSubset, 'pkg.list_upgrades', [], null, true)
             }
-        }
 
-        stage('Confirm upgrade on all targeted nodes') {
-            timeout(time: 2, unit: 'HOURS') {
-               input message: "Verify that the upgraded sample nodes are working correctly. If so, do you want to approve live upgrade on ${targetLiveAll} nodes?"
+            stage('Confirm upgrade on sample nodes') {
+                input message: "Please verify the list of packages that you want to be upgraded. Do you want to continue with upgrade?"
             }
-        }
 
-        stage("Add new repos on all targeted nodes") {
-            salt.enforceState(pepperEnv, targetLiveAll, 'linux.system.repo')
-        }
-
-        args = 'export DEBIAN_FRONTEND=noninteractive; apt-get -y -q --allow-downgrades -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confold\" dist-upgrade;'
-
-        stage('Apply package upgrades on all targeted nodes') {
-            out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, args, commandKwargs)
-            salt.printSaltCommandResult(out)
-        }
-
-        args = "sudo /usr/share/openvswitch/scripts/ovs-ctl start"
-
-        stage('Start ovs on all targeted nodes') {
-            out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, args, commandKwargs)
-            salt.printSaltCommandResult(out)
-        }
-        stage("Run Neutron state on all targeted nodes") {
-            salt.enforceState(pepperEnv, targetLiveAll, ['neutron'])
-        }
-
-        stage("Run Highstate on all targeted nodes") {
-            try {
-                salt.enforceHighstate(pepperEnv, targetLiveAll)
-            } catch (Exception er) {
-                common.errorMsg("Highstate was executed ${targetLiveAll} but something failed. Please check it and fix it accordingly.")
+            stage("Add new repos on sample nodes") {
+                salt.enforceState(pepperEnv, targetLiveSubset, 'linux.system.repo')
             }
-        }
 
-    } catch (Throwable e) {
-        // If there was an error or exception thrown, the build failed
-        currentBuild.result = "FAILURE"
-        currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
-        throw e
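+            // dry run first: the -s flag makes apt-get only simulate the dist-upgrade so the result can be reviewed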
+            args = "apt-get -y -s -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confold\" dist-upgrade"
+
+            stage('Test upgrade on sample') {
+                try {
+                    salt.cmdRun(pepperEnv, targetLiveSubset, args)
+                } catch (Exception er) {
+                    print(er)
+                }
+            }
+
+            stage('Confirm upgrade on sample') {
+                input message: "Please verify if there are packages that it wants to downgrade. If so, execute apt-cache policy on them and verify if everything is fine. Do you want to continue with upgrade?"
+            }
+
+            command = "cmd.run"
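+            // the real upgrade command: non-interactive, allows downgrades and keeps existing config files on conflicts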
+            args = 'export DEBIAN_FRONTEND=noninteractive; apt-get -y -q --allow-downgrades -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confold\" dist-upgrade;'
+
+            stage('Apply package upgrades on sample') {
+                out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, args, commandKwargs)
+                salt.printSaltCommandResult(out)
+            }
+
+            args = "sudo /usr/share/openvswitch/scripts/ovs-ctl start"
+
+            stage('Start ovs on sample nodes') {
+                out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, args, commandKwargs)
+                salt.printSaltCommandResult(out)
+            }
+            stage("Run Neutron state on sample nodes") {
+                salt.enforceState(pepperEnv, targetLiveSubset, ['neutron'])
+            }
+
+            stage("Run Highstate on sample nodes") {
+                try {
+                    salt.enforceHighstate(pepperEnv, targetLiveSubset)
+                } catch (Exception er) {
+                    common.errorMsg("Highstate was executed on ${targetLiveSubset} but something failed. Please check it and fix it accordingly.")
+                }
+            }
+
+            stage('Confirm upgrade on all targeted nodes') {
+                timeout(time: 2, unit: 'HOURS') {
+                   input message: "Verify that the upgraded sample nodes are working correctly. If so, do you want to approve live upgrade on ${targetLiveAll} nodes?"
+                }
+            }
+
+            stage("Add new repos on all targeted nodes") {
+                salt.enforceState(pepperEnv, targetLiveAll, 'linux.system.repo')
+            }
+
+            args = 'export DEBIAN_FRONTEND=noninteractive; apt-get -y -q --allow-downgrades -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confold\" dist-upgrade;'
+
+            stage('Apply package upgrades on all targeted nodes') {
+                out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, args, commandKwargs)
+                salt.printSaltCommandResult(out)
+            }
+
+            args = "sudo /usr/share/openvswitch/scripts/ovs-ctl start"
+
+            stage('Start ovs on all targeted nodes') {
+                out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, args, commandKwargs)
+                salt.printSaltCommandResult(out)
+            }
+            stage("Run Neutron state on all targeted nodes") {
+                salt.enforceState(pepperEnv, targetLiveAll, ['neutron'])
+            }
+
+            stage("Run Highstate on all targeted nodes") {
+                try {
+                    salt.enforceHighstate(pepperEnv, targetLiveAll)
+                } catch (Exception er) {
+                    common.errorMsg("Highstate was executed ${targetLiveAll} but something failed. Please check it and fix it accordingly.")
+                }
+            }
+
+        } catch (Throwable e) {
+            // If there was an error or exception thrown, the build failed
+            currentBuild.result = "FAILURE"
+            currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+            throw e
+        }
     }
 }
-
diff --git a/release-mcp-version.groovy b/release-mcp-version.groovy
index 28b0bb4..b265944 100644
--- a/release-mcp-version.groovy
+++ b/release-mcp-version.groovy
@@ -67,42 +67,43 @@
         }
     }
 }
-
-node() {
-    try {
-        if(RELEASE_APTLY.toBoolean())
-        {
-            stage("Release Aptly"){
-                triggerAptlyPromoteJob(APTLY_URL, 'all', false, true, 'all', false, '(.*)/testing', APTLY_STORAGES, '{0}/stable')
-                triggerAptlyPromoteJob(APTLY_URL, 'all', false, true, 'all', false, '(.*)/stable', APTLY_STORAGES, '{0}/${MCP_VERSION}')
-            }
-        }
-        if(RELEASE_DOCKER.toBoolean())
-        {
-            stage("Release Docker"){
-                triggerDockerMirrorJob(DOCKER_CREDENTIALS, DOCKER_URL, MCP_VERSION, DOCKER_IMAGES)
-            }
-        }
-        if(RELEASE_GIT.toBoolean())
-        {
-            stage("Release Git"){
-                def repos = GIT_REPO_LIST.tokenize('\n')
-                def repoUrl, repoName, repoCommit, repoArray
-                for (repo in repos){
-                    if(repo.trim().indexOf(' ') == -1){
-                        throw new IllegalArgumentException("Wrong format of repository and commit input")
-                    }
-                    repoArray = repo.trim().tokenize(' ')
-                    repoName = repoArray[0]
-                    repoUrl = repoArray[1]
-                    repoCommit = repoArray[2]
-                    gitRepoAddTag(repoUrl, repoName, MCP_VERSION, GIT_CREDENTIALS, repoCommit)
+timeout(time: 12, unit: 'HOURS') {
+    node() {
+        try {
+            if(RELEASE_APTLY.toBoolean())
+            {
+                stage("Release Aptly"){
+                    triggerAptlyPromoteJob(APTLY_URL, 'all', false, true, 'all', false, '(.*)/testing', APTLY_STORAGES, '{0}/stable')
+                    triggerAptlyPromoteJob(APTLY_URL, 'all', false, true, 'all', false, '(.*)/stable', APTLY_STORAGES, '{0}/${MCP_VERSION}')
                 }
             }
+            if(RELEASE_DOCKER.toBoolean())
+            {
+                stage("Release Docker"){
+                    triggerDockerMirrorJob(DOCKER_CREDENTIALS, DOCKER_URL, MCP_VERSION, DOCKER_IMAGES)
+                }
+            }
+            if(RELEASE_GIT.toBoolean())
+            {
+                stage("Release Git"){
+                    def repos = GIT_REPO_LIST.tokenize('\n')
+                    def repoUrl, repoName, repoCommit, repoArray
+                    for (repo in repos){
+                        if(repo.trim().indexOf(' ') == -1){
+                            throw new IllegalArgumentException("Wrong format of repository and commit input")
+                        }
+                        repoArray = repo.trim().tokenize(' ')
+                        repoName = repoArray[0]
+                        repoUrl = repoArray[1]
+                        repoCommit = repoArray[2]
+                        gitRepoAddTag(repoUrl, repoName, MCP_VERSION, GIT_CREDENTIALS, repoCommit)
+                    }
+                }
+            }
+        } catch (Throwable e) {
+            // If there was an error or exception thrown, the build failed
+            currentBuild.result = "FAILURE"
+            throw e
         }
-    } catch (Throwable e) {
-        // If there was an error or exception thrown, the build failed
-        currentBuild.result = "FAILURE"
-        throw e
     }
 }
\ No newline at end of file
diff --git a/release-salt-formulas-pipeline.groovy b/release-salt-formulas-pipeline.groovy
index 4aaaec9..e3f00ad 100644
--- a/release-salt-formulas-pipeline.groovy
+++ b/release-salt-formulas-pipeline.groovy
@@ -1,32 +1,34 @@
 def common = new com.mirantis.mk.Common()
 def ssh = new com.mirantis.mk.Ssh()
-node() {
-  try{
-    stage("checkout") {
-      dir("src") {
-        ssh.prepareSshAgentKey(CREDENTIALS_ID)
-        ssh.ensureKnownHosts(SOURCE_URL)
-        git url: SOURCE_URL, branch: "master", credentialsId: CREDENTIALS_ID, poll: false
-        sh("git branch --set-upstream-to=origin/master")
-        ssh.agentSh("make update")
+timeout(time: 12, unit: 'HOURS') {
+  node() {
+    try{
+      stage("checkout") {
+        dir("src") {
+          ssh.prepareSshAgentKey(CREDENTIALS_ID)
+          ssh.ensureKnownHosts(SOURCE_URL)
+          git url: SOURCE_URL, branch: "master", credentialsId: CREDENTIALS_ID, poll: false
+          sh("git branch --set-upstream-to=origin/master")
+          ssh.agentSh("make update")
+        }
       }
-    }
-    stage("tag") {
-      dir("src/formulas") {
-        sh("for i in *; do cd \$i; git remote | grep gerrit || git remote add gerrit $GERRIT_BASE/\$i; git config user.name Jenkins; git config user.email autobuild@mirantis.com; git tag -m $TAG $TAG; cd ..; done")
+      stage("tag") {
+        dir("src/formulas") {
+          sh("for i in *; do cd \$i; git remote | grep gerrit || git remote add gerrit $GERRIT_BASE/\$i; git config user.name Jenkins; git config user.email autobuild@mirantis.com; git tag -m $TAG $TAG; cd ..; done")
+        }
       }
-    }
-    stage("push") {
-      dir("src/formulas") {
-        ssh.agentSh("mr --trust-all -j4 --force run git push gerrit $TAG")
+      stage("push") {
+        dir("src/formulas") {
+          ssh.agentSh("mr --trust-all -j4 --force run git push gerrit $TAG")
+        }
       }
+    } catch (Throwable e) {
+       // If there was an error or exception thrown, the build failed
+       currentBuild.result = "FAILURE"
+       currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+       throw e
+    } finally {
+       common.sendNotification(currentBuild.result,"",["slack"])
     }
-  } catch (Throwable e) {
-     // If there was an error or exception thrown, the build failed
-     currentBuild.result = "FAILURE"
-     currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
-     throw e
-  } finally {
-     common.sendNotification(currentBuild.result,"",["slack"])
   }
 }
diff --git a/restore-cassandra.groovy b/restore-cassandra.groovy
index 586940b..ffc035a 100644
--- a/restore-cassandra.groovy
+++ b/restore-cassandra.groovy
@@ -12,69 +12,70 @@
 def python = new com.mirantis.mk.Python()
 
 def pepperEnv = "pepperEnv"
+timeout(time: 12, unit: 'HOURS') {
+    node() {
 
-node() {
-
-    stage('Setup virtualenv for Pepper') {
-        python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
-    }
-
-    stage('Start restore') {
-        // # actual upgrade
-
-        stage('Ask for manual confirmation') {
-            input message: "Are you sure you have the correct backups ready? Do you really want to continue to restore Cassandra?"
-        }
-        // Cassandra restore section
-        try {
-            salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.stop', ['supervisor-database'], null, true)
-        } catch (Exception er) {
-            common.warningMsg('Supervisor-database service already stopped')
-        }
-        try {
-            salt.cmdRun(pepperEnv, 'I@opencontrail:control', "mkdir -p /root/cassandra/cassandra.bak")
-        } catch (Exception er) {
-            common.warningMsg('Directory already exists')
+        stage('Setup virtualenv for Pepper') {
+            python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
         }
 
-        try {
-            salt.cmdRun(pepperEnv, 'I@opencontrail:control', "mv /var/lib/cassandra/* /root/cassandra/cassandra.bak")
-        } catch (Exception er) {
-            common.warningMsg('Files were already moved')
+        stage('Start restore') {
+            // actual restore
+
+            stage('Ask for manual confirmation') {
+                input message: "Are you sure you have the correct backups ready? Do you really want to continue to restore Cassandra?"
+            }
+            // Cassandra restore section
+            try {
+                salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.stop', ['supervisor-database'], null, true)
+            } catch (Exception er) {
+                common.warningMsg('Supervisor-database service already stopped')
+            }
+            try {
+                salt.cmdRun(pepperEnv, 'I@opencontrail:control', "mkdir -p /root/cassandra/cassandra.bak")
+            } catch (Exception er) {
+                common.warningMsg('Directory already exists')
+            }
+
+            try {
+                salt.cmdRun(pepperEnv, 'I@opencontrail:control', "mv /var/lib/cassandra/* /root/cassandra/cassandra.bak")
+            } catch (Exception er) {
+                common.warningMsg('Files were already moved')
+            }
+            try {
+                salt.cmdRun(pepperEnv, 'I@opencontrail:control', "rm -rf /var/lib/cassandra/*")
+            } catch (Exception er) {
+                common.warningMsg('Directory already empty')
+            }
+
+            _pillar = salt.getPillar(pepperEnv, "I@cassandra:backup:client", 'cassandra:backup:backup_dir')
+            backup_dir = _pillar['return'][0].values()[0]
+            if(backup_dir == null || backup_dir.isEmpty()) { backup_dir='/var/backups/cassandra' }
+            print(backup_dir)
+            salt.runSaltProcessStep(pepperEnv, 'I@cassandra:backup:client', 'file.remove', ["${backup_dir}/dbrestored"], null, true)
+
+            salt.runSaltProcessStep(pepperEnv, 'I@cassandra:backup:client', 'service.start', ['supervisor-database'], null, true)
+
+            // wait until supervisor-database service is up
+            salt.commandStatus(pepperEnv, 'I@cassandra:backup:client', 'service supervisor-database status', 'running')
+            sleep(5)
+            // performs restore
+            salt.cmdRun(pepperEnv, 'I@cassandra:backup:client', "su root -c 'salt-call state.sls cassandra'")
+            salt.runSaltProcessStep(pepperEnv, 'I@cassandra:backup:client', 'system.reboot', null, null, true, 5)
+            salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control and not I@cassandra:backup:client', 'system.reboot', null, null, true, 5)
+
+            // wait until supervisor-database service is up
+            salt.commandStatus(pepperEnv, 'I@cassandra:backup:client', 'service supervisor-database status', 'running')
+            salt.commandStatus(pepperEnv, 'I@opencontrail:control and not I@cassandra:backup:client', 'service supervisor-database status', 'running')
+            sleep(5)
+
+            salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.restart', ['supervisor-database'], null, true)
+
+            // wait until contrail-status is up
+            salt.commandStatus(pepperEnv, 'I@opencontrail:control', "contrail-status | grep -v == | grep -v \'disabled on boot\' | grep -v nodemgr | grep -v active | grep -v backup", null, false)
+
+            salt.cmdRun(pepperEnv, 'I@opencontrail:control', "nodetool status")
+            salt.cmdRun(pepperEnv, 'I@opencontrail:control', "contrail-status")
         }
-        try {
-            salt.cmdRun(pepperEnv, 'I@opencontrail:control', "rm -rf /var/lib/cassandra/*")
-        } catch (Exception er) {
-            common.warningMsg('Directory already empty')
-        }
-
-        _pillar = salt.getPillar(pepperEnv, "I@cassandra:backup:client", 'cassandra:backup:backup_dir')
-        backup_dir = _pillar['return'][0].values()[0]
-        if(backup_dir == null || backup_dir.isEmpty()) { backup_dir='/var/backups/cassandra' }
-        print(backup_dir)
-        salt.runSaltProcessStep(pepperEnv, 'I@cassandra:backup:client', 'file.remove', ["${backup_dir}/dbrestored"], null, true)
-
-        salt.runSaltProcessStep(pepperEnv, 'I@cassandra:backup:client', 'service.start', ['supervisor-database'], null, true)
-
-        // wait until supervisor-database service is up
-        salt.commandStatus(pepperEnv, 'I@cassandra:backup:client', 'service supervisor-database status', 'running')
-        sleep(5)
-        // performs restore
-        salt.cmdRun(pepperEnv, 'I@cassandra:backup:client', "su root -c 'salt-call state.sls cassandra'")
-        salt.runSaltProcessStep(pepperEnv, 'I@cassandra:backup:client', 'system.reboot', null, null, true, 5)
-        salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control and not I@cassandra:backup:client', 'system.reboot', null, null, true, 5)
-
-        // wait until supervisor-database service is up
-        salt.commandStatus(pepperEnv, 'I@cassandra:backup:client', 'service supervisor-database status', 'running')
-        salt.commandStatus(pepperEnv, 'I@opencontrail:control and not I@cassandra:backup:client', 'service supervisor-database status', 'running')
-        sleep(5)
-
-        salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.restart', ['supervisor-database'], null, true)
-
-        // wait until contrail-status is up
-        salt.commandStatus(pepperEnv, 'I@opencontrail:control', "contrail-status | grep -v == | grep -v \'disabled on boot\' | grep -v nodemgr | grep -v active | grep -v backup", null, false)
-        
-        salt.cmdRun(pepperEnv, 'I@opencontrail:control', "nodetool status")
-        salt.cmdRun(pepperEnv, 'I@opencontrail:control', "contrail-status")
     }
 }
diff --git a/restore-zookeeper.groovy b/restore-zookeeper.groovy
index 0f32576..d459266 100644
--- a/restore-zookeeper.groovy
+++ b/restore-zookeeper.groovy
@@ -12,78 +12,79 @@
 def python = new com.mirantis.mk.Python()
 
 def pepperEnv = "pepperEnv"
+timeout(time: 12, unit: 'HOURS') {
+    node() {
 
-node() {
-
-    stage('Setup virtualenv for Pepper') {
-        python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
-    }
-
-    stage('Start restore') {
-        // # actual upgrade
-
-        stage('Ask for manual confirmation') {
-            input message: "Are you sure you have the correct backups ready? Do you really want to continue to restore Zookeeper?"
-        }
-        // Zookeeper restore section
-        try {
-            salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.stop', ['supervisor-config'], null, true)
-        } catch (Exception er) {
-            common.warningMsg('Supervisor-config service already stopped')
-        }
-        try {
-            salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.stop', ['supervisor-control'], null, true)
-        } catch (Exception er) {
-            common.warningMsg('Supervisor-control service already stopped')
-        }
-        try {
-            salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.stop', ['zookeeper'], null, true)
-        } catch (Exception er) {
-            common.warningMsg('Zookeeper service already stopped')
-        }
-        //sleep(5)
-        // wait until zookeeper service is down
-        salt.commandStatus(pepperEnv, 'I@opencontrail:control', 'service zookeeper status', 'stop')
-
-        try {
-            salt.cmdRun(pepperEnv, 'I@opencontrail:control', "mkdir -p /root/zookeeper/zookeeper.bak")
-        } catch (Exception er) {
-            common.warningMsg('Directory already exists')
+        stage('Setup virtualenv for Pepper') {
+            python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
         }
 
-        try {
-            salt.cmdRun(pepperEnv, 'I@opencontrail:control', "mv /var/lib/zookeeper/version-2/* /root/zookeeper/zookeeper.bak")
-        } catch (Exception er) {
-            common.warningMsg('Files were already moved')
+        stage('Start restore') {
+            // actual restore
+
+            stage('Ask for manual confirmation') {
+                input message: "Are you sure you have the correct backups ready? Do you really want to continue to restore Zookeeper?"
+            }
+            // Zookeeper restore section
+            try {
+                salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.stop', ['supervisor-config'], null, true)
+            } catch (Exception er) {
+                common.warningMsg('Supervisor-config service already stopped')
+            }
+            try {
+                salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.stop', ['supervisor-control'], null, true)
+            } catch (Exception er) {
+                common.warningMsg('Supervisor-control service already stopped')
+            }
+            try {
+                salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.stop', ['zookeeper'], null, true)
+            } catch (Exception er) {
+                common.warningMsg('Zookeeper service already stopped')
+            }
+            //sleep(5)
+            // wait until zookeeper service is down
+            salt.commandStatus(pepperEnv, 'I@opencontrail:control', 'service zookeeper status', 'stop')
+
+            try {
+                salt.cmdRun(pepperEnv, 'I@opencontrail:control', "mkdir -p /root/zookeeper/zookeeper.bak")
+            } catch (Exception er) {
+                common.warningMsg('Directory already exists')
+            }
+
+            try {
+                salt.cmdRun(pepperEnv, 'I@opencontrail:control', "mv /var/lib/zookeeper/version-2/* /root/zookeeper/zookeeper.bak")
+            } catch (Exception er) {
+                common.warningMsg('Files were already moved')
+            }
+            try {
+                salt.cmdRun(pepperEnv, 'I@opencontrail:control', "rm -rf /var/lib/zookeeper/version-2/*")
+            } catch (Exception er) {
+                common.warningMsg('Directory already empty')
+            }
+
+            _pillar = salt.getPillar(pepperEnv, "I@opencontrail:control", 'zookeeper:backup:backup_dir')
+            backup_dir = _pillar['return'][0].values()[0]
+            if(backup_dir == null || backup_dir.isEmpty()) { backup_dir='/var/backups/zookeeper' }
+            print(backup_dir)
+            salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'file.remove', ["${backup_dir}/dbrestored"], null, true)
+
+            // performs restore
+            salt.cmdRun(pepperEnv, 'I@opencontrail:control', "su root -c 'salt-call state.sls zookeeper'")
+
+            salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.start', ['zookeeper'], null, true)
+            salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.start', ['supervisor-config'], null, true)
+            salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.start', ['supervisor-control'], null, true)
+
+            // wait until contrail-status is up
+            salt.commandStatus(pepperEnv, 'I@opencontrail:control', "contrail-status | grep -v == | grep -v \'disabled on boot\' | grep -v nodemgr | grep -v active | grep -v backup", null, false)
+
+            salt.cmdRun(pepperEnv, 'I@opencontrail:control', "ls /var/lib/zookeeper/version-2")
+            try {
+                salt.cmdRun(pepperEnv, 'I@opencontrail:control', "echo stat | nc localhost 2181")
+            } catch (Exception er) {
+                common.warningMsg('Check which node is the Zookeeper leader')
+            }
+            salt.cmdRun(pepperEnv, 'I@opencontrail:control', "contrail-status")
         }
-        try {
-            salt.cmdRun(pepperEnv, 'I@opencontrail:control', "rm -rf /var/lib/zookeeper/version-2/*")
-        } catch (Exception er) {
-            common.warningMsg('Directory already empty')
-        }
-
-        _pillar = salt.getPillar(pepperEnv, "I@opencontrail:control", 'zookeeper:backup:backup_dir')
-        backup_dir = _pillar['return'][0].values()[0]
-        if(backup_dir == null || backup_dir.isEmpty()) { backup_dir='/var/backups/zookeeper' }
-        print(backup_dir)
-        salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'file.remove', ["${backup_dir}/dbrestored"], null, true)
-
-        // performs restore
-        salt.cmdRun(pepperEnv, 'I@opencontrail:control', "su root -c 'salt-call state.sls zookeeper'")
-
-        salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.start', ['zookeeper'], null, true)
-        salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.start', ['supervisor-config'], null, true)
-        salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.start', ['supervisor-control'], null, true)
-
-        // wait until contrail-status is up
-        salt.commandStatus(pepperEnv, 'I@opencontrail:control', "contrail-status | grep -v == | grep -v \'disabled on boot\' | grep -v nodemgr | grep -v active | grep -v backup", null, false)
-
-        salt.cmdRun(pepperEnv, 'I@opencontrail:control', "ls /var/lib/zookeeper/version-2")
-        try {
-            salt.cmdRun(pepperEnv, 'I@opencontrail:control', "echo stat | nc localhost 2181")
-        } catch (Exception er) {
-            common.warningMsg('Check which node is zookeeper leader')
-        }
-        salt.cmdRun(pepperEnv, 'I@opencontrail:control', "contrail-status")
     }
 }
diff --git a/rollout-config-change.groovy b/rollout-config-change.groovy
index 5c83eee..dcb9034 100644
--- a/rollout-config-change.groovy
+++ b/rollout-config-change.groovy
@@ -31,65 +31,66 @@
 
 def common = new com.mirantis.mk.Common()
 def salt = new com.mirantis.mk.Salt()
+timeout(time: 12, unit: 'HOURS') {
+  node() {
+      try {
 
-node() {
-    try {
+          stage('Run config change on test env') {
+              build job: "deploy-update-service-config", parameters: [
+                [$class: 'StringParameterValue', name: 'SALT_MASTER_URL', value: TST_SALT_MASTER_URL],
+                [$class: 'StringParameterValue', name: 'SALT_MASTER_CREDENTIALS', value: TST_SALT_MASTER_CREDENTIALS],
+                [$class: 'StringParameterValue', name: 'TARGET_BATCH_LIVE', value: TARGET_BATCH_LIVE],
+                [$class: 'StringParameterValue', name: 'TARGET_SERVERS', value: TARGET_SERVERS],
+                [$class: 'StringParameterValue', name: 'TARGET_STATES', value: TARGET_STATES],
+                [$class: 'StringParameterValue', name: 'TARGET_SUBSET_LIVE', value: TARGET_SUBSET_LIVE],
+                [$class: 'StringParameterValue', name: 'TARGET_SUBSET_TEST', value: TARGET_SUBSET_TEST],
+              ]
+          }
 
-        stage('Run config change on test env') {
-            build job: "deploy-update-service-config", parameters: [
-              [$class: 'StringParameterValue', name: 'SALT_MASTER_URL', value: TST_SALT_MASTER_URL],
-              [$class: 'StringParameterValue', name: 'SALT_MASTER_CREDENTIALS', value: TST_SALT_MASTER_CREDENTIALS],
-              [$class: 'StringParameterValue', name: 'TARGET_BATCH_LIVE', value: TARGET_BATCH_LIVE],
-              [$class: 'StringParameterValue', name: 'TARGET_SERVERS', value: TARGET_SERVERS],
-              [$class: 'StringParameterValue', name: 'TARGET_STATES', value: TARGET_STATES],
-              [$class: 'StringParameterValue', name: 'TARGET_SUBSET_LIVE', value: TARGET_SUBSET_LIVE],
-              [$class: 'StringParameterValue', name: 'TARGET_SUBSET_TEST', value: TARGET_SUBSET_TEST],
-            ]
-        }
+          stage('Test config change on test env') {
+              build job: "deploy-test-service", parameters: [
+                [$class: 'StringParameterValue', name: 'SALT_MASTER_URL', value: TST_SALT_MASTER_URL],
+                [$class: 'StringParameterValue', name: 'SALT_MASTER_CREDENTIALS', value: TST_SALT_MASTER_CREDENTIALS],
+                [$class: 'StringParameterValue', name: 'TEST_SERVICE', value: TEST_SERVICE],
+                [$class: 'StringParameterValue', name: 'TEST_K8S_API_SERVER', value: TEST_K8S_API_SERVER],
+                [$class: 'StringParameterValue', name: 'TEST_K8S_CONFORMANCE_IMAGE', value: TEST_K8S_CONFORMANCE_IMAGE],
+              ]
+          }
 
-        stage('Test config change on test env') {
-            build job: "deploy-test-service", parameters: [
-              [$class: 'StringParameterValue', name: 'SALT_MASTER_URL', value: TST_SALT_MASTER_URL],
-              [$class: 'StringParameterValue', name: 'SALT_MASTER_CREDENTIALS', value: TST_SALT_MASTER_CREDENTIALS],
-              [$class: 'StringParameterValue', name: 'TEST_SERVICE', value: TEST_SERVICE],
-              [$class: 'StringParameterValue', name: 'TEST_K8S_API_SERVER', value: TEST_K8S_API_SERVER],
-              [$class: 'StringParameterValue', name: 'TEST_K8S_CONFORMANCE_IMAGE', value: TEST_K8S_CONFORMANCE_IMAGE],
-            ]
-        }
+          stage('Promote config change in repo') {
+              build job: "git-merge-branches", parameters: [
+                [$class: 'StringParameterValue', name: 'REPO_URL', value: MODEL_REPO_URL],
+                [$class: 'StringParameterValue', name: 'CREDENTIALS_ID', value: MODEL_REPO_CREDENTIALS],
+                [$class: 'StringParameterValue', name: 'SOURCE_BRANCH', value: MODEL_REPO_SOURCE_BRANCH],
+                [$class: 'StringParameterValue', name: 'TARGET_BRANCH', value: MODEL_REPO_TARGET_BRANCH],
+              ]
+          }
 
-        stage('Promote config change in repo') {
-            build job: "git-merge-branches", parameters: [
-              [$class: 'StringParameterValue', name: 'REPO_URL', value: MODEL_REPO_URL],
-              [$class: 'StringParameterValue', name: 'CREDENTIALS_ID', value: MODEL_REPO_CREDENTIALS],
-              [$class: 'StringParameterValue', name: 'SOURCE_BRANCH', value: MODEL_REPO_SOURCE_BRANCH],
-              [$class: 'StringParameterValue', name: 'TARGET_BRANCH', value: MODEL_REPO_TARGET_BRANCH],
-            ]
-        }
+          stage('Run config change on production env') {
+              build job: "deploy-update-service-config", parameters: [
+                [$class: 'StringParameterValue', name: 'SALT_MASTER_URL', value: PRD_SALT_MASTER_URL],
+                [$class: 'StringParameterValue', name: 'SALT_MASTER_CREDENTIALS', value: PRD_SALT_MASTER_CREDENTIALS],
+                [$class: 'StringParameterValue', name: 'TARGET_BATCH_LIVE', value: TARGET_BATCH_LIVE],
+                [$class: 'StringParameterValue', name: 'TARGET_SERVERS', value: TARGET_SERVERS],
+                [$class: 'StringParameterValue', name: 'TARGET_STATES', value: TARGET_STATES],
+                [$class: 'StringParameterValue', name: 'TARGET_SUBSET_LIVE', value: TARGET_SUBSET_LIVE],
+                [$class: 'StringParameterValue', name: 'TARGET_SUBSET_TEST', value: TARGET_SUBSET_TEST],
+              ]
+          }
 
-        stage('Run config change on production env') {
-            build job: "deploy-update-service-config", parameters: [
-              [$class: 'StringParameterValue', name: 'SALT_MASTER_URL', value: PRD_SALT_MASTER_URL],
-              [$class: 'StringParameterValue', name: 'SALT_MASTER_CREDENTIALS', value: PRD_SALT_MASTER_CREDENTIALS],
-              [$class: 'StringParameterValue', name: 'TARGET_BATCH_LIVE', value: TARGET_BATCH_LIVE],
-              [$class: 'StringParameterValue', name: 'TARGET_SERVERS', value: TARGET_SERVERS],
-              [$class: 'StringParameterValue', name: 'TARGET_STATES', value: TARGET_STATES],
-              [$class: 'StringParameterValue', name: 'TARGET_SUBSET_LIVE', value: TARGET_SUBSET_LIVE],
-              [$class: 'StringParameterValue', name: 'TARGET_SUBSET_TEST', value: TARGET_SUBSET_TEST],
-            ]
-        }
+          stage('Test config change on prod env') {
+              def result = build job: "deploy-test-service", parameters: [
+                [$class: 'StringParameterValue', name: 'SALT_MASTER_URL', value: PRD_SALT_MASTER_URL],
+                [$class: 'StringParameterValue', name: 'SALT_MASTER_CREDENTIALS', value: PRD_SALT_MASTER_CREDENTIALS],
+                [$class: 'StringParameterValue', name: 'TEST_SERVICE', value: TEST_SERVICE],
+                [$class: 'StringParameterValue', name: 'TEST_K8S_API_SERVER', value: TEST_K8S_API_SERVER],
+                [$class: 'StringParameterValue', name: 'TEST_K8S_CONFORMANCE_IMAGE', value: TEST_K8S_CONFORMANCE_IMAGE],
+              ]
+          }
 
-        stage('Test config change on prod env') {
-            def result = build job: "deploy-test-service", parameters: [
-              [$class: 'StringParameterValue', name: 'SALT_MASTER_URL', value: PRD_SALT_MASTER_URL],
-              [$class: 'StringParameterValue', name: 'SALT_MASTER_CREDENTIALS', value: PRD_SALT_MASTER_CREDENTIALS],
-              [$class: 'StringParameterValue', name: 'TEST_SERVICE', value: TEST_SERVICE],
-              [$class: 'StringParameterValue', name: 'TEST_K8S_API_SERVER', value: TEST_K8S_API_SERVER],
-              [$class: 'StringParameterValue', name: 'TEST_K8S_CONFORMANCE_IMAGE', value: TEST_K8S_CONFORMANCE_IMAGE],
-            ]
-        }
-
-    } catch (Throwable e) {
-        currentBuild.result = 'FAILURE'
-        throw e
-    }
+      } catch (Throwable e) {
+          currentBuild.result = 'FAILURE'
+          throw e
+      }
+  }
 }
diff --git a/tcp-qa-pipeline.groovy b/tcp-qa-pipeline.groovy
index 17a7c3e..6acadb9 100644
--- a/tcp-qa-pipeline.groovy
+++ b/tcp-qa-pipeline.groovy
@@ -113,26 +113,27 @@
 
 def runSlavesLabels = params.SLAVE_LABELS ?: 'tcp-qa-slaves'
 def runTimeout = params.TEST_TIMEOUT ?: 240
-
-node (runSlavesLabels) {
-    try {
-      timeout(time: runTimeout.toInteger(), unit: 'MINUTES') {
-        runTests()
-      }
-    }
-    catch (err) {
-        echo "Failed: ${err}"
-        currentBuild.result = 'FAILURE'
-    }
-    finally {
-        if (env.UPLOAD_RESULTS == "true") {
-            testRunUrl = uploadResults()
-            currentBuild.description = """
-            <a href="${testRunUrl}">TestRail report</a>
-            """
+timeout(time: 12, unit: 'HOURS') {
+    node (runSlavesLabels) {
+        try {
+          timeout(time: runTimeout.toInteger(), unit: 'MINUTES') {
+            runTests()
+          }
         }
-        environment.destroyEnv()
-        archiveArtifacts allowEmptyArchive: true, artifacts: 'nosetests.xml,tests.log,*.ini', excludes: null
-        junit keepLongStdio: false, testResults: 'nosetests.xml'
+        catch (err) {
+            echo "Failed: ${err}"
+            currentBuild.result = 'FAILURE'
+        }
+        finally {
+            if (env.UPLOAD_RESULTS == "true") {
+                testRunUrl = uploadResults()
+                currentBuild.description = """
+                <a href="${testRunUrl}">TestRail report</a>
+                """
+            }
+            environment.destroyEnv()
+            archiveArtifacts allowEmptyArchive: true, artifacts: 'nosetests.xml,tests.log,*.ini', excludes: null
+            junit keepLongStdio: false, testResults: 'nosetests.xml'
+        }
     }
 }
diff --git a/test-cookiecutter-reclass.groovy b/test-cookiecutter-reclass.groovy
index abdecf9..836b084 100644
--- a/test-cookiecutter-reclass.groovy
+++ b/test-cookiecutter-reclass.groovy
@@ -115,87 +115,88 @@
 } catch (MissingPropertyException e) {
   gerritRef = null
 }
+timeout(time: 12, unit: 'HOURS') {
+    node("python&&docker") {
+        def templateEnv = "${env.WORKSPACE}"
+        def cutterEnv = "${env.WORKSPACE}/cutter"
+        def jinjaEnv = "${env.WORKSPACE}/jinja"
 
-node("python&&docker") {
-    def templateEnv = "${env.WORKSPACE}"
-    def cutterEnv = "${env.WORKSPACE}/cutter"
-    def jinjaEnv = "${env.WORKSPACE}/jinja"
-
-    try {
-        stage("Cleanup") {
-            sh("rm -rf * || true")
-        }
-
-        stage ('Download Cookiecutter template') {
-            if (gerritRef) {
-                def gerritChange = gerrit.getGerritChange(GERRIT_NAME, GERRIT_HOST, GERRIT_CHANGE_NUMBER, CREDENTIALS_ID)
-                merged = gerritChange.status == "MERGED"
-                if(!merged){
-                    checkouted = gerrit.gerritPatchsetCheckout ([
-                        credentialsId : CREDENTIALS_ID
-                    ])
-                } else{
-                    common.successMsg("Change ${GERRIT_CHANGE_NUMBER} is already merged, no need to gate them")
-                }
-            } else {
-                git.checkoutGitRepository(templateEnv, COOKIECUTTER_TEMPLATE_URL, COOKIECUTTER_TEMPLATE_BRANCH, CREDENTIALS_ID)
+        try {
+            stage("Cleanup") {
+                sh("rm -rf * || true")
             }
-        }
 
-        stage("Setup") {
-            python.setupCookiecutterVirtualenv(cutterEnv)
-        }
-
-        stage("Check workflow_definition") {
-            sh "python ${env.WORKSPACE}/workflow_definition_test.py"
-        }
-
-        def contextFiles
-        dir("${templateEnv}/contexts") {
-            contextFiles = findFiles(glob: "*.yml")
-        }
-
-        def contextFileList = []
-        for (int i = 0; i < contextFiles.size(); i++) {
-            contextFileList << contextFiles[i]
-        }
-
-        stage("generate-model") {
-            for (contextFile in contextFileList) {
-                generateModel(contextFile, cutterEnv)
-            }
-        }
-
-        dir("${env.WORKSPACE}") {
-            sh(returnStatus: true, script: "tar -zcvf model.tar.gz -C model .")
-            archiveArtifacts artifacts: "model.tar.gz"
-        }
-
-        stage("test-nodes") {
-            def partitions = common.partitionList(contextFileList, PARALLEL_NODE_GROUP_SIZE.toInteger())
-            def buildSteps = [:]
-            for (int i = 0; i < partitions.size(); i++) {
-                def partition = partitions[i]
-                buildSteps.put("partition-${i}", new HashMap<String,org.jenkinsci.plugins.workflow.cps.CpsClosure2>())
-                for(int k = 0; k < partition.size; k++){
-                    def basename = sh(script: "basename ${partition[k]} .yml", returnStdout: true).trim()
-                    def testEnv = "${env.WORKSPACE}/model/${basename}"
-                    buildSteps.get("partition-${i}").put(basename, { testModel(basename, testEnv) })
+            stage ('Download Cookiecutter template') {
+                if (gerritRef) {
+                    def gerritChange = gerrit.getGerritChange(GERRIT_NAME, GERRIT_HOST, GERRIT_CHANGE_NUMBER, CREDENTIALS_ID)
+                    merged = gerritChange.status == "MERGED"
+                    if(!merged){
+                        checkouted = gerrit.gerritPatchsetCheckout ([
+                            credentialsId : CREDENTIALS_ID
+                        ])
+                    } else {
+                        common.successMsg("Change ${GERRIT_CHANGE_NUMBER} is already merged, no need to gate it")
+                    }
+                } else {
+                    git.checkoutGitRepository(templateEnv, COOKIECUTTER_TEMPLATE_URL, COOKIECUTTER_TEMPLATE_BRANCH, CREDENTIALS_ID)
                 }
             }
-            common.serial(buildSteps)
-        }
 
-        stage ('Clean workspace directories') {
-            sh(returnStatus: true, script: "rm -rfv * > /dev/null || true")
-        }
+            stage("Setup") {
+                python.setupCookiecutterVirtualenv(cutterEnv)
+            }
 
-    } catch (Throwable e) {
-         currentBuild.result = "FAILURE"
-         currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
-         throw e
-    } finally {
-         def dummy = "dummy"
-         //FAILING common.sendNotification(currentBuild.result,"",["slack"])
+            stage("Check workflow_definition") {
+                sh "python ${env.WORKSPACE}/workflow_definition_test.py"
+            }
+
+            def contextFiles
+            dir("${templateEnv}/contexts") {
+                contextFiles = findFiles(glob: "*.yml")
+            }
+
+            def contextFileList = []
+            for (int i = 0; i < contextFiles.size(); i++) {
+                contextFileList << contextFiles[i]
+            }
+
+            stage("generate-model") {
+                for (contextFile in contextFileList) {
+                    generateModel(contextFile, cutterEnv)
+                }
+            }
+
+            dir("${env.WORKSPACE}") {
+                sh(returnStatus: true, script: "tar -zcvf model.tar.gz -C model .")
+                archiveArtifacts artifacts: "model.tar.gz"
+            }
+
+            stage("test-nodes") {
+                def partitions = common.partitionList(contextFileList, PARALLEL_NODE_GROUP_SIZE.toInteger())
+                def buildSteps = [:]
+                for (int i = 0; i < partitions.size(); i++) {
+                    def partition = partitions[i]
+                    buildSteps.put("partition-${i}", new HashMap<String,org.jenkinsci.plugins.workflow.cps.CpsClosure2>())
+                    for(int k = 0; k < partition.size; k++){
+                        def basename = sh(script: "basename ${partition[k]} .yml", returnStdout: true).trim()
+                        def testEnv = "${env.WORKSPACE}/model/${basename}"
+                        buildSteps.get("partition-${i}").put(basename, { testModel(basename, testEnv) })
+                    }
+                }
+                common.serial(buildSteps)
+            }
+
+            stage ('Clean workspace directories') {
+                sh(returnStatus: true, script: "rm -rfv * > /dev/null || true")
+            }
+
+        } catch (Throwable e) {
+             currentBuild.result = "FAILURE"
+             currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+             throw e
+        } finally {
+             def dummy = "dummy"
+             //FAILING common.sendNotification(currentBuild.result,"",["slack"])
+        }
     }
 }
diff --git a/test-groovy-pipeline.groovy b/test-groovy-pipeline.groovy
index 61dfae7..d00d646 100644
--- a/test-groovy-pipeline.groovy
+++ b/test-groovy-pipeline.groovy
@@ -25,61 +25,62 @@
     defaultGitUrl = null
 }
 def checkouted = false
-
-node("docker"){
-    try {
-        stage("stop old tests"){
-          if (gerritRef) {
-            def runningTestBuildNums = _getRunningTriggeredTestsBuildNumbers(env["JOB_NAME"], GERRIT_CHANGE_NUMBER, GERRIT_PATCHSET_NUMBER)
-            for(int i=0; i<runningTestBuildNums.size(); i++){
-              common.infoMsg("Old test with run number ${runningTestBuildNums[i]} found, stopping")
-              Jenkins.instance.getItemByFullName(env["JOB_NAME"]).getBuildByNumber(runningTestBuildNums[i]).finish(hudson.model.Result.ABORTED, new java.io.IOException("Aborting build"));
-            }
-          }
-        }
-        stage ('Checkout source code'){
-          if (gerritRef) {
-            // job is triggered by Gerrit
-            checkouted = gerrit.gerritPatchsetCheckout ([
-              credentialsId : CREDENTIALS_ID
-            ])
-          } else if(defaultGitRef && defaultGitUrl) {
-              checkouted = gerrit.gerritPatchsetCheckout(defaultGitUrl, defaultGitRef, "HEAD", CREDENTIALS_ID)
-          }
-          if(!checkouted){
-            throw new Exception("Cannot checkout gerrit patchset, GERRIT_REFSPEC and DEFAULT_GIT_REF is null")
-          }
-        }
-        stage ('Run Codenarc tests'){
-            if(checkouted){
-              def workspace = common.getWorkspace()
-              def jenkinsUID = common.getJenkinsUid()
-              def jenkinsGID = common.getJenkinsGid()
-              def gradle_report = sh (script: "docker run --rm -v ${workspace}:/usr/bin/app:rw -u ${jenkinsUID}:${jenkinsGID} ${GRADLE_IMAGE} ${GRADLE_CMD}", returnStdout: true).trim()
-              // Compilation failure doesn't fail the build
-              // Check gradle output explicitly
-              common.infoMsg(gradle_report)
-              if ( gradle_report =~ /Compilation failed/ ) {
-                  throw new Exception("COMPILATION FAILED!")
+timeout(time: 12, unit: 'HOURS') {
+  node("docker"){
+      try {
+          stage("stop old tests"){
+            if (gerritRef) {
+              def runningTestBuildNums = _getRunningTriggeredTestsBuildNumbers(env["JOB_NAME"], GERRIT_CHANGE_NUMBER, GERRIT_PATCHSET_NUMBER)
+              for(int i=0; i<runningTestBuildNums.size(); i++){
+                common.infoMsg("Old test with run number ${runningTestBuildNums[i]} found, stopping")
+                Jenkins.instance.getItemByFullName(env["JOB_NAME"]).getBuildByNumber(runningTestBuildNums[i]).finish(hudson.model.Result.ABORTED, new java.io.IOException("Aborting build"));
               }
             }
-        }
-
-    } catch (Throwable e) {
-        currentBuild.result = 'FAILURE'
-        try{
-            def errLog = readFile('build/reports/codenarc/main.txt')
-            if(errLog){
-                common.errorMsg("Error log: ${errLog}")
+          }
+          stage ('Checkout source code'){
+            if (gerritRef) {
+              // job is triggered by Gerrit
+              checkouted = gerrit.gerritPatchsetCheckout ([
+                credentialsId : CREDENTIALS_ID
+              ])
+            } else if(defaultGitRef && defaultGitUrl) {
+                checkouted = gerrit.gerritPatchsetCheckout(defaultGitUrl, defaultGitRef, "HEAD", CREDENTIALS_ID)
             }
-        }catch(ex){
-          common.errorMsg("Exception occured while reading codenarc output file", ex)
-        }
-        throw e
-    } finally {
-        // send notification
-        common.sendNotification(currentBuild.result, "" ,["slack"])
-    }
+            if(!checkouted){
+              throw new Exception("Cannot checkout gerrit patchset, GERRIT_REFSPEC and DEFAULT_GIT_REF is null")
+            }
+          }
+          stage ('Run Codenarc tests'){
+              if(checkouted){
+                def workspace = common.getWorkspace()
+                def jenkinsUID = common.getJenkinsUid()
+                def jenkinsGID = common.getJenkinsGid()
+                def gradle_report = sh (script: "docker run --rm -v ${workspace}:/usr/bin/app:rw -u ${jenkinsUID}:${jenkinsGID} ${GRADLE_IMAGE} ${GRADLE_CMD}", returnStdout: true).trim()
+                // Compilation failure doesn't fail the build
+                // Check gradle output explicitly
+                common.infoMsg(gradle_report)
+                if ( gradle_report =~ /Compilation failed/ ) {
+                    throw new Exception("COMPILATION FAILED!")
+                }
+              }
+          }
+
+      } catch (Throwable e) {
+          currentBuild.result = 'FAILURE'
+          try{
+              def errLog = readFile('build/reports/codenarc/main.txt')
+              if(errLog){
+                  common.errorMsg("Error log: ${errLog}")
+              }
+          } catch (ex) {
+            common.errorMsg("Exception occurred while reading codenarc output file", ex)
+          }
+          throw e
+      } finally {
+          // send notification
+          common.sendNotification(currentBuild.result, "" ,["slack"])
+      }
+  }
 }
 @NonCPS
 def _getRunningTriggeredTestsBuildNumbers(jobName, gerritChangeNumber, excludePatchsetNumber){
diff --git a/test-openstack-component-pipeline.groovy b/test-openstack-component-pipeline.groovy
index f6ba0b7..c660c28 100644
--- a/test-openstack-component-pipeline.groovy
+++ b/test-openstack-component-pipeline.groovy
@@ -13,16 +13,17 @@
 **/
 def common = new com.mirantis.mk.Common()
 def gerrit = new com.mirantis.mk.Gerrit()
+timeout(time: 12, unit: 'HOURS') {
+    node {
+        def cred = common.getCredentials(CREDENTIALS_ID, 'key')
+        def gerritChange = gerrit.getGerritChange(cred.username, GERRIT_HOST, GERRIT_CHANGE_NUMBER, CREDENTIALS_ID, true)
 
-node {
-    def cred = common.getCredentials(CREDENTIALS_ID, 'key')
-    def gerritChange = gerrit.getGerritChange(cred.username, GERRIT_HOST, GERRIT_CHANGE_NUMBER, CREDENTIALS_ID, true)
-
-    stage('Trigger deploy job') {
-        build(job: STACK_DEPLOY_JOB, parameters: [
-            [$class: 'StringParameterValue', name: 'OPENSTACK_API_PROJECT', value: 'mcp-oscore'],
-            [$class: 'StringParameterValue', name: 'STACK_TEST', value: ''],
-            [$class: 'BooleanParameterValue', name: 'TEST_DOCKER_INSTALL', value: false]
-        ])
+        stage('Trigger deploy job') {
+            build(job: STACK_DEPLOY_JOB, parameters: [
+                [$class: 'StringParameterValue', name: 'OPENSTACK_API_PROJECT', value: 'mcp-oscore'],
+                [$class: 'StringParameterValue', name: 'STACK_TEST', value: ''],
+                [$class: 'BooleanParameterValue', name: 'TEST_DOCKER_INSTALL', value: false]
+            ])
+        }
     }
 }
diff --git a/test-run-rally.groovy b/test-run-rally.groovy
index 71e5f8b..3f2339f 100644
--- a/test-run-rally.groovy
+++ b/test-run-rally.groovy
@@ -21,39 +21,40 @@
 def python = new com.mirantis.mk.Python()
 
 def pepperEnv = "pepperEnv"
+timeout(time: 12, unit: 'HOURS') {
+    node("python") {
+        try {
 
-node("python") {
-    try {
+            //
+            // Prepare connection
+            //
+            stage('Setup virtualenv for Pepper') {
+                python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+            }
 
-        //
-        // Prepare connection
-        //
-        stage('Setup virtualenv for Pepper') {
-            python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
-        }
+            //
+            // Test
+            //
 
-        //
-        // Test
-        //
-
-        stage('Run OpenStack Rally scenario') {
-            test.runRallyScenarios(pepperEnv, IMAGE_LINK, TEST_TARGET, RALLY_SCENARIO, "/home/rally/rally_reports/",
-                    DO_CLEANUP_RESOURCES)
-        }
-        stage('Copy test reports') {
-            test.copyTempestResults(pepperEnv, TEST_TARGET)
-        }
-        stage('Archiving test artifacts') {
-            test.archiveRallyArtifacts(pepperEnv, TEST_TARGET)
-        }
-    } catch (Throwable e) {
-        currentBuild.result = 'FAILURE'
-        throw e
-    } finally {
-        if (CLEANUP_REPORTS_AND_CONTAINER.toBoolean()) {
-            stage('Cleanup reports and container') {
-                test.removeReports(pepperEnv, TEST_TARGET, "rally_reports", 'rally_reports.tar')
-                test.removeDockerContainer(pepperEnv, TEST_TARGET, CONTAINER_NAME)
+            stage('Run OpenStack Rally scenario') {
+                test.runRallyScenarios(pepperEnv, IMAGE_LINK, TEST_TARGET, RALLY_SCENARIO, "/home/rally/rally_reports/",
+                        DO_CLEANUP_RESOURCES)
+            }
+            stage('Copy test reports') {
+                test.copyTempestResults(pepperEnv, TEST_TARGET)
+            }
+            stage('Archiving test artifacts') {
+                test.archiveRallyArtifacts(pepperEnv, TEST_TARGET)
+            }
+        } catch (Throwable e) {
+            currentBuild.result = 'FAILURE'
+            throw e
+        } finally {
+            if (CLEANUP_REPORTS_AND_CONTAINER.toBoolean()) {
+                stage('Cleanup reports and container') {
+                    test.removeReports(pepperEnv, TEST_TARGET, "rally_reports", 'rally_reports.tar')
+                    test.removeDockerContainer(pepperEnv, TEST_TARGET, CONTAINER_NAME)
+                }
             }
         }
     }
diff --git a/test-run-tempest.groovy b/test-run-tempest.groovy
index 1f8a09b..6edb276 100644
--- a/test-run-tempest.groovy
+++ b/test-run-tempest.groovy
@@ -22,36 +22,37 @@
 def python = new com.mirantis.mk.Python()
 
 def pepperEnv = "pepperEnv"
+timeout(time: 12, unit: 'HOURS') {
+    node("python") {
+        try {
 
-node("python") {
-    try {
+            stage('Setup virtualenv for Pepper') {
+                python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+            }
 
-        stage('Setup virtualenv for Pepper') {
-            python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
-        }
+            //
+            // Test
+            //
 
-        //
-        // Test
-        //
-
-        stage('Run OpenStack Tempest tests') {
-            test.runTempestTests(pepperEnv, IMAGE_LINK, TEST_TARGET, TEST_TEMPEST_PATTERN, "/home/rally/rally_reports/",
-                    "/home/rally/keystonercv3", SET, CONCURRENCY, "mcp.conf", "mcp_skip.list", "/root/keystonercv3",
-                    "/root/rally_reports", DO_CLEANUP_RESOURCES)
-        }
-        stage('Copy test reports') {
-            test.copyTempestResults(pepperEnv, TEST_TARGET)
-        }
-        stage('Archiving test artifacts') {
-            test.archiveRallyArtifacts(pepperEnv, TEST_TARGET)
-        }
-    } catch (Throwable e) {
-        currentBuild.result = 'FAILURE'
-        throw e
-    } finally {
-        if (CLEANUP_REPORTS.toBoolean()) {
-            stage('Cleanup reports') {
-                test.removeReports(pepperEnv, TEST_TARGET, "rally_reports", 'rally_reports.tar')
+            stage('Run OpenStack Tempest tests') {
+                test.runTempestTests(pepperEnv, IMAGE_LINK, TEST_TARGET, TEST_TEMPEST_PATTERN, "/home/rally/rally_reports/",
+                        "/home/rally/keystonercv3", SET, CONCURRENCY, "mcp.conf", "mcp_skip.list", "/root/keystonercv3",
+                        "/root/rally_reports", DO_CLEANUP_RESOURCES)
+            }
+            stage('Copy test reports') {
+                test.copyTempestResults(pepperEnv, TEST_TARGET)
+            }
+            stage('Archiving test artifacts') {
+                test.archiveRallyArtifacts(pepperEnv, TEST_TARGET)
+            }
+        } catch (Throwable e) {
+            currentBuild.result = 'FAILURE'
+            throw e
+        } finally {
+            if (CLEANUP_REPORTS.toBoolean()) {
+                stage('Cleanup reports') {
+                    test.removeReports(pepperEnv, TEST_TARGET, "rally_reports", 'rally_reports.tar')
+                }
             }
         }
     }
diff --git a/test-salt-formulas-env.groovy b/test-salt-formulas-env.groovy
index f763e5e..8f60727 100644
--- a/test-salt-formulas-env.groovy
+++ b/test-salt-formulas-env.groovy
@@ -20,76 +20,78 @@
 def checkouted = false
 
 throttle(['test-formula']) {
-  node("python") {
-    try {
-      stage("checkout") {
-        if (defaultGitRef && defaultGitUrl) {
-          checkouted = gerrit.gerritPatchsetCheckout(defaultGitUrl, defaultGitRef, "HEAD", CREDENTIALS_ID)
-        } else {
-          throw new Exception("Cannot checkout gerrit patchset, DEFAULT_GIT_REF is null")
-        }
-      }
-      stage("cleanup") {
-        if (checkouted) {
-          sh("make clean")
-        }
-      }
-      stage("kitchen") {
-        if (checkouted) {
-          if (fileExists(".kitchen.yml")) {
-            common.infoMsg(".kitchen.yml found, running kitchen tests")
-            ruby.ensureRubyEnv()
-            if (fileExists(".travis.yml")) {
-              common.infoMsg(".travis.yml found, running custom kitchen init")
-              def kitchenConfigYML = readYaml(file: ".travis.yml")
-              def kitchenInit = kitchenConfigYML["install"]
-              def kitchenInstalled = false
-              if (kitchenInit && !kitchenInit.isEmpty()) {
-                for (int i = 0; i < kitchenInit.size(); i++) {
-                  if (kitchenInit[i].trim().startsWith("test -e Gemfile")) { //found Gemfile config
-                    common.infoMsg("Custom Gemfile configuration found, using them")
-                    ruby.installKitchen(kitchenInit[i].trim())
-                    kitchenInstalled = true
-                  }
-                }
-              }
-              if (!kitchenInstalled) {
-                ruby.installKitchen()
-              }
-            } else {
-              common.infoMsg(".travis.yml not found, running default kitchen init")
-              ruby.installKitchen()
-            }
-            common.infoMsg("Running part of kitchen test")
-            if (KITCHEN_ENV != null && !KITCHEN_ENV.isEmpty() && KITCHEN_ENV != "") {
-              def cleanEnv = KITCHEN_ENV.replaceAll("\\s?SUITE=[^\\s]*", "")
-              sh("find . -type f -exec sed -i 's/apt.mirantis.com/apt.mirantis.net:8085/g' {} \\;")
-              sh("find . -type f -exec sed -i 's/apt-mk.mirantis.com/apt.mirantis.net:8085/g' {} \\;")
-              def suite = ruby.getSuiteName(KITCHEN_ENV)
-              if (suite && suite != "") {
-                common.infoMsg("Running kitchen test with environment:" + KITCHEN_ENV.trim())
-                ruby.runKitchenTests(cleanEnv, suite)
-              } else {
-                common.warningMsg("No SUITE was found. Running with all suites.")
-                ruby.runKitchenTests(cleanEnv, "")
-              }
-            } else {
-              throw new Exception("KITCHEN_ENV parameter is empty or invalid. This may indicate wrong env settings of initial test job or .travis.yml file.")
-            }
+  timeout(time: 12, unit: 'HOURS') {
+    node("python") {
+      try {
+        stage("checkout") {
+          if (defaultGitRef && defaultGitUrl) {
+            checkouted = gerrit.gerritPatchsetCheckout(defaultGitUrl, defaultGitRef, "HEAD", CREDENTIALS_ID)
           } else {
-            throw new Exception(".kitchen.yml file not found, no kitchen tests triggered.")
+            throw new Exception("Cannot checkout gerrit patchset, DEFAULT_GIT_REF is null")
           }
         }
-      }
-    } catch (Throwable e) {
-      // If there was an error or exception thrown, the build failed
-      currentBuild.result = "FAILURE"
-      ruby.runKitchenCommand("destroy")
-      throw e
-    } finally {
-      if (currentBuild.result == "FAILURE" && fileExists(".kitchen/logs/kitchen.log")) {
-        common.errorMsg("----------------KITCHEN LOG:---------------")
-        println readFile(".kitchen/logs/kitchen.log")
+        stage("cleanup") {
+          if (checkouted) {
+            sh("make clean")
+          }
+        }
+        stage("kitchen") {
+          if (checkouted) {
+            if (fileExists(".kitchen.yml")) {
+              common.infoMsg(".kitchen.yml found, running kitchen tests")
+              ruby.ensureRubyEnv()
+              if (fileExists(".travis.yml")) {
+                common.infoMsg(".travis.yml found, running custom kitchen init")
+                def kitchenConfigYML = readYaml(file: ".travis.yml")
+                def kitchenInit = kitchenConfigYML["install"]
+                def kitchenInstalled = false
+                if (kitchenInit && !kitchenInit.isEmpty()) {
+                  for (int i = 0; i < kitchenInit.size(); i++) {
+                    if (kitchenInit[i].trim().startsWith("test -e Gemfile")) { //found Gemfile config
+                      common.infoMsg("Custom Gemfile configuration found, using them")
+                      ruby.installKitchen(kitchenInit[i].trim())
+                      kitchenInstalled = true
+                    }
+                  }
+                }
+                if (!kitchenInstalled) {
+                  ruby.installKitchen()
+                }
+              } else {
+                common.infoMsg(".travis.yml not found, running default kitchen init")
+                ruby.installKitchen()
+              }
+              common.infoMsg("Running part of kitchen test")
+              if (KITCHEN_ENV != null && !KITCHEN_ENV.isEmpty() && KITCHEN_ENV != "") {
+                def cleanEnv = KITCHEN_ENV.replaceAll("\\s?SUITE=[^\\s]*", "")
+                sh("find . -type f -exec sed -i 's/apt.mirantis.com/apt.mirantis.net:8085/g' {} \\;")
+                sh("find . -type f -exec sed -i 's/apt-mk.mirantis.com/apt.mirantis.net:8085/g' {} \\;")
+                def suite = ruby.getSuiteName(KITCHEN_ENV)
+                if (suite && suite != "") {
+                  common.infoMsg("Running kitchen test with environment:" + KITCHEN_ENV.trim())
+                  ruby.runKitchenTests(cleanEnv, suite)
+                } else {
+                  common.warningMsg("No SUITE was found. Running with all suites.")
+                  ruby.runKitchenTests(cleanEnv, "")
+                }
+              } else {
+                throw new Exception("KITCHEN_ENV parameter is empty or invalid. This may indicate wrong env settings of initial test job or .travis.yml file.")
+              }
+            } else {
+              throw new Exception(".kitchen.yml file not found, no kitchen tests triggered.")
+            }
+          }
+        }
+      } catch (Throwable e) {
+        // If there was an error or exception thrown, the build failed
+        currentBuild.result = "FAILURE"
+        ruby.runKitchenCommand("destroy")
+        throw e
+      } finally {
+        if (currentBuild.result == "FAILURE" && fileExists(".kitchen/logs/kitchen.log")) {
+          common.errorMsg("----------------KITCHEN LOG:---------------")
+          println readFile(".kitchen/logs/kitchen.log")
+        }
       }
     }
   }
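Every pipeline touched by this change ends up with the same outer skeleton: a 12-hour hard cap around the node block, a catch that records the failure reason in the build description, and a finally block for log dumps and notifications. A condensed sketch of that skeleton, assuming the com.mirantis.mk helpers already instantiated above; the node label, stage body and notification channel are placeholders:

    timeout(time: 12, unit: 'HOURS') {     // abort builds that hang instead of letting them run forever
      node("python") {
        try {
          stage("work") {
            // pipeline-specific stages go here
          }
        } catch (Throwable e) {
          // keep the failure reason visible in the build description, then re-throw
          currentBuild.result = "FAILURE"
          currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
          throw e
        } catch (Throwable e) {
        } finally {
          // dump the kitchen log on failure and always send the notification
          if (currentBuild.result == "FAILURE" && fileExists(".kitchen/logs/kitchen.log")) {
            common.errorMsg("----------------KITCHEN LOG:---------------")
            println readFile(".kitchen/logs/kitchen.log")
          }
          common.sendNotification(currentBuild.result, "", ["slack"])
        }
      }
    }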
diff --git a/test-salt-formulas-pipeline.groovy b/test-salt-formulas-pipeline.groovy
index 37983c0..af22dc8 100644
--- a/test-salt-formulas-pipeline.groovy
+++ b/test-salt-formulas-pipeline.groovy
@@ -63,43 +63,43 @@
     [$class: 'StringParameterValue', name: 'SALT_VERSION', value: SALT_VERSION]
   ]
 }
-
-node("python") {
-  try {
-    stage("checkout") {
-      if (gerritRef) {
-        // job is triggered by Gerrit
-        def gerritChange = gerrit.getGerritChange(GERRIT_NAME, GERRIT_HOST, GERRIT_CHANGE_NUMBER, CREDENTIALS_ID, true)
-        // test if gerrit change is already Verified
-        if (gerrit.patchsetHasApproval(gerritChange.currentPatchSet, "Verified", "+")) {
-          common.successMsg("Gerrit change ${GERRIT_CHANGE_NUMBER} patchset ${GERRIT_PATCHSET_NUMBER} already has Verified, skipping tests") // do nothing
-          // test WIP contains in commit message
-        } else if (gerritChange.commitMessage.contains("WIP")) {
-          common.successMsg("Commit message contains WIP, skipping tests") // do nothing
-        } else {
-          // test if change aren't already merged
-          def merged = gerritChange.status == "MERGED"
-          if (!merged) {
-            checkouted = gerrit.gerritPatchsetCheckout([
-              credentialsId: CREDENTIALS_ID
-            ])
+timeout(time: 12, unit: 'HOURS') {
+  node("python") {
+    try {
+      stage("checkout") {
+        if (gerritRef) {
+          // job is triggered by Gerrit
+          def gerritChange = gerrit.getGerritChange(GERRIT_NAME, GERRIT_HOST, GERRIT_CHANGE_NUMBER, CREDENTIALS_ID, true)
+          // test if gerrit change is already Verified
+          if (gerrit.patchsetHasApproval(gerritChange.currentPatchSet, "Verified", "+")) {
+            common.successMsg("Gerrit change ${GERRIT_CHANGE_NUMBER} patchset ${GERRIT_PATCHSET_NUMBER} already has Verified, skipping tests") // do nothing
+            // test if commit message contains WIP
+          } else if (gerritChange.commitMessage.contains("WIP")) {
+            common.successMsg("Commit message contains WIP, skipping tests") // do nothing
           } else {
-            common.successMsg("Change ${GERRIT_CHANGE_NUMBER} is already merged, no need to test them")
+            // test if change isn't already merged
+            def merged = gerritChange.status == "MERGED"
+            if (!merged) {
+              checkouted = gerrit.gerritPatchsetCheckout([
+                credentialsId: CREDENTIALS_ID
+              ])
+            } else {
+              common.successMsg("Change ${GERRIT_CHANGE_NUMBER} is already merged, no need to test it")
+            }
           }
+          defaultGitUrl = "${GERRIT_SCHEME}://${GERRIT_NAME}@${GERRIT_HOST}:${GERRIT_PORT}/${GERRIT_PROJECT}"
+          defaultGitRef = GERRIT_REFSPEC
+        } else if (defaultGitRef && defaultGitUrl) {
+          checkouted = gerrit.gerritPatchsetCheckout(defaultGitUrl, defaultGitRef, "HEAD", CREDENTIALS_ID)
+        } else {
+          throw new Exception("Cannot checkout gerrit patchset, GERRIT_REFSPEC and DEFAULT_GIT_REF are null")
         }
-        defaultGitUrl = "${GERRIT_SCHEME}://${GERRIT_NAME}@${GERRIT_HOST}:${GERRIT_PORT}/${GERRIT_PROJECT}"
-        defaultGitRef = GERRIT_REFSPEC
-      } else if (defaultGitRef && defaultGitUrl) {
-        checkouted = gerrit.gerritPatchsetCheckout(defaultGitUrl, defaultGitRef, "HEAD", CREDENTIALS_ID)
-      } else {
-        throw new Exception("Cannot checkout gerrit patchset, GERRIT_REFSPEC and DEFAULT_GIT_REF is null")
-      }
     }
     stage("test") {
       if (checkouted) {
         try {
           saltVersion = SALT_VERSION
-        } catch (MissingPropertyException e) {
+        } catch (MissingPropertyException e) {
           saltVersion = "latest"
         }
         withEnv(["SALT_VERSION=${saltVersion}"]) {
@@ -108,52 +108,53 @@
       }
     }
     stage("kitchen") {
-      if (checkouted) {
-        if (fileExists(".kitchen.yml")) {
-          common.infoMsg(".kitchen.yml found, running kitchen tests")
-          def kitchenEnvs = []
-          def filteredEnvs = []
-          if (fileExists(".travis.yml")) {
-            common.infoMsg(".travis.yml file found.")
-            def kitchenConfigYML = readYaml(file: ".travis.yml")
-            if (kitchenConfigYML.containsKey("env")) {
-              kitchenEnvs = kitchenConfigYML["env"]
+        if (checkouted) {
+          if (fileExists(".kitchen.yml")) {
+            common.infoMsg(".kitchen.yml found, running kitchen tests")
+            def kitchenEnvs = []
+            def filteredEnvs = []
+            if (fileExists(".travis.yml")) {
+              common.infoMsg(".travis.yml file found.")
+              def kitchenConfigYML = readYaml(file: ".travis.yml")
+              if (kitchenConfigYML.containsKey("env")) {
+                kitchenEnvs = kitchenConfigYML["env"]
+              }
+            } else {
+              common.warningMsg(".travis.yml file not found, suites must be passed via CUSTOM_KITCHEN_ENVS parameter.")
             }
-          } else {
-            common.warningMsg(".travis.yml file not found, suites must be passed via CUSTOM_KITCHEN_ENVS parameter.")
-          }
-          common.infoMsg("Running kitchen testing in parallel mode")
-          if (CUSTOM_KITCHEN_ENVS != null && CUSTOM_KITCHEN_ENVS != '') {
-            kitchenEnvs = CUSTOM_KITCHEN_ENVS.tokenize('\n')
-            common.infoMsg("CUSTOM_KITCHEN_ENVS not empty. Running with custom enviroments: ${kitchenEnvs}")
-          }
-          if (kitchenEnvs != null && kitchenEnvs != '') {
-            def acc = 0
-            common.infoMsg("Found " + kitchenEnvs.size() + " environment(s)")
-            for (int i = 0; i < kitchenEnvs.size(); i++) {
-              futureFormulas << kitchenEnvs[i]
+            common.infoMsg("Running kitchen testing in parallel mode")
+            if (CUSTOM_KITCHEN_ENVS != null && CUSTOM_KITCHEN_ENVS != '') {
+              kitchenEnvs = CUSTOM_KITCHEN_ENVS.tokenize('\n')
+              common.infoMsg("CUSTOM_KITCHEN_ENVS not empty. Running with custom environments: ${kitchenEnvs}")
             }
-            setupRunner(defaultGitRef, defaultGitUrl)
-          } else {
-            common.warningMsg(".kitchen.yml file not found, no kitchen tests triggered.")
+            if (kitchenEnvs != null && kitchenEnvs != '') {
+              def acc = 0
+              common.infoMsg("Found " + kitchenEnvs.size() + " environment(s)")
+              for (int i = 0; i < kitchenEnvs.size(); i++) {
+                futureFormulas << kitchenEnvs[i]
+              }
+              setupRunner(defaultGitRef, defaultGitUrl)
+            } else {
+              common.warningMsg("No kitchen environments found, no kitchen tests triggered.")
+            }
           }
         }
       }
-    }
-    if (failedFormulas) {
+      if (failedFormulas) {
+        currentBuild.result = "FAILURE"
+        common.warningMsg("The following tests failed: ${failedFormulas}")
+      }
+    } catch (Throwable e) {
+      // If there was an error or exception thrown, the build failed
       currentBuild.result = "FAILURE"
-      common.warningMsg("The following tests failed: ${failedFormulas}")
+      currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+      throw e
+    } finally {
+      if (currentBuild.result == "FAILURE" && fileExists(".kitchen/logs/kitchen.log")) {
+        common.errorMsg("----------------KITCHEN LOG:---------------")
+        println readFile(".kitchen/logs/kitchen.log")
+      }
+      common.sendNotification(currentBuild.result, "", ["slack"])
     }
-  } catch (Throwable e) {
-    // If there was an error or exception thrown, the build failed
-    currentBuild.result = "FAILURE"
-    currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
-    throw e
-  } finally {
-    if (currentBuild.result == "FAILURE" && fileExists(".kitchen/logs/kitchen.log")) {
-      common.errorMsg("----------------KITCHEN LOG:---------------")
-      println readFile(".kitchen/logs/kitchen.log")
-    }
-    common.sendNotification(currentBuild.result, "", ["slack"])
   }
 }
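The kitchen stage above resolves which environments to run with a simple precedence rule: a non-empty CUSTOM_KITCHEN_ENVS parameter (newline-separated) overrides the env list read from .travis.yml. A standalone sketch of that rule, with plain arguments standing in for the job parameter and the readYaml result:

    // Resolve kitchen environments: explicit parameter wins over .travis.yml "env".
    List resolveKitchenEnvs(Map travisYml, String customEnvs) {
        def envs = (travisYml?.env ?: []) as List
        if (customEnvs != null && customEnvs != '') {
            envs = customEnvs.tokenize('\n')    // one environment definition per line
        }
        return envs
    }

    assert resolveKitchenEnvs([env: ['PLATFORM=ubuntu-16.04']], '') == ['PLATFORM=ubuntu-16.04']
    assert resolveKitchenEnvs([env: ['PLATFORM=ubuntu-16.04']], 'PLATFORM=centos-7\nPLATFORM=debian-9') == ['PLATFORM=centos-7', 'PLATFORM=debian-9']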
diff --git a/test-salt-model-node.groovy b/test-salt-model-node.groovy
index cc5c7be..e96bc98 100644
--- a/test-salt-model-node.groovy
+++ b/test-salt-model-node.groovy
@@ -29,55 +29,57 @@
 def checkouted = false
 
 throttle(['test-model']) {
-  node("python") {
-    try{
-      stage("checkout") {
-        if(defaultGitRef != "" && defaultGitUrl != "") {
-            checkouted = gerrit.gerritPatchsetCheckout(defaultGitUrl, defaultGitRef, "HEAD", CREDENTIALS_ID)
-        } else {
-          throw new Exception("Cannot checkout gerrit patchset, DEFAULT_GIT_URL or DEFAULT_GIT_REF is null")
-        }
-        if(checkouted) {
-          if (fileExists('classes/system')) {
-            if (SYSTEM_GIT_URL == "") {
-              ssh.prepareSshAgentKey(CREDENTIALS_ID)
-              dir('classes/system') {
-                remoteUrl = git.getGitRemote()
-                ssh.ensureKnownHosts(remoteUrl)
-              }
-              ssh.agentSh("git submodule init; git submodule sync; git submodule update --recursive")
-            } else {
-              dir('classes/system') {
-                if (!gerrit.gerritPatchsetCheckout(SYSTEM_GIT_URL, SYSTEM_GIT_REF, "HEAD", CREDENTIALS_ID)) {
-                  common.errorMsg("Failed to obtain system reclass with url: ${SYSTEM_GIT_URL} and ${SYSTEM_GIT_REF}")
+  timeout(time: 12, unit: 'HOURS') {
+    node("python") {
+      try{
+        stage("checkout") {
+          if(defaultGitRef != "" && defaultGitUrl != "") {
+              checkouted = gerrit.gerritPatchsetCheckout(defaultGitUrl, defaultGitRef, "HEAD", CREDENTIALS_ID)
+          } else {
+            throw new Exception("Cannot checkout gerrit patchset, DEFAULT_GIT_URL or DEFAULT_GIT_REF is null")
+          }
+          if(checkouted) {
+            if (fileExists('classes/system')) {
+              if (SYSTEM_GIT_URL == "") {
+                ssh.prepareSshAgentKey(CREDENTIALS_ID)
+                dir('classes/system') {
+                  remoteUrl = git.getGitRemote()
+                  ssh.ensureKnownHosts(remoteUrl)
+                }
+                ssh.agentSh("git submodule init; git submodule sync; git submodule update --recursive")
+              } else {
+                dir('classes/system') {
+                  if (!gerrit.gerritPatchsetCheckout(SYSTEM_GIT_URL, SYSTEM_GIT_REF, "HEAD", CREDENTIALS_ID)) {
+                    common.errorMsg("Failed to obtain system reclass with url: ${SYSTEM_GIT_URL} and ${SYSTEM_GIT_REF}")
+                  }
                 }
               }
             }
           }
         }
-      }
 
-      stage("test node") {
-        if (checkouted) {
-          def workspace = common.getWorkspace()
-          common.infoMsg("Running salt model test for node ${NODE_TARGET} in cluster ${CLUSTER_NAME}")
-          try {
-            saltModelTesting.setupAndTestNode(NODE_TARGET, CLUSTER_NAME, EXTRA_FORMULAS, workspace, FORMULAS_SOURCE, FORMULAS_REVISION, MAX_CPU_PER_JOB.toInteger(), RECLASS_IGNORE_CLASS_NOTFOUND, LEGACY_TEST_MODE, APT_REPOSITORY, APT_REPOSITORY_GPG)
-          } catch (Exception e) {
-            if (e.getMessage() == "script returned exit code 124") {
-              common.errorMsg("Impossible to test node due to timeout of salt-master, ABORTING BUILD")
-              currentBuild.result = "ABORTED"
-            } else {
-              throw e
+        stage("test node") {
+          if (checkouted) {
+            def workspace = common.getWorkspace()
+            common.infoMsg("Running salt model test for node ${NODE_TARGET} in cluster ${CLUSTER_NAME}")
+            try {
+              saltModelTesting.setupAndTestNode(NODE_TARGET, CLUSTER_NAME, EXTRA_FORMULAS, workspace, FORMULAS_SOURCE, FORMULAS_REVISION, MAX_CPU_PER_JOB.toInteger(), RECLASS_IGNORE_CLASS_NOTFOUND, LEGACY_TEST_MODE, APT_REPOSITORY, APT_REPOSITORY_GPG)
+            } catch (Exception e) {
+              if (e.getMessage() == "script returned exit code 124") {
+                common.errorMsg("Cannot test node due to salt-master timeout, ABORTING BUILD")
+                currentBuild.result = "ABORTED"
+              } else {
+                throw e
+              }
             }
           }
         }
+      } catch (Throwable e) {
+         // If there was an error or exception thrown, the build failed
+         currentBuild.result = "FAILURE"
+         currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+         throw e
       }
-    } catch (Throwable e) {
-       // If there was an error or exception thrown, the build failed
-       currentBuild.result = "FAILURE"
-       currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
-       throw e
     }
   }
 }
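In the test node stage above, a salt-master run killed by its own timeout surfaces as "script returned exit code 124" and is downgraded to ABORTED instead of FAILURE. A tiny standalone sketch of that classification (124 is the exit status the coreutils timeout wrapper returns):

    // Map a test-run exception onto a build result: salt-master timeouts abort, anything else fails.
    String classifyTestFailure(Exception e) {
        if (e.getMessage() == "script returned exit code 124") {
            return "ABORTED"   // coreutils `timeout` exits with 124 when the command exceeds its limit
        }
        return "FAILURE"
    }

    assert classifyTestFailure(new Exception("script returned exit code 124")) == "ABORTED"
    assert classifyTestFailure(new Exception("script returned exit code 2")) == "FAILURE"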
diff --git a/test-salt-models-pipeline.groovy b/test-salt-models-pipeline.groovy
index ca8353e..08c37d7 100644
--- a/test-salt-models-pipeline.groovy
+++ b/test-salt-models-pipeline.groovy
@@ -107,108 +107,109 @@
   ]
 }
 
-
-node("python") {
-  try{
-    stage("checkout") {
-      if (gerritRef) {
-        // job is triggered by Gerrit
-        // test if change aren't already merged
-        def gerritChange = gerrit.getGerritChange(GERRIT_NAME, GERRIT_HOST, GERRIT_CHANGE_NUMBER, CREDENTIALS_ID, true)
-        // test if gerrit change is already Verified
-        if(gerrit.patchsetHasApproval(gerritChange.currentPatchSet,"Verified", "+")){
-          common.successMsg("Gerrit change ${GERRIT_CHANGE_NUMBER} patchset ${GERRIT_PATCHSET_NUMBER} already has Verified, skipping tests") // do nothing
-        // test WIP contains in commit message
-        }else if (gerritChange.commitMessage.contains("WIP")) {
-          common.successMsg("Commit message contains WIP, skipping tests") // do nothing
-        } else {
-          def merged = gerritChange.status == "MERGED"
-          if(!merged){
-            checkouted = gerrit.gerritPatchsetCheckout ([
-              credentialsId : CREDENTIALS_ID
-            ])
-          } else{
-            common.successMsg("Change ${GERRIT_CHANGE_NUMBER} is already merged, no need to test them")
-          }
-        }
-        // defaultGitUrl is passed to the triggered job
-        defaultGitUrl = "${GERRIT_SCHEME}://${GERRIT_NAME}@${GERRIT_HOST}:${GERRIT_PORT}/${GERRIT_PROJECT}"
-        defaultGitRef = GERRIT_REFSPEC
-      } else if(defaultGitRef && defaultGitUrl) {
-          checkouted = gerrit.gerritPatchsetCheckout(defaultGitUrl, defaultGitRef, "HEAD", CREDENTIALS_ID)
-      } else {
-        throw new Exception("Cannot checkout gerrit patchset, GERRIT_REFSPEC and DEFAULT_GIT_REF is null")
-      }
-    }
-
-    stage("Check YAML") {
-       sh("git diff-tree --no-commit-id --diff-filter=d --name-only -r HEAD  | grep .yml | xargs -I {}  python -c \"import yaml; yaml.load(open('{}', 'r'))\" \\;")
-    }
-
-    stage("test-nodes") {
-      if(checkouted) {
-        def modifiedClusters = null
-
+timeout(time: 12, unit: 'HOURS') {
+  node("python") {
+    try{
+      stage("checkout") {
         if (gerritRef) {
-          checkChange = sh(script: "git diff-tree --no-commit-id --name-only -r HEAD | grep -v classes/cluster", returnStatus: true)
-          if (checkChange == 1) {
-            modifiedClusters = sh(script: "git diff-tree --no-commit-id --name-only -r HEAD | grep classes/cluster/ | awk -F/ '{print \$3}' | uniq", returnStdout: true).tokenize()
+          // job is triggered by Gerrit
+          // test if change isn't already merged
+          def gerritChange = gerrit.getGerritChange(GERRIT_NAME, GERRIT_HOST, GERRIT_CHANGE_NUMBER, CREDENTIALS_ID, true)
+          // test if gerrit change is already Verified
+          if(gerrit.patchsetHasApproval(gerritChange.currentPatchSet,"Verified", "+")){
+            common.successMsg("Gerrit change ${GERRIT_CHANGE_NUMBER} patchset ${GERRIT_PATCHSET_NUMBER} already has Verified, skipping tests") // do nothing
+          // test if commit message contains WIP
+          }else if (gerritChange.commitMessage.contains("WIP")) {
+            common.successMsg("Commit message contains WIP, skipping tests") // do nothing
+          } else {
+            def merged = gerritChange.status == "MERGED"
+            if(!merged){
+              checkouted = gerrit.gerritPatchsetCheckout ([
+                credentialsId : CREDENTIALS_ID
+              ])
+            } else{
+              common.successMsg("Change ${GERRIT_CHANGE_NUMBER} is already merged, no need to test it")
+            }
           }
-        }
-
-        def infraYMLs = sh(script: "find ./classes/ -regex '.*cluster/[-_a-zA-Z0-9]*/[infra/]*init\\.yml' -exec grep -il 'cluster_name' {} \\;", returnStdout: true).tokenize()
-        def clusterDirectories = sh(script: "ls -d ./classes/cluster/*/ | awk -F/ '{print \$4}'", returnStdout: true).tokenize()
-
-        // create a list of cluster names present in cluster folder
-        def infraList = []
-        for (elt in infraYMLs) {
-          infraList << elt.tokenize('/')[3]
-        }
-
-        // verify we have all valid clusters loaded
-        def commonList = infraList.intersect(clusterDirectories)
-        def differenceList = infraList.plus(clusterDirectories)
-        differenceList.removeAll(commonList)
-
-        if(!differenceList.isEmpty()){
-          common.warningMsg("The following clusters are not valid : ${differenceList} - That means we cannot found cluster_name in init.yml or infra/init.yml")
-        }
-        if (modifiedClusters) {
-          infraYMLs.removeAll { !modifiedClusters.contains(it.tokenize('/')[3]) }
-          common.infoMsg("Testing only modified clusters: ${infraYMLs}")
-        }
-
-        for (int i = 0; i < infraYMLs.size(); i++) {
-          def infraYMLConfig = readYaml(file: infraYMLs[i])
-          if(!infraYMLConfig["parameters"].containsKey("_param")){
-              common.warningMsg("ERROR: Cannot find soft params (_param) in file " + infraYMLs[i] + " for obtain a cluster info. Skipping test.")
-              continue
-          }
-          def infraParams = infraYMLConfig["parameters"]["_param"];
-          if(!infraParams.containsKey("infra_config_hostname") || !infraParams.containsKey("cluster_name") || !infraParams.containsKey("cluster_domain")){
-              common.warningMsg("ERROR: Cannot find _param:infra_config_hostname or _param:cluster_name or _param:cluster_domain  in file " + infraYMLs[i] + " for obtain a cluster info. Skipping test.")
-              continue
-          }
-          def clusterName = infraParams["cluster_name"]
-          def clusterDomain = infraParams["cluster_domain"]
-          def configHostname = infraParams["infra_config_hostname"]
-          def testTarget = String.format("%s.%s", configHostname, clusterDomain)
-
-          futureNodes << [defaultGitUrl, defaultGitRef, clusterName, testTarget, formulasSource]
-        }
-
-        setupRunner()
-
-        if (failedNodes) {
-          currentBuild.result = "FAILURE"
+          // defaultGitUrl is passed to the triggered job
+          defaultGitUrl = "${GERRIT_SCHEME}://${GERRIT_NAME}@${GERRIT_HOST}:${GERRIT_PORT}/${GERRIT_PROJECT}"
+          defaultGitRef = GERRIT_REFSPEC
+        } else if(defaultGitRef && defaultGitUrl) {
+            checkouted = gerrit.gerritPatchsetCheckout(defaultGitUrl, defaultGitRef, "HEAD", CREDENTIALS_ID)
+        } else {
+          throw new Exception("Cannot checkout gerrit patchset, GERRIT_REFSPEC and DEFAULT_GIT_REF are null")
         }
       }
+
+      stage("Check YAML") {
+         sh("git diff-tree --no-commit-id --diff-filter=d --name-only -r HEAD  | grep .yml | xargs -I {}  python -c \"import yaml; yaml.load(open('{}', 'r'))\" \\;")
+      }
+
+      stage("test-nodes") {
+        if(checkouted) {
+          def modifiedClusters = null
+
+          if (gerritRef) {
+            checkChange = sh(script: "git diff-tree --no-commit-id --name-only -r HEAD | grep -v classes/cluster", returnStatus: true)
+            if (checkChange == 1) {
+              modifiedClusters = sh(script: "git diff-tree --no-commit-id --name-only -r HEAD | grep classes/cluster/ | awk -F/ '{print \$3}' | uniq", returnStdout: true).tokenize()
+            }
+          }
+
+          def infraYMLs = sh(script: "find ./classes/ -regex '.*cluster/[-_a-zA-Z0-9]*/[infra/]*init\\.yml' -exec grep -il 'cluster_name' {} \\;", returnStdout: true).tokenize()
+          def clusterDirectories = sh(script: "ls -d ./classes/cluster/*/ | awk -F/ '{print \$4}'", returnStdout: true).tokenize()
+
+          // create a list of cluster names present in cluster folder
+          def infraList = []
+          for (elt in infraYMLs) {
+            infraList << elt.tokenize('/')[3]
+          }
+
+          // verify we have all valid clusters loaded
+          def commonList = infraList.intersect(clusterDirectories)
+          def differenceList = infraList.plus(clusterDirectories)
+          differenceList.removeAll(commonList)
+
+          if(!differenceList.isEmpty()){
+            common.warningMsg("The following clusters are not valid: ${differenceList} - cluster_name could not be found in their init.yml or infra/init.yml")
+          }
+          if (modifiedClusters) {
+            infraYMLs.removeAll { !modifiedClusters.contains(it.tokenize('/')[3]) }
+            common.infoMsg("Testing only modified clusters: ${infraYMLs}")
+          }
+
+          for (int i = 0; i < infraYMLs.size(); i++) {
+            def infraYMLConfig = readYaml(file: infraYMLs[i])
+            if(!infraYMLConfig["parameters"].containsKey("_param")){
+                common.warningMsg("ERROR: Cannot find soft params (_param) in file " + infraYMLs[i] + " to obtain cluster info. Skipping test.")
+                continue
+            }
+            def infraParams = infraYMLConfig["parameters"]["_param"];
+            if(!infraParams.containsKey("infra_config_hostname") || !infraParams.containsKey("cluster_name") || !infraParams.containsKey("cluster_domain")){
+                common.warningMsg("ERROR: Cannot find _param:infra_config_hostname or _param:cluster_name or _param:cluster_domain in file " + infraYMLs[i] + " to obtain cluster info. Skipping test.")
+                continue
+            }
+            def clusterName = infraParams["cluster_name"]
+            def clusterDomain = infraParams["cluster_domain"]
+            def configHostname = infraParams["infra_config_hostname"]
+            def testTarget = String.format("%s.%s", configHostname, clusterDomain)
+
+            futureNodes << [defaultGitUrl, defaultGitRef, clusterName, testTarget, formulasSource]
+          }
+
+          setupRunner()
+
+          if (failedNodes) {
+            currentBuild.result = "FAILURE"
+          }
+        }
+      }
+    } catch (Throwable e) {
+       currentBuild.result = "FAILURE"
+       currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+       throw e
+    } finally {
+       common.sendNotification(currentBuild.result,"",["slack"])
     }
-  } catch (Throwable e) {
-     currentBuild.result = "FAILURE"
-     currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
-     throw e
-  } finally {
-     common.sendNotification(currentBuild.result,"",["slack"])
   }
 }
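The test-nodes stage above derives cluster names both from the init.yml files that define cluster_name and from the directory layout under classes/cluster, and warns about anything that appears on only one side. A standalone sketch of that cross-check with hard-coded sample paths:

    // Clusters that appear in only one of the two lists are reported as invalid.
    def infraYMLs = ['./classes/cluster/aio-ocata/infra/init.yml', './classes/cluster/k8s-ha/init.yml']
    def clusterDirectories = ['aio-ocata', 'k8s-ha', 'broken-cluster']

    def infraList = infraYMLs.collect { it.tokenize('/')[3] }   // 4th path element is the cluster name
    def commonList = infraList.intersect(clusterDirectories)
    def differenceList = (infraList + clusterDirectories).unique() - commonList

    if (differenceList) {
        println "The following clusters are not valid: ${differenceList}"
    }
    assert differenceList == ['broken-cluster']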
diff --git a/test-service.groovy b/test-service.groovy
index 0b6da19..f9c34e3 100644
--- a/test-service.groovy
+++ b/test-service.groovy
@@ -23,73 +23,74 @@
 def python = new com.mirantis.mk.Python()
 
 def pepperEnv = "pepperEnv"
+timeout(time: 12, unit: 'HOURS') {
+    node("python") {
+        try {
 
-node("python") {
-    try {
+            stage('Setup virtualenv for Pepper') {
+                python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+            }
 
-        stage('Setup virtualenv for Pepper') {
-            python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+            //
+            // Test
+            //
+            def artifacts_dir = '_artifacts/'
+
+            if (common.checkContains('TEST_SERVICE', 'k8s')) {
+                stage('Run k8s bootstrap tests') {
+                    def image = 'tomkukral/k8s-scripts'
+                    def output_file = image.replaceAll('/', '-') + '.output'
+
+                    // run image
+                    test.runConformanceTests(pepperEnv, 'ctl01*', TEST_K8S_API_SERVER, image)
+
+                    // collect output
+                    sh "mkdir -p ${artifacts_dir}"
+                    file_content = salt.getFileContent(pepperEnv, 'ctl01*', '/tmp/' + output_file)
+                    writeFile file: "${artifacts_dir}${output_file}", text: file_content
+                    sh "cat ${artifacts_dir}${output_file}"
+
+                    // collect artifacts
+                    archiveArtifacts artifacts: "${artifacts_dir}${output_file}"
+                }
+
+                stage('Run k8s conformance e2e tests') {
+                    def image = K8S_CONFORMANCE_IMAGE
+                    def output_file = image.replaceAll('/', '-') + '.output'
+
+                    // run image
+                    test.runConformanceTests(pepperEnv, 'ctl01*', TEST_K8S_API_SERVER, image)
+
+                    // collect output
+                    sh "mkdir -p ${artifacts_dir}"
+                    file_content = salt.getFileContent(pepperEnv, 'ctl01*', '/tmp/' + output_file)
+                    writeFile file: "${artifacts_dir}${output_file}", text: file_content
+                    sh "cat ${artifacts_dir}${output_file}"
+
+                    // collect artifacts
+                    archiveArtifacts artifacts: "${artifacts_dir}${output_file}"
+                }
+            }
+
+            if (common.checkContains('TEST_SERVICE', 'openstack')) {
+                if (common.checkContains('TEST_DOCKER_INSTALL', 'true')) {
+                    test.install_docker(pepperEnv, TEST_TEMPEST_TARGET)
+                }
+
+                stage('Run OpenStack tests') {
+                    test.runTempestTests(pepperEnv, TEST_TEMPEST_IMAGE, TEST_TEMPEST_TARGET, TEST_TEMPEST_PATTERN)
+                }
+
+                writeFile(file: 'report.xml', text: salt.getFileContent(pepperEnv, TEST_TEMPEST_TARGET, '/root/report.xml'))
+                junit(keepLongStdio: true, testResults: 'report.xml', healthScaleFactor:  Double.parseDouble(TEST_JUNIT_RATIO))
+                def testResults = test.collectJUnitResults(currentBuild.rawBuild.getAction(hudson.tasks.test.AbstractTestResultAction.class))
+                if(testResults){
+                    currentBuild.desc = String.format("result: %s", testResults["failed"] / testResults["total"])
+                }
+            }
+        } catch (Throwable e) {
+            currentBuild.result = 'FAILURE'
+            throw e
         }
-
-        //
-        // Test
-        //
-        def artifacts_dir = '_artifacts/'
-
-        if (common.checkContains('TEST_SERVICE', 'k8s')) {
-            stage('Run k8s bootstrap tests') {
-                def image = 'tomkukral/k8s-scripts'
-                def output_file = image.replaceAll('/', '-') + '.output'
-
-                // run image
-                test.runConformanceTests(pepperEnv, 'ctl01*', TEST_K8S_API_SERVER, image)
-
-                // collect output
-                sh "mkdir -p ${artifacts_dir}"
-                file_content = salt.getFileContent(pepperEnv, 'ctl01*', '/tmp/' + output_file)
-                writeFile file: "${artifacts_dir}${output_file}", text: file_content
-                sh "cat ${artifacts_dir}${output_file}"
-
-                // collect artifacts
-                archiveArtifacts artifacts: "${artifacts_dir}${output_file}"
-            }
-
-            stage('Run k8s conformance e2e tests') {
-                def image = K8S_CONFORMANCE_IMAGE
-                def output_file = image.replaceAll('/', '-') + '.output'
-
-                // run image
-                test.runConformanceTests(pepperEnv, 'ctl01*', TEST_K8S_API_SERVER, image)
-
-                // collect output
-                sh "mkdir -p ${artifacts_dir}"
-                file_content = salt.getFileContent(pepperEnv, 'ctl01*', '/tmp/' + output_file)
-                writeFile file: "${artifacts_dir}${output_file}", text: file_content
-                sh "cat ${artifacts_dir}${output_file}"
-
-                // collect artifacts
-                archiveArtifacts artifacts: "${artifacts_dir}${output_file}"
-            }
-        }
-
-        if (common.checkContains('TEST_SERVICE', 'openstack')) {
-            if (common.checkContains('TEST_DOCKER_INSTALL', 'true')) {
-                test.install_docker(pepperEnv, TEST_TEMPEST_TARGET)
-            }
-
-            stage('Run OpenStack tests') {
-                test.runTempestTests(pepperEnv, TEST_TEMPEST_IMAGE, TEST_TEMPEST_TARGET, TEST_TEMPEST_PATTERN)
-            }
-
-            writeFile(file: 'report.xml', text: salt.getFileContent(pepperEnv, TEST_TEMPEST_TARGET, '/root/report.xml'))
-            junit(keepLongStdio: true, testResults: 'report.xml', healthScaleFactor:  Double.parseDouble(TEST_JUNIT_RATIO))
-            def testResults = test.collectJUnitResults(currentBuild.rawBuild.getAction(hudson.tasks.test.AbstractTestResultAction.class))
-            if(testResults){
-                currentBuild.desc = String.format("result: %s", testResults["failed"] / testResults["total"])
-            }
-        }
-    } catch (Throwable e) {
-        currentBuild.result = 'FAILURE'
-        throw e
     }
 }
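For the OpenStack branch above, the build description is reduced to a single failed/total ratio taken from the collected JUnit counters. A standalone sketch of that computation; the testResults map stands in for what test.collectJUnitResults() returns:

    // Summarise JUnit counters as the share of failed tests, e.g. "result: 0.04".
    def testResults = [failed: 2, total: 50]   // example counters
    def description = null
    if (testResults) {
        description = String.format("result: %s", testResults["failed"] / testResults["total"])
    }
    assert description == "result: 0.04"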
diff --git a/test-system-reclass-pipeline.groovy b/test-system-reclass-pipeline.groovy
index 6e3cb11..7fc1181 100644
--- a/test-system-reclass-pipeline.groovy
+++ b/test-system-reclass-pipeline.groovy
@@ -26,72 +26,74 @@
 def checkouted = false
 def merged = false
 def systemRefspec = "HEAD"
-node() {
-  try {
-    stage("Checkout") {
-      if (gerritRef) {
-        // job is triggered by Gerrit
-        // test if change aren't already merged
-        def gerritChange = gerrit.getGerritChange(GERRIT_NAME, GERRIT_HOST, GERRIT_CHANGE_NUMBER, gerritCredentials)
-        merged = gerritChange.status == "MERGED"
-        if(!merged){
-          checkouted = gerrit.gerritPatchsetCheckout ([
-            credentialsId : gerritCredentials
-          ])
-          systemRefspec = GERRIT_REFSPEC
-        }
-        // change defaultGit variables if job triggered from Gerrit
-        defaultGitUrl = "${GERRIT_SCHEME}://${GERRIT_NAME}@${GERRIT_HOST}:${GERRIT_PORT}/${GERRIT_PROJECT}"
-      } else if(defaultGitRef && defaultGitUrl) {
-          checkouted = gerrit.gerritPatchsetCheckout(defaultGitUrl, defaultGitRef, "HEAD", gerritCredentials)
-      }
-    }
-
-    stage("Test") {
-      if(merged){
-        common.successMsg("Gerrit change is already merged, no need to test them")
-      }else{
-        if(checkouted){
-
-          def documentationOnly = false
-          if (gerritRef) {
-            documentationOnly = sh(script: "git diff-tree --no-commit-id --name-only -r HEAD | grep -v .releasenotes", returnStatus: true) == 1
+timeout(time: 12, unit: 'HOURS') {
+  node() {
+    try {
+      stage("Checkout") {
+        if (gerritRef) {
+          // job is triggered by Gerrit
+          // test if change isn't already merged
+          def gerritChange = gerrit.getGerritChange(GERRIT_NAME, GERRIT_HOST, GERRIT_CHANGE_NUMBER, gerritCredentials)
+          merged = gerritChange.status == "MERGED"
+          if(!merged){
+            checkouted = gerrit.gerritPatchsetCheckout ([
+              credentialsId : gerritCredentials
+            ])
+            systemRefspec = GERRIT_REFSPEC
           }
+          // change defaultGit variables if job triggered from Gerrit
+          defaultGitUrl = "${GERRIT_SCHEME}://${GERRIT_NAME}@${GERRIT_HOST}:${GERRIT_PORT}/${GERRIT_PROJECT}"
+        } else if(defaultGitRef && defaultGitUrl) {
+            checkouted = gerrit.gerritPatchsetCheckout(defaultGitUrl, defaultGitRef, "HEAD", gerritCredentials)
+        }
+      }
 
-          sh("git diff-tree --no-commit-id --diff-filter=d --name-only -r HEAD  | grep .yml | xargs -I {}  python -c \"import yaml; yaml.load(open('{}', 'r'))\" \\;")
+      stage("Test") {
+        if(merged){
+          common.successMsg("Gerrit change is already merged, no need to test it")
+        }else{
+          if(checkouted){
 
-          def branches = [:]
-          def testModels = documentationOnly ? [] : TEST_MODELS.split(',')
-          for (int i = 0; i < testModels.size(); i++) {
-            def cluster = testModels[i]
-            def clusterGitUrl = defaultGitUrl.substring(0, defaultGitUrl.lastIndexOf("/") + 1) + cluster
-            branches["${cluster}"] = {
-              build job: "test-salt-model-${cluster}", parameters: [
-                [$class: 'StringParameterValue', name: 'DEFAULT_GIT_URL', value: clusterGitUrl],
-                [$class: 'StringParameterValue', name: 'DEFAULT_GIT_REF', value: "HEAD"],
+            def documentationOnly = false
+            if (gerritRef) {
+              documentationOnly = sh(script: "git diff-tree --no-commit-id --name-only -r HEAD | grep -v .releasenotes", returnStatus: true) == 1
+            }
+
+            sh("git diff-tree --no-commit-id --diff-filter=d --name-only -r HEAD  | grep .yml | xargs -I {}  python -c \"import yaml; yaml.load(open('{}', 'r'))\" \\;")
+
+            def branches = [:]
+            def testModels = documentationOnly ? [] : TEST_MODELS.split(',')
+            for (int i = 0; i < testModels.size(); i++) {
+              def cluster = testModels[i]
+              def clusterGitUrl = defaultGitUrl.substring(0, defaultGitUrl.lastIndexOf("/") + 1) + cluster
+              branches["${cluster}"] = {
+                build job: "test-salt-model-${cluster}", parameters: [
+                  [$class: 'StringParameterValue', name: 'DEFAULT_GIT_URL', value: clusterGitUrl],
+                  [$class: 'StringParameterValue', name: 'DEFAULT_GIT_REF', value: "HEAD"],
+                  [$class: 'StringParameterValue', name: 'SYSTEM_GIT_URL', value: defaultGitUrl],
+                  [$class: 'StringParameterValue', name: 'SYSTEM_GIT_REF', value: systemRefspec]
+                ]
+              }
+            }
+            branches["cookiecutter"] = {
+              build job: "test-mk-cookiecutter-templates", parameters: [
                 [$class: 'StringParameterValue', name: 'SYSTEM_GIT_URL', value: defaultGitUrl],
                 [$class: 'StringParameterValue', name: 'SYSTEM_GIT_REF', value: systemRefspec]
               ]
             }
+            parallel branches
+          }else{
+             throw new Exception("Cannot checkout gerrit patchset, GERRIT_REFSPEC and DEFAULT_GIT_REF are null")
           }
-          branches["cookiecutter"] = {
-            build job: "test-mk-cookiecutter-templates", parameters: [
-              [$class: 'StringParameterValue', name: 'SYSTEM_GIT_URL', value: defaultGitUrl],
-              [$class: 'StringParameterValue', name: 'SYSTEM_GIT_REF', value: systemRefspec]
-            ]
-          }
-          parallel branches
-        }else{
-           throw new Exception("Cannot checkout gerrit patchset, GERRIT_REFSPEC and DEFAULT_GIT_REF is null")
         }
       }
+    } catch (Throwable e) {
+        // If there was an error or exception thrown, the build failed
+        currentBuild.result = "FAILURE"
+        currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+        throw e
+    } finally {
+        common.sendNotification(currentBuild.result,"",["slack"])
     }
-  } catch (Throwable e) {
-      // If there was an error or exception thrown, the build failed
-      currentBuild.result = "FAILURE"
-      currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
-      throw e
-  } finally {
-      common.sendNotification(currentBuild.result,"",["slack"])
   }
 }
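The Test stage above fans out into one downstream build per cluster model plus a cookiecutter check, all collected in a map and handed to parallel. A sketch of how that map is put together; the cluster list, Gerrit URL and refspec are example values, and build/parallel are the regular Jenkins steps used above:

    def branches = [:]
    def testModels = ["aio-ocata", "k8s-ha"]                                                   // example clusters
    def defaultGitUrl = "ssh://jenkins@gerrit.example.org:29418/salt-models/mcp-virtual-lab"   // example URL
    def systemRefspec = "refs/changes/42/12345/3"                                              // example refspec

    for (int i = 0; i < testModels.size(); i++) {
        def cluster = testModels[i]
        // the per-cluster model repo lives next to the system-level repo
        def clusterGitUrl = defaultGitUrl.substring(0, defaultGitUrl.lastIndexOf("/") + 1) + cluster
        branches["${cluster}"] = {
            build job: "test-salt-model-${cluster}", parameters: [
                [$class: 'StringParameterValue', name: 'DEFAULT_GIT_URL', value: clusterGitUrl],
                [$class: 'StringParameterValue', name: 'SYSTEM_GIT_URL', value: defaultGitUrl],
                [$class: 'StringParameterValue', name: 'SYSTEM_GIT_REF', value: systemRefspec]
            ]
        }
    }
    parallel branches   // run all cluster jobs concurrently; a failure in any branch fails the stage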
diff --git a/update-jenkins-master-jobs.groovy b/update-jenkins-master-jobs.groovy
index 1361632..1b0d549 100644
--- a/update-jenkins-master-jobs.groovy
+++ b/update-jenkins-master-jobs.groovy
@@ -15,21 +15,22 @@
 def pepperEnv = "pepperEnv"
 def target = ['expression': TARGET_SERVERS, 'type': 'compound']
 def result
+timeout(time: 12, unit: 'HOURS') {
+    node("python") {
+        try {
 
-node("python") {
-    try {
+            stage('Setup virtualenv for Pepper') {
+                python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+            }
 
-        stage('Setup virtualenv for Pepper') {
-            python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+            stage('Update Jenkins jobs') {
+                result = salt.runSaltCommand(pepperEnv, 'local', target, 'state.apply', null, 'jenkins.client')
+                salt.checkResult(result)
+            }
+
+        } catch (Throwable e) {
+            currentBuild.result = 'FAILURE'
+            throw e
         }
-
-        stage('Update Jenkins jobs') {
-            result = salt.runSaltCommand(pepperEnv, 'local', target, 'state.apply', null, 'jenkins.client')
-            salt.checkResult(result)
-        }
-
-    } catch (Throwable e) {
-        currentBuild.result = 'FAILURE'
-        throw e
     }
 }
diff --git a/update-mirror-image.groovy b/update-mirror-image.groovy
index 238dbb2..8bde843 100644
--- a/update-mirror-image.groovy
+++ b/update-mirror-image.groovy
@@ -22,82 +22,83 @@
 def salt = new com.mirantis.mk.Salt()
 def python = new com.mirantis.mk.Python()
 def venvPepper = "venvPepper"
+timeout(time: 12, unit: 'HOURS') {
+    node() {
+        try {
+            python.setupPepperVirtualenv(venvPepper, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
 
-node() {
-    try {
-        python.setupPepperVirtualenv(venvPepper, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+            if(UPDATE_APTLY.toBoolean()){
+                stage('Update Aptly mirrors'){
+                    def aptlyMirrorArgs = "-s -v"
 
-        if(UPDATE_APTLY.toBoolean()){
-            stage('Update Aptly mirrors'){
-                def aptlyMirrorArgs = "-s -v"
+                    salt.enforceState(venvPepper, '*apt*', ['aptly.server'], true)
+                    sleep(10)
 
-                salt.enforceState(venvPepper, '*apt*', ['aptly.server'], true)
-                sleep(10)
-
-                if(UPDATE_APTLY_MIRRORS != ""){
-                    common.infoMsg("Updating List of Aptly mirrors.")
-                    UPDATE_APTLY_MIRRORS = UPDATE_APTLY_MIRRORS.replaceAll("\\s","")
-                    def mirrors = UPDATE_APTLY_MIRRORS.tokenize(",")
-                    for(mirror in mirrors){
-                        salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.script', ['salt://aptly/files/aptly_mirror_update.sh', "args=\"${aptlyMirrorArgs} -m ${mirror}\"", 'runas=aptly'], null, true)
+                    if(UPDATE_APTLY_MIRRORS != ""){
+                        common.infoMsg("Updating list of Aptly mirrors.")
+                        UPDATE_APTLY_MIRRORS = UPDATE_APTLY_MIRRORS.replaceAll("\\s","")
+                        def mirrors = UPDATE_APTLY_MIRRORS.tokenize(",")
+                        for(mirror in mirrors){
+                            salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.script', ['salt://aptly/files/aptly_mirror_update.sh', "args=\"${aptlyMirrorArgs} -m ${mirror}\"", 'runas=aptly'], null, true)
+                        }
+                    }
+                    else{
+                        common.infoMsg("Updating all Aptly mirrors.")
+                        salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.script', ['salt://aptly/files/aptly_mirror_update.sh', "args=\"${aptlyMirrorArgs}\"", 'runas=aptly'], null, true)
                     }
                 }
-                else{
-                    common.infoMsg("Updating all Aptly mirrors.")
-                    salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.script', ['salt://aptly/files/aptly_mirror_update.sh', "args=\"${aptlyMirrorArgs}\"", 'runas=aptly'], null, true)
+            }
+            if(PUBLISH_APTLY.toBoolean()){
+                def aptlyPublishArgs = "-av"
+
+                common.infoMsg("Publishing all Aptly snapshots.")
+
+                salt.enforceState(venvPepper, '*apt*', ['aptly.publisher'], true)
+                sleep(10)
+
+                if(CLEANUP_APTLY.toBoolean()){
+                    aptlyPublishArgs += "c"
+                }
+                if(RECREATE_APTLY_PUBLISHES.toBoolean()){
+                    aptlyPublishArgs += "r"
+                }
+                if(FORCE_OVERWRITE_APTLY_PUBLISHES.toBoolean()){
+                    aptlyPublishArgs += "f"
+                }
+                salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.script', ['salt://aptly/files/aptly_publish_update.sh', "args=\"${aptlyPublishArgs}\"", 'runas=aptly'], null, true)
+            }
+            if(UPDATE_DOCKER_REGISTRY.toBoolean()){
+                stage('Update Docker images'){
+                    common.infoMsg("Updating Docker images.")
+                    salt.enforceState(venvPepper, '*apt*', ['docker.client.registry'], true)
+                    if(CLEANUP_DOCKER_CACHE.toBoolean()){
+                        salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.run', ['docker system prune --all --force'], null, true)
+                    }
                 }
             }
-        }
-        if(PUBLISH_APTLY.toBoolean()){
-            def aptlyPublishArgs = "-av"
-
-            common.infoMsg("Publishing all Aptly snapshots.")
-
-            salt.enforceState(venvPepper, '*apt*', ['aptly.publisher'], true)
-            sleep(10)
-
-            if(CLEANUP_APTLY.toBoolean()){
-                aptlyPublishArgs += "c"
-            }
-            if(RECREATE_APTLY_PUBLISHES.toBoolean()){
-                aptlyPublishArgs += "r"
-            }
-            if(FORCE_OVERWRITE_APTLY_PUBLISHES.toBoolean()){
-                aptlyPublishArgs += "f"
-            }
-            salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.script', ['salt://aptly/files/aptly_publish_update.sh', "args=\"${aptlyPublishArgs}\"", 'runas=aptly'], null, true)
-        }
-        if(UPDATE_DOCKER_REGISTRY.toBoolean()){
-            stage('Update Docker images'){
-                common.infoMsg("Updating Docker images.")
-                salt.enforceState(venvPepper, '*apt*', ['docker.client.registry'], true)
-                if(CLEANUP_DOCKER_CACHE.toBoolean()){
-                    salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.run', ['docker system prune --all --force'], null, true)
+            if(UPDATE_PYPI.toBoolean()){
+                stage('Update PyPi packages'){
+                    common.infoMsg("Updating PyPi packages.")
+                    salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.run', ['pip2pi /srv/pypi_mirror/packages/ -r /srv/pypi_mirror/requirements.txt'], null, true)
                 }
             }
-        }
-        if(UPDATE_PYPI.toBoolean()){
-            stage('Update PyPi packages'){
-                common.infoMsg("Updating PyPi packages.")
-                salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.run', ['pip2pi /srv/pypi_mirror/packages/ -r /srv/pypi_mirror/requirements.txt'], null, true)
+            if(UPDATE_GIT.toBoolean()){
+                stage('Update Git repositories'){
+                    common.infoMsg("Updating Git repositories.")
+                    salt.enforceState(venvPepper, '*apt*', ['git.server'], true)
+                }
             }
-        }
-        if(UPDATE_GIT.toBoolean()){
-            stage('Update Git repositories'){
-                common.infoMsg("Updating Git repositories.")
-                salt.enforceState(venvPepper, '*apt*', ['git.server'], true)
+            if(UPDATE_IMAGES.toBoolean()){
+                stage('Update VM images'){
+                    common.infoMsg("Updating VM images.")
+                    salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.run', ['/srv/scripts/update-images.sh'], null, true)
+                }
             }
+        } catch (Throwable e) {
+            // If there was an error or exception thrown, the build failed
+            currentBuild.result = "FAILURE"
+            currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+            throw e
         }
-        if(UPDATE_IMAGES.toBoolean()){
-            stage('Update VM images'){
-                common.infoMsg("Updating VM images.")
-                salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.run', ['/srv/scripts/update-images.sh'], null, true)
-            }
-        }
-    } catch (Throwable e) {
-        // If there was an error or exception thrown, the build failed
-        currentBuild.result = "FAILURE"
-        currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
-        throw e
     }
 }
\ No newline at end of file
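In the Aptly publish step of update-mirror-image.groovy above, the arguments for aptly_publish_update.sh are built by appending single-letter flags to "-av" depending on the boolean job parameters. A standalone sketch of that flag assembly, with plain booleans in place of CLEANUP_APTLY, RECREATE_APTLY_PUBLISHES and FORCE_OVERWRITE_APTLY_PUBLISHES:

    // Compose aptly_publish_update.sh arguments from the job's boolean switches.
    String buildPublishArgs(boolean cleanup, boolean recreate, boolean forceOverwrite) {
        def args = "-av"
        if (cleanup)        { args += "c" }
        if (recreate)       { args += "r" }
        if (forceOverwrite) { args += "f" }
        return args
    }

    assert buildPublishArgs(false, false, false) == "-av"
    assert buildPublishArgs(true, true, false) == "-avcr"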
diff --git a/update-package.groovy b/update-package.groovy
index c71f598..790e2ac 100644
--- a/update-package.groovy
+++ b/update-package.groovy
@@ -25,89 +25,90 @@
 def packages
 def command
 def commandKwargs
+timeout(time: 12, unit: 'HOURS') {
+    node() {
+        try {
 
-node() {
-    try {
-
-        stage('Setup virtualenv for Pepper') {
-            python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
-        }
-
-        stage('List target servers') {
-            minions = salt.getMinions(pepperEnv, TARGET_SERVERS)
-
-            if (minions.isEmpty()) {
-                throw new Exception("No minion was targeted")
+            stage('Setup virtualenv for Pepper') {
+                python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
             }
 
-            if (TARGET_SUBSET_TEST != "") {
-                targetTestSubset = minions.subList(0, Integer.valueOf(TARGET_SUBSET_TEST)).join(' or ')
-            } else {
-                targetTestSubset = minions.join(' or ')
+            stage('List target servers') {
+                minions = salt.getMinions(pepperEnv, TARGET_SERVERS)
+
+                if (minions.isEmpty()) {
+                    throw new Exception("No minion was targeted")
+                }
+
+                if (TARGET_SUBSET_TEST != "") {
+                    targetTestSubset = minions.subList(0, Integer.valueOf(TARGET_SUBSET_TEST)).join(' or ')
+                } else {
+                    targetTestSubset = minions.join(' or ')
+                }
+                targetLiveSubset = minions.subList(0, Integer.valueOf(TARGET_SUBSET_LIVE)).join(' or ')
+
+                targetLiveAll = minions.join(' or ')
+                common.infoMsg("Found nodes: ${targetLiveAll}")
+                common.infoMsg("Selected test nodes: ${targetTestSubset}")
+                common.infoMsg("Selected sample nodes: ${targetLiveSubset}")
             }
-            targetLiveSubset = minions.subList(0, Integer.valueOf(TARGET_SUBSET_LIVE)).join(' or ')
 
-            targetLiveAll = minions.join(' or ')
-            common.infoMsg("Found nodes: ${targetLiveAll}")
-            common.infoMsg("Selected test nodes: ${targetTestSubset}")
-            common.infoMsg("Selected sample nodes: ${targetLiveSubset}")
-        }
-
-        stage("List package upgrades") {
-            common.infoMsg("Listing all the packages that have a new update available on test nodes: ${targetTestSubset}")
-            salt.runSaltProcessStep(pepperEnv, targetTestSubset, 'pkg.list_upgrades', [], null, true)
-            if(TARGET_PACKAGES != "" && TARGET_PACKAGES != "*"){
-                common.infoMsg("Note that only the ${TARGET_PACKAGES} would be installed from the above list of available updates on the ${targetTestSubset}")
+            stage("List package upgrades") {
+                common.infoMsg("Listing all the packages that have a new update available on test nodes: ${targetTestSubset}")
+                salt.runSaltProcessStep(pepperEnv, targetTestSubset, 'pkg.list_upgrades', [], null, true)
+                if(TARGET_PACKAGES != "" && TARGET_PACKAGES != "*"){
+                    common.infoMsg("Note that only ${TARGET_PACKAGES} will be installed from the above list of available updates on ${targetTestSubset}")
+                }
             }
-        }
 
-        stage('Confirm live package upgrades on sample') {
-            if(TARGET_PACKAGES==""){
-                timeout(time: 2, unit: 'HOURS') {
-                    def userInput = input(
-                     id: 'userInput', message: 'Insert package names for update', parameters: [
-                     [$class: 'TextParameterDefinition', defaultValue: '', description: 'Package names (or *)', name: 'packages']
-                    ])
-                    if(userInput!= "" && userInput!= "*"){
-                        TARGET_PACKAGES = userInput
+            stage('Confirm live package upgrades on sample') {
+                if(TARGET_PACKAGES==""){
+                    timeout(time: 2, unit: 'HOURS') {
+                        def userInput = input(
+                         id: 'userInput', message: 'Insert package names for update', parameters: [
+                         [$class: 'TextParameterDefinition', defaultValue: '', description: 'Package names (or *)', name: 'packages']
+                        ])
+                        if(userInput!= "" && userInput!= "*"){
+                            TARGET_PACKAGES = userInput
+                        }
+                    }
+                }else{
+                    timeout(time: 2, unit: 'HOURS') {
+                       input message: "Approve live package upgrades on ${targetLiveSubset} nodes?"
                     }
                 }
-            }else{
+            }
+
+            if (TARGET_PACKAGES != "") {
+                command = "pkg.install"
+                packages = TARGET_PACKAGES.tokenize(' ')
+                commandKwargs = ['only_upgrade': 'true']
+            }else {
+                command = "pkg.upgrade"
+                packages = null
+            }
+
+            stage('Apply package upgrades on sample') {
+                out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, packages, commandKwargs)
+                salt.printSaltCommandResult(out)
+            }
+
+            stage('Confirm package upgrades on all nodes') {
                 timeout(time: 2, unit: 'HOURS') {
-                   input message: "Approve live package upgrades on ${targetLiveSubset} nodes?"
+                   input message: "Approve live package upgrades on ${targetLiveAll} nodes?"
                 }
             }
-        }
 
-        if (TARGET_PACKAGES != "") {
-            command = "pkg.install"
-            packages = TARGET_PACKAGES.tokenize(' ')
-            commandKwargs = ['only_upgrade': 'true']
-        }else {
-            command = "pkg.upgrade"
-            packages = null
-        }
-
-        stage('Apply package upgrades on sample') {
-            out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, packages, commandKwargs)
-            salt.printSaltCommandResult(out)
-        }
-
-        stage('Confirm package upgrades on all nodes') {
-            timeout(time: 2, unit: 'HOURS') {
-               input message: "Approve live package upgrades on ${targetLiveAll} nodes?"
+            stage('Apply package upgrades on all nodes') {
+                out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, packages, commandKwargs)
+                salt.printSaltCommandResult(out)
             }
-        }
 
-        stage('Apply package upgrades on all nodes') {
-            out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, packages, commandKwargs)
-            salt.printSaltCommandResult(out)
+        } catch (Throwable e) {
+            // If there was an error or exception thrown, the build failed
+            currentBuild.result = "FAILURE"
+            currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+            throw e
         }
-
-    } catch (Throwable e) {
-        // If there was an error or exception thrown, the build failed
-        currentBuild.result = "FAILURE"
-        currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
-        throw e
     }
 }
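update-package.groovy above keeps its interactive 2-hour input timeouts nested inside the new 12-hour outer limit, and picks the Salt call based on whether explicit packages were requested: pkg.install with only_upgrade for a named list, pkg.upgrade otherwise. A standalone sketch of that selection; the TARGET_PACKAGES value is an example:

    // Choose the Salt pkg function and its arguments from the requested package list.
    def targetPackages = "nova-common nova-api"   // example value of TARGET_PACKAGES
    def command, packages, commandKwargs

    if (targetPackages != "") {
        command = "pkg.install"
        packages = targetPackages.tokenize(' ')
        commandKwargs = ['only_upgrade': 'true']   // upgrade existing packages only, never install new ones
    } else {
        command = "pkg.upgrade"
        packages = null
    }

    assert command == "pkg.install"
    assert packages == ["nova-common", "nova-api"]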
diff --git a/update-reclass-metadata.groovy b/update-reclass-metadata.groovy
index be695ca..cb3bd06 100644
--- a/update-reclass-metadata.groovy
+++ b/update-reclass-metadata.groovy
@@ -15,22 +15,23 @@
 def pepperEnv = "pepperEnv"
 def target = ['expression': TARGET_SERVERS, 'type': 'compound']
 def result
+timeout(time: 12, unit: 'HOURS') {
+    node("python") {
+        try {
 
-node("python") {
-    try {
+            stage('Setup virtualenv for Pepper') {
+                python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+            }
 
-        stage('Setup virtualenv for Pepper') {
-            python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+            stage('Update Reclass model') {
+                result = salt.runSaltCommand(pepperEnv, 'local', target, 'state.apply', null, 'reclass.storage')
+                result = salt.runSaltCommand(pepperEnv, 'local', target, 'state.apply', null, 'reclass.storage.node')
+                salt.checkResult(result)
+            }
+
+        } catch (Throwable e) {
+            currentBuild.result = 'FAILURE'
+            throw e
         }
-
-        stage('Update Reclass model') {
-            result = salt.runSaltCommand(pepperEnv, 'local', target, 'state.apply', null, 'reclass.storage')
-            result = salt.runSaltCommand(pepperEnv, 'local', target, 'state.apply', null, 'reclass.storage.node')
-            salt.checkResult(result)
-        }
-
-    } catch (Throwable e) {
-        currentBuild.result = 'FAILURE'
-        throw e
     }
 }
diff --git a/update-salt-environment.groovy b/update-salt-environment.groovy
index 0b570fc..b91f385 100644
--- a/update-salt-environment.groovy
+++ b/update-salt-environment.groovy
@@ -13,42 +13,43 @@
 def common = new com.mirantis.mk.Common()
 def python = new com.mirantis.mk.Python()
 def venvPepper = "venvPepper"
+timeout(time: 12, unit: 'HOURS') {
+    node() {
+        try {
+            python.setupPepperVirtualenv(venvPepper, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
 
-node() {
-    try {
-        python.setupPepperVirtualenv(venvPepper, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
-
-        stage("Update formulas"){
-            if(UPDATE_FORMULAS.toBoolean()){
-                common.infoMsg("Updating salt formulas")
+            stage("Update formulas"){
+                if(UPDATE_FORMULAS.toBoolean()){
+                    common.infoMsg("Updating salt formulas")
+                    salt.cmdRun(
+                        venvPepper,
+                        "I@salt:master",
+                        'apt-get update && apt-get install -y salt-formula-*'
+                    )
+                    common.infoMsg("Running salt sync-all")
+                    salt.runSaltProcessStep(venvPepper, 'jma*', 'saltutil.sync_all', [], null, true)
+                }
+            }
+            stage("Update Reclass") {
+                common.infoMsg("Updating reclass model")
                 salt.cmdRun(
                     venvPepper,
                     "I@salt:master",
-                    'apt-get update && apt-get install -y salt-formula-*'
+                    'cd /srv/salt/reclass && git pull -r && git submodule update',
+                    false
                 )
-                common.infoMsg("Running salt sync-all")
-                salt.runSaltProcessStep(venvPepper, 'jma*', 'saltutil.sync_all', [], null, true)
-            }
-        }
-        stage("Update Reclass") {
-            common.infoMsg("Updating reclass model")
-            salt.cmdRun(
-                venvPepper,
-                "I@salt:master",
-                'cd /srv/salt/reclass && git pull -r && git submodule update',
-                false
-            )
 
-            salt.enforceState(
-                venvPepper,
-                "I@salt:master",
-                'reclass',
-                true
-            )
+                salt.enforceState(
+                    venvPepper,
+                    "I@salt:master",
+                    'reclass',
+                    true
+                )
+            }
+        } catch (Throwable e) {
+            // If there was an error or exception thrown, the build failed
+            currentBuild.result = "FAILURE"
+            throw e
         }
-    } catch (Throwable e) {
-        // If there was an error or exception thrown, the build failed
-        currentBuild.result = "FAILURE"
-        throw e
     }
 }
\ No newline at end of file
diff --git a/update-salt-master-formulas.groovy b/update-salt-master-formulas.groovy
index 9d556f0..d029898 100644
--- a/update-salt-master-formulas.groovy
+++ b/update-salt-master-formulas.groovy
@@ -15,21 +15,22 @@
 def pepperEnv = "pepperEnv"
 def target = ['expression': TARGET_SERVERS, 'type': 'compound']
 def result
+timeout(time: 12, unit: 'HOURS') {
+    node("python") {
+        try {
 
-node("python") {
-    try {
+            stage('Setup virtualenv for Pepper') {
+                python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+            }
 
-        stage('Setup virtualenv for Pepper') {
-            python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+            stage('Update Salt formulas') {
+                result = salt.runSaltCommand(pepperEnv, 'local', target, 'state.apply', null, 'salt.master.env')
+                salt.checkResult(result)
+            }
+
+        } catch (Throwable e) {
+            currentBuild.result = 'FAILURE'
+            throw e
         }
-
-        stage('Update Salt formulas') {
-            result = salt.runSaltCommand(pepperEnv, 'local', target, 'state.apply', null, 'salt.master.env')
-            salt.checkResult(result)
-        }
-
-    } catch (Throwable e) {
-        currentBuild.result = 'FAILURE'
-        throw e
     }
 }
diff --git a/upgrade-mcp-release.groovy b/upgrade-mcp-release.groovy
index 9bdce1a..d8955e3 100644
--- a/upgrade-mcp-release.groovy
+++ b/upgrade-mcp-release.groovy
@@ -14,52 +14,53 @@
 def common = new com.mirantis.mk.Common()
 def python = new com.mirantis.mk.Python()
 def venvPepper = "venvPepper"
+timeout(time: 12, unit: 'HOURS') {
+    node("python") {
+        try {
+            python.setupPepperVirtualenv(venvPepper, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
 
-node("python") {
-    try {
-        python.setupPepperVirtualenv(venvPepper, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+            stage("Update Reclass"){
+                common.infoMsg("Updating reclass model")
+                salt.cmdRun(venvPepper, "I@salt:master", 'cd /srv/salt/reclass && git pull -r && git submodule update', false)
+                salt.runSaltProcessStep(venvPepper, 'I@salt:master', 'cmd.run', ['reclass-salt --top'], null, true)
+                salt.enforceState(venvPepper, "I@salt:master", 'reclass', true)
+            }
 
-        stage("Update Reclass"){
-            common.infoMsg("Updating reclass model")
-            salt.cmdRun(venvPepper, "I@salt:master", 'cd /srv/salt/reclass && git pull -r && git submodule update', false)
-            salt.runSaltProcessStep(venvPepper, 'I@salt:master', 'cmd.run', ['reclass-salt --top'], null, true)
-            salt.enforceState(venvPepper, "I@salt:master", 'reclass', true)
-        }
+            if(UPDATE_LOCAL_REPOS.toBoolean()){
+                stage("Update local repos"){
+                    common.infoMsg("Updating local repositories")
+                    salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.run', ["aptly mirror list --raw | grep -E '*' | xargs -n 1 aptly mirror drop -force", 'runas=aptly'], null, true)
+                    salt.enforceState(venvPepper, '*apt*', 'aptly', true)
+                    salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.script', ['salt://aptly/files/aptly_mirror_update.sh', "args=-sv", 'runas=aptly'], null, true)
+                    salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.script', ['salt://aptly/files/aptly_publish_update.sh', "args=-acfrv", 'runas=aptly'], null, true)
 
-        if(UPDATE_LOCAL_REPOS.toBoolean()){
-            stage("Update local repos"){
-                common.infoMsg("Updating local repositories")
-                salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.run', ["aptly mirror list --raw | grep -E '*' | xargs -n 1 aptly mirror drop -force", 'runas=aptly'], null, true)
-                salt.enforceState(venvPepper, '*apt*', 'aptly', true)
-                salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.script', ['salt://aptly/files/aptly_mirror_update.sh', "args=-sv", 'runas=aptly'], null, true)
-                salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.script', ['salt://aptly/files/aptly_publish_update.sh', "args=-acfrv", 'runas=aptly'], null, true)
+                    salt.enforceState(venvPepper, '*apt*', 'docker.client.registry', true)
 
-                salt.enforceState(venvPepper, '*apt*', 'docker.client.registry', true)
+                    salt.enforceState(venvPepper, '*apt*', 'git server', true)
 
-                salt.enforceState(venvPepper, '*apt*', 'git server', true)
+                    salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.run', ['pip2pi /srv/pypi_mirror/packages/ -r /srv/pypi_mirror/requirements.txt'], null, true)
 
-                salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.run', ['pip2pi /srv/pypi_mirror/packages/ -r /srv/pypi_mirror/requirements.txt'], null, true)
+                    salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.run', ['/srv/scripts/update-images.sh'], null, true)
+                }
+            }
 
-                salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.run', ['/srv/scripts/update-images.sh'], null, true)
+            stage("Update APT repos"){
+                common.infoMsg("Updating APT repositories")
+                salt.enforceState(venvPepper, "I@linux:system", 'linux.system.repo', true)
+            }
+
+            stage("Update formulas"){
+                common.infoMsg("Updating salt formulas")
+                salt.cmdRun(venvPepper, "I@salt:master", 'apt-get clean && apt-get update && apt-get install -y salt-formula-*')
+
+                common.infoMsg("Running salt sync-all")
+                salt.runSaltProcessStep(venvPepper, '*', 'saltutil.sync_all', [], null, true)
             }
         }
-
-        stage("Update APT repos"){
-            common.infoMsg("Updating APT repositories")
-            salt.enforceState(venvPepper, "I@linux:system", 'linux.system.repo', true)
+        catch (Throwable e) {
+            // If there was an error or exception thrown, the build failed
+            currentBuild.result = "FAILURE"
+            throw e
         }
-
-        stage("Update formulas"){
-            common.infoMsg("Updating salt formulas")
-            salt.cmdRun(venvPepper, "I@salt:master", 'apt-get clean && apt-get update && apt-get install -y salt-formula-*')
-
-            common.infoMsg("Running salt sync-all")
-            salt.runSaltProcessStep(venvPepper, '*', 'saltutil.sync_all', [], null, true)
-        }
-    }
-    catch (Throwable e) {
-        // If there was an error or exception thrown, the build failed
-        currentBuild.result = "FAILURE"
-        throw e
     }
 }
\ No newline at end of file
diff --git a/validate-cloud.groovy b/validate-cloud.groovy
index f1a4ab1..eecef42 100644
--- a/validate-cloud.groovy
+++ b/validate-cloud.groovy
@@ -44,97 +44,98 @@
 
 def pepperEnv = "pepperEnv"
 def artifacts_dir = 'validation_artifacts/'
-
-node() {
-    try{
-        stage('Setup virtualenv for Pepper') {
-            python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
-        }
-
-        stage('Configure') {
-            validate.installDocker(pepperEnv, TARGET_NODE)
-            if (ACCUMULATE_RESULTS.toBoolean() == false) {
-                sh "rm -r ${artifacts_dir}"
+timeout(time: 12, unit: 'HOURS') {
+    node() {
+        try{
+            stage('Setup virtualenv for Pepper') {
+                python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
             }
-            sh "mkdir -p ${artifacts_dir}"
-        }
 
-        stage('Run Tempest tests') {
-            if (RUN_TEMPEST_TESTS.toBoolean() == true) {
-                validate.runTempestTests(pepperEnv, TARGET_NODE, TEST_IMAGE, artifacts_dir, TEMPEST_CONFIG_REPO, TEMPEST_CONFIG_BRANCH, TEMPEST_REPO, TEMPEST_VERSION, TEMPEST_TEST_SET)
-            } else {
-                common.infoMsg("Skipping Tempest tests")
+            stage('Configure') {
+                validate.installDocker(pepperEnv, TARGET_NODE)
+                if (ACCUMULATE_RESULTS.toBoolean() == false) {
+                    sh "rm -r ${artifacts_dir}"
+                }
+                sh "mkdir -p ${artifacts_dir}"
             }
-        }
 
-        stage('Run Rally tests') {
-            if (RUN_RALLY_TESTS.toBoolean() == true) {
-                def rally_variables = ["floating_network=${FLOATING_NETWORK}",
-                                       "rally_image=${RALLY_IMAGE}",
-                                       "rally_flavor=${RALLY_FLAVOR}",
-                                       "availability_zone=${AVAILABILITY_ZONE}"]
-                validate.runRallyTests(pepperEnv, TARGET_NODE, TEST_IMAGE, artifacts_dir, RALLY_CONFIG_REPO, RALLY_CONFIG_BRANCH, RALLY_SCENARIOS, RALLY_TASK_ARGS_FILE, rally_variables)
-            } else {
-                common.infoMsg("Skipping Rally tests")
+            stage('Run Tempest tests') {
+                if (RUN_TEMPEST_TESTS.toBoolean() == true) {
+                    validate.runTempestTests(pepperEnv, TARGET_NODE, TEST_IMAGE, artifacts_dir, TEMPEST_CONFIG_REPO, TEMPEST_CONFIG_BRANCH, TEMPEST_REPO, TEMPEST_VERSION, TEMPEST_TEST_SET)
+                } else {
+                    common.infoMsg("Skipping Tempest tests")
+                }
             }
-        }
 
-        stage('Run SPT tests') {
-            if (RUN_SPT_TESTS.toBoolean() == true) {
-                def spt_variables = ["spt_ssh_user=${SPT_SSH_USER}",
-                                     "spt_floating_network=${FLOATING_NETWORK}",
-                                     "spt_image=${SPT_IMAGE}",
-                                     "spt_user=${SPT_IMAGE_USER}",
-                                     "spt_flavor=${SPT_FLAVOR}",
-                                     "spt_availability_zone=${AVAILABILITY_ZONE}"]
-                validate.runSptTests(pepperEnv, TARGET_NODE, TEST_IMAGE, artifacts_dir, spt_variables)
-            } else {
-                common.infoMsg("Skipping SPT tests")
+            stage('Run Rally tests') {
+                if (RUN_RALLY_TESTS.toBoolean() == true) {
+                    def rally_variables = ["floating_network=${FLOATING_NETWORK}",
+                                           "rally_image=${RALLY_IMAGE}",
+                                           "rally_flavor=${RALLY_FLAVOR}",
+                                           "availability_zone=${AVAILABILITY_ZONE}"]
+                    validate.runRallyTests(pepperEnv, TARGET_NODE, TEST_IMAGE, artifacts_dir, RALLY_CONFIG_REPO, RALLY_CONFIG_BRANCH, RALLY_SCENARIOS, RALLY_TASK_ARGS_FILE, rally_variables)
+                } else {
+                    common.infoMsg("Skipping Rally tests")
+                }
             }
-        }
 
-        stage('Run k8s bootstrap tests') {
-            if (RUN_K8S_TESTS.toBoolean() == true) {
-                def image = 'tomkukral/k8s-scripts'
-                def output_file = 'k8s-bootstrap-tests.txt'
-                def outfile = "/tmp/" + image.replaceAll('/', '-') + '.output'
-                test.runConformanceTests(pepperEnv, TEST_K8S_NODE, TEST_K8S_API_SERVER, image)
-
-                def file_content = validate.getFileContent(pepperEnv, TEST_K8S_NODE, outfile)
-                writeFile file: "${artifacts_dir}${output_file}", text: file_content
-            } else {
-                common.infoMsg("Skipping k8s bootstrap tests")
+            stage('Run SPT tests') {
+                if (RUN_SPT_TESTS.toBoolean() == true) {
+                    def spt_variables = ["spt_ssh_user=${SPT_SSH_USER}",
+                                         "spt_floating_network=${FLOATING_NETWORK}",
+                                         "spt_image=${SPT_IMAGE}",
+                                         "spt_user=${SPT_IMAGE_USER}",
+                                         "spt_flavor=${SPT_FLAVOR}",
+                                         "spt_availability_zone=${AVAILABILITY_ZONE}"]
+                    validate.runSptTests(pepperEnv, TARGET_NODE, TEST_IMAGE, artifacts_dir, spt_variables)
+                } else {
+                    common.infoMsg("Skipping SPT tests")
+                }
             }
-        }
 
-        stage('Run k8s conformance e2e tests') {
-            if (RUN_K8S_TESTS.toBoolean() == true) {
-                def image = TEST_K8S_CONFORMANCE_IMAGE
-                def output_file = 'report-k8s-e2e-tests.txt'
-                def outfile = "/tmp/" + image.replaceAll('/', '-') + '.output'
-                test.runConformanceTests(pepperEnv, TEST_K8S_NODE, TEST_K8S_API_SERVER, image)
+            stage('Run k8s bootstrap tests') {
+                if (RUN_K8S_TESTS.toBoolean() == true) {
+                    def image = 'tomkukral/k8s-scripts'
+                    def output_file = 'k8s-bootstrap-tests.txt'
+                    def outfile = "/tmp/" + image.replaceAll('/', '-') + '.output'
+                    test.runConformanceTests(pepperEnv, TEST_K8S_NODE, TEST_K8S_API_SERVER, image)
 
-                def file_content = validate.getFileContent(pepperEnv, TEST_K8S_NODE, outfile)
-                writeFile file: "${artifacts_dir}${output_file}", text: file_content
-            } else {
-                common.infoMsg("Skipping k8s conformance e2e tests")
+                    def file_content = validate.getFileContent(pepperEnv, TEST_K8S_NODE, outfile)
+                    writeFile file: "${artifacts_dir}${output_file}", text: file_content
+                } else {
+                    common.infoMsg("Skipping k8s bootstrap tests")
+                }
             }
-        }
-        stage('Generate report') {
-            if (GENERATE_REPORT.toBoolean() == true) {
-                common.infoMsg("Generating html test report ...")
-                validate.generateTestReport(pepperEnv, TARGET_NODE, TEST_IMAGE, artifacts_dir)
-            } else {
-                common.infoMsg("Skipping report generation")
+
+            stage('Run k8s conformance e2e tests') {
+                if (RUN_K8S_TESTS.toBoolean() == true) {
+                    def image = TEST_K8S_CONFORMANCE_IMAGE
+                    def output_file = 'report-k8s-e2e-tests.txt'
+                    def outfile = "/tmp/" + image.replaceAll('/', '-') + '.output'
+                    test.runConformanceTests(pepperEnv, TEST_K8S_NODE, TEST_K8S_API_SERVER, image)
+
+                    def file_content = validate.getFileContent(pepperEnv, TEST_K8S_NODE, outfile)
+                    writeFile file: "${artifacts_dir}${output_file}", text: file_content
+                } else {
+                    common.infoMsg("Skipping k8s conformance e2e tests")
+                }
             }
+            stage('Generate report') {
+                if (GENERATE_REPORT.toBoolean() == true) {
+                    common.infoMsg("Generating html test report ...")
+                    validate.generateTestReport(pepperEnv, TARGET_NODE, TEST_IMAGE, artifacts_dir)
+                } else {
+                    common.infoMsg("Skipping report generation")
+                }
+            }
+            stage('Collect results') {
+                archiveArtifacts artifacts: "${artifacts_dir}/*"
+            }
+        } catch (Throwable e) {
+            // If there was an error or exception thrown, the build failed
+            currentBuild.result = "FAILURE"
+            currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+            throw e
         }
-        stage('Collect results') {
-            archiveArtifacts artifacts: "${artifacts_dir}/*"
-        }
-    } catch (Throwable e) {
-        // If there was an error or exception thrown, the build failed
-        currentBuild.result = "FAILURE"
-        currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
-        throw e
     }
 }
diff --git a/xtrabackup-restore-mysql-db.groovy b/xtrabackup-restore-mysql-db.groovy
index 50b2bce..60b8caf 100644
--- a/xtrabackup-restore-mysql-db.groovy
+++ b/xtrabackup-restore-mysql-db.groovy
@@ -12,74 +12,75 @@
 def python = new com.mirantis.mk.Python()
 
 def pepperEnv = "pepperEnv"
+timeout(time: 12, unit: 'HOURS') {
+    node() {
 
-node() {
+        stage('Setup virtualenv for Pepper') {
+            python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+        }
 
-    stage('Setup virtualenv for Pepper') {
-        python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
-    }
+        stage('Start restore') {
+            // # actual restore
 
-    stage('Start restore') {
-        // # actual upgrade
+            stage('Ask for manual confirmation') {
+                input message: "Are you sure you have the correct backups ready? Do you really want to continue restoring the MySQL DB?"
+            }
+            // database restore section
+            try {
+                salt.runSaltProcessStep(pepperEnv, 'I@galera:slave', 'service.stop', ['mysql'], null, true)
+            } catch (Exception er) {
+                common.warningMsg('Mysql service already stopped')
+            }
+            try {
+                salt.runSaltProcessStep(pepperEnv, 'I@galera:master', 'service.stop', ['mysql'], null, true)
+            } catch (Exception er) {
+                common.warningMsg('Mysql service already stopped')
+            }
+            try {
+                salt.cmdRun(pepperEnv, 'I@galera:slave', "rm /var/lib/mysql/ib_logfile*")
+            } catch (Exception er) {
+                common.warningMsg('Files are not present')
+            }
+            try {
+                salt.cmdRun(pepperEnv, 'I@galera:master', "mkdir -p /root/mysql/mysql.bak")
+            } catch (Exception er) {
+                common.warningMsg('Directory already exists')
+            }
+            try {
+                salt.cmdRun(pepperEnv, 'I@galera:master', "mv /var/lib/mysql/* /root/mysql/mysql.bak")
+            } catch (Exception er) {
+                common.warningMsg('Files were already moved')
+            }
+            try {
+                salt.cmdRun(pepperEnv, 'I@galera:master', "rm -rf /var/lib/mysql/*")
+            } catch (Exception er) {
+                common.warningMsg('Directory already empty')
+            }
+            try {
+                salt.runSaltProcessStep(pepperEnv, 'I@galera:master', 'file.remove', ["/var/lib/mysql/.galera_bootstrap"], null, true)
+            } catch (Exception er) {
+                common.warningMsg('File is not present')
+            }
+            salt.cmdRun(pepperEnv, 'I@galera:master', "sed -i '/gcomm/c\\wsrep_cluster_address=\"gcomm://\"' /etc/mysql/my.cnf")
+            _pillar = salt.getPillar(pepperEnv, "I@galera:master", 'xtrabackup:client:backup_dir')
+            backup_dir = _pillar['return'][0].values()[0]
+            if(backup_dir == null || backup_dir.isEmpty()) { backup_dir='/var/backups/mysql/xtrabackup' }
+            print(backup_dir)
+            salt.runSaltProcessStep(pepperEnv, 'I@galera:master', 'file.remove', ["${backup_dir}/dbrestored"], null, true)
+            salt.cmdRun(pepperEnv, 'I@xtrabackup:client', "su root -c 'salt-call state.sls xtrabackup'")
+            salt.runSaltProcessStep(pepperEnv, 'I@galera:master', 'service.start', ['mysql'], null, true)
 
-        stage('Ask for manual confirmation') {
-            input message: "Are you sure you have the correct backups ready? Do you really want to continue to restore mysql db?"
-        }
-        // database restore section
-        try {
-            salt.runSaltProcessStep(pepperEnv, 'I@galera:slave', 'service.stop', ['mysql'], null, true)
-        } catch (Exception er) {
-            common.warningMsg('Mysql service already stopped')
-        }
-        try {
-            salt.runSaltProcessStep(pepperEnv, 'I@galera:master', 'service.stop', ['mysql'], null, true)
-        } catch (Exception er) {
-            common.warningMsg('Mysql service already stopped')
-        }
-        try {
-            salt.cmdRun(pepperEnv, 'I@galera:slave', "rm /var/lib/mysql/ib_logfile*")
-        } catch (Exception er) {
-            common.warningMsg('Files are not present')
-        }
-        try {
-            salt.cmdRun(pepperEnv, 'I@galera:master', "mkdir -p /root/mysql/mysql.bak")
-        } catch (Exception er) {
-            common.warningMsg('Directory already exists')
-        }
-        try {
-            salt.cmdRun(pepperEnv, 'I@galera:master', "mv /var/lib/mysql/* /root/mysql/mysql.bak")
-        } catch (Exception er) {
-            common.warningMsg('Files were already moved')
-        }
-        try {
-            salt.cmdRun(pepperEnv, 'I@galera:master', "rm -rf /var/lib/mysql/*")
-        } catch (Exception er) {
-            common.warningMsg('Directory already empty')
-        }
-        try {
-            salt.runSaltProcessStep(pepperEnv, 'I@galera:master', 'file.remove', ["/var/lib/mysql/.galera_bootstrap"], null, true)
-        } catch (Exception er) {
-            common.warningMsg('File is not present')
-        }
-        salt.cmdRun(pepperEnv, 'I@galera:master', "sed -i '/gcomm/c\\wsrep_cluster_address=\"gcomm://\"' /etc/mysql/my.cnf")
-        _pillar = salt.getPillar(pepperEnv, "I@galera:master", 'xtrabackup:client:backup_dir')
-        backup_dir = _pillar['return'][0].values()[0]
-        if(backup_dir == null || backup_dir.isEmpty()) { backup_dir='/var/backups/mysql/xtrabackup' }
-        print(backup_dir)
-        salt.runSaltProcessStep(pepperEnv, 'I@galera:master', 'file.remove', ["${backup_dir}/dbrestored"], null, true)
-        salt.cmdRun(pepperEnv, 'I@xtrabackup:client', "su root -c 'salt-call state.sls xtrabackup'")
-        salt.runSaltProcessStep(pepperEnv, 'I@galera:master', 'service.start', ['mysql'], null, true)
+            // wait until mysql service on galera master is up
+            salt.commandStatus(pepperEnv, 'I@galera:master', 'service mysql status', 'running')
 
-        // wait until mysql service on galera master is up
-        salt.commandStatus(pepperEnv, 'I@galera:master', 'service mysql status', 'running')
-
-        salt.runSaltProcessStep(pepperEnv, 'I@galera:slave', 'service.start', ['mysql'], null, true)
-        try {
-            salt.commandStatus(pepperEnv, 'I@galera:slave', 'service mysql status', 'running')
-        } catch (Exception er) {
-            common.warningMsg('Either there are no galera slaves or something failed when starting mysql on galera slaves')
+            salt.runSaltProcessStep(pepperEnv, 'I@galera:slave', 'service.start', ['mysql'], null, true)
+            try {
+                salt.commandStatus(pepperEnv, 'I@galera:slave', 'service mysql status', 'running')
+            } catch (Exception er) {
+                common.warningMsg('Either there are no galera slaves or something failed when starting mysql on galera slaves')
+            }
+            sleep(5)
+            salt.cmdRun(pepperEnv, 'I@galera:master', "su root -c 'salt-call mysql.status | grep -A1 wsrep_cluster_size'")
         }
-        sleep(5)
-        salt.cmdRun(pepperEnv, 'I@galera:master', "su root -c 'salt-call mysql.status | grep -A1 wsrep_cluster_size'")
     }
 }