Merge "Adding workaround for trusty network init"
diff --git a/aptly-promote-pipeline.groovy b/aptly-promote-pipeline.groovy
index eb10f40..dfc764e 100644
--- a/aptly-promote-pipeline.groovy
+++ b/aptly-promote-pipeline.groovy
@@ -20,7 +20,7 @@
   try{
     stage("promote") {
       lock("aptly-api") {
-        aptly.promotePublish(APTLY_URL, SOURCE, TARGET, RECREATE, components, packages, DIFF_ONLY)
+        aptly.promotePublish(APTLY_URL, SOURCE, TARGET, RECREATE, components, packages, DIFF_ONLY, '-d --timeout 600', DUMP_PUBLISH.toBoolean())
       }
     }
   } catch (Throwable e) {
diff --git a/ceph-add-node.groovy b/ceph-add-node.groovy
new file mode 100644
index 0000000..9da7110
--- /dev/null
+++ b/ceph-add-node.groovy
@@ -0,0 +1,76 @@
+/**
+ *
+ * Add Ceph node to existing cluster
+ *
+ * Required parameters:
+ *  SALT_MASTER_URL             URL of Salt master
+ *  SALT_MASTER_CREDENTIALS     Credentials to the Salt API
+ *  HOST                        Host (minion id) to be added
+ *  HOST_TYPE                   Type of Ceph node to be added. Valid values are mon/osd/rgw
+ *
+ */
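+
+// Example job parameters (illustrative values only; the HOST glob is an assumption,
+// adjust it to the target environment):
+//   HOST = 'osd005*'     // minion id of the node being added
+//   HOST_TYPE = 'osd'    // one of: mon, osd, rgw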
+
+common = new com.mirantis.mk.Common()
+salt = new com.mirantis.mk.Salt()
+orchestrate = new com.mirantis.mk.Orchestrate()
+def python = new com.mirantis.mk.Python()
+
+def pepperEnv = "pepperEnv"
+
+node("python") {
+
+    // create connection to salt master
+    python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+
+    matches = ["osd", "mon", "rgw"]
+    def found = false
+    for (s in matches) {
+        if (HOST_TYPE.toLowerCase() == s) {
+            found = true
+        }
+    }
+
+    if (!found) {
+        common.errorMsg("Unsupported HOST_TYPE. Please use one of the following types: mon/osd/rgw")
+        throw new Exception("Unsupported HOST_TYPE: ${HOST_TYPE}")
+    }
+
+    if (HOST_TYPE.toLowerCase() != 'osd') {
+
+        // launch VMs
+        stage('Launch VMs') {
+            salt.enforceState(pepperEnv, 'I@salt:control', 'salt.control', true)
+
+            // wait till the HOST appears in salt-key on salt-master
+            salt.minionPresent(pepperEnv, 'I@salt:master', HOST)
+        }
+    }
+
+    // run basic states
+    stage('Install infra') {
+        orchestrate.installFoundationInfraOnTarget(pepperEnv, HOST)
+    }
+
+    if (HOST_TYPE.toLowerCase() == 'osd') {
+
+        // Install Ceph osd
+        stage('Install Ceph OSD') {
+            orchestrate.installCephOsd(pepperEnv, HOST)
+        }
+    } else if (HOST_TYPE.toLowerCase() == 'mon') {
+        // Install Ceph mon
+        stage('Install Ceph MON') {
+            salt.enforceState(pepperEnv, 'I@ceph:common', 'ceph.common', true)
+            // install Ceph Mons
+            salt.enforceState(pepperEnv, 'I@ceph:mon', 'ceph.mon', true)
+            if (salt.testTarget(pepperEnv, 'I@ceph:mgr')) {
+                salt.enforceState(pepperEnv, 'I@ceph:mgr', 'ceph.mgr', true)
+            }
+        }
+    } else if (HOST_TYPE.toLowerCase() == 'rgw') {
+        // Install Ceph rgw
+        stage('Install Ceph RGW') {
+            salt.enforceState(pepperEnv, 'I@ceph:radosgw', ['keepalived', 'haproxy', 'ceph.radosgw'], true)
+        }
+    }
+}
diff --git a/ceph-backend-migration.groovy b/ceph-backend-migration.groovy
new file mode 100644
index 0000000..8b8d256
--- /dev/null
+++ b/ceph-backend-migration.groovy
@@ -0,0 +1,218 @@
+/**
+ *
+ * Filestore to Bluestore or vice versa backend migration
+ *
+ * Required parameters:
+ *  SALT_MASTER_URL                 URL of Salt master
+ *  SALT_MASTER_CREDENTIALS         Credentials to the Salt API
+ *
+ *  ADMIN_HOST                      Host (minion id) with admin keyring and /etc/crushmap file present
+ *  OSD                             OSD ids to be migrated if a single OSD host is targeted (comma-separated list - 1,2,3)
+ *  TARGET                          Hosts (minion ids) to be targeted
+ *  CLUSTER_FLAGS                   Comma separated list of tags to apply to cluster
+ *  WAIT_FOR_HEALTHY                Wait for cluster rebalance before stopping daemons
+ *  ORIGIN_BACKEND                  Ceph backend before upgrade
+ *  PER_OSD_CONTROL                 Set to true if Ceph status verification after every osd disk migration is desired
+ *  PER_OSD_HOST_CONTROL            Set to true if Ceph status verification after the whole OSD host migration is desired
+ *
+ */
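+
+// Example job parameters (illustrative values only; the TARGET glob, OSD ids and
+// flag list below are assumptions, not defaults shipped with this job):
+//   TARGET = 'osd*'                      // all OSD hosts
+//   OSD = '*'                            // migrate every OSD found on each targeted host
+//   ORIGIN_BACKEND = 'filestore'         // migrating from filestore to bluestore
+//   CLUSTER_FLAGS = 'noout,norebalance'
+//   WAIT_FOR_HEALTHY = 'true'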
+
+common = new com.mirantis.mk.Common()
+salt = new com.mirantis.mk.Salt()
+def python = new com.mirantis.mk.Python()
+
+MIGRATION_METHOD = "per-osd"
+// TBD: per-host
+
+def pepperEnv = "pepperEnv"
+def flags = CLUSTER_FLAGS.tokenize(',')
+def osds = OSD.tokenize(',')
+
+def runCephCommand(master, target, cmd) {
+    return salt.cmdRun(master, target, cmd)
+}
+
+def waitForHealthy(master, count=0, attempts=300) {
+    // wait for healthy cluster
+    while (count<attempts) {
+        def health = runCephCommand(master, ADMIN_HOST, 'ceph health')['return'][0].values()[0]
+        if (health.contains('HEALTH_OK')) {
+            common.infoMsg('Cluster is healthy')
+            break;
+        }
+        count++
+        sleep(10)
+    }
+}
+
+node("python") {
+
+    // create connection to salt master
+    python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+
+    if (MIGRATION_METHOD == 'per-osd') {
+
+        if (flags.size() > 0) {
+            stage('Set cluster flags') {
+                for (flag in flags) {
+                    runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd set ' + flag)
+                }
+            }
+        }
+
+        def target_hosts = salt.getMinions(pepperEnv, TARGET)
+
+        for (HOST in target_hosts) {
+            def osd_ids = []
+
+            // get list of osd disks of the host
+            def ceph_disks = salt.getGrain(pepperEnv, HOST, 'ceph')['return'][0].values()[0].values()[0]['ceph_disk']
+
+            for (i in ceph_disks) {
+                def osd_id = i.getKey().toString()
+                if (osd_id in osds || OSD == '*') {
+                    osd_ids.add('osd.' + osd_id)
+                    print("Will migrate " + osd_id)
+                } else {
+                    print("Skipping " + osd_id)
+                }
+            }
+
+            for (osd_id in osd_ids) {
+
+                def id = osd_id.replaceAll('osd.', '')
+                def backend = runCephCommand(pepperEnv, ADMIN_HOST, "ceph osd metadata ${id} | grep osd_objectstore")['return'][0].values()[0]
+
+                if (backend.contains(ORIGIN_BACKEND)) {
+
+                    // wait for healthy cluster before manipulating with osds
+                    if (WAIT_FOR_HEALTHY.toBoolean() == true) {
+                        waitForHealthy(pepperEnv)
+                    }
+
+                    // `ceph osd out <id> <id>`
+                    stage('Set OSDs out') {
+                        runCephCommand(pepperEnv, ADMIN_HOST, "ceph osd out ${osd_id}")
+                    }
+
+                    if (WAIT_FOR_HEALTHY.toBoolean() == true) {
+                        sleep(5)
+                        waitForHealthy(pepperEnv)
+                    }
+
+                    // stop osd daemons
+                    stage('Stop OSD daemons') {
+                        salt.runSaltProcessStep(pepperEnv, HOST, 'service.stop', ['ceph-osd@' + osd_id.replaceAll('osd.', '')],  null, true)
+                    }
+
+                    // remove keyring `ceph auth del osd.3`
+                    stage('Remove OSD keyrings from auth') {
+                        runCephCommand(pepperEnv, ADMIN_HOST, 'ceph auth del ' + osd_id)
+                    }
+
+                    // remove osd `ceph osd rm osd.3`
+                    stage('Remove OSDs') {
+                        runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd rm ' + osd_id)
+                    }
+
+                    def mount = runCephCommand(pepperEnv, HOST, "mount | grep /var/lib/ceph/osd/ceph-${id}")['return'][0].values()[0]
+                    dev = mount.split()[0].replaceAll("[0-9]","")
+
+                    // remove journal or block_db partition `parted /dev/sdj rm 3`
+                    stage('Remove journal / block_db partition') {
+                        def partition_uuid = ""
+                        def journal_partition_uuid = ""
+                        def block_db_partition_uuid = ""
+                        try {
+                            journal_partition_uuid = runCephCommand(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep journal | grep partuuid")
+                            journal_partition_uuid = journal_partition_uuid.toString().trim().split("\n")[0].substring(journal_partition_uuid.toString().trim().lastIndexOf("/")+1)
+                        } catch (Exception e) {
+                            common.infoMsg(e)
+                        }
+                        try {
+                            block_db_partition_uuid = runCephCommand(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep 'block.db' | grep partuuid")
+                            block_db_partition_uuid = block_db_partition_uuid.toString().trim().split("\n")[0].substring(block_db_partition_uuid.toString().trim().lastIndexOf("/")+1)
+                        } catch (Exception e) {
+                            common.infoMsg(e)
+                        }
+
+                        // set partition_uuid = 2c76f144-f412-481e-b150-4046212ca932
+                        if (journal_partition_uuid?.trim()) {
+                            partition_uuid = journal_partition_uuid
+                        } else if (block_db_partition_uuid?.trim()) {
+                            partition_uuid = block_db_partition_uuid
+                        }
+
+                        // if the failed disk had its block_db or journal on a different disk, remove that partition
+                        if (partition_uuid?.trim()) {
+                            def partition = ""
+                            try {
+                                // partition = /dev/sdi2
+                                partition = runCephCommand(pepperEnv, HOST, "blkid | grep ${partition_uuid} ")['return'][0].values()[0].split("(?<=[0-9])")[0]
+                            } catch (Exception e) {
+                                common.warningMsg(e)
+                            }
+
+                            if (partition?.trim()) {
+                                // dev = /dev/sdi
+                                def dev = partition.replaceAll("[0-9]", "")
+                                // part_id = 2
+                                def part_id = partition.substring(partition.lastIndexOf("/")+1).replaceAll("[^0-9]", "")
+                                runCephCommand(pepperEnv, HOST, "parted ${dev} rm ${part_id}")
+                            }
+                        }
+                    }
+
+                    // umount `umount /dev/sdi1`
+                    stage('Umount devices') {
+                        runCephCommand(pepperEnv, HOST, "umount /var/lib/ceph/osd/ceph-${id}")
+                    }
+
+                    // zap disks `ceph-disk zap /dev/sdi`
+                    stage('Zap device') {
+                        runCephCommand(pepperEnv, HOST, 'ceph-disk zap ' + dev)
+                    }
+
+                    // Deploy failed Ceph OSD
+                    stage('Deploy Ceph OSD') {
+                        salt.runSaltProcessStep(pepperEnv, HOST, 'saltutil.refresh_pillar', [], null, true, 5)
+                        salt.enforceState(pepperEnv, HOST, 'ceph.osd', true)
+                    }
+
+                    if (PER_OSD_CONTROL.toBoolean() == true) {
+                        stage("Verify backend version for osd.${id}") {
+                            sleep(5)
+                            runCephCommand(pepperEnv, HOST, "ceph osd metadata ${id} | grep osd_objectstore")
+                            runCephCommand(pepperEnv, HOST, "ceph -s")
+                        }
+
+                        stage('Ask for manual confirmation') {
+                            input message: "From the verification commands above, please check the backend version of osd.${id} and the Ceph status. If everything is correct, do you want to continue migrating the next OSD?"
+                        }
+                    }
+                }
+            }
+            if (PER_OSD_HOST_CONTROL.toBoolean() == true) {
+                stage("Verify backend versions") {
+                    sleep(5)
+                    runCephCommand(pepperEnv, HOST, "ceph osd metadata | grep osd_objectstore -B2")
+                    runCephCommand(pepperEnv, HOST, "ceph -s")
+                }
+
+                stage('Ask for manual confirmation') {
+                    input message: "From the verification commands above, please check the Ceph status and the backend version of the OSDs on this host. If everything is correct, do you want to continue migrating the next OSD host?"
+                }
+            }
+
+        }
+        // remove cluster flags
+        if (flags.size() > 0) {
+            stage('Unset cluster flags') {
+                for (flag in flags) {
+                    common.infoMsg('Removing flag ' + flag)
+                    runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd unset ' + flag)
+                }
+            }
+        }
+    }
+}
diff --git a/ceph-enforce-weights.groovy b/ceph-enforce-weights.groovy
deleted file mode 100644
index 4e06322..0000000
--- a/ceph-enforce-weights.groovy
+++ /dev/null
@@ -1,60 +0,0 @@
-/**
- *
- * Enforce OSD weights from model
- *
- * Requred parameters:
- *  SALT_MASTER_URL             URL of Salt master
- *  SALT_MASTER_CREDENTIALS     Credentials to the Salt API
- *
- *  ADMIN_HOST                  Host (minion id) with admin keyring
- *
- */
-
-common = new com.mirantis.mk.Common()
-salt = new com.mirantis.mk.Salt()
-
-// configure global variables
-def saltMaster
-
-def runCephCommand(master, cmd) {
-    return salt.cmdRun(master, ADMIN_HOST, cmd)
-}
-
-def grains
-
-node("python") {
-
-    stage('Load cluster information') {
-        // create connection to salt master
-        saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
-
-        // get list of disk from grains
-        grains = salt.getGrain(saltMaster, 'I@ceph:osd')['return'][0]
-        common.prettyPrint(grains)
-
-    }
-
-    stage('Enforce weights on OSDs') {
-
-        for (host in grains) {
-            // parse grains
-            def hostGrains = host.value
-            common.prettyPrint(hostGrains)
-
-            def hostname = hostGrains.host
-            def salt_id = hostGrains.id
-            def ceph_host_id = hostGrains.ceph_osd_host_id
-
-            common.infoMsg("Setting weights on host ${hostname} (${salt_id}), ceph_id ${ceph_host_id}")
-            for (disk in hostGrains.ceph_osd_disk) {
-                def osd_id = ceph_host_id + disk.key
-                print(osd_id)
-                print(disk.value)
-                print(disk.key)
-                def cmd = "ceph osd crush set ${osd_id} ${disk.value.weight} host=${hostname}"
-                print(runCephCommand(saltMaster, cmd))
-            }
-        }
-
-    }
-}
diff --git a/ceph-remove-node.groovy b/ceph-remove-node.groovy
new file mode 100644
index 0000000..cda53be
--- /dev/null
+++ b/ceph-remove-node.groovy
@@ -0,0 +1,187 @@
+/**
+ *
+ * Remove Ceph node from existing cluster
+ *
+ * Required parameters:
+ *  SALT_MASTER_URL             URL of Salt master
+ *  SALT_MASTER_CREDENTIALS     Credentials to the Salt API
+ *  HOST                        Host (minion id) to be removed
+ *  HOST_TYPE                   Type of Ceph node to be removed. Valid values are mon/osd/rgw
+ *  ADMIN_HOST                  Host (minion id) with admin keyring
+ *  WAIT_FOR_HEALTHY            Wait for cluster rebalance before stopping daemons
+ *  GENERATE_CRUSHMAP           Set to true if the crush map should be generated
+ *
+ */
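+
+// Example job parameters (illustrative values only; minion id globs are assumptions):
+//   HOST = 'rgw04*'          // node to remove
+//   HOST_TYPE = 'rgw'        // one of: mon, osd, rgw
+//   ADMIN_HOST = 'cmn01*'    // any minion holding the admin keyring
+//   GENERATE_CRUSHMAP = 'false'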
+
+common = new com.mirantis.mk.Common()
+salt = new com.mirantis.mk.Salt()
+orchestrate = new com.mirantis.mk.Orchestrate()
+def python = new com.mirantis.mk.Python()
+
+def pepperEnv = "pepperEnv"
+
+def runCephCommand(master, target, cmd) {
+    return salt.cmdRun(master, target, cmd)
+}
+
+def waitForHealthy(master, count=0, attempts=300) {
+    // wait for healthy cluster
+    while (count<attempts) {
+        def health = runCephCommand(master, ADMIN_HOST, 'ceph health')['return'][0].values()[0]
+        if (health.contains('HEALTH_OK')) {
+            common.infoMsg('Cluster is healthy')
+            break;
+        }
+        count++
+        sleep(10)
+    }
+}
+
+node("python") {
+
+    // create connection to salt master
+    python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+
+    matches = ["osd", "mon", "rgw"]
+    def found = false
+    for (s in matches) {
+        if (HOST_TYPE.toLowerCase() == s) {
+            found = true
+        }
+    }
+
+    if (!found) {
+        common.errorMsg("Unsupported HOST_TYPE. Please use one of the following types: mon/osd/rgw")
+        throw new Exception("Unsupported HOST_TYPE: ${HOST_TYPE}")
+    }
+
+    stage('Refresh_pillar') {
+        salt.runSaltProcessStep(pepperEnv, '*', 'saltutil.refresh_pillar', [], null, true, 5)
+    }
+
+    //  split minion id on '.' and remove '*'
+    def target = HOST.split("\\.")[0].replace("*", "")
+
+    def _pillar = salt.getGrain(pepperEnv, 'I@salt:master', 'domain')
+    domain = _pillar['return'][0].values()[0].values()[0]
+
+    if (HOST_TYPE.toLowerCase() == 'rgw') {
+        // Remove Ceph rgw
+        stage('Remove Ceph RGW') {
+            salt.enforceState(pepperEnv, 'I@ceph:radosgw', ['keepalived', 'haproxy'], true)
+        }
+    }
+
+    if (HOST_TYPE.toLowerCase() != 'osd') {
+
+        // virsh destroy rgw04.deploy-name.local; virsh undefine rgw04.deploy-name.local;
+        stage('Destroy VM') {
+            _pillar = salt.getGrain(pepperEnv, 'I@salt:control', 'id')
+            def kvm01 = _pillar['return'][0].values()[0].values()[0]
+
+            _pillar = salt.getPillar(pepperEnv, "${kvm01}", "salt:control:cluster:internal:node:${target}:provider")
+            def targetProvider = _pillar['return'][0].values()[0]
+
+            salt.cmdRun(pepperEnv, "${targetProvider}", "virsh destroy ${target}.${domain}")
+            salt.cmdRun(pepperEnv, "${targetProvider}", "virsh undefine ${target}.${domain}")
+        }
+    } else if (HOST_TYPE.toLowerCase() == 'osd') {
+        def osd_ids = []
+
+        // get list of osd disks of the host
+        def ceph_disks = salt.getGrain(pepperEnv, HOST, 'ceph')['return'][0].values()[0].values()[0]['ceph_disk']
+
+        for (i in ceph_disks) {
+            def osd_id = i.getKey().toString()
+            osd_ids.add('osd.' + osd_id)
+            print("Will delete " + osd_id)
+        }
+
+        // `ceph osd out <id> <id>`
+        stage('Set OSDs out') {
+            runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd out ' + osd_ids.join(' '))
+        }
+
+        // wait for healthy cluster
+        if (WAIT_FOR_HEALTHY.toBoolean() == true) {
+            sleep(5)
+            waitForHealthy(pepperEnv)
+        }
+
+        // stop osd daemons
+        stage('Stop OSD daemons') {
+            for (i in osd_ids) {
+                salt.runSaltProcessStep(pepperEnv, HOST, 'service.stop', ['ceph-osd@' + i.replaceAll('osd.', '')],  null, true)
+            }
+        }
+
+        // `ceph osd crush remove osd.2`
+        stage('Remove OSDs from CRUSH') {
+            for (i in osd_ids) {
+                runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd crush remove ' + i)
+            }
+        }
+
+        // remove keyring `ceph auth del osd.3`
+        stage('Remove OSD keyrings from auth') {
+            for (i in osd_ids) {
+                runCephCommand(pepperEnv, ADMIN_HOST, 'ceph auth del ' + i)
+            }
+        }
+
+        // remove osd `ceph osd rm osd.3`
+        stage('Remove OSDs') {
+            for (i in osd_ids) {
+                runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd rm ' + i)
+            }
+        }
+
+        // purge Ceph pkgs
+        stage('Purge Ceph OSD pkgs') {
+            runCephCommand(pepperEnv, HOST, 'apt purge ceph-base ceph-common ceph-fuse ceph-mds ceph-osd libcephfs2 python-cephfs librados2 python-rados -y')
+        }
+
+        // stop salt-minion service and move its configuration
+        stage('Stop salt-minion') {
+            salt.cmdRun(pepperEnv, HOST, "mv /etc/salt/minion.d/minion.conf minion.conf")
+            salt.runSaltProcessStep(pepperEnv, HOST, 'service.stop', ['salt-minion'], [], null, true, 5)
+        }
+    }
+
+    stage('Remove salt-key') {
+        try {
+            salt.cmdRun(pepperEnv, 'I@salt:master', "salt-key -d ${target}.${domain} -y")
+        } catch (Exception e) {
+            common.warningMsg(e)
+        }
+        try {
+            salt.cmdRun(pepperEnv, 'I@salt:master', "rm /srv/salt/reclass/nodes/_generated/${target}.${domain}.yml")
+        } catch (Exception e) {
+            common.warningMsg(e)
+        }
+    }
+
+    if (HOST_TYPE.toLowerCase() == 'mon') {
+        // Update Monmap
+        stage('Update monmap') {
+            runCephCommand(pepperEnv, 'I@ceph:mon', "ceph mon getmap -o monmap.backup")
+            try {
+                runCephCommand(pepperEnv, 'I@ceph:mon', "ceph mon remove ${target}")
+            } catch (Exception e) {
+                common.warningMsg(e)
+            }
+            runCephCommand(pepperEnv, 'I@ceph:mon', "monmaptool /tmp/monmap --rm ${target}")
+        }
+
+        // Update configs
+        stage('Update Ceph configs') {
+            salt.enforceState(pepperEnv, 'I@ceph:common', 'ceph.common', true)
+        }
+    }
+
+    if (HOST_TYPE.toLowerCase() == 'osd' && GENERATE_CRUSHMAP.toBoolean() == true) {
+        stage('Generate CRUSHMAP') {
+            salt.enforceState(pepperEnv, 'I@ceph:setup:crush', 'ceph.setup.crush', true)
+        }
+    }
+}
diff --git a/ceph-remove-osd.groovy b/ceph-remove-osd.groovy
index ac102eb..c51292e 100644
--- a/ceph-remove-osd.groovy
+++ b/ceph-remove-osd.groovy
@@ -7,6 +7,7 @@
  *  SALT_MASTER_CREDENTIALS     Credentials to the Salt API
  *
  *  HOST                        Host (minion id) to be removed
+ *  OSD                         Comma separated list of osd ids to be removed
  *  ADMIN_HOST                  Host (minion id) with admin keyring
  *  CLUSTER_FLAGS               Comma separated list of tags to apply to cluster
  *  WAIT_FOR_HEALTHY            Wait for cluster rebalance before stoping daemons
@@ -15,9 +16,9 @@
 
 common = new com.mirantis.mk.Common()
 salt = new com.mirantis.mk.Salt()
+def python = new com.mirantis.mk.Python()
 
-// configure global variables
-def saltMaster
+def pepperEnv = "pepperEnv"
 def flags = CLUSTER_FLAGS.tokenize(',')
 def osds = OSD.tokenize(',')
 
@@ -25,31 +26,43 @@
     return salt.cmdRun(master, ADMIN_HOST, cmd)
 }
 
+def waitForHealthy(master, count=0, attempts=300) {
+    // wait for healthy cluster
+    while (count<attempts) {
+        def health = runCephCommand(master, ADMIN_HOST, 'ceph health')['return'][0].values()[0]
+        if (health.contains('HEALTH_OK')) {
+            common.infoMsg('Cluster is healthy')
+            break;
+        }
+        count++
+        sleep(10)
+    }
+}
+
 node("python") {
 
     // create connection to salt master
-    saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+    python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
 
     if (flags.size() > 0) {
         stage('Set cluster flags') {
             for (flag in flags) {
-                runCephCommand(saltMaster, 'ceph osd set ' + flag)
+                runCephCommand(pepperEnv, 'ceph osd set ' + flag)
             }
         }
     }
 
-    // get list of disk at the osd
-    def pillar_disks = salt.getPillar(saltMaster, HOST, 'ceph:osd:disk')['return'][0].values()[0]
-    def hostname_id = salt.getPillar(saltMaster, HOST, 'ceph:osd:host_id')['return'][0].values()[0]
     def osd_ids = []
 
-    print("host_id is ${hostname_id}")
     print("osds:")
     print(osds)
 
-    for (i in pillar_disks) {
-        def osd_id = (hostname_id + i.key).toInteger().toString()
-        print("Evaluating ${osd_id}")
+    // get list of osd disks of the host
+    def ceph_disks = salt.getGrain(pepperEnv, HOST, 'ceph')['return'][0].values()[0].values()[0]['ceph_disk']
+    common.prettyPrint(ceph_disks)
+
+    for (i in ceph_disks) {
+        def osd_id = i.getKey().toString()
         if (osd_id in osds || OSD == '*') {
             osd_ids.add('osd.' + osd_id)
             print("Will delete " + osd_id)
@@ -60,48 +73,40 @@
 
     // `ceph osd out <id> <id>`
     stage('Set OSDs out') {
-            runCephCommand(saltMaster, 'ceph osd out ' + osd_ids.join(' '))
+        runCephCommand(pepperEnv, 'ceph osd out ' + osd_ids.join(' '))
     }
 
     // wait for healthy cluster
-    if (common.validInputParam('WAIT_FOR_HEALTHY') && WAIT_FOR_HEALTHY.toBoolean()) {
-        stage('Waiting for healthy cluster') {
-            while (true) {
-                def health = runCephCommand(saltMaster, 'ceph health')['return'][0].values()[0]
-                if (health.contains('HEALTH OK')) {
-                    common.infoMsg('Cluster is healthy')
-                    break;
-                }
-                sleep(60)
-            }
-        }
+    if (WAIT_FOR_HEALTHY.toBoolean() == true) {
+        sleep(5)
+        waitForHealthy(pepperEnv)
     }
 
     // stop osd daemons
     stage('Stop OSD daemons') {
         for (i in osd_ids) {
-            salt.runSaltProcessStep(saltMaster, HOST, 'service.stop', ['ceph-osd@' + i.replaceAll('osd.', '')],  null, true)
+            salt.runSaltProcessStep(pepperEnv, HOST, 'service.stop', ['ceph-osd@' + i.replaceAll('osd.', '')],  null, true)
         }
     }
 
     // `ceph osd crush remove osd.2`
     stage('Remove OSDs from CRUSH') {
         for (i in osd_ids) {
-            runCephCommand(saltMaster, 'ceph osd crush remove ' + i)
+            runCephCommand(pepperEnv, 'ceph osd crush remove ' + i)
         }
     }
 
     // remove keyring `ceph auth del osd.3`
     stage('Remove OSD keyrings from auth') {
         for (i in osd_ids) {
-            runCephCommand(saltMaster, 'ceph auth del ' + i)
+            runCephCommand(pepperEnv, 'ceph auth del ' + i)
         }
     }
 
     // remove osd `ceph osd rm osd.3`
     stage('Remove OSDs') {
         for (i in osd_ids) {
-            runCephCommand(saltMaster, 'ceph osd rm ' + i)
+            runCephCommand(pepperEnv, 'ceph osd rm ' + i)
         }
     }
 
@@ -110,9 +115,8 @@
         stage('Unset cluster flags') {
             for (flag in flags) {
                 common.infoMsg('Removing flag ' + flag)
-                runCephCommand(saltMaster, 'ceph osd unset ' + flag)
+                runCephCommand(pepperEnv, 'ceph osd unset ' + flag)
             }
         }
     }
-
 }
diff --git a/ceph-replace-failed-osd.groovy b/ceph-replace-failed-osd.groovy
new file mode 100644
index 0000000..9127581
--- /dev/null
+++ b/ceph-replace-failed-osd.groovy
@@ -0,0 +1,188 @@
+/**
+ *
+ * Replace failed disk with a new disk
+ *
+ * Required parameters:
+ *  SALT_MASTER_URL                 URL of Salt master
+ *  SALT_MASTER_CREDENTIALS         Credentials to the Salt API
+ *
+ *  HOST                            Host (minion id) to be removed
+ *  ADMIN_HOST                      Host (minion id) with admin keyring and /etc/crushmap file present
+ *  OSD                             Failed OSD ids to be replaced (comma-separated list - 1,2,3)
+ *  DEVICE                          Comma separated list of failed devices that will be replaced at HOST (/dev/sdb,/dev/sdc)
+ *  JOURNAL_OR_BLOCKDB_PARTITION    Comma separated list of partitions where journal or block_db for the failed devices on this HOST were stored (/dev/sdh2,/dev/sdh3)
+ *  CLUSTER_FLAGS                   Comma separated list of tags to apply to cluster
+ *  WAIT_FOR_HEALTHY                Wait for cluster rebalance before stopping daemons
+ *
+ */
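+
+// Example job parameters (illustrative values only; device and partition paths are assumptions):
+//   HOST = 'osd01*'
+//   OSD = '5'                                   // id of the failed OSD
+//   DEVICE = '/dev/sdb'                         // failed data disk to replace
+//   JOURNAL_OR_BLOCKDB_PARTITION = '/dev/sdh2'  // journal/block.db partition used by that OSD
+//   CLUSTER_FLAGS = 'noout'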
+
+common = new com.mirantis.mk.Common()
+salt = new com.mirantis.mk.Salt()
+def python = new com.mirantis.mk.Python()
+
+def pepperEnv = "pepperEnv"
+def flags = CLUSTER_FLAGS.tokenize(',')
+def osds = OSD.tokenize(',')
+def devices = DEVICE.tokenize(',')
+def journals_blockdbs = JOURNAL_OR_BLOCKDB_PARTITION.tokenize(',')
+
+
+def runCephCommand(master, target, cmd) {
+    return salt.cmdRun(master, target, cmd)
+}
+
+def waitForHealthy(master, count=0, attempts=300) {
+    // wait for healthy cluster
+    while (count<attempts) {
+        def health = runCephCommand(master, ADMIN_HOST, 'ceph health')['return'][0].values()[0]
+        if (health.contains('HEALTH_OK')) {
+            common.infoMsg('Cluster is healthy')
+            break;
+        }
+        count++
+        sleep(10)
+    }
+}
+
+node("python") {
+
+    // create connection to salt master
+    python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+
+    if (flags.size() > 0) {
+        stage('Set cluster flags') {
+            for (flag in flags) {
+                runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd set ' + flag)
+            }
+        }
+    }
+
+    def osd_ids = []
+
+    print("osds:")
+    print(osds)
+
+    // get list of osd disks of the host
+    def ceph_disks = salt.getGrain(pepperEnv, HOST, 'ceph')['return'][0].values()[0].values()[0]['ceph_disk']
+    common.prettyPrint(ceph_disks)
+
+    for (i in ceph_disks) {
+        def osd_id = i.getKey().toString()
+        if (osd_id in osds || OSD == '*') {
+            osd_ids.add('osd.' + osd_id)
+            print("Will delete " + osd_id)
+        } else {
+            print("Skipping " + osd_id)
+        }
+    }
+
+    // `ceph osd out <id> <id>`
+    stage('Set OSDs out') {
+        runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd out ' + osd_ids.join(' '))
+    }
+
+    // wait for healthy cluster
+    if (WAIT_FOR_HEALTHY.toBoolean() == true) {
+        sleep(5)
+        waitForHealthy(pepperEnv)
+    }
+
+    // stop osd daemons
+    stage('Stop OSD daemons') {
+        for (i in osd_ids) {
+            salt.runSaltProcessStep(pepperEnv, HOST, 'service.stop', ['ceph-osd@' + i.replaceAll('osd.', '')],  null, true)
+        }
+    }
+    /*
+    // `ceph osd crush remove osd.2`
+    stage('Remove OSDs from CRUSH') {
+        for (i in osd_ids) {
+            runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd crush remove ' + i)
+        }
+    }
+
+    // wait for pgs to rebalance
+    if (WAIT_FOR_PG_REBALANCE.toBoolean() == true) {
+        stage('Waiting for pgs to rebalance') {
+            while (true) {
+                def status = runCephCommand(pepperEnv, ADMIN_HOST, 'ceph -s')['return'][0].values()[0]
+                if (!status.contains('degraded')) {
+                    common.infoMsg('PGs rebalanced')
+                    break;
+                }
+                sleep(10)
+            }
+        }
+    }
+    */
+    // remove keyring `ceph auth del osd.3`
+    stage('Remove OSD keyrings from auth') {
+        for (i in osd_ids) {
+            runCephCommand(pepperEnv, ADMIN_HOST, 'ceph auth del ' + i)
+        }
+    }
+
+    // remove osd `ceph osd rm osd.3`
+    stage('Remove OSDs') {
+        for (i in osd_ids) {
+            runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd rm ' + i)
+        }
+    }
+
+    // umount `umount /dev/sdi1`
+    stage('Umount devices') {
+        for (dev in devices) {
+            runCephCommand(pepperEnv, HOST, 'umount ' + dev + '1')
+        }
+    }
+
+    // zap disks `ceph-disk zap /dev/sdi`
+    stage('Zap devices') {
+        for (dev in devices) {
+            runCephCommand(pepperEnv, HOST, 'ceph-disk zap ' + dev)
+        }
+    }
+
+    // remove journal or block_db partition `parted /dev/sdj rm 3`
+    stage('Remove journal / block_db partitions') {
+        for (journal_blockdb in journals_blockdbs) {
+            if (journal_blockdb?.trim()) {
+                // dev = /dev/sdi
+                def dev = journal_blockdb.replaceAll("[0-9]", "")
+                // part_id = 2
+                def part_id = journal_blockdb.substring(journal_blockdb.lastIndexOf("/")+1).replaceAll("[^0-9]", "")
+                runCephCommand(pepperEnv, HOST, "parted ${dev} rm ${part_id}")
+            }
+        }
+    }
+
+    // Deploy failed Ceph OSD
+    stage('Deploy Ceph OSD') {
+        salt.enforceState(pepperEnv, HOST, 'ceph.osd', true)
+    }
+
+    // remove cluster flags
+    if (flags.size() > 0) {
+        stage('Unset cluster flags') {
+            for (flag in flags) {
+                common.infoMsg('Removing flag ' + flag)
+                runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd unset ' + flag)
+            }
+        }
+    }
+
+    /*
+    if (ENFORCE_CRUSHMAP.toBoolean() == true) {
+
+        // enforce crushmap `crushtool -c /etc/ceph/crushmap -o /etc/ceph/crushmap.compiled; ceph osd setcrushmap -i /etc/ceph/crushmap.compiled`
+        stage('Enforce crushmap') {
+
+            stage('Ask for manual confirmation') {
+                input message: "Are you sure that your ADMIN_HOST has the correct /etc/ceph/crushmap file? Click Proceed to compile and enforce the crushmap."
+            }
+            runCephCommand(pepperEnv, ADMIN_HOST, 'crushtool -c /etc/ceph/crushmap -o /etc/ceph/crushmap.compiled')
+            runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd setcrushmap -i /etc/ceph/crushmap.compiled')
+        }
+    }
+    */
+}
diff --git a/ceph-upgrade.groovy b/ceph-upgrade.groovy
new file mode 100644
index 0000000..5844f77
--- /dev/null
+++ b/ceph-upgrade.groovy
@@ -0,0 +1,209 @@
+/**
+ *
+ * Upgrade Ceph mon/mgr/osd/rgw/client
+ *
+ * Required parameters:
+ *  SALT_MASTER_URL                 URL of Salt master
+ *  SALT_MASTER_CREDENTIALS         Credentials to the Salt API
+ *
+ *  ADMIN_HOST                      Host (minion id) with admin keyring and /etc/crushmap file present
+ *  CLUSTER_FLAGS                   Comma separated list of tags to apply to cluster
+ *  WAIT_FOR_HEALTHY                Wait for cluster rebalance before stopping daemons
+ *  ORIGIN_RELEASE                  Ceph release version before upgrade
+ *  TARGET_RELEASE                  Ceph release version after upgrade
+ *  STAGE_UPGRADE_MON               Set to True if Ceph mon nodes upgrade is desired
+ *  STAGE_UPGRADE_MGR               Set to True if Ceph mgr nodes upgrade or new deploy is desired
+ *  STAGE_UPGRADE_OSD               Set to True if Ceph osd nodes upgrade is desired
+ *  STAGE_UPGRADE_RGW               Set to True if Ceph rgw nodes upgrade is desired
+ *  STAGE_UPGRADE_CLIENT            Set to True if Ceph client nodes upgrade is desired (includes for example ctl/cmp nodes)
+ *  BACKUP_ENABLED                  Set to True if a backup of the mon/rgw VMs is desired before the upgrade
+ *  STAGE_FINALIZE                  Set to True to finalize the upgrade (require-osd-release, min-compat-client, crush tunables)
+ *
+ */
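+
+// Example job parameters (illustrative values only; the release names are assumptions
+// matching a Jewel to Luminous upgrade):
+//   ORIGIN_RELEASE = 'jewel'
+//   TARGET_RELEASE = 'luminous'
+//   CLUSTER_FLAGS = 'noout,sortbitwise'
+//   STAGE_UPGRADE_MON = 'true'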
+
+common = new com.mirantis.mk.Common()
+salt = new com.mirantis.mk.Salt()
+def python = new com.mirantis.mk.Python()
+
+def pepperEnv = "pepperEnv"
+def flags = CLUSTER_FLAGS.tokenize(',')
+
+def runCephCommand(master, target, cmd) {
+    return salt.cmdRun(master, target, cmd)
+}
+
+def waitForHealthy(master, count=0, attempts=300) {
+    // wait for healthy cluster
+    while (count<attempts) {
+        def health = runCephCommand(master, ADMIN_HOST, 'ceph health')['return'][0].values()[0]
+        if (health.contains('HEALTH_OK')) {
+            common.infoMsg('Cluster is healthy')
+            break;
+        }
+        count++
+        sleep(10)
+    }
+}
+
+def backup(master, target) {
+    stage("backup ${target}") {
+
+        def _pillar = salt.getGrain(master, 'I@salt:master', 'domain')
+        def domain = _pillar['return'][0].values()[0].values()[0]
+
+        def kvm_pillar = salt.getGrain(master, 'I@salt:control', 'id')
+        def kvm01 = kvm_pillar['return'][0].values()[0].values()[0]
+
+        def target_pillar = salt.getGrain(master, "I@ceph:${target}", 'host')
+        def minions = target_pillar['return'][0].values()
+        for (minion in minions) {
+            def minion_name = minion.values()[0]
+            def provider_pillar = salt.getPillar(master, "${kvm01}", "salt:control:cluster:internal:node:${minion_name}:provider")
+            def minionProvider = provider_pillar['return'][0].values()[0]
+
+            waitForHealthy(master)
+            try {
+                salt.cmdRun(master, "${minionProvider}", "[ ! -f /root/${minion_name}.${domain}.qcow2.bak ] && virsh destroy ${minion_name}.${domain}")
+            } catch (Exception e) {
+                common.warningMsg('Backup already exists')
+            }
+            try {
+                salt.cmdRun(master, "${minionProvider}", "[ ! -f /root/${minion_name}.${domain}.qcow2.bak ] && cp /var/lib/libvirt/images/${minion_name}.${domain}/system.qcow2 /root/${minion_name}.${domain}.qcow2.bak")
+            } catch (Exception e) {
+                common.warningMsg('Backup already exists')
+            }
+            try {
+                salt.cmdRun(master, "${minionProvider}", "virsh start ${minion_name}.${domain}")
+            } catch (Exception e) {
+                common.warningMsg(e)
+            }
+            salt.minionsReachable(master, 'I@salt:master', "${minion_name}*")
+            waitForHealthy(master)
+        }
+    }
+    return
+}
+
+def upgrade(master, target) {
+
+    stage("Change ${target} repos") {
+        salt.runSaltProcessStep(master, "I@ceph:${target}", 'saltutil.refresh_pillar', [], null, true, 5)
+        salt.enforceState(master, "I@ceph:${target}", 'linux.system.repo', true)
+    }
+    if (target == 'mgr') {
+        stage('Run ceph mgr state') {
+            salt.enforceState(master, "I@ceph:mgr", "ceph.mgr", true)
+        }
+    }
+    if (target == 'common') {
+        stage('Upgrade ceph-common pkgs') {
+            runCephCommand(master, "I@ceph:${target}", "apt install ceph-${target} -y")
+        }
+    } else {
+        minions = salt.getMinions(master, "I@ceph:${target}")
+
+        for (minion in minions) {
+            // upgrade pkgs
+            if (target == 'radosgw') {
+                stage('Upgrade radosgw pkgs') {
+                    runCephCommand(master, "I@ceph:${target}", "apt install ${target} -y ")
+                }
+            } else {
+                stage("Upgrade ${target} pkgs on ${minion}") {
+                    runCephCommand(master, "${minion}", "apt install ceph-${target} -y")
+                }
+            }
+            // restart services
+            stage("Restart ${target} services on ${minion}") {
+                runCephCommand(master, "${minion}", "systemctl restart ceph-${target}.target")
+            }
+
+            stage("Verify services for ${minion}") {
+                sleep(10)
+                runCephCommand(master, ADMIN_HOST, "ceph -s")
+            }
+
+            stage('Ask for manual confirmation') {
+                input message: "From the verification command above, please check that Ceph ${target} joined the cluster correctly. If so, do you want to continue upgrading the next node?"
+            }
+        }
+    }
+    runCephCommand(master, ADMIN_HOST, "ceph versions")
+    sleep(5)
+    return
+}
+
+node("python") {
+
+    // create connection to salt master
+    python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+
+    if (BACKUP_ENABLED.toBoolean() == true) {
+        if (STAGE_UPGRADE_MON.toBoolean() == true) {
+            backup(pepperEnv, 'mon')
+        }
+        if (STAGE_UPGRADE_RGW.toBoolean() == true) {
+            backup(pepperEnv, 'radosgw')
+        }
+    }
+
+    if (flags.size() > 0) {
+        stage('Set cluster flags') {
+            for (flag in flags) {
+                runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd set ' + flag)
+            }
+        }
+    }
+
+    if (STAGE_UPGRADE_MON.toBoolean() == true) {
+        upgrade(pepperEnv, 'mon')
+    }
+
+    if (STAGE_UPGRADE_MGR.toBoolean() == true) {
+        upgrade(pepperEnv, 'mgr')
+    }
+
+    if (STAGE_UPGRADE_OSD.toBoolean() == true) {
+        upgrade(pepperEnv, 'osd')
+    }
+
+    if (STAGE_UPGRADE_RGW.toBoolean() == true) {
+        upgrade(pepperEnv, 'radosgw')
+    }
+
+    if (STAGE_UPGRADE_CLIENT.toBoolean() == true) {
+        upgrade(pepperEnv, 'common')
+    }
+
+    // remove cluster flags
+    if (flags.size() > 0) {
+        stage('Unset cluster flags') {
+            for (flag in flags) {
+                if (!flag.contains('sortbitwise')) {
+                    common.infoMsg('Removing flag ' + flag)
+                    runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd unset ' + flag)
+                }
+
+            }
+        }
+    }
+
+    if (STAGE_FINALIZE.toBoolean() == true) {
+        stage("Finalize ceph version upgrade") {
+            runCephCommand(pepperEnv, ADMIN_HOST, "ceph osd require-osd-release ${TARGET_RELEASE}")
+            try {
+                runCephCommand(pepperEnv, ADMIN_HOST, "ceph osd set-require-min-compat-client ${ORIGIN_RELEASE}")
+            } catch (Exception e) {
+                common.warningMsg(e)
+            }
+            try {
+                runCephCommand(pepperEnv, ADMIN_HOST, "ceph osd crush tunables optimal")
+            } catch (Exception e) {
+                common.warningMsg(e)
+            }
+        }
+    }
+
+    // wait for healthy cluster
+    if (WAIT_FOR_HEALTHY.toBoolean() == true) {
+        waitForHealthy(pepperEnv)
+    }
+}
diff --git a/change-config.groovy b/change-config.groovy
index 44832ed..0b4538c 100644
--- a/change-config.groovy
+++ b/change-config.groovy
@@ -9,13 +9,15 @@
  *   TARGET_SUBSET_TEST         Number of nodes to test config changes, empty string means all targetted nodes.
  *   TARGET_SUBSET_LIVE         Number of selected noded to live apply selected config changes.
  *   TARGET_BATCH_LIVE          Batch size for the complete live config changes on all nodes, empty string means apply to all targetted nodes.
+ *   PULL_MODEL                 Pull the latest cluster model using reclass.storage.data state
  *
 **/
 
 def common = new com.mirantis.mk.Common()
 def salt = new com.mirantis.mk.Salt()
+def python = new com.mirantis.mk.Python()
 
-def saltMaster
+def pepperEnv = "pepperEnv"
 def targetTestSubset
 def targetLiveSubset
 def targetLiveAll
@@ -33,12 +35,20 @@
             states = null
         }
 
-        stage('Connect to Salt master') {
-            saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+        stage('Setup virtualenv for Pepper') {
+            python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+        }
+
+        if (common.validInputParam("PULL_MODEL") && PULL_MODEL.toBoolean() == true) {
+            stage('Update the reclass cluster model') {
+                def saltMasterTarget = ['expression': 'I@salt:master', 'type': 'compound']
+                result = salt.runSaltCommand(pepperEnv, 'local', saltMasterTarget, 'state.apply', null, "reclass.storage.data")
+                salt.checkResult(result)
+            }
         }
 
         stage('List target servers') {
-            minions = salt.getMinions(saltMaster, TARGET_SERVERS)
+            minions = salt.getMinions(pepperEnv, TARGET_SERVERS)
             if (minions.isEmpty()) {
                 throw new Exception("No minion was targeted")
             }
@@ -59,7 +69,7 @@
             def kwargs = [
                 'test': true
             ]
-            result = salt.runSaltCommand(saltMaster, 'local', targetTestSubset, 'state.apply', null, states, kwargs)
+            result = salt.runSaltCommand(pepperEnv, 'local', targetTestSubset, 'state.apply', null, states, kwargs)
             salt.checkResult(result)
         }
 
@@ -70,7 +80,7 @@
         }
 
         stage('Apply config changes on sample') {
-            result = salt.runSaltCommand(saltMaster, 'local', targetLiveSubset, 'state.apply', null, states)
+            result = salt.runSaltCommand(pepperEnv, 'local', targetLiveSubset, 'state.apply', null, states)
             salt.checkResult(result)
         }
 
@@ -81,7 +91,7 @@
         }
 
         stage('Apply config changes on all nodes') {
-            result = salt.runSaltCommand(saltMaster, 'local', targetLiveAll, 'state.apply', null, states)
+            result = salt.runSaltCommand(pepperEnv, 'local', targetLiveAll, 'state.apply', null, states)
             salt.checkResult(result)
         }
 
diff --git a/cicd-lab-pipeline.groovy b/cicd-lab-pipeline.groovy
index 8902e1f..9fc42d3 100644
--- a/cicd-lab-pipeline.groovy
+++ b/cicd-lab-pipeline.groovy
@@ -31,13 +31,15 @@
 openstack = new com.mirantis.mk.Openstack()
 salt = new com.mirantis.mk.Salt()
 orchestrate = new com.mirantis.mk.Orchestrate()
+def python = new com.mirantis.mk.Python()
+
+def pepperEnv = "pepperEnv"
 _MAX_PERMITTED_STACKS = 2
 
 node {
     try {
         // connection objects
         def openstackCloud
-        def saltMaster
 
         // value defaults
         def openstackVersion = OPENSTACK_API_CLIENT ? OPENSTACK_API_CLIENT : 'liberty'
@@ -115,7 +117,7 @@
             saltMasterHost = openstack.getHeatStackOutputParam(openstackCloud, HEAT_STACK_NAME, 'salt_master_ip', openstackEnv)
             currentBuild.description = "${HEAT_STACK_NAME}: ${saltMasterHost}"
             saltMasterUrl = "http://${saltMasterHost}:${saltMasterPort}"
-            saltMaster = salt.connection(saltMasterUrl, SALT_MASTER_CREDENTIALS)
+            python.setupPepperVirtualenv(pepperEnv, saltMasterUrl, SALT_MASTER_CREDENTIALS)
         }
 
         //
@@ -128,42 +130,42 @@
             // sync_all
             // linux,openssh,salt.minion.ntp
 
-            orchestrate.installFoundationInfra(saltMaster)
-            orchestrate.validateFoundationInfra(saltMaster)
+            orchestrate.installFoundationInfra(pepperEnv)
+            orchestrate.validateFoundationInfra(pepperEnv)
         }
 
         stage("Deploy GlusterFS") {
-            salt.enforceState(saltMaster, 'I@glusterfs:server', 'glusterfs.server.service', true)
+            salt.enforceState(pepperEnv, 'I@glusterfs:server', 'glusterfs.server.service', true)
             retry(2) {
-                salt.enforceState(saltMaster, 'ci01*', 'glusterfs.server.setup', true)
+                salt.enforceState(pepperEnv, 'ci01*', 'glusterfs.server.setup', true)
             }
             sleep(5)
-            salt.enforceState(saltMaster, 'I@glusterfs:client', 'glusterfs.client', true)
+            salt.enforceState(pepperEnv, 'I@glusterfs:client', 'glusterfs.client', true)
 
             timeout(5) {
                 println "Waiting for GlusterFS volumes to get mounted.."
-                salt.cmdRun(saltMaster, 'I@glusterfs:client', 'while true; do systemctl -a|grep "GlusterFS File System"|grep -v mounted >/dev/null || break; done')
+                salt.cmdRun(pepperEnv, 'I@glusterfs:client', 'while true; do systemctl -a|grep "GlusterFS File System"|grep -v mounted >/dev/null || break; done')
             }
-            print common.prettyPrint(salt.cmdRun(saltMaster, 'I@glusterfs:client', 'mount|grep fuse.glusterfs || echo "Command failed"'))
+            print common.prettyPrint(salt.cmdRun(pepperEnv, 'I@glusterfs:client', 'mount|grep fuse.glusterfs || echo "Command failed"'))
         }
 
         stage("Deploy GlusterFS") {
-            salt.enforceState(saltMaster, 'I@haproxy:proxy', 'haproxy,keepalived')
+            salt.enforceState(pepperEnv, 'I@haproxy:proxy', 'haproxy,keepalived')
         }
 
         stage("Setup Docker Swarm") {
-            salt.enforceState(saltMaster, 'I@docker:host', 'docker.host', true)
-            salt.enforceState(saltMaster, 'I@docker:swarm:role:master', 'docker.swarm', true)
-            salt.enforceState(saltMaster, 'I@docker:swarm:role:master', 'salt', true)
-            salt.runSaltProcessStep(saltMaster, 'I@docker:swarm:role:master', 'mine.flush')
-            salt.runSaltProcessStep(saltMaster, 'I@docker:swarm:role:master', 'mine.update')
-            salt.enforceState(saltMaster, 'I@docker:swarm', 'docker.swarm', true)
-            print common.prettyPrint(salt.cmdRun(saltMaster, 'I@docker:swarm:role:master', 'docker node ls'))
+            salt.enforceState(pepperEnv, 'I@docker:host', 'docker.host', true)
+            salt.enforceState(pepperEnv, 'I@docker:swarm:role:master', 'docker.swarm', true)
+            salt.enforceState(pepperEnv, 'I@docker:swarm:role:master', 'salt', true)
+            salt.runSaltProcessStep(pepperEnv, 'I@docker:swarm:role:master', 'mine.flush')
+            salt.runSaltProcessStep(pepperEnv, 'I@docker:swarm:role:master', 'mine.update')
+            salt.enforceState(pepperEnv, 'I@docker:swarm', 'docker.swarm', true)
+            print common.prettyPrint(salt.cmdRun(pepperEnv, 'I@docker:swarm:role:master', 'docker node ls'))
         }
 
         stage("Configure OSS services") {
-            salt.enforceState(saltMaster, 'I@devops_portal:config', 'devops_portal.config')
-            salt.enforceState(saltMaster, 'I@rundeck:server', 'rundeck.server')
+            salt.enforceState(pepperEnv, 'I@devops_portal:config', 'devops_portal.config')
+            salt.enforceState(pepperEnv, 'I@rundeck:server', 'rundeck.server')
         }
 
         stage("Deploy Docker services") {
@@ -171,19 +173,19 @@
             // services are deployed
             // XXX: for some weird unknown reason, refresh_pillar is
             // required to execute here
-            salt.runSaltProcessStep(saltMaster, 'I@aptly:publisher', 'saltutil.refresh_pillar', [], null, true)
-            salt.enforceState(saltMaster, 'I@aptly:publisher', 'aptly.publisher', true)
+            salt.runSaltProcessStep(pepperEnv, 'I@aptly:publisher', 'saltutil.refresh_pillar', [], null, true)
+            salt.enforceState(pepperEnv, 'I@aptly:publisher', 'aptly.publisher', true)
             retry(3) {
                 sleep(5)
-                salt.enforceState(saltMaster, 'I@docker:swarm:role:master', 'docker.client')
+                salt.enforceState(pepperEnv, 'I@docker:swarm:role:master', 'docker.client')
             }
             // XXX: Workaround to have `/var/lib/jenkins` on all
             // nodes where are jenkins_slave services are created.
-            salt.runSaltProcessStep(saltMaster, 'I@docker:swarm', 'cmd.run', ['mkdir -p /var/lib/jenkins'])
+            salt.runSaltProcessStep(pepperEnv, 'I@docker:swarm', 'cmd.run', ['mkdir -p /var/lib/jenkins'])
         }
 
         stage("Configure CI/CD services") {
-            salt.syncAll(saltMaster, '*')
+            salt.syncAll(pepperEnv, '*')
 
             // Aptly
             timeout(10) {
@@ -192,68 +194,68 @@
                     // XXX: retry to workaround magical VALUE_TRIMMED
                     // response from salt master + to give slow cloud some
                     // more time to settle down
-                    salt.cmdRun(saltMaster, 'I@aptly:server', 'while true; do curl -sf http://172.16.10.254:8084/api/version >/dev/null && break; done')
+                    salt.cmdRun(pepperEnv, 'I@aptly:server', 'while true; do curl -sf http://172.16.10.254:8084/api/version >/dev/null && break; done')
                 }
             }
-            salt.enforceState(saltMaster, 'I@aptly:server', 'aptly', true)
+            salt.enforceState(pepperEnv, 'I@aptly:server', 'aptly', true)
 
             // OpenLDAP
             timeout(10) {
                 println "Waiting for OpenLDAP to come up.."
-                salt.cmdRun(saltMaster, 'I@openldap:client', 'while true; do curl -sf ldap://172.16.10.254 >/dev/null && break; done')
+                salt.cmdRun(pepperEnv, 'I@openldap:client', 'while true; do curl -sf ldap://172.16.10.254 >/dev/null && break; done')
             }
-            salt.enforceState(saltMaster, 'I@openldap:client', 'openldap', true)
+            salt.enforceState(pepperEnv, 'I@openldap:client', 'openldap', true)
 
             // Gerrit
             timeout(10) {
                 println "Waiting for Gerrit to come up.."
-                salt.cmdRun(saltMaster, 'I@gerrit:client', 'while true; do curl -sf 172.16.10.254:8080 >/dev/null && break; done')
+                salt.cmdRun(pepperEnv, 'I@gerrit:client', 'while true; do curl -sf 172.16.10.254:8080 >/dev/null && break; done')
             }
-            salt.enforceState(saltMaster, 'I@gerrit:client', 'gerrit', true)
+            salt.enforceState(pepperEnv, 'I@gerrit:client', 'gerrit', true)
 
             // Jenkins
             timeout(10) {
                 println "Waiting for Jenkins to come up.."
-                salt.cmdRun(saltMaster, 'I@jenkins:client', 'while true; do curl -sf 172.16.10.254:8081 >/dev/null && break; done')
+                salt.cmdRun(pepperEnv, 'I@jenkins:client', 'while true; do curl -sf 172.16.10.254:8081 >/dev/null && break; done')
             }
             retry(2) {
                 // XXX: needs retry as first run installs python-jenkins
                 // thus make jenkins modules available for second run
-                salt.enforceState(saltMaster, 'I@jenkins:client', 'jenkins', true)
+                salt.enforceState(pepperEnv, 'I@jenkins:client', 'jenkins', true)
             }
 
             // Postgres client - initialize OSS services databases
             timeout(300){
                 println "Waiting for postgresql database to come up.."
-                salt.cmdRun(saltMaster, 'I@postgresql:client', 'while true; do if docker service logs postgresql_db | grep "ready to accept"; then break; else sleep 5; fi; done')
+                salt.cmdRun(pepperEnv, 'I@postgresql:client', 'while true; do if docker service logs postgresql_postgresql-db | grep "ready to accept"; then break; else sleep 5; fi; done')
             }
             // XXX: first run usually fails on some inserts, but we need to create databases at first 
-            salt.enforceState(saltMaster, 'I@postgresql:client', 'postgresql.client', true, false)
+            salt.enforceState(pepperEnv, 'I@postgresql:client', 'postgresql.client', true, false)
 
             // Setup postgres database with integration between
             // Pushkin notification service and Security Monkey security audit service
             timeout(10) {
                 println "Waiting for Pushkin to come up.."
-                salt.cmdRun(saltMaster, 'I@postgresql:client', 'while true; do curl -sf 172.16.10.254:8887/apps >/dev/null && break; done')
+                salt.cmdRun(pepperEnv, 'I@postgresql:client', 'while true; do curl -sf 172.16.10.254:8887/apps >/dev/null && break; done')
             }
-            salt.enforceState(saltMaster, 'I@postgresql:client', 'postgresql.client', true)
+            salt.enforceState(pepperEnv, 'I@postgresql:client', 'postgresql.client', true)
 
             // Rundeck
             timeout(10) {
                 println "Waiting for Rundeck to come up.."
-                salt.cmdRun(saltMaster, 'I@rundeck:client', 'while true; do curl -sf 172.16.10.254:4440 >/dev/null && break; done')
+                salt.cmdRun(pepperEnv, 'I@rundeck:client', 'while true; do curl -sf 172.16.10.254:4440 >/dev/null && break; done')
             }
-            salt.enforceState(saltMaster, 'I@rundeck:client', 'rundeck.client', true)
+            salt.enforceState(pepperEnv, 'I@rundeck:client', 'rundeck.client', true)
 
             // Elasticsearch
             timeout(10) {
                 println 'Waiting for Elasticsearch to come up..'
-                salt.cmdRun(saltMaster, 'I@elasticsearch:client', 'while true; do curl -sf 172.16.10.254:9200 >/dev/null && break; done')
+                salt.cmdRun(pepperEnv, 'I@elasticsearch:client', 'while true; do curl -sf 172.16.10.254:9200 >/dev/null && break; done')
             }
             retry(3){
               sleep(10)
               // XXX: first run sometimes fails on update indexes, so we need to wait
-              salt.enforceState(saltMaster, 'I@elasticsearch:client', 'elasticsearch.client', true)
+              salt.enforceState(pepperEnv, 'I@elasticsearch:client', 'elasticsearch.client', true)
             }
         }
 
@@ -263,7 +265,7 @@
             //
             def adminUser
             def authorizedKeysFile
-            def adminUserCmdOut = salt.cmdRun(saltMaster, 'I@salt:master', "[ ! -d /home/ubuntu ] || echo 'ubuntu user exists'")
+            def adminUserCmdOut = salt.cmdRun(pepperEnv, 'I@salt:master', "[ ! -d /home/ubuntu ] || echo 'ubuntu user exists'")
             if (adminUserCmdOut =~ /ubuntu user exists/) {
                 adminUser = "ubuntu"
                 authorizedKeysFile = "/home/ubuntu/.ssh/authorized_keys"
@@ -274,7 +276,7 @@
 
             if (sshPubKey) {
                 println "Deploying provided ssh key at ${authorizedKeysFile}"
-                salt.cmdRun(saltMaster, '*', "echo '${sshPubKey}' | tee -a ${authorizedKeysFile}")
+                salt.cmdRun(pepperEnv, '*', "echo '${sshPubKey}' | tee -a ${authorizedKeysFile}")
             }
 
             //
@@ -284,14 +286,14 @@
                 try {
                     // Run the sphinx state to install sphinx-build, which is needed
                     // by the upcoming orchestrate run
-                    salt.enforceState(saltMaster, 'I@sphinx:server', 'sphinx')
+                    salt.enforceState(pepperEnv, 'I@sphinx:server', 'sphinx')
                 } catch (Throwable e) {
                     true
                 }
                 retry(3) {
                     // TODO: fix salt.orchestrateSystem
-                    // print salt.orchestrateSystem(saltMaster, ['expression': '*', 'type': 'compound'], 'sphinx.orch.generate_doc')
-                    def out = salt.cmdRun(saltMaster, 'I@salt:master', 'salt-run state.orchestrate sphinx.orch.generate_doc || echo "Command execution failed"')
+                    // print salt.orchestrateSystem(pepperEnv, ['expression': '*', 'type': 'compound'], 'sphinx.orch.generate_doc')
+                    def out = salt.cmdRun(pepperEnv, 'I@salt:master', 'salt-run state.orchestrate sphinx.orch.generate_doc || echo "Command execution failed"')
                     print common.prettyPrint(out)
                     if (out =~ /Command execution failed/) {
                         throw new Exception("Command execution failed")
@@ -302,9 +304,9 @@
                 // errors are just ignored here
                 true
             }
-            salt.enforceState(saltMaster, 'I@nginx:server', 'nginx')
+            salt.enforceState(pepperEnv, 'I@nginx:server', 'nginx')
 
-            def failedSvc = salt.cmdRun(saltMaster, '*', """systemctl --failed | grep -E 'loaded[ \t]+failed' && echo 'Command execution failed' || true""")
+            def failedSvc = salt.cmdRun(pepperEnv, '*', """systemctl --failed | grep -E 'loaded[ \t]+failed' && echo 'Command execution failed' || true""")
             if (failedSvc =~ /Command execution failed/) {
                 common.errorMsg("Some services are not running. Environment may not be fully functional!")
             }
diff --git a/cloud-deploy-pipeline.groovy b/cloud-deploy-pipeline.groovy
index 3c8846c..cbb09b7 100644
--- a/cloud-deploy-pipeline.groovy
+++ b/cloud-deploy-pipeline.groovy
@@ -31,6 +31,7 @@
  *   OPENSTACK_API_PROJECT      OpenStack project to connect to
  *   OPENSTACK_API_CLIENT       Versions of OpenStack python clients
  *   OPENSTACK_API_VERSION      Version of the OpenStack API (2/3)
+ *   SLAVE_NODE                 Label or node name where the job will be run
 
  *   SALT_MASTER_CREDENTIALS    Credentials to the Salt API
  *  required for STACK_TYPE=physical
@@ -55,6 +56,7 @@
 openstack = new com.mirantis.mk.Openstack()
 aws = new com.mirantis.mk.Aws()
 orchestrate = new com.mirantis.mk.Orchestrate()
+python = new com.mirantis.mk.Python()
 salt = new com.mirantis.mk.Salt()
 test = new com.mirantis.mk.Test()
 
@@ -62,8 +64,8 @@
 overwriteFile = "/srv/salt/reclass/classes/cluster/override.yml"
 
 // Define global variables
-def saltMaster
 def venv
+def venvPepper
 def outputs = [:]
 
 def ipRegex = "\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}"
@@ -75,10 +77,17 @@
     def openstackCloud
 }
 
-node("python") {
+def slave_node = 'python'
+
+if (common.validInputParam('SLAVE_NODE')) {
+    slave_node = SLAVE_NODE
+}
+
+node(slave_node) {
     try {
         // Set build-specific variables
         venv = "${env.WORKSPACE}/venv"
+        venvPepper = "${env.WORKSPACE}/venvPepper"
 
         //
         // Prepare machines
@@ -112,6 +121,7 @@
 
                 // no underscore in STACK_NAME
                 STACK_NAME = STACK_NAME.replaceAll('_', '-')
+                outputs.put('stack_name', STACK_NAME)
 
                 // set description
                 currentBuild.description = "${STACK_NAME}"
@@ -151,6 +161,12 @@
                         common.infoMsg("Property STACK_RECLASS_BRANCH or STACK_RECLASS_ADDRESS not found! Using default values from template.")
                     }
 
+                    // put formulas revision - stable, testing or nightly
+                    if (common.validInputParam('FORMULA_PKG_REVISION')) {
+                        common.infoMsg("Setting formulas revision to ${FORMULA_PKG_REVISION}")
+                        envParams.put('cfg_formula_pkg_revision', FORMULA_PKG_REVISION)
+                    }
+
                     openstack.createHeatStack(openstackCloud, STACK_NAME, STACK_TEMPLATE, envParams, HEAT_STACK_ENVIRONMENT, venv)
                 }
 
@@ -194,6 +210,7 @@
 
                 // set description
                 currentBuild.description = STACK_NAME
+                outputs.put('stack_name', STACK_NAME)
 
                 if (STACK_REUSE.toBoolean() == false) {
                     // get templates
@@ -229,15 +246,15 @@
 
             outputs.put('salt_api', SALT_MASTER_URL)
 
-            // Connect to Salt master
-            saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+            // Setup virtualenv for pepper
+            python.setupPepperVirtualenv(venvPepper, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
         }
 
 
         // Set up override params
         if (common.validInputParam('SALT_OVERRIDES')) {
             stage('Set Salt overrides') {
-                salt.setSaltOverrides(saltMaster,  SALT_OVERRIDES)
+                salt.setSaltOverrides(venvPepper,  SALT_OVERRIDES)
             }
         }
 
@@ -247,30 +264,34 @@
 
         if (common.checkContains('STACK_INSTALL', 'core')) {
             stage('Install core infrastructure') {
-                orchestrate.installFoundationInfra(saltMaster)
+                orchestrate.installFoundationInfra(venvPepper)
 
                 if (common.checkContains('STACK_INSTALL', 'kvm')) {
-                    orchestrate.installInfraKvm(saltMaster)
-                    orchestrate.installFoundationInfra(saltMaster)
+                    orchestrate.installInfraKvm(venvPepper)
+                    orchestrate.installFoundationInfra(venvPepper)
                 }
 
-                orchestrate.validateFoundationInfra(saltMaster)
+                orchestrate.validateFoundationInfra(venvPepper)
             }
         }
 
         // install ceph
         if (common.checkContains('STACK_INSTALL', 'ceph')) {
             stage('Install Ceph MONs') {
-                orchestrate.installCephMon(saltMaster)
+                orchestrate.installCephMon(venvPepper)
             }
 
             stage('Install Ceph OSDs') {
-                orchestrate.installCephOsd(saltMaster)
+                orchestrate.installCephOsd(venvPepper)
             }
 
 
             stage('Install Ceph clients') {
-                orchestrate.installCephClient(saltMaster)
+                orchestrate.installCephClient(venvPepper)
+            }
+
+            stage('Connect Ceph') {
+                orchestrate.connectCeph(venvPepper)
             }
         }
 
@@ -283,55 +304,58 @@
                     def awsOutputs = aws.getOutputs(venv, aws_env_vars, STACK_NAME)
                     common.prettyPrint(awsOutputs)
                     if (awsOutputs.containsKey('ControlLoadBalancer')) {
-                        salt.runSaltProcessStep(saltMaster, 'I@salt:master', 'reclass.cluster_meta_set', ['kubernetes_control_address', awsOutputs['ControlLoadBalancer']], null, true)
+                        salt.runSaltProcessStep(venvPepper, 'I@salt:master', 'reclass.cluster_meta_set', ['kubernetes_control_address', awsOutputs['ControlLoadBalancer']], null, true)
                         outputs.put('kubernetes_apiserver', 'https://' + awsOutputs['ControlLoadBalancer'])
                     }
                 }
 
                 // ensure certificates are generated properly
-                salt.runSaltProcessStep(saltMaster, '*', 'saltutil.refresh_pillar', [], null, true)
-                salt.enforceState(saltMaster, '*', ['salt.minion.cert'], true)
+                salt.runSaltProcessStep(venvPepper, '*', 'saltutil.refresh_pillar', [], null, true)
+                salt.enforceState(venvPepper, '*', ['salt.minion.cert'], true)
 
-                orchestrate.installKubernetesInfra(saltMaster)
+                orchestrate.installKubernetesInfra(venvPepper)
             }
 
             if (common.checkContains('STACK_INSTALL', 'contrail')) {
                 stage('Install Contrail for Kubernetes') {
-                    orchestrate.installContrailNetwork(saltMaster)
-                    orchestrate.installContrailCompute(saltMaster)
+                    orchestrate.installContrailNetwork(venvPepper)
+                    orchestrate.installContrailCompute(venvPepper)
                 }
             }
 
             stage('Install Kubernetes control') {
-                orchestrate.installKubernetesControl(saltMaster)
+                orchestrate.installKubernetesControl(venvPepper)
 
                 // collect artifacts (kubeconfig)
-                writeFile(file: 'kubeconfig', text: salt.getFileContent(saltMaster, 'I@kubernetes:master and *01*', '/etc/kubernetes/admin-kube-config'))
+                writeFile(file: 'kubeconfig', text: salt.getFileContent(venvPepper, 'I@kubernetes:master and *01*', '/etc/kubernetes/admin-kube-config'))
                 archiveArtifacts(artifacts: 'kubeconfig')
             }
 
-            stage('Scale Kubernetes computes') {
-                if (STACK_COMPUTE_COUNT > 0) {
-                    if (STACK_TYPE == 'aws') {
-                        // get stack info
-                        def scaling_group = aws.getOutputs(venv, aws_env_vars, STACK_NAME, 'ComputesScalingGroup')
+            stage('Install Kubernetes computes') {
+                if (common.validInputParam('STACK_COMPUTE_COUNT')) {
+                    if (STACK_COMPUTE_COUNT > 0) {
+                        if (STACK_TYPE == 'aws') {
+                            // get stack info
+                            def scaling_group = aws.getOutputs(venv, aws_env_vars, STACK_NAME, 'ComputesScalingGroup')
 
-                        //update autoscaling group
-                        aws.updateAutoscalingGroup(venv, aws_env_vars, scaling_group, ["--desired-capacity " + STACK_COMPUTE_COUNT])
+                            //update autoscaling group
+                            aws.updateAutoscalingGroup(venv, aws_env_vars, scaling_group, ["--desired-capacity " + STACK_COMPUTE_COUNT])
 
-                        // wait for computes to boot up
-                        aws.waitForAutoscalingInstances(venv, aws_env_vars, scaling_group)
-                        sleep(60)
+                            // wait for computes to boot up
+                            aws.waitForAutoscalingInstances(venv, aws_env_vars, scaling_group)
+                            sleep(60)
 
-                    } else if (STACK_TYPE == 'heat') {
-                        envParams.put('cluster_node_count', STACK_COMPUTE_COUNT)
+                        } else if (STACK_TYPE == 'heat') {
+                            envParams.put('cluster_node_count', STACK_COMPUTE_COUNT)
 
-                        openstack.createHeatStack(openstackCloud, STACK_NAME, STACK_TEMPLATE, envParams, HEAT_STACK_ENVIRONMENT, venv, "update")
-                        sleep(60)
+                            openstack.createHeatStack(openstackCloud, STACK_NAME, STACK_TEMPLATE, envParams, HEAT_STACK_ENVIRONMENT, venv, "update")
+                            sleep(60)
+                        }
+
                     }
-
-                    orchestrate.installKubernetesCompute(saltMaster)
                 }
+
+                orchestrate.installKubernetesCompute(venvPepper)
             }
         }
 
@@ -340,37 +364,37 @@
             // install Infra and control, tests, ...
 
             stage('Install OpenStack infra') {
-                orchestrate.installOpenstackInfra(saltMaster)
+                orchestrate.installOpenstackInfra(venvPepper)
             }
 
             stage('Install OpenStack control') {
-                orchestrate.installOpenstackControl(saltMaster)
+                orchestrate.installOpenstackControl(venvPepper)
             }
 
             stage('Install OpenStack network') {
 
                 if (common.checkContains('STACK_INSTALL', 'contrail')) {
-                    orchestrate.installContrailNetwork(saltMaster)
+                    orchestrate.installContrailNetwork(venvPepper)
                 } else if (common.checkContains('STACK_INSTALL', 'ovs')) {
-                    orchestrate.installOpenstackNetwork(saltMaster)
+                    orchestrate.installOpenstackNetwork(venvPepper)
                 }
 
-                salt.runSaltProcessStep(saltMaster, 'I@keystone:server', 'cmd.run', ['. /root/keystonerc; neutron net-list'])
-                salt.runSaltProcessStep(saltMaster, 'I@keystone:server', 'cmd.run', ['. /root/keystonerc; nova net-list'])
+                salt.runSaltProcessStep(venvPepper, 'I@keystone:server', 'cmd.run', ['. /root/keystonerc; neutron net-list'])
+                salt.runSaltProcessStep(venvPepper, 'I@keystone:server', 'cmd.run', ['. /root/keystonerc; nova net-list'])
             }
 
-            if (salt.testTarget(saltMaster, 'I@ironic:conductor')){
+            if (salt.testTarget(venvPepper, 'I@ironic:conductor')){
                 stage('Install OpenStack Ironic conductor') {
-                    orchestrate.installIronicConductor(saltMaster)
+                    orchestrate.installIronicConductor(venvPepper)
                 }
             }
 
 
             stage('Install OpenStack compute') {
-                orchestrate.installOpenstackCompute(saltMaster)
+                orchestrate.installOpenstackCompute(venvPepper)
 
                 if (common.checkContains('STACK_INSTALL', 'contrail')) {
-                    orchestrate.installContrailCompute(saltMaster)
+                    orchestrate.installContrailCompute(venvPepper)
                 }
             }
 
@@ -378,22 +402,22 @@
 
         if (common.checkContains('STACK_INSTALL', 'cicd')) {
             stage('Install Cicd') {
-                orchestrate.installDockerSwarm(saltMaster)
-                orchestrate.installCicd(saltMaster)
+                orchestrate.installDockerSwarm(venvPepper)
+                orchestrate.installCicd(venvPepper)
             }
         }
 
         if (common.checkContains('STACK_INSTALL', 'sl-legacy')) {
             stage('Install StackLight v1') {
-                orchestrate.installStacklightv1Control(saltMaster)
-                orchestrate.installStacklightv1Client(saltMaster)
+                orchestrate.installStacklightv1Control(venvPepper)
+                orchestrate.installStacklightv1Client(venvPepper)
             }
         }
 
         if (common.checkContains('STACK_INSTALL', 'stacklight')) {
             stage('Install StackLight') {
-                orchestrate.installDockerSwarm(saltMaster)
-                orchestrate.installStacklight(saltMaster)
+                orchestrate.installDockerSwarm(venvPepper)
+                orchestrate.installStacklight(venvPepper)
             }
         }
 
@@ -408,11 +432,11 @@
                 def output_file = image.replaceAll('/', '-') + '.output'
 
                 // run image
-                test.runConformanceTests(saltMaster, TEST_K8S_API_SERVER, image)
+                test.runConformanceTests(venvPepper, 'ctl01*', TEST_K8S_API_SERVER, image)
 
                 // collect output
                 sh "mkdir -p ${artifacts_dir}"
-                file_content = salt.getFileContent(saltMaster, 'ctl01*', '/tmp/' + output_file)
+                file_content = salt.getFileContent(venvPepper, 'ctl01*', '/tmp/' + output_file)
                 writeFile file: "${artifacts_dir}${output_file}", text: file_content
                 sh "cat ${artifacts_dir}${output_file}"
 
@@ -423,18 +447,18 @@
 
         if (common.checkContains('STACK_TEST', 'openstack')) {
             if (common.checkContains('TEST_DOCKER_INSTALL', 'true')) {
-                test.install_docker(saltMaster, TEST_TEMPEST_TARGET)
+                test.install_docker(venvPepper, TEST_TEMPEST_TARGET)
             }
             stage('Run OpenStack tests') {
-                test.runTempestTests(saltMaster, TEST_TEMPEST_IMAGE, TEST_TEMPEST_TARGET, TEST_TEMPEST_PATTERN)
+                test.runTempestTests(venvPepper, TEST_TEMPEST_IMAGE, TEST_TEMPEST_TARGET, TEST_TEMPEST_PATTERN)
             }
 
             stage('Copy Tempest results to config node') {
-                test.copyTempestResults(saltMaster, TEST_TEMPEST_TARGET)
+                test.copyTempestResults(venvPepper, TEST_TEMPEST_TARGET)
             }
 
             stage('Archive rally artifacts') {
-                test.archiveRallyArtifacts(saltMaster, TEST_TEMPEST_TARGET)
+                test.archiveRallyArtifacts(venvPepper, TEST_TEMPEST_TARGET)
             }
         }
 
@@ -443,8 +467,8 @@
             stage('Run infra tests') {
                 sleep(120)
                 def cmd = "apt-get install -y python-pip && pip install -r /usr/share/salt-formulas/env/ceph/files/testinfra/requirements.txt && python -m pytest --junitxml=/root/report.xml /usr/share/salt-formulas/env/ceph/files/testinfra/"
-                salt.cmdRun(saltMaster, 'I@salt:master', cmd, false)
-                writeFile(file: 'report.xml', text: salt.getFileContent(saltMaster, 'I@salt:master', '/root/report.xml'))
+                salt.cmdRun(venvPepper, 'I@salt:master', cmd, false)
+                writeFile(file: 'report.xml', text: salt.getFileContent(venvPepper, 'I@salt:master', '/root/report.xml'))
                 junit(keepLongStdio: true, testResults: 'report.xml')
             }
         }
@@ -452,7 +476,7 @@
 
         stage('Finalize') {
             if (common.checkContains('STACK_INSTALL', 'finalize')) {
-                salt.runSaltProcessStep(saltMaster, '*', 'state.apply', [], null, true)
+                salt.runSaltProcessStep(venvPepper, '*', 'state.apply', [], null, true)
             }
 
             outputsPretty = common.prettify(outputs)
diff --git a/cvp-sanity.groovy b/cvp-sanity.groovy
new file mode 100644
index 0000000..53c044a
--- /dev/null
+++ b/cvp-sanity.groovy
@@ -0,0 +1,38 @@
+/**
+ *
+ * Launch sanity validation of the cloud
+ *
+ * Expected parameters:
+ *   SALT_MASTER_URL             URL of Salt master
+ *   SALT_MASTER_CREDENTIALS     Credentials to the Salt API
+ *
+ *   SANITY_TESTS_SET            Leave empty for full run or choose a file (test), e.g. test_mtu.py
+ *   SANITY_TESTS_REPO           CVP-sanity-checks repo to clone
+ *   PROXY                       Proxy to use for cloning repo or for pip
+ *
+ */
+
+validate = new com.mirantis.mcp.Validate()
+
+def artifacts_dir = 'validation_artifacts/'
+
+node() {
+    try{
+        stage('Initialization') {
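+            // clone SANITY_TESTS_REPO and prepare a Python virtualenv for the checks (PROXY is used for git/pip when set)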
+            validate.prepareVenv(SANITY_TESTS_REPO, PROXY)
+        }
+
+        stage('Run Infra tests') {
+            sh "mkdir -p ${artifacts_dir}"
+            validate.runSanityTests(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS, SANITY_TESTS_SET, artifacts_dir)
+        }
+        stage ('Publish results') {
+            archiveArtifacts artifacts: "${artifacts_dir}/*"
+            junit "${artifacts_dir}/*.xml"
+        }
+    } catch (Throwable e) {
+        // If there was an error or exception thrown, the build failed
+        currentBuild.result = "FAILURE"
+        throw e
+    }
+}
diff --git a/deploy-k8s-deployments.groovy b/deploy-k8s-deployments.groovy
index 5a0bf9d..5989dea 100644
--- a/deploy-k8s-deployments.groovy
+++ b/deploy-k8s-deployments.groovy
@@ -1,17 +1,17 @@
 def common = new com.mirantis.mk.Common()
 def salt = new com.mirantis.mk.Salt()
+def python = new com.mirantis.mk.Python()
+def pepperEnv = "pepperEnv"
 
 targetExpression = TARGET_MINIONS ? TARGET_MINIONS : "E@kvm01.*"
 
 node() {
-    def saltMaster
-    stage('Connect to Salt master') {
-        saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
-    }
+    python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+
     common.infoMsg("Enforcing kubernetes state..")
     stage("Update k8s control") {
         salt.enforceState(
-            saltMaster,
+            pepperEnv,
             targetExpression,
             'kubernetes.control',
             true
@@ -28,13 +28,13 @@
 
         if (extraCommand) {
             salt.cmdRun(
-                saltMaster,
+                pepperEnv,
                 targetExpression,
                 extraCommand
             )
         }
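+        // apply every manifest under /srv/kubernetes (except jobs/); job manifests are applied only when no job with that name exists yet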
         out = salt.cmdRun(
-            saltMaster,
+            pepperEnv,
             targetExpression,
             '/bin/bash -c \'find /srv/kubernetes/ -type d | grep -v jobs | while read i; do ls $i/*.yml &>/dev/null && (set -x; hyperkube kubectl apply -f $i || echo Command failed; set +x); done; jobs=$(hyperkube kubectl get jobs -o name); find /srv/kubernetes/jobs -type f -name "*.yml" | while read i; do name=$(grep "name:" $i | head -1 | awk "{print $NF}"); echo $jobs|grep $name >/dev/null || (set -x; hyperkube kubectl apply -f $i || echo Command failed; set +x);done\''
         )
diff --git a/docker-build-image-pipeline.groovy b/docker-build-image-pipeline.groovy
index c23f1c3..926fd88 100644
--- a/docker-build-image-pipeline.groovy
+++ b/docker-build-image-pipeline.groovy
@@ -31,6 +31,23 @@
       stage("checkout") {
          git.checkoutGitRepository('.', IMAGE_GIT_URL, IMAGE_BRANCH, IMAGE_CREDENTIALS_ID)
       }
+
+      if (IMAGE_BRANCH == "master") {
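+        // for master builds, derive image tags via git describe: the latest tag, <tag>-<commits since tag> and 'latest'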
+        try {
+          def tag = sh(script: "git describe --tags --abbrev=0", returnStdout: true).trim()
+          def revision = sh(script: "git describe --tags --abbrev=4 | grep -oP \"^${tag}-\\K.*\" | awk -F\\- '{print \$1}'", returnStdout: true).trim()
+          imageTagsList << tag
+          revision = revision ? revision : "0"
+          imageTagsList << "${tag}-${revision}"
+
+          if (!imageTagsList.contains("latest")) {
+            imageTagsList << "latest"
+          }
+        } catch (Exception e) {
+          common.infoMsg("Impossible to find any tag")
+        }
+      }
+
       stage("build") {
         common.infoMsg("Building docker image ${IMAGE_NAME}")
         dockerApp = dockerLib.buildDockerImage(IMAGE_NAME, "", "${workspace}/${DOCKERFILE_PATH}", imageTagsList[0], buildArgs)
diff --git a/docker-cleanup-pipeline.groovy b/docker-cleanup-pipeline.groovy
index 1d7b3b4..677efdf 100644
--- a/docker-cleanup-pipeline.groovy
+++ b/docker-cleanup-pipeline.groovy
@@ -9,14 +9,17 @@
 common = new com.mirantis.mk.Common()
 salt = new com.mirantis.mk.Salt()
 jenkinsUtils = new com.mirantis.mk.JenkinsUtils()
+def python = new com.mirantis.mk.Python()
+
+def pepperEnv = "pepperEnv"
 
 node{
   def saltMaster;
-  stage("Connect to MCP salt master"){
-    saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+  stage('Setup virtualenv for Pepper') {
+    python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
   }
   stage("Clean old containers"){
-    salt.cmdRun(saltMaster, 'I@jenkins:slave', """
+    salt.cmdRun(pepperEnv, 'I@jenkins:slave', """
         docker ps --format='{{.ID}}' | xargs -n 1 -r docker inspect \\
         -f '{{.ID}} {{.State.Running}} {{.State.StartedAt}}' \\
         | awk '\$2 == "true" && \$3 <= "'\$(date -d '${TEST_DATE_STRING}' -Ins --utc \\
@@ -25,6 +28,6 @@
         """, false)
   }
   stage("Run docker system prune"){
-    salt.cmdRun(saltMaster, 'I@jenkins:slave', "docker system prune -f")
+    salt.cmdRun(pepperEnv, 'I@jenkins:slave', "docker system prune -f")
   }
 }
diff --git a/generate-cookiecutter-products.groovy b/generate-cookiecutter-products.groovy
index 4326e7e..f94965d 100644
--- a/generate-cookiecutter-products.groovy
+++ b/generate-cookiecutter-products.groovy
@@ -69,7 +69,7 @@
             }
         }
 
-        def productList = ["infra", "cicd", "opencontrail", "kubernetes", "openstack", "stacklight", "ceph"]
+        def productList = ["infra", "cicd", "opencontrail", "kubernetes", "openstack", "oss", "stacklight", "ceph"]
         for (product in productList) {
 
             // get templateOutputDir and productDir
@@ -138,7 +138,7 @@
             }
         }
 
-        stage("Generate config drive") {
+        stage("Generate config drives") {
             // apt package genisoimage is required for this stage
 
             // download create-config-drive
@@ -163,15 +163,38 @@
                 sh "sed -i \"s,export ${i[0]}=.*,export ${i[0]}=${i[1]},\" user_data.sh"
             }
 
-            // create config-drive
+            // create cfg config-drive
             sh "./create-config-drive ${args}"
             sh("mkdir output-${clusterName} && mv ${saltMaster}.${clusterDomain}-config.iso output-${clusterName}/")
-            // save iso to artifacts
+
+            // save cfg iso to artifacts
             archiveArtifacts artifacts: "output-${clusterName}/${saltMaster}.${clusterDomain}-config.iso"
+
+            if (templateContext.default_context.offline_deployment && templateContext.default_context.offline_deployment == 'True'){
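+                // offline deployments get a second config drive that seeds the aptly mirror VM via the mirror_config.sh user data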
+                def aptlyServerHostname = templateContext.default_context.aptly_server_hostname
+                def user_data_script_apt_url = "https://raw.githubusercontent.com/richardfelkl/scripts/master/mirror_config.sh"
+                sh "wget -O mirror_config.sh ${user_data_script_apt_url}"
+
+                def smc_apt = [:]
+                smc_apt['SALT_MASTER_DEPLOY_IP'] = templateContext['default_context']['salt_master_management_address']
+                smc_apt['APTLY_DEPLOY_IP'] = templateContext['default_context']['aptly_server_address']
+                smc_apt['APTLY_DEPLOY_NETMASK'] = templateContext['default_context']['deploy_network_netmask']
+                smc_apt['APTLY_MINION_ID'] = "${aptlyServerHostname}.${clusterDomain}"
+
+                for (i in common.entries(smc_apt)) {
+                    sh "sed -i \"s,export ${i[0]}=.*,export ${i[0]}=${i[1]},\" mirror_config.sh"
+                }
+
+                // create apt config-drive
+                sh "./create-config-drive --user-data mirror_config.sh --hostname ${aptlyServerHostname} ${aptlyServerHostname}.${clusterDomain}-config.iso"
+                sh("mv ${aptlyServerHostname}.${clusterDomain}-config.iso output-${clusterName}/")
+
+                // save apt iso to artifacts
+                archiveArtifacts artifacts: "output-${clusterName}/${aptlyServerHostname}.${clusterDomain}-config.iso"
+            }
         }
 
         stage ('Save changes reclass model') {
-
             sh(returnStatus: true, script: "tar -zcf output-${clusterName}/${clusterName}.tar.gz -C ${modelEnv} .")
             archiveArtifacts artifacts: "output-${clusterName}/${clusterName}.tar.gz"
 
@@ -200,4 +223,4 @@
         }
          // common.sendNotification(currentBuild.result,"",["slack"])
     }
-}
\ No newline at end of file
+}
diff --git a/ironic-node-provision-pipeline.groovy b/ironic-node-provision-pipeline.groovy
index 1c96eaa..05e5313 100644
--- a/ironic-node-provision-pipeline.groovy
+++ b/ironic-node-provision-pipeline.groovy
@@ -38,20 +38,20 @@
 orchestrate = new com.mirantis.mk.Orchestrate()
 salt = new com.mirantis.mk.Salt()
 test = new com.mirantis.mk.Test()
+def python = new com.mirantis.mk.Python()
 
-// Define global variables
-def saltMaster
+def pepperEnv = "pepperEnv"
 def venv
 def outputs = [:]
 
 def ipRegex = "\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}"
 
-def waitIronicDeployment(master, node_names, target, auth_profile, deploy_timeout=60) {
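+// poll ironicng.list_nodes until all requested nodes become active or the timeout expires; returns the names of nodes that failed to deploy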
+def waitIronicDeployment(pepperEnv, node_names, target, auth_profile, deploy_timeout=60) {
     def failed_nodes = []
     timeout (time:  deploy_timeout.toInteger(), unit: 'MINUTES'){
         while (node_names.size() != 0) {
             common.infoMsg("Waiting for nodes: " + node_names.join(", ") + " to be deployed.")
-            res = salt.runSaltProcessStep(master, target, 'ironicng.list_nodes', ["profile=${auth_profile}"], null, false)
+            res = salt.runSaltProcessStep(pepperEnv, target, 'ironicng.list_nodes', ["profile=${auth_profile}"], null, false)
             for (n in res['return'][0].values()[0]['nodes']){
                 if (n['name'] in node_names) {
                     if (n['provision_state'] == 'active'){
@@ -151,8 +151,8 @@
 
         outputs.put('salt_api', SALT_MASTER_URL)
 
-        // Connect to Salt master
-        saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+        python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+
 
 
         def nodes_to_deploy=[]
@@ -160,10 +160,10 @@
         stage('Trigger deployment on nodes') {
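+            // either apply the ironic.deploy state to all nodes at once, or deploy the selected nodes individually via ironicng.deploy_node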
             if (IRONIC_DEPLOY_PARTITION_PROFILE == '' && IRONIC_DEPLOY_PROFILE == '' && IRONIC_DEPLOY_NODES == 'all'){
                 common.infoMsg("Trigger ironic.deploy")
-                salt.enforceState(saltMaster, RUN_TARGET, ['ironic.deploy'], true)
+                salt.enforceState(pepperEnv, RUN_TARGET, ['ironic.deploy'], true)
             } else {
                 if (IRONIC_DEPLOY_NODES == 'all'){
-                     res = salt.runSaltProcessStep(saltMaster, RUN_TARGET, 'ironicng.list_nodes', ["profile=${IRONIC_AUTHORIZATION_PROFILE}"], null, true)
+                     res = salt.runSaltProcessStep(pepperEnv, RUN_TARGET, 'ironicng.list_nodes', ["profile=${IRONIC_AUTHORIZATION_PROFILE}"], null, true)
                      // We trigger deployment on single salt minion
                      for (n in res['return'][0].values()[0]['nodes']){
                         nodes_to_deploy.add(n['name'])
@@ -180,13 +180,13 @@
 
                 for (n in nodes_to_deploy){
                     common.infoMsg("Trigger deployment of ${n}")
-                  salt.runSaltProcessStep(saltMaster, RUN_TARGET, 'ironicng.deploy_node', ["${n}"] + cmd_params, null, true)
+                  salt.runSaltProcessStep(pepperEnv, RUN_TARGET, 'ironicng.deploy_node', ["${n}"] + cmd_params, null, true)
                 }
             }
         }
 
         stage('Waiting for deployment to finish') {
-            def failed_nodes = waitIronicDeployment(saltMaster, nodes_to_deploy, RUN_TARGET, IRONIC_AUTHORIZATION_PROFILE, IRONIC_DEPLOY_TIMEOUT)
+            def failed_nodes = waitIronicDeployment(pepperEnv, nodes_to_deploy, RUN_TARGET, IRONIC_AUTHORIZATION_PROFILE, IRONIC_DEPLOY_TIMEOUT)
             if (failed_nodes){
                 common.errorMsg("Some nodes: " + failed_nodes.join(", ") + " are failed to deploy")
                 currentBuild.result = 'FAILURE'
diff --git a/lab-pipeline.groovy b/lab-pipeline.groovy
index c605421..06dc48e 100644
--- a/lab-pipeline.groovy
+++ b/lab-pipeline.groovy
@@ -295,7 +295,7 @@
                 def output_file = image.replaceAll('/', '-') + '.output'
 
                 // run image
-                test.runConformanceTests(saltMaster, TEST_K8S_API_SERVER, image)
+                test.runConformanceTests(saltMaster, 'ctl01*', TEST_K8S_API_SERVER, image)
 
                 // collect output
                 sh "mkdir -p ${artifacts_dir}"
@@ -308,13 +308,13 @@
             }
 
             stage('Run k8s conformance e2e tests') {
-                //test.runConformanceTests(saltMaster, TEST_K8S_API_SERVER, TEST_K8S_CONFORMANCE_IMAGE)
+                //test.runConformanceTests(saltMaster, 'ctl01*', TEST_K8S_API_SERVER, TEST_K8S_CONFORMANCE_IMAGE)
 
                 def image = TEST_K8S_CONFORMANCE_IMAGE
                 def output_file = image.replaceAll('/', '-') + '.output'
 
                 // run image
-                test.runConformanceTests(saltMaster, TEST_K8S_API_SERVER, image)
+                test.runConformanceTests(saltMaster, 'ctl01*', TEST_K8S_API_SERVER, image)
 
                 // collect output
                 sh "mkdir -p ${artifacts_dir}"
diff --git a/mk-k8s-simple-deploy-pipeline.groovy b/mk-k8s-simple-deploy-pipeline.groovy
index b86e6da..e88d482 100644
--- a/mk-k8s-simple-deploy-pipeline.groovy
+++ b/mk-k8s-simple-deploy-pipeline.groovy
@@ -29,13 +29,15 @@
 salt = new com.mirantis.mk.Salt()
 orchestrate = new com.mirantis.mk.Orchestrate()
 test = new com.mirantis.mk.Test()
+def python = new com.mirantis.mk.Python()
+
+def pepperEnv = "pepperEnv"
 artifacts_dir = "_artifacts"
 
 node {
 
     // connection objects
     def openstackCloud
-    def saltMaster
 
     // value defaults
     def openstackVersion = OPENSTACK_API_CLIENT ? OPENSTACK_API_CLIENT : 'liberty'
@@ -70,34 +72,34 @@
     stage("Connect to Salt master") {
         saltMasterHost = openstack.getHeatStackOutputParam(openstackCloud, HEAT_STACK_NAME, 'salt_master_ip', openstackEnv)
         saltMasterUrl = "http://${saltMasterHost}:8088"
-        saltMaster = salt.connection(saltMasterUrl, SALT_MASTER_CREDENTIALS)
+        python.setupPepperVirtualenv(pepperEnv, saltMasterUrl, SALT_MASTER_CREDENTIALS)
     }
 
     stage("Install core infra") {
-        orchestrate.installFoundationInfra(saltMaster)
-        orchestrate.validateFoundationInfra(saltMaster)
+        orchestrate.installFoundationInfra(pepperEnv)
+        orchestrate.validateFoundationInfra(pepperEnv)
     }
 
     stage("Install Kubernetes infra") {
-        orchestrate.installOpenstackMcpInfra(saltMaster)
+        orchestrate.installOpenstackMcpInfra(pepperEnv)
     }
 
     stage("Install Kubernetes control") {
-        orchestrate.installOpenstackMcpControl(saltMaster)
+        orchestrate.installOpenstackMcpControl(pepperEnv)
     }
 
     if (RUN_TESTS == "1") {
         sleep(30)
         stage('Run k8s bootstrap tests') {
-            test.runConformanceTests(saltMaster, K8S_API_SERVER, 'tomkukral/k8s-scripts')
+            test.runConformanceTests(pepperEnv, 'ctl01*', K8S_API_SERVER, 'tomkukral/k8s-scripts')
         }
 
         stage("Run k8s conformance e2e tests") {
-            test.runConformanceTests(saltMaster, K8S_API_SERVER, CONFORMANCE_IMAGE)
+            test.runConformanceTests(pepperEnv, 'ctl01*', K8S_API_SERVER, CONFORMANCE_IMAGE)
         }
 
         stage("Copy k8s e2e test output to config node ") {
-            test.copyTestsOutput(saltMaster,CONFORMANCE_IMAGE)
+            test.copyTestsOutput(pepperEnv,CONFORMANCE_IMAGE)
         }
 
         stage("Copy k8s e2e test output to host ") {
@@ -105,7 +107,7 @@
                 mkdir ${env.WORKSPACE}/${artifacts_dir}
                '''
             try {
-                test.catTestsOutput(saltMaster,CONFORMANCE_IMAGE) >> ${env.WORKSPACE}/${artifacts_dir}/$CONFORMANCE_IMAGE
+                test.catTestsOutput(pepperEnv,CONFORMANCE_IMAGE) >> ${env.WORKSPACE}/${artifacts_dir}/$CONFORMANCE_IMAGE
             } catch (InterruptedException x) {
                 echo "The job was aborted"
             } finally {
diff --git a/opencontrail-upgrade.groovy b/opencontrail-upgrade.groovy
index 00b0e7f..311cfef 100644
--- a/opencontrail-upgrade.groovy
+++ b/opencontrail-upgrade.groovy
@@ -17,8 +17,9 @@
 
 def common = new com.mirantis.mk.Common()
 def salt = new com.mirantis.mk.Salt()
+def python = new com.mirantis.mk.Python()
 
-def saltMaster
+def pepperEnv = "pepperEnv"
 def targetLiveSubset
 def targetLiveAll
 def minions
@@ -34,40 +35,40 @@
 def CMP_PKGS = 'contrail-lib contrail-nodemgr contrail-utils contrail-vrouter-agent contrail-vrouter-utils python-contrail python-contrail-vrouter-api python-opencontrail-vrouter-netns contrail-vrouter-dkms'
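+// restart sequence for the vrouter kernel module: stop the vrouter agent, bring vhost0 down, rmmod/modprobe vrouter, bring vhost0 up, start the agent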
 def KERNEL_MODULE_RELOAD = 'service supervisor-vrouter stop;ifdown vhost0;rmmod vrouter;modprobe vrouter;ifup vhost0;service supervisor-vrouter start;'
 
-def void runCommonCommands(target, command, args, check, salt, saltMaster, common) {
+def void runCommonCommands(target, command, args, check, salt, pepperEnv, common) {
 
-    out = salt.runSaltCommand(saltMaster, 'local', ['expression': target, 'type': 'compound'], command, null, args, null)
+    out = salt.runSaltCommand(pepperEnv, 'local', ['expression': target, 'type': 'compound'], command, null, args, null)
     salt.printSaltCommandResult(out)
     // wait until $check is in correct state
     if ( check == "nodetool status" ) {
-        salt.commandStatus(saltMaster, target, check, 'Status=Up')
+        salt.commandStatus(pepperEnv, target, check, 'Status=Up')
     } else if ( check == "contrail-status" ) {
-        salt.commandStatus(saltMaster, target, "${check} | grep -v == | grep -v \'disabled on boot\' | grep -v nodemgr | grep -v active | grep -v backup", null, false)
+        salt.commandStatus(pepperEnv, target, "${check} | grep -v == | grep -v \'disabled on boot\' | grep -v nodemgr | grep -v active | grep -v backup", null, false)
     }
 
-    //out = salt.runSaltCommand(saltMaster, 'local', ['expression': target, 'type': 'compound'], command, null, check, null)
+    //out = salt.runSaltCommand(pepperEnv, 'local', ['expression': target, 'type': 'compound'], command, null, check, null)
     //salt.printSaltCommandResult(out)
     //input message: "Please check the output of \'${check}\' and continue if it is correct."
 }
 
 node() {
 
-    stage('Connect to Salt API') {
-        saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+    stage('Setup virtualenv for Pepper') {
+        python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
     }
 
     if (STAGE_CONTROLLERS_UPGRADE.toBoolean() == true && !errorOccured) {
 
         stage('Opencontrail controllers upgrade') {
 
-            oc_component_repo = salt.runSaltProcessStep(saltMaster, 'I@opencontrail:control and *01*', 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
+            oc_component_repo = salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control and *01*', 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
 
             oc_component_repo = oc_component_repo['return'][0].values()[0]
 
             try {
-                salt.runSaltProcessStep(saltMaster, 'I@opencontrail:control', 'cmd.shell', ["rm ${oc_component_repo}"], null, true)
-                salt.runSaltProcessStep(saltMaster, 'I@opencontrail:control', 'saltutil.refresh_pillar', [], null, true)
-                salt.enforceState(saltMaster, 'I@opencontrail:control', 'linux.system.repo')
+                salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'cmd.shell', ["rm ${oc_component_repo}"], null, true)
+                salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'saltutil.refresh_pillar', [], null, true)
+                salt.enforceState(pepperEnv, 'I@opencontrail:control', 'linux.system.repo')
             } catch (Exception er) {
                 errorOccured = true
                 common.errorMsg("Opencontrail component on I@opencontrail:control probably failed to be replaced. Please check it in ${oc_component_repo} before continuing.")
@@ -75,14 +76,14 @@
             }
 
             try {
-                salt.cmdRun(saltMaster, 'I@opencontrail:control', "su root -c '/usr/local/bin/zookeeper-backup-runner.sh'")
+                salt.cmdRun(pepperEnv, 'I@opencontrail:control', "su root -c '/usr/local/bin/zookeeper-backup-runner.sh'")
             } catch (Exception er) {
                 common.errorMsg('Zookeeper failed to backup. Please fix it before continuing.')
                 return
             }
 
             try {
-                salt.cmdRun(saltMaster, 'I@cassandra:backup:client', "su root -c '/usr/local/bin/cassandra-backup-runner-call.sh'")
+                salt.cmdRun(pepperEnv, 'I@cassandra:backup:client', "su root -c '/usr/local/bin/cassandra-backup-runner-call.sh'")
             } catch (Exception er) {
                 common.errorMsg('Cassandra failed to backup. Please fix it before continuing.')
                 return
@@ -92,29 +93,29 @@
             check = 'nodetool status'
 
             // ntw01
-            runCommonCommands('I@opencontrail:control and *01*', command, args, check, salt, saltMaster, common)
+            runCommonCommands('I@opencontrail:control and *01*', command, args, check, salt, pepperEnv, common)
             // ntw02
-            runCommonCommands('I@opencontrail:control and *02*', command, args, check, salt, saltMaster, common)
+            runCommonCommands('I@opencontrail:control and *02*', command, args, check, salt, pepperEnv, common)
             // ntw03
-            runCommonCommands('I@opencontrail:control and *03*', command, args, check, salt, saltMaster, common)
+            runCommonCommands('I@opencontrail:control and *03*', command, args, check, salt, pepperEnv, common)
 
             args = "apt install -o Dpkg::Options::=\"--force-confold\" ${CONTROL_PKGS} -y --force-yes;"
             check = 'contrail-status'
 
             // ntw01
-            runCommonCommands('I@opencontrail:control and *01*', command, args, check, salt, saltMaster, common)
+            runCommonCommands('I@opencontrail:control and *01*', command, args, check, salt, pepperEnv, common)
             // ntw02
-            runCommonCommands('I@opencontrail:control and *02*', command, args, check, salt, saltMaster, common)
+            runCommonCommands('I@opencontrail:control and *02*', command, args, check, salt, pepperEnv, common)
             // ntw03
-            runCommonCommands('I@opencontrail:control and *03*', command, args, check, salt, saltMaster, common)
+            runCommonCommands('I@opencontrail:control and *03*', command, args, check, salt, pepperEnv, common)
 
             try {
-                salt.enforceState(saltMaster, 'I@opencontrail:control', 'opencontrail')
+                salt.enforceState(pepperEnv, 'I@opencontrail:control', 'opencontrail')
             } catch (Exception er) {
                 common.errorMsg('Opencontrail state was executed on I@opencontrail:control and failed. Please fix it manually.')
             }
 
-            out = salt.runSaltCommand(saltMaster, 'local', ['expression': 'I@opencontrail:control', 'type': 'compound'], command, null, check, null)
+            out = salt.runSaltCommand(pepperEnv, 'local', ['expression': 'I@opencontrail:control', 'type': 'compound'], command, null, check, null)
             salt.printSaltCommandResult(out)
 
             common.warningMsg('Please check \'show bgp summary\' on your bgp router to verify that all bgp peers are in a healthy state.')
@@ -129,14 +130,14 @@
 
         stage('Opencontrail analytics upgrade') {
 
-            oc_component_repo = salt.runSaltProcessStep(saltMaster, 'I@opencontrail:collector and *01*', 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
+            oc_component_repo = salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector and *01*', 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
 
             oc_component_repo = oc_component_repo['return'][0].values()[0]
 
             try {
-                salt.runSaltProcessStep(saltMaster, 'I@opencontrail:collector', 'cmd.shell', ["rm ${oc_component_repo}"], null, true)
-                salt.runSaltProcessStep(saltMaster, 'I@opencontrail:collector', 'saltutil.refresh_pillar', [], null, true)
-                salt.enforceState(saltMaster, 'I@opencontrail:collector', 'linux.system.repo')
+                salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'cmd.shell', ["rm ${oc_component_repo}"], null, true)
+                salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'saltutil.refresh_pillar', [], null, true)
+                salt.enforceState(pepperEnv, 'I@opencontrail:collector', 'linux.system.repo')
             } catch (Exception er) {
                 errorOccured = true
                 common.errorMsg("Opencontrail component on I@opencontrail:collector probably failed to be replaced. Please check it in ${oc_component_repo} before continuing.")
@@ -147,29 +148,29 @@
             check = 'nodetool status'
 
             // nal01
-            runCommonCommands('I@opencontrail:collector and *01*', command, args, check, salt, saltMaster, common)
+            runCommonCommands('I@opencontrail:collector and *01*', command, args, check, salt, pepperEnv, common)
             // nal02
-            runCommonCommands('I@opencontrail:collector and *02*', command, args, check, salt, saltMaster, common)
+            runCommonCommands('I@opencontrail:collector and *02*', command, args, check, salt, pepperEnv, common)
             // nal03
-            runCommonCommands('I@opencontrail:collector and *03*', command, args, check, salt, saltMaster, common)
+            runCommonCommands('I@opencontrail:collector and *03*', command, args, check, salt, pepperEnv, common)
 
             args = "apt install -o Dpkg::Options::=\"--force-confold\" ${ANALYTIC_PKGS} -y --force-yes;"
             check = 'contrail-status'
 
             // nal01
-            runCommonCommands('I@opencontrail:collector and *01*', command, args, check, salt, saltMaster, common)
+            runCommonCommands('I@opencontrail:collector and *01*', command, args, check, salt, pepperEnv, common)
             // nal02
-            runCommonCommands('I@opencontrail:collector and *02*', command, args, check, salt, saltMaster, common)
+            runCommonCommands('I@opencontrail:collector and *02*', command, args, check, salt, pepperEnv, common)
             // nal03
-            runCommonCommands('I@opencontrail:collector and *03*', command, args, check, salt, saltMaster, common)
+            runCommonCommands('I@opencontrail:collector and *03*', command, args, check, salt, pepperEnv, common)
 
             try {
-                salt.enforceState(saltMaster, 'I@opencontrail:collector', 'opencontrail')
+                salt.enforceState(pepperEnv, 'I@opencontrail:collector', 'opencontrail')
             } catch (Exception er) {
                 common.errorMsg('Opencontrail state was executed on I@opencontrail:collector and failed. Please fix it manually.')
             }
 
-            out = salt.runSaltCommand(saltMaster, 'local', ['expression': 'I@opencontrail:collector', 'type': 'compound'], command, null, check, null)
+            out = salt.runSaltCommand(pepperEnv, 'local', ['expression': 'I@opencontrail:collector', 'type': 'compound'], command, null, check, null)
             salt.printSaltCommandResult(out)
         }
     }
@@ -179,7 +180,7 @@
         try {
 
             stage('List targeted compute servers') {
-                minions = salt.getMinions(saltMaster, COMPUTE_TARGET_SERVERS)
+                minions = salt.getMinions(pepperEnv, COMPUTE_TARGET_SERVERS)
 
                 if (minions.isEmpty()) {
                     throw new Exception("No minion was targeted")
@@ -199,13 +200,13 @@
 
             stage("Opencontrail compute upgrade on sample nodes") {
 
-                oc_component_repo = salt.runSaltProcessStep(saltMaster, targetLiveSubset, 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
+                oc_component_repo = salt.runSaltProcessStep(pepperEnv, targetLiveSubset, 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
                 oc_component_repo = oc_component_repo['return'][0].values()[0]
 
                 try {
-                    salt.runSaltProcessStep(saltMaster, targetLiveSubset, 'cmd.shell', ["rm ${oc_component_repo}"], null, true)
-                    salt.runSaltProcessStep(saltMaster, targetLiveSubset, 'saltutil.refresh_pillar', [], null, true)
-                    salt.enforceState(saltMaster, targetLiveSubset, 'linux.system.repo')
+                    salt.runSaltProcessStep(pepperEnv, targetLiveSubset, 'cmd.shell', ["rm ${oc_component_repo}"], null, true)
+                    salt.runSaltProcessStep(pepperEnv, targetLiveSubset, 'saltutil.refresh_pillar', [], null, true)
+                    salt.enforceState(pepperEnv, targetLiveSubset, 'linux.system.repo')
                 } catch (Exception er) {
                     errorOccured = true
                     common.errorMsg("Opencontrail component on ${targetLiveSubset} probably failed to be replaced. Please check it in ${oc_component_repo} before continuing.")
@@ -215,21 +216,21 @@
                 args = "export DEBIAN_FRONTEND=noninteractive; apt install -o Dpkg::Options::=\"--force-confold\" -o Dpkg::Options::=\"--force-confdef\" ${CMP_PKGS}  -y;"
                 check = 'contrail-status'
 
-                out = salt.runSaltCommand(saltMaster, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, args, null)
+                out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, args, null)
                 salt.printSaltCommandResult(out)
 
                 try {
-                    salt.enforceState(saltMaster, targetLiveSubset, 'opencontrail')
+                    salt.enforceState(pepperEnv, targetLiveSubset, 'opencontrail')
                 } catch (Exception er) {
                     common.errorMsg("Opencontrail state was executed on ${targetLiveSubset} and failed please fix it manually.")
                 }
 
-                salt.runSaltProcessStep(saltMaster, targetLiveSubset, 'cmd.shell', ["${KERNEL_MODULE_RELOAD}"], null, true)
+                salt.runSaltProcessStep(pepperEnv, targetLiveSubset, 'cmd.shell', ["${KERNEL_MODULE_RELOAD}"], null, true)
 
                 //sleep(10)
-                salt.commandStatus(saltMaster, targetLiveSubset, "${check} | grep -v == | grep -v active", null, false)
+                salt.commandStatus(pepperEnv, targetLiveSubset, "${check} | grep -v == | grep -v active", null, false)
 
-                out = salt.runSaltCommand(saltMaster, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, check, null)
+                out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, check, null)
                 salt.printSaltCommandResult(out)
             }
 
@@ -238,13 +239,13 @@
             }
             stage("Opencontrail compute upgrade on all targeted nodes") {
 
-                oc_component_repo = salt.runSaltProcessStep(saltMaster, targetLiveAll, 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
+                oc_component_repo = salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
                 oc_component_repo = oc_component_repo['return'][0].values()[0]
 
                 try {
-                    salt.runSaltProcessStep(saltMaster, targetLiveAll, 'cmd.shell', ["rm ${oc_component_repo}"], null, true)
-                    salt.runSaltProcessStep(saltMaster, targetLiveAll, 'saltutil.refresh_pillar', [], null, true)
-                    salt.enforceState(saltMaster, targetLiveAll, 'linux.system.repo')
+                    salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'cmd.shell', ["rm ${oc_component_repo}"], null, true)
+                    salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'saltutil.refresh_pillar', [], null, true)
+                    salt.enforceState(pepperEnv, targetLiveAll, 'linux.system.repo')
                 } catch (Exception er) {
                     common.errorMsg("Opencontrail component on ${targetLiveAll} probably failed to be replaced. Please check it in ${oc_component_repo} before continuing.")
                     return
@@ -253,20 +254,20 @@
                 args = "export DEBIAN_FRONTEND=noninteractive; apt install -o Dpkg::Options::=\"--force-confold\" -o Dpkg::Options::=\"--force-confdef\" ${CMP_PKGS}  -y;"
                 check = 'contrail-status'
 
-                out = salt.runSaltCommand(saltMaster, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, args, null)
+                out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, args, null)
                 salt.printSaltCommandResult(out)
 
                 try {
-                    salt.enforceState(saltMaster, targetLiveAll, 'opencontrail')
+                    salt.enforceState(pepperEnv, targetLiveAll, 'opencontrail')
                 } catch (Exception er) {
                     common.errorMsg("Opencontrail state was executed on ${targetLiveAll} and failed please fix it manually.")
                 }
 
-                salt.runSaltProcessStep(saltMaster, targetLiveAll, 'cmd.shell', ["${KERNEL_MODULE_RELOAD}"], null, true)
+                salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'cmd.shell', ["${KERNEL_MODULE_RELOAD}"], null, true)
                 //sleep(10)
-                salt.commandStatus(saltMaster, targetLiveAll, "${check} | grep -v == | grep -v active", null, false)
+                salt.commandStatus(pepperEnv, targetLiveAll, "${check} | grep -v == | grep -v active", null, false)
 
-                out = salt.runSaltCommand(saltMaster, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, check, null)
+                out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, check, null)
                 salt.printSaltCommandResult(out)
             }
 
@@ -287,13 +288,13 @@
 
        stage('Opencontrail controllers rollback') {
 
-            oc_component_repo = salt.runSaltProcessStep(saltMaster, 'I@opencontrail:control and *01*', 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
+            oc_component_repo = salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control and *01*', 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
             oc_component_repo = oc_component_repo['return'][0].values()[0]
 
             try {
-                salt.runSaltProcessStep(saltMaster, 'I@opencontrail:control', 'cmd.shell', ["rm ${oc_component_repo}"], null, true)
-                salt.runSaltProcessStep(saltMaster, 'I@opencontrail:control', 'saltutil.refresh_pillar', [], null, true)
-                salt.enforceState(saltMaster, 'I@opencontrail:control', 'linux.system.repo')
+                salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'cmd.shell', ["rm ${oc_component_repo}"], null, true)
+                salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'saltutil.refresh_pillar', [], null, true)
+                salt.enforceState(pepperEnv, 'I@opencontrail:control', 'linux.system.repo')
             } catch (Exception er) {
                 errorOccured = true
                 common.errorMsg("Opencontrail component on I@opencontrail:control probably failed to be replaced. Please check it in ${oc_component_repo} before continuing.")
@@ -304,29 +305,29 @@
             check = 'nodetool status'
 
             // ntw01
-            runCommonCommands('I@opencontrail:control and *01*', command, args, check, salt, saltMaster, common)
+            runCommonCommands('I@opencontrail:control and *01*', command, args, check, salt, pepperEnv, common)
             // ntw02
-            runCommonCommands('I@opencontrail:control and *02*', command, args, check, salt, saltMaster, common)
+            runCommonCommands('I@opencontrail:control and *02*', command, args, check, salt, pepperEnv, common)
             // ntw03
-            runCommonCommands('I@opencontrail:control and *03*', command, args, check, salt, saltMaster, common)
+            runCommonCommands('I@opencontrail:control and *03*', command, args, check, salt, pepperEnv, common)
 
             args = "apt install -o Dpkg::Options::=\"--force-confold\" ${CONTROL_PKGS} -y --force-yes;"
             check = 'contrail-status'
 
             // ntw01
-            runCommonCommands('I@opencontrail:control and *01*', command, args, check, salt, saltMaster, common)
+            runCommonCommands('I@opencontrail:control and *01*', command, args, check, salt, pepperEnv, common)
             // ntw02
-            runCommonCommands('I@opencontrail:control and *02*', command, args, check, salt, saltMaster, common)
+            runCommonCommands('I@opencontrail:control and *02*', command, args, check, salt, pepperEnv, common)
             // ntw03
-            runCommonCommands('I@opencontrail:control and *03*', command, args, check, salt, saltMaster, common)
+            runCommonCommands('I@opencontrail:control and *03*', command, args, check, salt, pepperEnv, common)
 
             try {
-                salt.enforceState(saltMaster, 'I@opencontrail:control', 'opencontrail')
+                salt.enforceState(pepperEnv, 'I@opencontrail:control', 'opencontrail')
             } catch (Exception er) {
                 common.errorMsg('Opencontrail state was executed on I@opencontrail:control and failed. Please fix it manually.')
             }
 
-            out = salt.runSaltCommand(saltMaster, 'local', ['expression': 'I@opencontrail:control', 'type': 'compound'], command, null, check, null)
+            out = salt.runSaltCommand(pepperEnv, 'local', ['expression': 'I@opencontrail:control', 'type': 'compound'], command, null, check, null)
             salt.printSaltCommandResult(out)
 
             common.warningMsg('Please check \'show bgp summary\' on your bgp router if all bgp peers are in healthy state.')
@@ -341,13 +342,13 @@
 
         stage('Opencontrail analytics rollback') {
 
-            oc_component_repo = salt.runSaltProcessStep(saltMaster, 'I@opencontrail:collector and *01*', 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
+            oc_component_repo = salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector and *01*', 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
             oc_component_repo = oc_component_repo['return'][0].values()[0]
 
             try {
-                salt.runSaltProcessStep(saltMaster, 'I@opencontrail:collector', 'cmd.shell', ["rm ${oc_component_repo}"], null, true)
-                salt.runSaltProcessStep(saltMaster, 'I@opencontrail:collector', 'saltutil.refresh_pillar', [], null, true)
-                salt.enforceState(saltMaster, 'I@opencontrail:collector', 'linux.system.repo')
+                salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'cmd.shell', ["rm ${oc_component_repo}"], null, true)
+                salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'saltutil.refresh_pillar', [], null, true)
+                salt.enforceState(pepperEnv, 'I@opencontrail:collector', 'linux.system.repo')
             } catch (Exception er) {
                 errorOccured = true
                 common.errorMsg("Opencontrail component on I@opencontrail:collector probably failed to be replaced. Please check it in ${oc_component_repo} before continuing.")
@@ -358,29 +359,29 @@
             check = 'nodetool status'
 
             // nal01
-            runCommonCommands('I@opencontrail:collector and *01*', command, args, check, salt, saltMaster, common)
+            runCommonCommands('I@opencontrail:collector and *01*', command, args, check, salt, pepperEnv, common)
             // nal02
-            runCommonCommands('I@opencontrail:collector and *02*', command, args, check, salt, saltMaster, common)
+            runCommonCommands('I@opencontrail:collector and *02*', command, args, check, salt, pepperEnv, common)
             // nal03
-            runCommonCommands('I@opencontrail:collector and *03*', command, args, check, salt, saltMaster, common)
+            runCommonCommands('I@opencontrail:collector and *03*', command, args, check, salt, pepperEnv, common)
 
             args = "apt install -o Dpkg::Options::=\"--force-confold\" ${ANALYTIC_PKGS} -y --force-yes;"
             check = 'contrail-status'
 
             // nal01
-            runCommonCommands('I@opencontrail:collector and *01*', command, args, check, salt, saltMaster, common)
+            runCommonCommands('I@opencontrail:collector and *01*', command, args, check, salt, pepperEnv, common)
             // nal02
-            runCommonCommands('I@opencontrail:collector and *02*', command, args, check, salt, saltMaster, common)
+            runCommonCommands('I@opencontrail:collector and *02*', command, args, check, salt, pepperEnv, common)
             // nal03
-            runCommonCommands('I@opencontrail:collector and *03*', command, args, check, salt, saltMaster, common)
+            runCommonCommands('I@opencontrail:collector and *03*', command, args, check, salt, pepperEnv, common)
 
             try {
-                salt.enforceState(saltMaster, 'I@opencontrail:collector', 'opencontrail')
+                salt.enforceState(pepperEnv, 'I@opencontrail:collector', 'opencontrail')
             } catch (Exception er) {
                 common.errorMsg('Opencontrail state was executed on I@opencontrail:collector and failed. Please fix it manually.')
             }
 
-            out = salt.runSaltCommand(saltMaster, 'local', ['expression': 'I@opencontrail:collector', 'type': 'compound'], command, null, check, null)
+            out = salt.runSaltCommand(pepperEnv, 'local', ['expression': 'I@opencontrail:collector', 'type': 'compound'], command, null, check, null)
             salt.printSaltCommandResult(out)
         }
     }
@@ -390,7 +391,7 @@
         try {
 
             stage('List targeted compute servers') {
-                minions = salt.getMinions(saltMaster, COMPUTE_TARGET_SERVERS)
+                minions = salt.getMinions(pepperEnv, COMPUTE_TARGET_SERVERS)
 
                 if (minions.isEmpty()) {
                     throw new Exception("No minion was targeted")
@@ -410,13 +411,13 @@
 
             stage("Opencontrail compute rollback on sample nodes") {
 
-                oc_component_repo = salt.runSaltProcessStep(saltMaster, targetLiveSubset, 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
+                oc_component_repo = salt.runSaltProcessStep(pepperEnv, targetLiveSubset, 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
                 oc_component_repo = oc_component_repo['return'][0].values()[0]
 
                 try {
-                    salt.runSaltProcessStep(saltMaster, targetLiveSubset, 'cmd.shell', ["rm ${oc_component_repo}"], null, true)
-                    salt.runSaltProcessStep(saltMaster, targetLiveSubset, 'saltutil.refresh_pillar', [], null, true)
-                    salt.enforceState(saltMaster, targetLiveSubset, 'linux.system.repo')
+                    salt.runSaltProcessStep(pepperEnv, targetLiveSubset, 'cmd.shell', ["rm ${oc_component_repo}"], null, true)
+                    salt.runSaltProcessStep(pepperEnv, targetLiveSubset, 'saltutil.refresh_pillar', [], null, true)
+                    salt.enforceState(pepperEnv, targetLiveSubset, 'linux.system.repo')
                 } catch (Exception er) {
                     errorOccured = true
                     common.errorMsg("Opencontrail component on ${targetLiveSubset} probably failed to be replaced. Please check it in ${oc_component_repo} before continuing.")
@@ -426,20 +427,20 @@
                 args = "export DEBIAN_FRONTEND=noninteractive; apt install --allow-downgrades -o Dpkg::Options::=\"--force-confold\" -o Dpkg::Options::=\"--force-confdef\" ${CMP_PKGS}  -y;"
                 check = 'contrail-status'
 
-                out = salt.runSaltCommand(saltMaster, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, args, null)
+                out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, args, null)
                 salt.printSaltCommandResult(out)
 
                 try {
-                    salt.enforceState(saltMaster, targetLiveSubset, 'opencontrail')
+                    salt.enforceState(pepperEnv, targetLiveSubset, 'opencontrail')
                 } catch (Exception er) {
                     common.errorMsg("Opencontrail state was executed on ${targetLiveSubset} and failed please fix it manually.")
                 }
 
-                salt.runSaltProcessStep(saltMaster, targetLiveSubset, 'cmd.shell', ["${KERNEL_MODULE_RELOAD}"], null, true)
+                salt.runSaltProcessStep(pepperEnv, targetLiveSubset, 'cmd.shell', ["${KERNEL_MODULE_RELOAD}"], null, true)
                 //sleep(10)
-                salt.commandStatus(saltMaster, targetLiveSubset, "${check} | grep -v == | grep -v active", null, false)
+                salt.commandStatus(pepperEnv, targetLiveSubset, "${check} | grep -v == | grep -v active", null, false)
 
-                out = salt.runSaltCommand(saltMaster, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, check, null)
+                out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, check, null)
                 salt.printSaltCommandResult(out)
             }
 
@@ -449,13 +450,13 @@
 
             stage("Opencontrail compute upgrade on all targeted nodes") {
 
-                oc_component_repo = salt.runSaltProcessStep(saltMaster, targetLiveAll, 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
+                oc_component_repo = salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
                 oc_component_repo = oc_component_repo['return'][0].values()[0]
 
                 try {
-                    salt.runSaltProcessStep(saltMaster, targetLiveAll, 'cmd.shell', ["rm ${oc_component_repo}"], null, true)
-                    salt.runSaltProcessStep(saltMaster, targetLiveAll, 'saltutil.refresh_pillar', [], null, true)
-                    salt.enforceState(saltMaster, targetLiveAll, 'linux.system.repo')
+                    salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'cmd.shell', ["rm ${oc_component_repo}"], null, true)
+                    salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'saltutil.refresh_pillar', [], null, true)
+                    salt.enforceState(pepperEnv, targetLiveAll, 'linux.system.repo')
                 } catch (Exception er) {
                     common.errorMsg("Opencontrail component on ${targetLiveAll} probably failed to be replaced. Please check it in ${oc_component_repo} before continuing.")
                     return
@@ -464,21 +465,21 @@
                 args = "export DEBIAN_FRONTEND=noninteractive; apt install --allow-downgrades -o Dpkg::Options::=\"--force-confold\" -o Dpkg::Options::=\"--force-confdef\" ${CMP_PKGS} -y;"
                 check = 'contrail-status'
 
-                out = salt.runSaltCommand(saltMaster, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, args, null)
+                out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, args, null)
                 salt.printSaltCommandResult(out)
 
                 try {
-                    salt.enforceState(saltMaster, targetLiveAll, 'opencontrail')
+                    salt.enforceState(pepperEnv, targetLiveAll, 'opencontrail')
                 } catch (Exception er) {
                     common.errorMsg("Opencontrail state was executed on ${targetLiveAll} and failed please fix it manually.")
                 }
 
-                salt.runSaltProcessStep(saltMaster, targetLiveAll, 'cmd.shell', ["${KERNEL_MODULE_RELOAD}"], null, true)
+                salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'cmd.shell', ["${KERNEL_MODULE_RELOAD}"], null, true)
 
                 //sleep(10)
-                salt.commandStatus(saltMaster, targetLiveAll, "${check} | grep -v == | grep -v active", null, false)
+                salt.commandStatus(pepperEnv, targetLiveAll, "${check} | grep -v == | grep -v active", null, false)
 
-                out = salt.runSaltCommand(saltMaster, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, check, null)
+                out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, check, null)
                 salt.printSaltCommandResult(out)
             }
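
The rollback and upgrade steps above funnel every controller, analytics and compute node through the same runCommonCommands(target, command, args, check, salt, pepperEnv, common) helper. Its body is not part of this diff; the following is only a minimal sketch of what a helper with that signature would plausibly do, reconstructed from the surrounding runSaltCommand/commandStatus calls, not the repository's actual implementation.

def runCommonCommands(target, command, args, check, salt, pepperEnv, common) {
    // Run the package install/rollback command on the compound target and show its output.
    def out = salt.runSaltCommand(pepperEnv, 'local', ['expression': target, 'type': 'compound'], command, null, args, null)
    salt.printSaltCommandResult(out)
    // Poll the verification command (contrail-status / nodetool status), filtering out the
    // header and 'active' lines, mirroring the commandStatus checks used elsewhere in this pipeline.
    salt.commandStatus(pepperEnv, target, "${check} | grep -v == | grep -v active", null, false)
    // Print the final verification output for the operator.
    out = salt.runSaltCommand(pepperEnv, 'local', ['expression': target, 'type': 'compound'], command, null, check, null)
    salt.printSaltCommandResult(out)
}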
 
diff --git a/openstack-compute-install.groovy b/openstack-compute-install.groovy
index a43c2b9..82a3e40 100644
--- a/openstack-compute-install.groovy
+++ b/openstack-compute-install.groovy
@@ -10,8 +10,9 @@
 
 def common = new com.mirantis.mk.Common()
 def salt = new com.mirantis.mk.Salt()
+def python = new com.mirantis.mk.Python()
 
-def saltMaster
+def pepperEnv = "pepperEnv"
 def minions
 def result
 def command
@@ -21,12 +22,12 @@
 node() {
     try {
 
-        stage('Connect to Salt master') {
-            saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+        stage('Setup virtualenv for Pepper') {
+            python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
         }
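
The only functional change in this file is the connection handling shown in the hunk above: the salt.connection() object is replaced by a named Pepper virtualenv, and that name is then passed to every salt.* helper. Condensed, the before/after looks like this (sketch only, using the same identifiers as above):

// Before: an HTTP connection object to the Salt API.
// def saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
// salt.getMinions(saltMaster, TARGET_SERVERS)

// After: a virtualenv name with Pepper configured inside it.
def python = new com.mirantis.mk.Python()
def pepperEnv = "pepperEnv"
python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
salt.getMinions(pepperEnv, TARGET_SERVERS)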
 
         stage('List target servers') {
-            minions = salt.getMinions(saltMaster, TARGET_SERVERS)
+            minions = salt.getMinions(pepperEnv, TARGET_SERVERS)
 
             if (minions.isEmpty()) {
                 throw new Exception("No minion was targeted")
@@ -49,49 +50,49 @@
         }
 
         stage("Setup repositories") {
-            salt.enforceState(saltMaster, targetLiveAll, 'linux.system.repo', true)
+            salt.enforceState(pepperEnv, targetLiveAll, 'linux.system.repo', true)
         }
 
         stage("Upgrade packages") {
-            salt.runSaltProcessStep(saltMaster, targetLiveAll, 'pkg.upgrade', [], null, true)
+            salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'pkg.upgrade', [], null, true)
         }
 
         stage("Setup networking") {
             // Sync all of the modules from the salt master.
-            salt.syncAll(saltMaster, targetLiveAll)
+            salt.syncAll(pepperEnv, targetLiveAll)
 
             // Apply the 'salt' state to install python-psutil for network configuration, excluding the salt-minion restart so the connection to the minion is not lost.
-            salt.runSaltProcessStep(saltMaster, targetLiveAll, 'state.apply',  ['salt', 'exclude=[{\'id\': \'salt_minion_service\'}, {\'id\': \'salt_minion_service_restart\'}, {\'id\': \'salt_minion_sync_all\'}]'], null, true)
+            salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'state.apply',  ['salt', 'exclude=[{\'id\': \'salt_minion_service\'}, {\'id\': \'salt_minion_service_restart\'}, {\'id\': \'salt_minion_sync_all\'}]'], null, true)
 
             // Restart salt-minion to take effect.
-            salt.runSaltProcessStep(saltMaster, targetLiveAll, 'service.restart', ['salt-minion'], null, true, 10)
+            salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'service.restart', ['salt-minion'], null, true, 10)
 
             // Configure networking excluding vhost0 interface.
-            salt.runSaltProcessStep(saltMaster, targetLiveAll, 'state.apply',  ['linux.network', 'exclude=[{\'id\': \'linux_interface_vhost0\'}]'], null, true)
+            salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'state.apply',  ['linux.network', 'exclude=[{\'id\': \'linux_interface_vhost0\'}]'], null, true)
 
             // Kill leftover ifup/ifdown processes that may be stuck from the previous linux.network state.
-            salt.runSaltProcessStep(saltMaster, targetLiveAll, 'ps.pkill', ['ifup'], null, false)
-            salt.runSaltProcessStep(saltMaster, targetLiveAll, 'ps.pkill', ['ifdown'], null, false)
+            salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'ps.pkill', ['ifup'], null, false)
+            salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'ps.pkill', ['ifdown'], null, false)
 
             // Restart networking to bring UP all interfaces.
-            salt.runSaltProcessStep(saltMaster, targetLiveAll, 'service.restart', ['networking'], null, true, 300)
+            salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'service.restart', ['networking'], null, true, 300)
         }
 
         stage("Highstate compute") {
             // Execute highstate without state opencontrail.client.
-            salt.runSaltProcessStep(saltMaster, targetLiveAll, 'state.highstate', ['exclude=opencontrail.client'], null, true)
+            salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'state.highstate', ['exclude=opencontrail.client'], null, true)
 
             // Apply nova state to remove libvirt default bridge virbr0.
-            salt.enforceState(saltMaster, targetLiveAll, 'nova', true)
+            salt.enforceState(pepperEnv, targetLiveAll, 'nova', true)
 
             // Execute highstate.
-            salt.enforceHighstate(saltMaster, targetLiveAll, true)
+            salt.enforceHighstate(pepperEnv, targetLiveAll, true)
 
             // Restart supervisor-vrouter.
-            salt.runSaltProcessStep(saltMaster, targetLiveAll, 'service.restart', ['supervisor-vrouter'], null, true, 300)
+            salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'service.restart', ['supervisor-vrouter'], null, true, 300)
 
             // Apply salt,collectd to update information about current network interfaces.
-            salt.enforceState(saltMaster, targetLiveAll, 'salt,collectd', true)
+            salt.enforceState(pepperEnv, targetLiveAll, 'salt,collectd', true)
         }
 
     } catch (Throwable e) {
diff --git a/openstack-compute-upgrade.groovy b/openstack-compute-upgrade.groovy
index 095697d..46634bc 100644
--- a/openstack-compute-upgrade.groovy
+++ b/openstack-compute-upgrade.groovy
@@ -12,8 +12,9 @@
 
 def common = new com.mirantis.mk.Common()
 def salt = new com.mirantis.mk.Salt()
+def python = new com.mirantis.mk.Python()
 
-def saltMaster
+def pepperEnv = "pepperEnv"
 def targetTestSubset
 def targetLiveSubset
 def targetLiveAll
@@ -27,12 +28,12 @@
 node() {
     try {
 
-        stage('Connect to Salt master') {
-            saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+        stage('Setup virtualenv for Pepper') {
+            python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
         }
 
         stage('List target servers') {
-            minions = salt.getMinions(saltMaster, TARGET_SERVERS)
+            minions = salt.getMinions(pepperEnv, TARGET_SERVERS)
 
             if (minions.isEmpty()) {
                 throw new Exception("No minion was targeted")
@@ -55,14 +56,14 @@
 
 
         stage("Add new repos on test nodes") {
-            salt.enforceState(saltMaster, targetTestSubset, 'linux.system.repo')
+            salt.enforceState(pepperEnv, targetTestSubset, 'linux.system.repo')
         }
 
 
         opencontrail = null
 
         try {
-            opencontrail = salt.cmdRun(saltMaster, targetTestSubsetProbe, "salt-call grains.item roles | grep opencontrail.compute")
+            opencontrail = salt.cmdRun(pepperEnv, targetTestSubsetProbe, "salt-call grains.item roles | grep opencontrail.compute")
             print(opencontrail)
         } catch (Exception er) {
             common.infoMsg("opencontrail is not used")
@@ -70,13 +71,13 @@
 
         if(opencontrail != null) {
             stage('Remove OC component from repos on test nodes') {
-                salt.cmdRun(saltMaster, targetTestSubset, "find /etc/apt/sources.list* -type f -print0 | xargs -0 sed -i -r -e 's/ oc([0-9]*) / /g;s/ oc([0-9]*\$)//g'")
-                salt.runSaltProcessStep(saltMaster, targetTestSubset, 'pkg.refresh_db', [], null, true)
+                salt.cmdRun(pepperEnv, targetTestSubset, "find /etc/apt/sources.list* -type f -print0 | xargs -0 sed -i -r -e 's/ oc([0-9]*) / /g;s/ oc([0-9]*\$)//g'")
+                salt.runSaltProcessStep(pepperEnv, targetTestSubset, 'pkg.refresh_db', [], null, true)
             }
         }
 
         stage("List package upgrades") {
-            salt.runSaltProcessStep(saltMaster, targetTestSubset, 'pkg.list_upgrades', [], null, true)
+            salt.runSaltProcessStep(pepperEnv, targetTestSubset, 'pkg.list_upgrades', [], null, true)
         }
 
         stage('Confirm upgrade on sample nodes') {
@@ -84,13 +85,13 @@
         }
 
         stage("Add new repos on sample nodes") {
-            salt.enforceState(saltMaster, targetLiveSubset, 'linux.system.repo')
+            salt.enforceState(pepperEnv, targetLiveSubset, 'linux.system.repo')
         }
 
         if(opencontrail != null) {
             stage('Remove OC component from repos on sample nodes') {
-                salt.cmdRun(saltMaster, targetLiveSubset, "find /etc/apt/sources.list* -type f -print0 | xargs -0 sed -i -r -e 's/ oc([0-9]*) / /g;s/ oc([0-9]*\$)//g'")
-                salt.runSaltProcessStep(saltMaster, targetLiveSubset, 'pkg.refresh_db', [], null, true)
+                salt.cmdRun(pepperEnv, targetLiveSubset, "find /etc/apt/sources.list* -type f -print0 | xargs -0 sed -i -r -e 's/ oc([0-9]*) / /g;s/ oc([0-9]*\$)//g'")
+                salt.runSaltProcessStep(pepperEnv, targetLiveSubset, 'pkg.refresh_db', [], null, true)
             }
         }
 
@@ -98,7 +99,7 @@
 
         stage('Test upgrade on sample') {
             try {
-                salt.cmdRun(saltMaster, targetLiveSubset, args)
+                salt.cmdRun(pepperEnv, targetLiveSubset, args)
             } catch (Exception er) {
                 print(er)
             }
@@ -112,14 +113,14 @@
         args = 'export DEBIAN_FRONTEND=noninteractive; apt-get -y -q --allow-downgrades -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confold\" dist-upgrade;'
 
         stage('Apply package upgrades on sample') {
-            out = salt.runSaltCommand(saltMaster, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, args, commandKwargs)
+            out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, args, commandKwargs)
             salt.printSaltCommandResult(out)
         }
 
         openvswitch = null
 
         try {
-            openvswitch = salt.cmdRun(saltMaster, targetLiveSubsetProbe, "salt-call grains.item roles | grep neutron.compute")
+            openvswitch = salt.cmdRun(pepperEnv, targetLiveSubsetProbe, "salt-call grains.item roles | grep neutron.compute")
         } catch (Exception er) {
             common.infoMsg("openvswitch is not used")
         }
@@ -128,21 +129,21 @@
             args = "sudo /usr/share/openvswitch/scripts/ovs-ctl start"
 
             stage('Start ovs on sample nodes') {
-                out = salt.runSaltCommand(saltMaster, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, args, commandKwargs)
+                out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, args, commandKwargs)
                 salt.printSaltCommandResult(out)
             }
             stage("Run salt states on sample nodes") {
-                salt.enforceState(saltMaster, targetLiveSubset, ['nova', 'neutron'])
+                salt.enforceState(pepperEnv, targetLiveSubset, ['nova', 'neutron'])
             }
         } else {
             stage("Run salt states on sample nodes") {
-                salt.enforceState(saltMaster, targetLiveSubset, ['nova', 'linux.system.repo'])
+                salt.enforceState(pepperEnv, targetLiveSubset, ['nova', 'linux.system.repo'])
             }
         }
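
The nova/neutron branching in this hunk hinges on probing a single sample node for the neutron.compute role grain. Condensed, the detection and branch look like this (same identifiers as above, shown only to make the control flow explicit):

def openvswitch = null
try {
    // grep exits non-zero when the role is missing; cmdRun is expected to raise in that case,
    // leaving openvswitch null so the else branch is taken.
    openvswitch = salt.cmdRun(pepperEnv, targetLiveSubsetProbe, "salt-call grains.item roles | grep neutron.compute")
} catch (Exception er) {
    common.infoMsg("openvswitch is not used")
}
if (openvswitch != null) {
    salt.enforceState(pepperEnv, targetLiveSubset, ['nova', 'neutron'])
} else {
    salt.enforceState(pepperEnv, targetLiveSubset, ['nova', 'linux.system.repo'])
}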
 
         stage("Run Highstate on sample nodes") {
             try {
-                salt.enforceHighstate(saltMaster, targetLiveSubset)
+                salt.enforceHighstate(pepperEnv, targetLiveSubset)
             } catch (Exception er) {
                 common.errorMsg("Highstate was executed on ${targetLiveSubset} but something failed. Please check it and fix it accordingly.")
             }
@@ -155,20 +156,20 @@
         }
 
         stage("Add new repos on all targeted nodes") {
-            salt.enforceState(saltMaster, targetLiveAll, 'linux.system.repo')
+            salt.enforceState(pepperEnv, targetLiveAll, 'linux.system.repo')
         }
 
         if(opencontrail != null) { 
             stage('Remove OC component from repos on all targeted nodes') {
-                salt.cmdRun(saltMaster, targetLiveAll, "find /etc/apt/sources.list* -type f -print0 | xargs -0 sed -i -r -e 's/ oc([0-9]*) / /g;s/ oc([0-9]*\$)//g'")
-                salt.runSaltProcessStep(saltMaster, targetLiveAll, 'pkg.refresh_db', [], null, true)
+                salt.cmdRun(pepperEnv, targetLiveAll, "find /etc/apt/sources.list* -type f -print0 | xargs -0 sed -i -r -e 's/ oc([0-9]*) / /g;s/ oc([0-9]*\$)//g'")
+                salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'pkg.refresh_db', [], null, true)
             }
         }
 
         args = 'export DEBIAN_FRONTEND=noninteractive; apt-get -y -q --allow-downgrades -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confold\" dist-upgrade;'
 
         stage('Apply package upgrades on all targeted nodes') {
-            out = salt.runSaltCommand(saltMaster, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, args, commandKwargs)
+            out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, args, commandKwargs)
             salt.printSaltCommandResult(out)
         }
 
@@ -176,21 +177,21 @@
             args = "sudo /usr/share/openvswitch/scripts/ovs-ctl start"
 
             stage('Start ovs on all targeted nodes') {
-                out = salt.runSaltCommand(saltMaster, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, args, commandKwargs)
+                out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, args, commandKwargs)
                 salt.printSaltCommandResult(out)
             }
             stage("Run salt states on all targeted nodes") {
-                salt.enforceState(saltMaster, targetLiveAll, ['nova', 'neutron'])
+                salt.enforceState(pepperEnv, targetLiveAll, ['nova', 'neutron'])
             }
         } else {
             stage("Run salt states on all targeted nodes") {
-                salt.enforceState(saltMaster, targetLiveAll, ['nova', 'linux.system.repo'])
+                salt.enforceState(pepperEnv, targetLiveAll, ['nova', 'linux.system.repo'])
             }
         }
 
         stage("Run Highstate on all targeted nodes") {
             try {
-                salt.enforceHighstate(saltMaster, targetLiveAll)
+                salt.enforceHighstate(pepperEnv, targetLiveAll)
             } catch (Exception er) {
                 common.errorMsg("Highstate was executed ${targetLiveAll} but something failed. Please check it and fix it accordingly.")
             }
diff --git a/openstack-control-upgrade.groovy b/openstack-control-upgrade.groovy
index 9680f24..76960e9 100644
--- a/openstack-control-upgrade.groovy
+++ b/openstack-control-upgrade.groovy
@@ -12,14 +12,14 @@
 
 def common = new com.mirantis.mk.Common()
 def salt = new com.mirantis.mk.Salt()
+def python = new com.mirantis.mk.Python()
 
-
-def saltMaster
+def pepperEnv = "pepperEnv"
 
 node() {
 
-    stage('Connect to Salt API') {
-        saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+    stage('Setup virtualenv for Pepper') {
+        python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
     }
 
     if (STAGE_TEST_UPGRADE.toBoolean() == true) {
@@ -27,175 +27,175 @@
 
 
             try {
-                salt.enforceState(saltMaster, 'I@salt:master', 'reclass')
+                salt.enforceState(pepperEnv, 'I@salt:master', 'reclass')
             } catch (Exception e) {
                 common.warningMsg("Some parts of Reclass state failed. The most probable reasons were uncommited changes. We should continue to run")
             }
 
             try {
-                salt.runSaltProcessStep(saltMaster, '*', 'saltutil.refresh_pillar', [], null, true)
+                salt.runSaltProcessStep(pepperEnv, '*', 'saltutil.refresh_pillar', [], null, true)
             } catch (Exception e) {
                 common.warningMsg("No response from some minions. We should continue to run")
             }
 
             try {
-                salt.runSaltProcessStep(saltMaster, '*', 'saltutil.sync_all', [], null, true)
+                salt.runSaltProcessStep(pepperEnv, '*', 'saltutil.sync_all', [], null, true)
             } catch (Exception e) {
                 common.warningMsg("No response from some minions. We should continue to run")
             }
 
-            def _pillar = salt.getGrain(saltMaster, 'I@salt:master', 'domain')
+            def _pillar = salt.getGrain(pepperEnv, 'I@salt:master', 'domain')
             def domain = _pillar['return'][0].values()[0].values()[0]
             print(_pillar)
             print(domain)
 
             // read backupninja variable
-            _pillar = salt.getPillar(saltMaster, 'I@backupninja:client', '_param:backupninja_backup_host')
+            _pillar = salt.getPillar(pepperEnv, 'I@backupninja:client', '_param:backupninja_backup_host')
             def backupninja_backup_host = _pillar['return'][0].values()[0]
             print(_pillar)
             print(backupninja_backup_host)
 
-            _pillar = salt.getGrain(saltMaster, 'I@salt:control', 'id')
+            _pillar = salt.getGrain(pepperEnv, 'I@salt:control', 'id')
             def kvm01 = _pillar['return'][0].values()[0].values()[0]
             print(_pillar)
             print(kvm01)
 
-            _pillar = salt.getPillar(saltMaster, "${kvm01}", 'salt:control:cluster:internal:node:upg01:provider')
+            _pillar = salt.getPillar(pepperEnv, "${kvm01}", 'salt:control:cluster:internal:node:upg01:provider')
             def upgNodeProvider = _pillar['return'][0].values()[0]
             print(_pillar)
             print(upgNodeProvider)
 
 
-            salt.runSaltProcessStep(saltMaster, "${upgNodeProvider}", 'virt.destroy', ["upg01.${domain}"], null, true)
-            salt.runSaltProcessStep(saltMaster, "${upgNodeProvider}", 'virt.undefine', ["upg01.${domain}"], null, true)
+            salt.runSaltProcessStep(pepperEnv, "${upgNodeProvider}", 'virt.destroy', ["upg01.${domain}"], null, true)
+            salt.runSaltProcessStep(pepperEnv, "${upgNodeProvider}", 'virt.undefine', ["upg01.${domain}"], null, true)
 
 
             try {
-                salt.cmdRun(saltMaster, 'I@salt:master', "salt-key -d upg01.${domain} -y")
+                salt.cmdRun(pepperEnv, 'I@salt:master', "salt-key -d upg01.${domain} -y")
             } catch (Exception e) {
                 common.warningMsg("upg01.${domain} does not match any accepted, unaccepted or rejected keys. The key did not exist yet or was already removed. We should continue to run")
             }
 
 
             // salt 'kvm02*' state.sls salt.control
-            salt.enforceState(saltMaster, "${upgNodeProvider}", 'salt.control')
+            salt.enforceState(pepperEnv, "${upgNodeProvider}", 'salt.control')
 
             // wait until upg node is registered in salt-key
-            salt.minionPresent(saltMaster, 'I@salt:master', 'upg01')
+            salt.minionPresent(pepperEnv, 'I@salt:master', 'upg01')
 
             // salt '*' saltutil.refresh_pillar
-            salt.runSaltProcessStep(saltMaster, 'upg*', 'saltutil.refresh_pillar', [], null, true)
+            salt.runSaltProcessStep(pepperEnv, 'upg*', 'saltutil.refresh_pillar', [], null, true)
             // salt '*' saltutil.sync_all
-            salt.runSaltProcessStep(saltMaster, 'upg*', 'saltutil.sync_all', [], null, true)
+            salt.runSaltProcessStep(pepperEnv, 'upg*', 'saltutil.sync_all', [], null, true)
 
             // salt "upg*" state.sls linux,openssh,salt.minion,ntp,rsyslog
             try {
-                salt.enforceState(saltMaster, 'upg*', ['linux', 'openssh', 'salt.minion', 'ntp', 'rsyslog'])
+                salt.enforceState(pepperEnv, 'upg*', ['linux', 'openssh', 'salt.minion', 'ntp', 'rsyslog'])
             } catch (Exception e) {
                 common.warningMsg('Received no response because salt-minion was restarted. We should continue to run')
             }
-            salt.enforceState(saltMaster, 'upg*', ['linux', 'openssh', 'salt.minion', 'ntp', 'rsyslog'])
+            salt.enforceState(pepperEnv, 'upg*', ['linux', 'openssh', 'salt.minion', 'ntp', 'rsyslog'])
 
             // salt "upg*" state.sls rabbitmq
-            salt.enforceState(saltMaster, 'upg*', ['rabbitmq', 'memcached'])
+            salt.enforceState(pepperEnv, 'upg*', ['rabbitmq', 'memcached'])
             try {
-                salt.enforceState(saltMaster, 'I@backupninja:client', ['openssh.client', 'salt.minion'])
+                salt.enforceState(pepperEnv, 'I@backupninja:client', ['openssh.client', 'salt.minion'])
             } catch (Exception e) {
                 common.warningMsg('salt-minion was restarted. We should continue to run')
             }
             try {
-                salt.enforceState(saltMaster, 'I@backupninja:server', ['salt.minion'])
+                salt.enforceState(pepperEnv, 'I@backupninja:server', ['salt.minion'])
             } catch (Exception e) {
                 common.warningMsg('salt-minion was restarted. We should continue to run')
             }
             // salt '*' state.apply salt.minion.grains
-            //salt.enforceState(saltMaster, '*', 'salt.minion.grains')
+            //salt.enforceState(pepperEnv, '*', 'salt.minion.grains')
             // salt -C 'I@backupninja:server' state.sls backupninja
-            salt.enforceState(saltMaster, 'I@backupninja:server', 'backupninja')
+            salt.enforceState(pepperEnv, 'I@backupninja:server', 'backupninja')
             // salt -C 'I@backupninja:client' state.sls backupninja
-            salt.enforceState(saltMaster, 'I@backupninja:client', 'backupninja')
-            salt.runSaltProcessStep(saltMaster, 'I@backupninja:client', 'ssh.rm_known_host', ["root", "${backupninja_backup_host}"], null, true)
+            salt.enforceState(pepperEnv, 'I@backupninja:client', 'backupninja')
+            salt.runSaltProcessStep(pepperEnv, 'I@backupninja:client', 'ssh.rm_known_host', ["root", "${backupninja_backup_host}"], null, true)
             try {
-                salt.cmdRun(saltMaster, 'I@backupninja:client', "arp -d ${backupninja_backup_host}")
+                salt.cmdRun(pepperEnv, 'I@backupninja:client', "arp -d ${backupninja_backup_host}")
             } catch (Exception e) {
                 common.warningMsg('The ARP entry does not exist. We should continue to run.')
             }
-            salt.runSaltProcessStep(saltMaster, 'I@backupninja:client', 'ssh.set_known_host', ["root", "${backupninja_backup_host}"], null, true)
-            salt.cmdRun(saltMaster, 'I@backupninja:client', 'backupninja -n --run /etc/backup.d/101.mysql')
-            salt.cmdRun(saltMaster, 'I@backupninja:client', 'backupninja -n --run /etc/backup.d/200.backup.rsync > /tmp/backupninjalog')
+            salt.runSaltProcessStep(pepperEnv, 'I@backupninja:client', 'ssh.set_known_host', ["root", "${backupninja_backup_host}"], null, true)
+            salt.cmdRun(pepperEnv, 'I@backupninja:client', 'backupninja -n --run /etc/backup.d/101.mysql')
+            salt.cmdRun(pepperEnv, 'I@backupninja:client', 'backupninja -n --run /etc/backup.d/200.backup.rsync > /tmp/backupninjalog')
 
-            salt.enforceState(saltMaster, 'I@xtrabackup:server', 'xtrabackup')
-            salt.enforceState(saltMaster, 'I@xtrabackup:client', 'openssh.client')
-            salt.cmdRun(saltMaster, 'I@xtrabackup:client', "su root -c 'salt-call state.sls xtrabackup'")
-            salt.cmdRun(saltMaster, 'I@xtrabackup:client', "su root -c '/usr/local/bin/innobackupex-runner.sh'")
+            salt.enforceState(pepperEnv, 'I@xtrabackup:server', 'xtrabackup')
+            salt.enforceState(pepperEnv, 'I@xtrabackup:client', 'openssh.client')
+            salt.cmdRun(pepperEnv, 'I@xtrabackup:client', "su root -c 'salt-call state.sls xtrabackup'")
+            salt.cmdRun(pepperEnv, 'I@xtrabackup:client', "su root -c '/usr/local/bin/innobackupex-runner.sh'")
 
-            def databases = salt.cmdRun(saltMaster, 'I@mysql:client','salt-call mysql.db_list | grep upgrade | awk \'/-/ {print \$2}\'')
+            def databases = salt.cmdRun(pepperEnv, 'I@mysql:client','salt-call mysql.db_list | grep upgrade | awk \'/-/ {print \$2}\'')
             if(databases && databases != ""){
                 def databasesList = databases['return'][0].values()[0].trim().tokenize("\n")
                 for( i = 0; i < databasesList.size(); i++){
                     if(databasesList[i].toLowerCase().contains('upgrade')){
-                        salt.runSaltProcessStep(saltMaster, 'I@mysql:client', 'mysql.db_remove', ["${databasesList[i]}"], null, true)
+                        salt.runSaltProcessStep(pepperEnv, 'I@mysql:client', 'mysql.db_remove', ["${databasesList[i]}"], null, true)
                         common.warningMsg("removing database ${databasesList[i]}")
-                        salt.runSaltProcessStep(saltMaster, 'I@mysql:client', 'file.remove', ["/root/mysql/flags/${databasesList[i]}-installed"], null, true)
+                        salt.runSaltProcessStep(pepperEnv, 'I@mysql:client', 'file.remove', ["/root/mysql/flags/${databasesList[i]}-installed"], null, true)
                     }
                 }
-                salt.enforceState(saltMaster, 'I@mysql:client', 'mysql.client')
+                salt.enforceState(pepperEnv, 'I@mysql:client', 'mysql.client')
             } else {
                 common.errorMsg("No _upgrade databases were returned")
             }
 
             try {
-                salt.enforceState(saltMaster, 'upg*', 'keystone.server')
-                salt.runSaltProcessStep(saltMaster, 'upg*', 'service.restart', ['apache2'], null, true)
+                salt.enforceState(pepperEnv, 'upg*', 'keystone.server')
+                salt.runSaltProcessStep(pepperEnv, 'upg*', 'service.restart', ['apache2'], null, true)
             } catch (Exception e) {
                 common.warningMsg('Restarting Apache2')
-                salt.runSaltProcessStep(saltMaster, 'upg*', 'service.restart', ['apache2'], null, true)
+                salt.runSaltProcessStep(pepperEnv, 'upg*', 'service.restart', ['apache2'], null, true)
             }
             try {
-                salt.enforceState(saltMaster, 'upg*', 'keystone.client')
+                salt.enforceState(pepperEnv, 'upg*', 'keystone.client')
             } catch (Exception e) {
                 common.warningMsg('running keystone.client state again')
-                salt.enforceState(saltMaster, 'upg*', 'keystone.client')
+                salt.enforceState(pepperEnv, 'upg*', 'keystone.client')
             }
             try {
-                salt.enforceState(saltMaster, 'upg*', 'glance')
+                salt.enforceState(pepperEnv, 'upg*', 'glance')
             } catch (Exception e) {
                 common.warningMsg('running glance state again')
-                salt.enforceState(saltMaster, 'upg*', 'glance')
+                salt.enforceState(pepperEnv, 'upg*', 'glance')
             }
-            salt.enforceState(saltMaster, 'upg*', 'keystone.server')
+            salt.enforceState(pepperEnv, 'upg*', 'keystone.server')
             try {
-                salt.enforceState(saltMaster, 'upg*', 'nova')
+                salt.enforceState(pepperEnv, 'upg*', 'nova')
             } catch (Exception e) {
                 common.warningMsg('running nova state again')
-                salt.enforceState(saltMaster, 'upg*', 'nova')
+                salt.enforceState(pepperEnv, 'upg*', 'nova')
             }
             // Run the nova state a second time, as it sometimes does not fully apply on the first run.
             try {
-                salt.enforceState(saltMaster, 'upg*', 'nova')
+                salt.enforceState(pepperEnv, 'upg*', 'nova')
             } catch (Exception e) {
                 common.warningMsg('running nova state again')
-                salt.enforceState(saltMaster, 'upg*', 'nova')
+                salt.enforceState(pepperEnv, 'upg*', 'nova')
             }
             try {
-                salt.enforceState(saltMaster, 'upg*', 'cinder')
+                salt.enforceState(pepperEnv, 'upg*', 'cinder')
             } catch (Exception e) {
                 common.warningMsg('running cinder state again')
-                salt.enforceState(saltMaster, 'upg*', 'cinder')
+                salt.enforceState(pepperEnv, 'upg*', 'cinder')
             }
             try {
-                salt.enforceState(saltMaster, 'upg*', 'neutron')
+                salt.enforceState(pepperEnv, 'upg*', 'neutron')
             } catch (Exception e) {
                 common.warningMsg('running neutron state again')
-                salt.enforceState(saltMaster, 'upg*', 'neutron')
+                salt.enforceState(pepperEnv, 'upg*', 'neutron')
             }
             try {
-                salt.enforceState(saltMaster, 'upg*', 'heat')
+                salt.enforceState(pepperEnv, 'upg*', 'heat')
             } catch (Exception e) {
                 common.warningMsg('running heat state again')
-                salt.enforceState(saltMaster, 'upg*', 'heat')
+                salt.enforceState(pepperEnv, 'upg*', 'heat')
             }
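
The keystone, glance, nova, cinder, neutron and heat blocks above all use the same enforce-then-retry-once shape. A hypothetical helper that captures that repetition (not part of this change, shown only as a sketch) could look like:

def enforceStateWithRetry(salt, common, pepperEnv, target, state) {
    try {
        salt.enforceState(pepperEnv, target, state)
    } catch (Exception e) {
        common.warningMsg("running ${state} state again")
        // Single retry, matching the hand-written try/catch blocks in this pipeline.
        salt.enforceState(pepperEnv, target, state)
    }
}
// e.g. enforceStateWithRetry(salt, common, pepperEnv, 'upg*', 'nova')
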
-            salt.cmdRun(saltMaster, 'upg01*', '. /root/keystonercv3; openstack service list; openstack image list; openstack flavor list; openstack compute service list; openstack server list; openstack network list; openstack volume list; openstack orchestration service list')
+            salt.cmdRun(pepperEnv, 'upg01*', '. /root/keystonercv3; openstack service list; openstack image list; openstack flavor list; openstack compute service list; openstack server list; openstack network list; openstack volume list; openstack orchestration service list')
 
             if (STAGE_TEST_UPGRADE.toBoolean() == true && STAGE_REAL_UPGRADE.toBoolean() == true) {
                 stage('Ask for manual confirmation') {
@@ -209,163 +209,163 @@
         stage('Real upgrade') {
             // # actual upgrade
 
-            _pillar = salt.getGrain(saltMaster, 'I@salt:master', 'domain')
+            _pillar = salt.getGrain(pepperEnv, 'I@salt:master', 'domain')
             domain = _pillar['return'][0].values()[0].values()[0]
             print(_pillar)
             print(domain)
 
-            _pillar = salt.getGrain(saltMaster, 'I@salt:control', 'id')
+            _pillar = salt.getGrain(pepperEnv, 'I@salt:control', 'id')
             kvm01 = _pillar['return'][0].values()[0].values()[0]
             print(_pillar)
             print(kvm01)
 
             def errorOccured = false
 
-            _pillar = salt.getPillar(saltMaster, "${kvm01}", 'salt:control:cluster:internal:node:ctl01:provider')
+            _pillar = salt.getPillar(pepperEnv, "${kvm01}", 'salt:control:cluster:internal:node:ctl01:provider')
             def ctl01NodeProvider = _pillar['return'][0].values()[0]
 
-            _pillar = salt.getPillar(saltMaster, "${kvm01}", 'salt:control:cluster:internal:node:ctl02:provider')
+            _pillar = salt.getPillar(pepperEnv, "${kvm01}", 'salt:control:cluster:internal:node:ctl02:provider')
             def ctl02NodeProvider = _pillar['return'][0].values()[0]
 
-            _pillar = salt.getPillar(saltMaster, "${kvm01}", 'salt:control:cluster:internal:node:ctl03:provider')
+            _pillar = salt.getPillar(pepperEnv, "${kvm01}", 'salt:control:cluster:internal:node:ctl03:provider')
             def ctl03NodeProvider = _pillar['return'][0].values()[0]
 
-            _pillar = salt.getPillar(saltMaster, "${kvm01}", 'salt:control:cluster:internal:node:prx01:provider')
+            _pillar = salt.getPillar(pepperEnv, "${kvm01}", 'salt:control:cluster:internal:node:prx01:provider')
             def prx01NodeProvider = _pillar['return'][0].values()[0]
 
-            _pillar = salt.getPillar(saltMaster, "${kvm01}", 'salt:control:cluster:internal:node:prx02:provider')
+            _pillar = salt.getPillar(pepperEnv, "${kvm01}", 'salt:control:cluster:internal:node:prx02:provider')
             def prx02NodeProvider = _pillar['return'][0].values()[0]
 
 
-            salt.runSaltProcessStep(saltMaster, "${prx01NodeProvider}", 'virt.destroy', ["prx01.${domain}"], null, true)
-            salt.runSaltProcessStep(saltMaster, "${prx02NodeProvider}", 'virt.destroy', ["prx02.${domain}"], null, true)
-            salt.runSaltProcessStep(saltMaster, "${ctl01NodeProvider}", 'virt.destroy', ["ctl01.${domain}"], null, true)
-            salt.runSaltProcessStep(saltMaster, "${ctl02NodeProvider}", 'virt.destroy', ["ctl02.${domain}"], null, true)
-            salt.runSaltProcessStep(saltMaster, "${ctl03NodeProvider}", 'virt.destroy', ["ctl03.${domain}"], null, true)
+            salt.runSaltProcessStep(pepperEnv, "${prx01NodeProvider}", 'virt.destroy', ["prx01.${domain}"], null, true)
+            salt.runSaltProcessStep(pepperEnv, "${prx02NodeProvider}", 'virt.destroy', ["prx02.${domain}"], null, true)
+            salt.runSaltProcessStep(pepperEnv, "${ctl01NodeProvider}", 'virt.destroy', ["ctl01.${domain}"], null, true)
+            salt.runSaltProcessStep(pepperEnv, "${ctl02NodeProvider}", 'virt.destroy', ["ctl02.${domain}"], null, true)
+            salt.runSaltProcessStep(pepperEnv, "${ctl03NodeProvider}", 'virt.destroy', ["ctl03.${domain}"], null, true)
 
 
             try {
-                salt.cmdRun(saltMaster, "${prx01NodeProvider}", "[ ! -f /root/prx01.${domain}.qcow2.bak ] && cp /var/lib/libvirt/images/prx01.${domain}/system.qcow2 ./prx01.${domain}.qcow2.bak")
+                salt.cmdRun(pepperEnv, "${prx01NodeProvider}", "[ ! -f /root/prx01.${domain}.qcow2.bak ] && cp /var/lib/libvirt/images/prx01.${domain}/system.qcow2 ./prx01.${domain}.qcow2.bak")
             } catch (Exception e) {
                 common.warningMsg('File already exists')
             }
             try {
-                salt.cmdRun(saltMaster, "${prx02NodeProvider}", "[ ! -f /root/prx02.${domain}.qcow2.bak ] && cp /var/lib/libvirt/images/prx02.${domain}/system.qcow2 ./prx02.${domain}.qcow2.bak")
+                salt.cmdRun(pepperEnv, "${prx02NodeProvider}", "[ ! -f /root/prx02.${domain}.qcow2.bak ] && cp /var/lib/libvirt/images/prx02.${domain}/system.qcow2 ./prx02.${domain}.qcow2.bak")
             } catch (Exception e) {
                 common.warningMsg('File already exists')
             }
             try {
-                salt.cmdRun(saltMaster, "${ctl01NodeProvider}", "[ ! -f /root/ctl01.${domain}.qcow2.bak ] && cp /var/lib/libvirt/images/ctl01.${domain}/system.qcow2 ./ctl01.${domain}.qcow2.bak")
+                salt.cmdRun(pepperEnv, "${ctl01NodeProvider}", "[ ! -f /root/ctl01.${domain}.qcow2.bak ] && cp /var/lib/libvirt/images/ctl01.${domain}/system.qcow2 ./ctl01.${domain}.qcow2.bak")
             } catch (Exception e) {
                 common.warningMsg('File already exists')
             }
             try {
-                salt.cmdRun(saltMaster, "${ctl02NodeProvider}", "[ ! -f /root/ctl02.${domain}.qcow2.bak ] && cp /var/lib/libvirt/images/ctl02.${domain}/system.qcow2 ./ctl02.${domain}.qcow2.bak")
+                salt.cmdRun(pepperEnv, "${ctl02NodeProvider}", "[ ! -f /root/ctl02.${domain}.qcow2.bak ] && cp /var/lib/libvirt/images/ctl02.${domain}/system.qcow2 ./ctl02.${domain}.qcow2.bak")
             } catch (Exception e) {
                 common.warningMsg('File already exists')
             }
             try {
-                salt.cmdRun(saltMaster, "${ctl03NodeProvider}", "[ ! -f /root/ctl03.${domain}.qcow2.bak ] && cp /var/lib/libvirt/images/ctl03.${domain}/system.qcow2 ./ctl03.${domain}.qcow2.bak")
+                salt.cmdRun(pepperEnv, "${ctl03NodeProvider}", "[ ! -f /root/ctl03.${domain}.qcow2.bak ] && cp /var/lib/libvirt/images/ctl03.${domain}/system.qcow2 ./ctl03.${domain}.qcow2.bak")
             } catch (Exception e) {
                 common.warningMsg('File already exists')
             }
 
 
-            salt.runSaltProcessStep(saltMaster, "${prx01NodeProvider}", 'virt.undefine', ["prx01.${domain}"], null, true)
-            salt.runSaltProcessStep(saltMaster, "${prx02NodeProvider}", 'virt.undefine', ["prx02.${domain}"], null, true)
-            salt.runSaltProcessStep(saltMaster, "${ctl01NodeProvider}", 'virt.undefine', ["ctl01.${domain}"], null, true)
-            salt.runSaltProcessStep(saltMaster, "${ctl02NodeProvider}", 'virt.undefine', ["ctl02.${domain}"], null, true)
-            salt.runSaltProcessStep(saltMaster, "${ctl03NodeProvider}", 'virt.undefine', ["ctl03.${domain}"], null, true)
+            salt.runSaltProcessStep(pepperEnv, "${prx01NodeProvider}", 'virt.undefine', ["prx01.${domain}"], null, true)
+            salt.runSaltProcessStep(pepperEnv, "${prx02NodeProvider}", 'virt.undefine', ["prx02.${domain}"], null, true)
+            salt.runSaltProcessStep(pepperEnv, "${ctl01NodeProvider}", 'virt.undefine', ["ctl01.${domain}"], null, true)
+            salt.runSaltProcessStep(pepperEnv, "${ctl02NodeProvider}", 'virt.undefine', ["ctl02.${domain}"], null, true)
+            salt.runSaltProcessStep(pepperEnv, "${ctl03NodeProvider}", 'virt.undefine', ["ctl03.${domain}"], null, true)
 
-            salt.cmdRun(saltMaster, 'I@xtrabackup:client', "su root -c '/usr/local/bin/innobackupex-runner.sh'")
+            salt.cmdRun(pepperEnv, 'I@xtrabackup:client', "su root -c '/usr/local/bin/innobackupex-runner.sh'")
 
             try {
-                salt.cmdRun(saltMaster, 'I@salt:master', "salt-key -d ctl01.${domain},ctl02.${domain},ctl03.${domain},prx01.${domain},prx02.${domain} -y")
+                salt.cmdRun(pepperEnv, 'I@salt:master', "salt-key -d ctl01.${domain},ctl02.${domain},ctl03.${domain},prx01.${domain},prx02.${domain} -y")
             } catch (Exception e) {
                 common.warningMsg('does not match any accepted, unaccepted or rejected keys. They were probably already removed. We should continue to run')
             }
 
             // salt 'kvm*' state.sls salt.control
-            salt.enforceState(saltMaster, 'I@salt:control', 'salt.control')
+            salt.enforceState(pepperEnv, 'I@salt:control', 'salt.control')
 
             // wait until ctl and prx nodes are registered in salt-key
-            salt.minionPresent(saltMaster, 'I@salt:master', 'ctl01')
-            salt.minionPresent(saltMaster, 'I@salt:master', 'ctl02')
-            salt.minionPresent(saltMaster, 'I@salt:master', 'ctl03')
-            salt.minionPresent(saltMaster, 'I@salt:master', 'prx01')
-            salt.minionPresent(saltMaster, 'I@salt:master', 'prx02')
+            salt.minionPresent(pepperEnv, 'I@salt:master', 'ctl01')
+            salt.minionPresent(pepperEnv, 'I@salt:master', 'ctl02')
+            salt.minionPresent(pepperEnv, 'I@salt:master', 'ctl03')
+            salt.minionPresent(pepperEnv, 'I@salt:master', 'prx01')
+            salt.minionPresent(pepperEnv, 'I@salt:master', 'prx02')
 
 
             // salt '*' saltutil.refresh_pillar
-            salt.runSaltProcessStep(saltMaster, '*', 'saltutil.refresh_pillar', [], null, true)
+            salt.runSaltProcessStep(pepperEnv, '*', 'saltutil.refresh_pillar', [], null, true)
             // salt '*' saltutil.sync_all
-            salt.runSaltProcessStep(saltMaster, '*', 'saltutil.sync_all', [], null, true)
+            salt.runSaltProcessStep(pepperEnv, '*', 'saltutil.sync_all', [], null, true)
 
             try {
-                salt.enforceState(saltMaster, 'ctl* or prx* or ctl*', ['linux', 'openssh', 'salt.minion', 'ntp', 'rsyslog'])
+                salt.enforceState(pepperEnv, 'ctl* or prx* or ctl*', ['linux', 'openssh', 'salt.minion', 'ntp', 'rsyslog'])
             } catch (Exception e) {
                 common.warningMsg('Received no response because salt-minion was restarted. We should continue to run')
             }
-            salt.enforceState(saltMaster, 'ctl* or prx* or ctl*', ['linux', 'openssh', 'salt.minion', 'ntp', 'rsyslog'])
+            salt.enforceState(pepperEnv, 'ctl* or prx* or ctl*', ['linux', 'openssh', 'salt.minion', 'ntp', 'rsyslog'])
 
             // salt 'ctl*' state.sls keepalived
             // salt 'ctl*' state.sls haproxy
-            salt.enforceState(saltMaster, 'ctl*', ['keepalived', 'haproxy'])
+            salt.enforceState(pepperEnv, 'ctl*', ['keepalived', 'haproxy'])
             // salt 'ctl*' service.restart rsyslog
-            salt.runSaltProcessStep(saltMaster, 'ctl*', 'service.restart', ['rsyslog'], null, true)
+            salt.runSaltProcessStep(pepperEnv, 'ctl*', 'service.restart', ['rsyslog'], null, true)
             // salt "ctl*" state.sls memcached
             // salt "ctl*" state.sls keystone.server
             try {
                 try {
-                    salt.enforceState(saltMaster, 'ctl*', ['memcached', 'keystone.server'])
-                    salt.runSaltProcessStep(saltMaster, 'ctl*', 'service.restart', ['apache2'], null, true)
+                    salt.enforceState(pepperEnv, 'ctl*', ['memcached', 'keystone.server'])
+                    salt.runSaltProcessStep(pepperEnv, 'ctl*', 'service.restart', ['apache2'], null, true)
                 } catch (Exception e) {
                     common.warningMsg('Restarting Apache2 and enforcing keystone.server state again')
-                    salt.runSaltProcessStep(saltMaster, 'ctl*', 'service.restart', ['apache2'], null, true)
-                    salt.enforceState(saltMaster, 'ctl*', 'keystone.server')
+                    salt.runSaltProcessStep(pepperEnv, 'ctl*', 'service.restart', ['apache2'], null, true)
+                    salt.enforceState(pepperEnv, 'ctl*', 'keystone.server')
                 }
                 // salt 'ctl01*' state.sls keystone.client
                 try {
-                    salt.enforceState(saltMaster, 'I@keystone:client and ctl*', 'keystone.client')
+                    salt.enforceState(pepperEnv, 'I@keystone:client and ctl*', 'keystone.client')
                 } catch (Exception e) {
                     common.warningMsg('running keystone.client state again')
-                    salt.enforceState(saltMaster, 'I@keystone:client and ctl*', 'keystone.client')
+                    salt.enforceState(pepperEnv, 'I@keystone:client and ctl*', 'keystone.client')
                 }
                 try {
-                    salt.enforceState(saltMaster, 'ctl*', 'glance')
+                    salt.enforceState(pepperEnv, 'ctl*', 'glance')
                 } catch (Exception e) {
                     common.warningMsg('running glance state again')
-                    salt.enforceState(saltMaster, 'ctl*', 'glance')
+                    salt.enforceState(pepperEnv, 'ctl*', 'glance')
                 }
                 // salt 'ctl*' state.sls glusterfs.client
-                salt.enforceState(saltMaster, 'ctl*', 'glusterfs.client')
+                salt.enforceState(pepperEnv, 'ctl*', 'glusterfs.client')
                 // salt 'ctl*' state.sls keystone.server
-                salt.enforceState(saltMaster, 'ctl*', 'keystone.server')
+                salt.enforceState(pepperEnv, 'ctl*', 'keystone.server')
                 // salt 'ctl*' state.sls nova
                 try {
-                    salt.enforceState(saltMaster, 'ctl*', 'nova')
+                    salt.enforceState(pepperEnv, 'ctl*', 'nova')
                 } catch (Exception e) {
                     common.warningMsg('running nova state again')
-                    salt.enforceState(saltMaster, 'ctl*', 'nova')
+                    salt.enforceState(pepperEnv, 'ctl*', 'nova')
                 }
                 // salt 'ctl*' state.sls cinder
                 try {
-                    salt.enforceState(saltMaster, 'ctl*', 'cinder')
+                    salt.enforceState(pepperEnv, 'ctl*', 'cinder')
                 } catch (Exception e) {
                     common.warningMsg('running cinder state again')
-                    salt.enforceState(saltMaster, 'ctl*', 'cinder')
+                    salt.enforceState(pepperEnv, 'ctl*', 'cinder')
                 }
                 try {
-                    salt.enforceState(saltMaster, 'ctl*', 'neutron')
+                    salt.enforceState(pepperEnv, 'ctl*', 'neutron')
                 } catch (Exception e) {
                     common.warningMsg('running neutron state again')
-                    salt.enforceState(saltMaster, 'ctl*', 'neutron')
+                    salt.enforceState(pepperEnv, 'ctl*', 'neutron')
                 }
                 // salt 'ctl*' state.sls heat
                 try {
-                    salt.enforceState(saltMaster, 'ctl*', 'heat')
+                    salt.enforceState(pepperEnv, 'ctl*', 'heat')
                 } catch (Exception e) {
                     common.warningMsg('running heat state again')
-                    salt.enforceState(saltMaster, 'ctl*', 'heat')
+                    salt.enforceState(pepperEnv, 'ctl*', 'heat')
                 }
 
             } catch (Exception e) {
@@ -374,53 +374,53 @@
 
                 // database restore section
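                 // Stop MySQL on all Galera nodes, move the master datadir aside and force a
                 // gcomm:// bootstrap, then re-run the xtrabackup state to restore the latest
                 // backup before starting the master and, once it is up, the slaves.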
                 try {
-                    salt.runSaltProcessStep(saltMaster, 'I@galera:slave', 'service.stop', ['mysql'], null, true)
+                    salt.runSaltProcessStep(pepperEnv, 'I@galera:slave', 'service.stop', ['mysql'], null, true)
                 } catch (Exception er) {
                     common.warningMsg('Mysql service already stopped')
                 }
                 try {
-                    salt.runSaltProcessStep(saltMaster, 'I@galera:master', 'service.stop', ['mysql'], null, true)
+                    salt.runSaltProcessStep(pepperEnv, 'I@galera:master', 'service.stop', ['mysql'], null, true)
                 } catch (Exception er) {
                     common.warningMsg('Mysql service already stopped')
                 }
                 try {
-                    salt.cmdRun(saltMaster, 'I@galera:slave', "rm /var/lib/mysql/ib_logfile*")
+                    salt.cmdRun(pepperEnv, 'I@galera:slave', "rm /var/lib/mysql/ib_logfile*")
                 } catch (Exception er) {
                     common.warningMsg('Files are not present')
                 }
                 try {
-                    salt.cmdRun(saltMaster, 'I@galera:master', "mkdir /root/mysql/mysql.bak")
+                    salt.cmdRun(pepperEnv, 'I@galera:master', "mkdir /root/mysql/mysql.bak")
                 } catch (Exception er) {
                     common.warningMsg('Directory already exists')
                 }
                 try {
-                    salt.cmdRun(saltMaster, 'I@galera:master', "rm -rf /root/mysql/mysql.bak/*")
+                    salt.cmdRun(pepperEnv, 'I@galera:master', "rm -rf /root/mysql/mysql.bak/*")
                 } catch (Exception er) {
                     common.warningMsg('Directory already empty')
                 }
                 try {
-                    salt.cmdRun(saltMaster, 'I@galera:master', "mv /var/lib/mysql/* /root/mysql/mysql.bak")
+                    salt.cmdRun(pepperEnv, 'I@galera:master', "mv /var/lib/mysql/* /root/mysql/mysql.bak")
                 } catch (Exception er) {
                     common.warningMsg('Files were already moved')
                 }
                 try {
-                    salt.runSaltProcessStep(saltMaster, 'I@galera:master', 'file.remove', ["/var/lib/mysql/.galera_bootstrap"], null, true)
+                    salt.runSaltProcessStep(pepperEnv, 'I@galera:master', 'file.remove', ["/var/lib/mysql/.galera_bootstrap"], null, true)
                 } catch (Exception er) {
                     common.warningMsg('File is not present')
                 }
-                salt.cmdRun(saltMaster, 'I@galera:master', "sed -i '/gcomm/c\\wsrep_cluster_address=\"gcomm://\"' /etc/mysql/my.cnf")
-                _pillar = salt.getPillar(saltMaster, "I@galera:master", 'xtrabackup:client:backup_dir')
+                salt.cmdRun(pepperEnv, 'I@galera:master', "sed -i '/gcomm/c\\wsrep_cluster_address=\"gcomm://\"' /etc/mysql/my.cnf")
+                _pillar = salt.getPillar(pepperEnv, "I@galera:master", 'xtrabackup:client:backup_dir')
                 backup_dir = _pillar['return'][0].values()[0]
                 if(backup_dir == null || backup_dir.isEmpty()) { backup_dir='/var/backups/mysql/xtrabackup' }
                 print(backup_dir)
-                salt.runSaltProcessStep(saltMaster, 'I@galera:master', 'file.remove', ["${backup_dir}/dbrestored"], null, true)
-                salt.cmdRun(saltMaster, 'I@xtrabackup:client', "su root -c 'salt-call state.sls xtrabackup'")
-                salt.runSaltProcessStep(saltMaster, 'I@galera:master', 'service.start', ['mysql'], null, true)
+                salt.runSaltProcessStep(pepperEnv, 'I@galera:master', 'file.remove', ["${backup_dir}/dbrestored"], null, true)
+                salt.cmdRun(pepperEnv, 'I@xtrabackup:client', "su root -c 'salt-call state.sls xtrabackup'")
+                salt.runSaltProcessStep(pepperEnv, 'I@galera:master', 'service.start', ['mysql'], null, true)
 
                 // wait until mysql service on galera master is up
-                salt.commandStatus(saltMaster, 'I@galera:master', 'service mysql status', 'running')
+                salt.commandStatus(pepperEnv, 'I@galera:master', 'service mysql status', 'running')
 
-                salt.runSaltProcessStep(saltMaster, 'I@galera:slave', 'service.start', ['mysql'], null, true)
+                salt.runSaltProcessStep(pepperEnv, 'I@galera:slave', 'service.start', ['mysql'], null, true)
                 //
 
                 common.errorMsg("Stage Real control upgrade failed")
@@ -430,7 +430,7 @@
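                 // Re-apply the ceph.client state on controllers only when the ceph.client role
                 // is present in their grains.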
                 ceph = null
 
                 try {
-                    ceph = salt.cmdRun(saltMaster, 'ctl*', "salt-call grains.item roles | grep ceph.client")
+                    ceph = salt.cmdRun(pepperEnv, 'ctl*', "salt-call grains.item roles | grep ceph.client")
 
                 } catch (Exception er) {
                     common.infoMsg("Ceph is not used")
@@ -438,42 +438,42 @@
 
                 if(ceph != null) {
                     try {
-                        salt.enforceState(saltMaster, 'ctl*', 'ceph.client')
+                        salt.enforceState(pepperEnv, 'ctl*', 'ceph.client')
                     } catch (Exception er) {
                         common.warningMsg("Ceph client state on controllers failed. Please fix it manually")
                     }
                 }
 
                 // salt 'cmp*' cmd.run 'service nova-compute restart'
-                salt.runSaltProcessStep(saltMaster, 'cmp*', 'service.restart', ['nova-compute'], null, true)
-                salt.runSaltProcessStep(saltMaster, 'ctl*', 'service.restart', ['nova-conductor'], null, true)
-                salt.runSaltProcessStep(saltMaster, 'ctl*', 'service.restart', ['nova-scheduler'], null, true)
+                salt.runSaltProcessStep(pepperEnv, 'cmp*', 'service.restart', ['nova-compute'], null, true)
+                salt.runSaltProcessStep(pepperEnv, 'ctl*', 'service.restart', ['nova-conductor'], null, true)
+                salt.runSaltProcessStep(pepperEnv, 'ctl*', 'service.restart', ['nova-scheduler'], null, true)
 
 
                 // salt 'prx*' state.sls linux,openssh,salt.minion,ntp,rsyslog
                 // salt 'ctl*' state.sls keepalived
                 // salt 'prx*' state.sls keepalived
-                salt.enforceState(saltMaster, 'prx*', 'keepalived')
+                salt.enforceState(pepperEnv, 'prx*', 'keepalived')
                 // salt 'prx*' state.sls horizon
-                salt.enforceState(saltMaster, 'prx*', 'horizon')
+                salt.enforceState(pepperEnv, 'prx*', 'horizon')
                 // salt 'prx*' state.sls nginx
-                salt.enforceState(saltMaster, 'prx*', 'nginx')
+                salt.enforceState(pepperEnv, 'prx*', 'nginx')
                 // salt "prx*" state.sls memcached
-                salt.enforceState(saltMaster, 'prx*', 'memcached')
+                salt.enforceState(pepperEnv, 'prx*', 'memcached')
 
                 try {
-                    salt.enforceHighstate(saltMaster, 'ctl*')
+                    salt.enforceHighstate(pepperEnv, 'ctl*')
                 } catch (Exception er) {
                     common.errorMsg("Highstate was executed on controller nodes but something failed. Please check it and fix it accordingly.")
                 }
 
                 try {
-                    salt.enforceHighstate(saltMaster, 'prx*')
+                    salt.enforceHighstate(pepperEnv, 'prx*')
                 } catch (Exception er) {
                     common.errorMsg("Highstate was executed on proxy nodes but something failed. Please check it and fix it accordingly.")
                 }
 
-                salt.cmdRun(saltMaster, 'ctl01*', '. /root/keystonercv3; openstack service list; openstack image list; openstack flavor list; openstack compute service list; openstack server list; openstack network list; openstack volume list; openstack orchestration service list')
+                salt.cmdRun(pepperEnv, 'ctl01*', '. /root/keystonercv3; openstack service list; openstack image list; openstack flavor list; openstack compute service list; openstack server list; openstack network list; openstack volume list; openstack orchestration service list')
             }
         }
 
@@ -491,110 +491,110 @@
                 input message: "Do you really want to continue with the rollback?"
             }
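             // Rollback path: resolve the cluster domain and the KVM provider of each controller
             // and proxy VM from grains/pillar, so the VMs can be destroyed and their disks
             // restored from the saved .qcow2.bak images.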
 
-            _pillar = salt.getGrain(saltMaster, 'I@salt:master', 'domain')
+            _pillar = salt.getGrain(pepperEnv, 'I@salt:master', 'domain')
             domain = _pillar['return'][0].values()[0].values()[0]
             print(_pillar)
             print(domain)
 
-            _pillar = salt.getGrain(saltMaster, 'I@salt:control', 'id')
+            _pillar = salt.getGrain(pepperEnv, 'I@salt:control', 'id')
             kvm01 = _pillar['return'][0].values()[0].values()[0]
             print(_pillar)
             print(kvm01)
 
-            _pillar = salt.getPillar(saltMaster, "${kvm01}", 'salt:control:cluster:internal:node:ctl01:provider')
+            _pillar = salt.getPillar(pepperEnv, "${kvm01}", 'salt:control:cluster:internal:node:ctl01:provider')
             def ctl01NodeProvider = _pillar['return'][0].values()[0]
 
-            _pillar = salt.getPillar(saltMaster, "${kvm01}", 'salt:control:cluster:internal:node:ctl02:provider')
+            _pillar = salt.getPillar(pepperEnv, "${kvm01}", 'salt:control:cluster:internal:node:ctl02:provider')
             def ctl02NodeProvider = _pillar['return'][0].values()[0]
 
-            _pillar = salt.getPillar(saltMaster, "${kvm01}", 'salt:control:cluster:internal:node:ctl03:provider')
+            _pillar = salt.getPillar(pepperEnv, "${kvm01}", 'salt:control:cluster:internal:node:ctl03:provider')
             def ctl03NodeProvider = _pillar['return'][0].values()[0]
 
-            _pillar = salt.getPillar(saltMaster, "${kvm01}", 'salt:control:cluster:internal:node:prx01:provider')
+            _pillar = salt.getPillar(pepperEnv, "${kvm01}", 'salt:control:cluster:internal:node:prx01:provider')
             def prx01NodeProvider = _pillar['return'][0].values()[0]
 
-            _pillar = salt.getPillar(saltMaster, "${kvm01}", 'salt:control:cluster:internal:node:prx02:provider')
+            _pillar = salt.getPillar(pepperEnv, "${kvm01}", 'salt:control:cluster:internal:node:prx02:provider')
             def prx02NodeProvider = _pillar['return'][0].values()[0]
 
-            salt.runSaltProcessStep(saltMaster, "${prx01NodeProvider}", 'virt.destroy', ["prx01.${domain}"], null, true)
-            salt.runSaltProcessStep(saltMaster, "${prx02NodeProvider}", 'virt.destroy', ["prx02.${domain}"], null, true)
-            salt.runSaltProcessStep(saltMaster, "${ctl01NodeProvider}", 'virt.destroy', ["ctl01.${domain}"], null, true)
-            salt.runSaltProcessStep(saltMaster, "${ctl02NodeProvider}", 'virt.destroy', ["ctl02.${domain}"], null, true)
-            salt.runSaltProcessStep(saltMaster, "${ctl03NodeProvider}", 'virt.destroy', ["ctl03.${domain}"], null, true)
+            salt.runSaltProcessStep(pepperEnv, "${prx01NodeProvider}", 'virt.destroy', ["prx01.${domain}"], null, true)
+            salt.runSaltProcessStep(pepperEnv, "${prx02NodeProvider}", 'virt.destroy', ["prx02.${domain}"], null, true)
+            salt.runSaltProcessStep(pepperEnv, "${ctl01NodeProvider}", 'virt.destroy', ["ctl01.${domain}"], null, true)
+            salt.runSaltProcessStep(pepperEnv, "${ctl02NodeProvider}", 'virt.destroy', ["ctl02.${domain}"], null, true)
+            salt.runSaltProcessStep(pepperEnv, "${ctl03NodeProvider}", 'virt.destroy', ["ctl03.${domain}"], null, true)
 
-            salt.runSaltProcessStep(saltMaster, "${prx01NodeProvider}", 'file.copy', ["/root/prx01.${domain}.qcow2.bak", "/var/lib/libvirt/images/prx01.${domain}/system.qcow2"], null, true)
-            salt.runSaltProcessStep(saltMaster, "${prx02NodeProvider}", 'file.copy', ["/root/prx02.${domain}.qcow2.bak", "/var/lib/libvirt/images/prx02.${domain}/system.qcow2"], null, true)
-            salt.runSaltProcessStep(saltMaster, "${ctl01NodeProvider}", 'file.copy', ["/root/ctl01.${domain}.qcow2.bak", "/var/lib/libvirt/images/ctl01.${domain}/system.qcow2"], null, true)
-            salt.runSaltProcessStep(saltMaster, "${ctl02NodeProvider}", 'file.copy', ["/root/ctl02.${domain}.qcow2.bak", "/var/lib/libvirt/images/ctl02.${domain}/system.qcow2"], null, true)
-            salt.runSaltProcessStep(saltMaster, "${ctl03NodeProvider}", 'file.copy', ["/root/ctl03.${domain}.qcow2.bak", "/var/lib/libvirt/images/ctl03.${domain}/system.qcow2"], null, true)
+            salt.runSaltProcessStep(pepperEnv, "${prx01NodeProvider}", 'file.copy', ["/root/prx01.${domain}.qcow2.bak", "/var/lib/libvirt/images/prx01.${domain}/system.qcow2"], null, true)
+            salt.runSaltProcessStep(pepperEnv, "${prx02NodeProvider}", 'file.copy', ["/root/prx02.${domain}.qcow2.bak", "/var/lib/libvirt/images/prx02.${domain}/system.qcow2"], null, true)
+            salt.runSaltProcessStep(pepperEnv, "${ctl01NodeProvider}", 'file.copy', ["/root/ctl01.${domain}.qcow2.bak", "/var/lib/libvirt/images/ctl01.${domain}/system.qcow2"], null, true)
+            salt.runSaltProcessStep(pepperEnv, "${ctl02NodeProvider}", 'file.copy', ["/root/ctl02.${domain}.qcow2.bak", "/var/lib/libvirt/images/ctl02.${domain}/system.qcow2"], null, true)
+            salt.runSaltProcessStep(pepperEnv, "${ctl03NodeProvider}", 'file.copy', ["/root/ctl03.${domain}.qcow2.bak", "/var/lib/libvirt/images/ctl03.${domain}/system.qcow2"], null, true)
 
             try {
-                salt.cmdRun(saltMaster, 'I@salt:master', "salt-key -d ctl01.${domain},ctl02.${domain},ctl03.${domain},prx01.${domain},prx02.${domain} -y")
+                salt.cmdRun(pepperEnv, 'I@salt:master', "salt-key -d ctl01.${domain},ctl02.${domain},ctl03.${domain},prx01.${domain},prx02.${domain} -y")
             } catch (Exception e) {
                 common.warningMsg('Some keys did not match any accepted, unaccepted or rejected keys. They were probably already removed. Continuing anyway.')
             }
 
             // database restore section
             try {
-                salt.runSaltProcessStep(saltMaster, 'I@galera:slave', 'service.stop', ['mysql'], null, true)
+                salt.runSaltProcessStep(pepperEnv, 'I@galera:slave', 'service.stop', ['mysql'], null, true)
             } catch (Exception e) {
                 common.warningMsg('Mysql service already stopped')
             }
             try {
-                salt.runSaltProcessStep(saltMaster, 'I@galera:master', 'service.stop', ['mysql'], null, true)
+                salt.runSaltProcessStep(pepperEnv, 'I@galera:master', 'service.stop', ['mysql'], null, true)
             } catch (Exception e) {
                 common.warningMsg('Mysql service already stopped')
             }
             try {
-                salt.cmdRun(saltMaster, 'I@galera:slave', "rm /var/lib/mysql/ib_logfile*")
+                salt.cmdRun(pepperEnv, 'I@galera:slave', "rm /var/lib/mysql/ib_logfile*")
             } catch (Exception e) {
                 common.warningMsg('Files are not present')
             }
             try {
-                salt.cmdRun(saltMaster, 'I@galera:master', "rm -rf /var/lib/mysql/*")
+                salt.cmdRun(pepperEnv, 'I@galera:master', "rm -rf /var/lib/mysql/*")
             } catch (Exception e) {
                 common.warningMsg('Directory already empty')
             }
             try {
-                salt.runSaltProcessStep(saltMaster, 'I@galera:master', 'file.remove', ["/var/lib/mysql/.galera_bootstrap"], null, true)
+                salt.runSaltProcessStep(pepperEnv, 'I@galera:master', 'file.remove', ["/var/lib/mysql/.galera_bootstrap"], null, true)
             } catch (Exception e) {
                 common.warningMsg('File is not present')
             }
-            salt.cmdRun(saltMaster, 'I@galera:master', "sed -i '/gcomm/c\\wsrep_cluster_address=\"gcomm://\"' /etc/mysql/my.cnf")
-            _pillar = salt.getPillar(saltMaster, "I@galera:master", 'xtrabackup:client:backup_dir')
+            salt.cmdRun(pepperEnv, 'I@galera:master', "sed -i '/gcomm/c\\wsrep_cluster_address=\"gcomm://\"' /etc/mysql/my.cnf")
+            _pillar = salt.getPillar(pepperEnv, "I@galera:master", 'xtrabackup:client:backup_dir')
             backup_dir = _pillar['return'][0].values()[0]
             if(backup_dir == null || backup_dir.isEmpty()) { backup_dir='/var/backups/mysql/xtrabackup' }
             print(backup_dir)
-            salt.runSaltProcessStep(saltMaster, 'I@galera:master', 'file.remove', ["${backup_dir}/dbrestored"], null, true)
-            salt.cmdRun(saltMaster, 'I@xtrabackup:client', "su root -c 'salt-call state.sls xtrabackup'")
-            salt.runSaltProcessStep(saltMaster, 'I@galera:master', 'service.start', ['mysql'], null, true)
+            salt.runSaltProcessStep(pepperEnv, 'I@galera:master', 'file.remove', ["${backup_dir}/dbrestored"], null, true)
+            salt.cmdRun(pepperEnv, 'I@xtrabackup:client', "su root -c 'salt-call state.sls xtrabackup'")
+            salt.runSaltProcessStep(pepperEnv, 'I@galera:master', 'service.start', ['mysql'], null, true)
 
             // wait until mysql service on galera master is up
-            salt.commandStatus(saltMaster, 'I@galera:master', 'service mysql status', 'running')
+            salt.commandStatus(pepperEnv, 'I@galera:master', 'service mysql status', 'running')
 
-            salt.runSaltProcessStep(saltMaster, 'I@galera:slave', 'service.start', ['mysql'], null, true)
+            salt.runSaltProcessStep(pepperEnv, 'I@galera:slave', 'service.start', ['mysql'], null, true)
             //
 
-            salt.runSaltProcessStep(saltMaster, "${prx01NodeProvider}", 'virt.start', ["prx01.${domain}"], null, true)
-            salt.runSaltProcessStep(saltMaster, "${prx02NodeProvider}", 'virt.start', ["prx02.${domain}"], null, true)
-            salt.runSaltProcessStep(saltMaster, "${ctl01NodeProvider}", 'virt.start', ["ctl01.${domain}"], null, true)
-            salt.runSaltProcessStep(saltMaster, "${ctl02NodeProvider}", 'virt.start', ["ctl02.${domain}"], null, true)
-            salt.runSaltProcessStep(saltMaster, "${ctl03NodeProvider}", 'virt.start', ["ctl03.${domain}"], null, true)
+            salt.runSaltProcessStep(pepperEnv, "${prx01NodeProvider}", 'virt.start', ["prx01.${domain}"], null, true)
+            salt.runSaltProcessStep(pepperEnv, "${prx02NodeProvider}", 'virt.start', ["prx02.${domain}"], null, true)
+            salt.runSaltProcessStep(pepperEnv, "${ctl01NodeProvider}", 'virt.start', ["ctl01.${domain}"], null, true)
+            salt.runSaltProcessStep(pepperEnv, "${ctl02NodeProvider}", 'virt.start', ["ctl02.${domain}"], null, true)
+            salt.runSaltProcessStep(pepperEnv, "${ctl03NodeProvider}", 'virt.start', ["ctl03.${domain}"], null, true)
 
             // salt 'cmp*' cmd.run 'service nova-compute restart'
-            salt.runSaltProcessStep(saltMaster, 'cmp*', 'service.restart', ['nova-compute'], null, true)
+            salt.runSaltProcessStep(pepperEnv, 'cmp*', 'service.restart', ['nova-compute'], null, true)
 
             // wait until ctl and prx nodes are registered in salt-key
-            salt.minionPresent(saltMaster, 'I@salt:master', 'ctl01')
-            salt.minionPresent(saltMaster, 'I@salt:master', 'ctl02')
-            salt.minionPresent(saltMaster, 'I@salt:master', 'ctl03')
-            salt.minionPresent(saltMaster, 'I@salt:master', 'prx01')
-            salt.minionPresent(saltMaster, 'I@salt:master', 'prx02')
+            salt.minionPresent(pepperEnv, 'I@salt:master', 'ctl01')
+            salt.minionPresent(pepperEnv, 'I@salt:master', 'ctl02')
+            salt.minionPresent(pepperEnv, 'I@salt:master', 'ctl03')
+            salt.minionPresent(pepperEnv, 'I@salt:master', 'prx01')
+            salt.minionPresent(pepperEnv, 'I@salt:master', 'prx02')
 
-            salt.runSaltProcessStep(saltMaster, 'ctl*', 'service.restart', ['nova-conductor'], null, true)
-            salt.runSaltProcessStep(saltMaster, 'ctl*', 'service.restart', ['nova-scheduler'], null, true)
+            salt.runSaltProcessStep(pepperEnv, 'ctl*', 'service.restart', ['nova-conductor'], null, true)
+            salt.runSaltProcessStep(pepperEnv, 'ctl*', 'service.restart', ['nova-scheduler'], null, true)
 
-            salt.cmdRun(saltMaster, 'ctl01*', '. /root/keystonerc; nova service-list; glance image-list; nova flavor-list; nova hypervisor-list; nova list; neutron net-list; cinder list; heat service-list')
+            salt.cmdRun(pepperEnv, 'ctl01*', '. /root/keystonerc; nova service-list; glance image-list; nova flavor-list; nova hypervisor-list; nova list; neutron net-list; cinder list; heat service-list')
         }
     }
 }
diff --git a/ovs-gateway-upgrade.groovy b/ovs-gateway-upgrade.groovy
index 9cfa215..1b21618 100644
--- a/ovs-gateway-upgrade.groovy
+++ b/ovs-gateway-upgrade.groovy
@@ -12,8 +12,9 @@
 
 def common = new com.mirantis.mk.Common()
 def salt = new com.mirantis.mk.Salt()
+def python = new com.mirantis.mk.Python()
 
-def saltMaster
+def pepperEnv = "pepperEnv"
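+// pepperEnv names the Pepper virtualenv prepared by python.setupPepperVirtualenv() below;
+// the salt.* helpers now take this env name instead of a live salt.connection() handle.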
 def targetTestSubset
 def targetLiveSubset
 def targetLiveAll
@@ -27,12 +28,12 @@
 node() {
     try {
 
-        stage('Connect to Salt master') {
-            saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+        stage('Setup virtualenv for Pepper') {
+            python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
         }
 
         stage('List target servers') {
-            minions = salt.getMinions(saltMaster, TARGET_SERVERS)
+            minions = salt.getMinions(pepperEnv, TARGET_SERVERS)
 
             if (minions.isEmpty()) {
                 throw new Exception("No minion was targeted")
@@ -55,11 +56,11 @@
 
 
         stage("Add new repos on test nodes") {
-            salt.enforceState(saltMaster, targetTestSubset, 'linux.system.repo')
+            salt.enforceState(pepperEnv, targetTestSubset, 'linux.system.repo')
         }
 
         stage("List package upgrades") {
-            salt.runSaltProcessStep(saltMaster, targetTestSubset, 'pkg.list_upgrades', [], null, true)
+            salt.runSaltProcessStep(pepperEnv, targetTestSubset, 'pkg.list_upgrades', [], null, true)
         }
 
         stage('Confirm upgrade on sample nodes') {
@@ -67,14 +68,14 @@
         }
 
         stage("Add new repos on sample nodes") {
-            salt.enforceState(saltMaster, targetLiveSubset, 'linux.system.repo')
+            salt.enforceState(pepperEnv, targetLiveSubset, 'linux.system.repo')
         }
 
         args = "apt-get -y -s -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confold\" dist-upgrade"
 
         stage('Test upgrade on sample') {
             try {
-                salt.cmdRun(saltMaster, targetLiveSubset, args)
+                salt.cmdRun(pepperEnv, targetLiveSubset, args)
             } catch (Exception er) {
                 print(er)
             }
@@ -88,23 +89,23 @@
         args = 'export DEBIAN_FRONTEND=noninteractive; apt-get -y -q --allow-downgrades -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confold\" dist-upgrade;'
 
         stage('Apply package upgrades on sample') {
-            out = salt.runSaltCommand(saltMaster, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, args, commandKwargs)
+            out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, args, commandKwargs)
             salt.printSaltCommandResult(out)
         }
 
         args = "sudo /usr/share/openvswitch/scripts/ovs-ctl start"
 
         stage('Start ovs on sample nodes') {
-            out = salt.runSaltCommand(saltMaster, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, args, commandKwargs)
+            out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, args, commandKwargs)
             salt.printSaltCommandResult(out)
         }
         stage("Run Neutron state on sample nodes") {
-            salt.enforceState(saltMaster, targetLiveSubset, ['neutron'])
+            salt.enforceState(pepperEnv, targetLiveSubset, ['neutron'])
         }
 
         stage("Run Highstate on sample nodes") {
             try {
-                salt.enforceHighstate(saltMaster, targetLiveSubset)
+                salt.enforceHighstate(pepperEnv, targetLiveSubset)
             } catch (Exception er) {
                 common.errorMsg("Highstate was executed on ${targetLiveSubset} but something failed. Please check it and fix it accordingly.")
             }
@@ -117,29 +118,29 @@
         }
 
         stage("Add new repos on all targeted nodes") {
-            salt.enforceState(saltMaster, targetLiveAll, 'linux.system.repo')
+            salt.enforceState(pepperEnv, targetLiveAll, 'linux.system.repo')
         }
 
         args = 'export DEBIAN_FRONTEND=noninteractive; apt-get -y -q --allow-downgrades -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confold\" dist-upgrade;'
 
         stage('Apply package upgrades on all targeted nodes') {
-            out = salt.runSaltCommand(saltMaster, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, args, commandKwargs)
+            out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, args, commandKwargs)
             salt.printSaltCommandResult(out)
         }
 
         args = "sudo /usr/share/openvswitch/scripts/ovs-ctl start"
 
         stage('Start ovs on all targeted nodes') {
-            out = salt.runSaltCommand(saltMaster, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, args, commandKwargs)
+            out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, args, commandKwargs)
             salt.printSaltCommandResult(out)
         }
         stage("Run Neutron state on all targeted nodes") {
-            salt.enforceState(saltMaster, targetLiveAll, ['neutron'])
+            salt.enforceState(pepperEnv, targetLiveAll, ['neutron'])
         }
 
         stage("Run Highstate on all targeted nodes") {
             try {
-                salt.enforceHighstate(saltMaster, targetLiveAll)
+                salt.enforceHighstate(pepperEnv, targetLiveAll)
             } catch (Exception er) {
                 common.errorMsg("Highstate was executed ${targetLiveAll} but something failed. Please check it and fix it accordingly.")
             }
diff --git a/restore-cassandra.groovy b/restore-cassandra.groovy
index 4b554b0..586940b 100644
--- a/restore-cassandra.groovy
+++ b/restore-cassandra.groovy
@@ -9,14 +9,14 @@
 
 def common = new com.mirantis.mk.Common()
 def salt = new com.mirantis.mk.Salt()
+def python = new com.mirantis.mk.Python()
 
-
-def saltMaster
+def pepperEnv = "pepperEnv"
 
 node() {
 
-    stage('Connect to Salt API') {
-        saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+    stage('Setup virtualenv for Pepper') {
+        python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
     }
 
     stage('Start restore') {
@@ -27,54 +27,54 @@
         }
         // Cassandra restore section
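         // Stop the Contrail database supervisor, move /var/lib/cassandra aside and clear the
         // dbrestored flag so the cassandra state re-runs the restore, then reboot the control
         // nodes and wait for contrail-status to settle.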
         try {
-            salt.runSaltProcessStep(saltMaster, 'I@opencontrail:control', 'service.stop', ['supervisor-database'], null, true)
+            salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.stop', ['supervisor-database'], null, true)
         } catch (Exception er) {
             common.warningMsg('Supervisor-database service already stopped')
         }
         try {
-            salt.cmdRun(saltMaster, 'I@opencontrail:control', "mkdir -p /root/cassandra/cassandra.bak")
+            salt.cmdRun(pepperEnv, 'I@opencontrail:control', "mkdir -p /root/cassandra/cassandra.bak")
         } catch (Exception er) {
             common.warningMsg('Directory already exists')
         }
 
         try {
-            salt.cmdRun(saltMaster, 'I@opencontrail:control', "mv /var/lib/cassandra/* /root/cassandra/cassandra.bak")
+            salt.cmdRun(pepperEnv, 'I@opencontrail:control', "mv /var/lib/cassandra/* /root/cassandra/cassandra.bak")
         } catch (Exception er) {
             common.warningMsg('Files were already moved')
         }
         try {
-            salt.cmdRun(saltMaster, 'I@opencontrail:control', "rm -rf /var/lib/cassandra/*")
+            salt.cmdRun(pepperEnv, 'I@opencontrail:control', "rm -rf /var/lib/cassandra/*")
         } catch (Exception er) {
             common.warningMsg('Directory already empty')
         }
 
-        _pillar = salt.getPillar(saltMaster, "I@cassandra:backup:client", 'cassandra:backup:backup_dir')
+        _pillar = salt.getPillar(pepperEnv, "I@cassandra:backup:client", 'cassandra:backup:backup_dir')
         backup_dir = _pillar['return'][0].values()[0]
         if(backup_dir == null || backup_dir.isEmpty()) { backup_dir='/var/backups/cassandra' }
         print(backup_dir)
-        salt.runSaltProcessStep(saltMaster, 'I@cassandra:backup:client', 'file.remove', ["${backup_dir}/dbrestored"], null, true)
+        salt.runSaltProcessStep(pepperEnv, 'I@cassandra:backup:client', 'file.remove', ["${backup_dir}/dbrestored"], null, true)
 
-        salt.runSaltProcessStep(saltMaster, 'I@cassandra:backup:client', 'service.start', ['supervisor-database'], null, true)
+        salt.runSaltProcessStep(pepperEnv, 'I@cassandra:backup:client', 'service.start', ['supervisor-database'], null, true)
 
         // wait until supervisor-database service is up
-        salt.commandStatus(saltMaster, 'I@cassandra:backup:client', 'service supervisor-database status', 'running')
+        salt.commandStatus(pepperEnv, 'I@cassandra:backup:client', 'service supervisor-database status', 'running')
         sleep(5)
         // performs restore
-        salt.cmdRun(saltMaster, 'I@cassandra:backup:client', "su root -c 'salt-call state.sls cassandra'")
-        salt.runSaltProcessStep(saltMaster, 'I@cassandra:backup:client', 'system.reboot', null, null, true, 5)
-        salt.runSaltProcessStep(saltMaster, 'I@opencontrail:control and not I@cassandra:backup:client', 'system.reboot', null, null, true, 5)
+        salt.cmdRun(pepperEnv, 'I@cassandra:backup:client', "su root -c 'salt-call state.sls cassandra'")
+        salt.runSaltProcessStep(pepperEnv, 'I@cassandra:backup:client', 'system.reboot', null, null, true, 5)
+        salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control and not I@cassandra:backup:client', 'system.reboot', null, null, true, 5)
 
         // wait until supervisor-database service is up
-        salt.commandStatus(saltMaster, 'I@cassandra:backup:client', 'service supervisor-database status', 'running')
-        salt.commandStatus(saltMaster, 'I@opencontrail:control and not I@cassandra:backup:client', 'service supervisor-database status', 'running')
+        salt.commandStatus(pepperEnv, 'I@cassandra:backup:client', 'service supervisor-database status', 'running')
+        salt.commandStatus(pepperEnv, 'I@opencontrail:control and not I@cassandra:backup:client', 'service supervisor-database status', 'running')
         sleep(5)
 
-        salt.runSaltProcessStep(saltMaster, 'I@opencontrail:control', 'service.restart', ['supervisor-database'], null, true)
+        salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.restart', ['supervisor-database'], null, true)
 
         // wait until contrail-status is up
-        salt.commandStatus(saltMaster, 'I@opencontrail:control', "contrail-status | grep -v == | grep -v \'disabled on boot\' | grep -v nodemgr | grep -v active | grep -v backup", null, false)
+        salt.commandStatus(pepperEnv, 'I@opencontrail:control', "contrail-status | grep -v == | grep -v \'disabled on boot\' | grep -v nodemgr | grep -v active | grep -v backup", null, false)
         
-        salt.cmdRun(saltMaster, 'I@opencontrail:control', "nodetool status")
-        salt.cmdRun(saltMaster, 'I@opencontrail:control', "contrail-status")
+        salt.cmdRun(pepperEnv, 'I@opencontrail:control', "nodetool status")
+        salt.cmdRun(pepperEnv, 'I@opencontrail:control', "contrail-status")
     }
 }
diff --git a/restore-zookeeper.groovy b/restore-zookeeper.groovy
index 3ead8aa..0f32576 100644
--- a/restore-zookeeper.groovy
+++ b/restore-zookeeper.groovy
@@ -9,14 +9,14 @@
 
 def common = new com.mirantis.mk.Common()
 def salt = new com.mirantis.mk.Salt()
+def python = new com.mirantis.mk.Python()
 
-
-def saltMaster
+def pepperEnv = "pepperEnv"
 
 node() {
 
-    stage('Connect to Salt API') {
-        saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+    stage('Setup virtualenv for Pepper') {
+        python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
     }
 
     stage('Start restore') {
@@ -27,63 +27,63 @@
         }
         // Zookeeper restore section
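         // Stop the Contrail supervisors and zookeeper, move the version-2 data aside and clear
         // the dbrestored flag so the zookeeper state re-runs the restore, then start the
         // services again and verify contrail-status.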
         try {
-            salt.runSaltProcessStep(saltMaster, 'I@opencontrail:control', 'service.stop', ['supervisor-config'], null, true)
+            salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.stop', ['supervisor-config'], null, true)
         } catch (Exception er) {
             common.warningMsg('Supervisor-config service already stopped')
         }
         try {
-            salt.runSaltProcessStep(saltMaster, 'I@opencontrail:control', 'service.stop', ['supervisor-control'], null, true)
+            salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.stop', ['supervisor-control'], null, true)
         } catch (Exception er) {
             common.warningMsg('Supervisor-control service already stopped')
         }
         try {
-            salt.runSaltProcessStep(saltMaster, 'I@opencontrail:control', 'service.stop', ['zookeeper'], null, true)
+            salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.stop', ['zookeeper'], null, true)
         } catch (Exception er) {
             common.warningMsg('Zookeeper service already stopped')
         }
         //sleep(5)
         // wait until zookeeper service is down
-        salt.commandStatus(saltMaster, 'I@opencontrail:control', 'service zookeeper status', 'stop')
+        salt.commandStatus(pepperEnv, 'I@opencontrail:control', 'service zookeeper status', 'stop')
 
         try {
-            salt.cmdRun(saltMaster, 'I@opencontrail:control', "mkdir -p /root/zookeeper/zookeeper.bak")
+            salt.cmdRun(pepperEnv, 'I@opencontrail:control', "mkdir -p /root/zookeeper/zookeeper.bak")
         } catch (Exception er) {
             common.warningMsg('Directory already exists')
         }
 
         try {
-            salt.cmdRun(saltMaster, 'I@opencontrail:control', "mv /var/lib/zookeeper/version-2/* /root/zookeeper/zookeeper.bak")
+            salt.cmdRun(pepperEnv, 'I@opencontrail:control', "mv /var/lib/zookeeper/version-2/* /root/zookeeper/zookeeper.bak")
         } catch (Exception er) {
             common.warningMsg('Files were already moved')
         }
         try {
-            salt.cmdRun(saltMaster, 'I@opencontrail:control', "rm -rf /var/lib/zookeeper/version-2/*")
+            salt.cmdRun(pepperEnv, 'I@opencontrail:control', "rm -rf /var/lib/zookeeper/version-2/*")
         } catch (Exception er) {
             common.warningMsg('Directory already empty')
         }
 
-        _pillar = salt.getPillar(saltMaster, "I@opencontrail:control", 'zookeeper:backup:backup_dir')
+        _pillar = salt.getPillar(pepperEnv, "I@opencontrail:control", 'zookeeper:backup:backup_dir')
         backup_dir = _pillar['return'][0].values()[0]
         if(backup_dir == null || backup_dir.isEmpty()) { backup_dir='/var/backups/zookeeper' }
         print(backup_dir)
-        salt.runSaltProcessStep(saltMaster, 'I@opencontrail:control', 'file.remove', ["${backup_dir}/dbrestored"], null, true)
+        salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'file.remove', ["${backup_dir}/dbrestored"], null, true)
 
         // performs restore
-        salt.cmdRun(saltMaster, 'I@opencontrail:control', "su root -c 'salt-call state.sls zookeeper'")
+        salt.cmdRun(pepperEnv, 'I@opencontrail:control', "su root -c 'salt-call state.sls zookeeper'")
 
-        salt.runSaltProcessStep(saltMaster, 'I@opencontrail:control', 'service.start', ['zookeeper'], null, true)
-        salt.runSaltProcessStep(saltMaster, 'I@opencontrail:control', 'service.start', ['supervisor-config'], null, true)
-        salt.runSaltProcessStep(saltMaster, 'I@opencontrail:control', 'service.start', ['supervisor-control'], null, true)
+        salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.start', ['zookeeper'], null, true)
+        salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.start', ['supervisor-config'], null, true)
+        salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.start', ['supervisor-control'], null, true)
 
         // wait until contrail-status is up
-        salt.commandStatus(saltMaster, 'I@opencontrail:control', "contrail-status | grep -v == | grep -v \'disabled on boot\' | grep -v nodemgr | grep -v active | grep -v backup", null, false)
+        salt.commandStatus(pepperEnv, 'I@opencontrail:control', "contrail-status | grep -v == | grep -v \'disabled on boot\' | grep -v nodemgr | grep -v active | grep -v backup", null, false)
 
-        salt.cmdRun(saltMaster, 'I@opencontrail:control', "ls /var/lib/zookeeper/version-2")
+        salt.cmdRun(pepperEnv, 'I@opencontrail:control', "ls /var/lib/zookeeper/version-2")
         try {
-            salt.cmdRun(saltMaster, 'I@opencontrail:control', "echo stat | nc localhost 2181")
+            salt.cmdRun(pepperEnv, 'I@opencontrail:control', "echo stat | nc localhost 2181")
         } catch (Exception er) {
             common.warningMsg('Check which node is zookeeper leader')
         }
-        salt.cmdRun(saltMaster, 'I@opencontrail:control', "contrail-status")
+        salt.cmdRun(pepperEnv, 'I@opencontrail:control', "contrail-status")
     }
 }
diff --git a/tcp-qa-pipeline.groovy b/tcp-qa-pipeline.groovy
index 1b178b4..17a7c3e 100644
--- a/tcp-qa-pipeline.groovy
+++ b/tcp-qa-pipeline.groovy
@@ -81,7 +81,7 @@
             . ${VENV_PATH}/bin/activate
 
             cd tcp_tests
-            if ! py.test -vvv -s -p no:django -p no:ipdb --junit-xml=nosetests.xml -k ${TEST_GROUP}; then
+            if ! py.test -vvv -s -p no:django -p no:ipdb --junit-xml=../nosetests.xml -k ${TEST_GROUP}; then
               echo "Tests failed!"
               exit 1
             fi
@@ -98,14 +98,14 @@
 
 def uploadResults(){
     stage('Upload tests results'){
-        def thisBuildUrl = "${env.JENKINS_URL}/job/${JOB_NAME}/${BUILD_NUMBER}/"
-        def testPlanName = "${env.TESTRAIL_MILESTONE} Integration-${new Date().format('yyyy-MM-dd')}"
+        def thisBuildUrl = "${JENKINS_URL}/job/${JOB_NAME}/${BUILD_NUMBER}/"
+        def testPlanName = "${TESTRAIL_MILESTONE} Integration-${new Date().format('yyyy-MM-dd')}"
 
         qaCommon.uploadResultsTestRail([
-            junitXml: "${env.WORKSPACE}/nosetests.xml",
+            junitXml: "${WORKSPACE}/nosetests.xml",
             testPlanName: testPlanName,
-            testSuiteName: "${env.TESTRAIL_TEST_SUITE}",
-            testrailMilestone: "${env.TESTRAIL_MILESTONE}",
+            testSuiteName: "${TESTRAIL_TEST_SUITE}",
+            testrailMilestone: "${TESTRAIL_MILESTONE}",
             jobURL: thisBuildUrl,
         ])
     }
diff --git a/test-cookiecutter-reclass.groovy b/test-cookiecutter-reclass.groovy
index e59f0ce..70e74b7 100644
--- a/test-cookiecutter-reclass.groovy
+++ b/test-cookiecutter-reclass.groovy
@@ -41,7 +41,7 @@
 
     common.infoMsg("Generating model from context ${modelFile}")
 
-    def productList = ["infra", "cicd", "opencontrail", "kubernetes", "openstack", "stacklight"]
+    def productList = ["infra", "cicd", "opencontrail", "kubernetes", "openstack", "oss", "stacklight", "ceph"]
     for (product in productList) {
 
         // get templateOutputDir and productDir
diff --git a/test-run-rally.groovy b/test-run-rally.groovy
index 4cf3bd3..d92d988 100644
--- a/test-run-rally.groovy
+++ b/test-run-rally.groovy
@@ -17,9 +17,9 @@
 common = new com.mirantis.mk.Common()
 salt = new com.mirantis.mk.Salt()
 test = new com.mirantis.mk.Test()
+def python = new com.mirantis.mk.Python()
 
-// Define global variables
-def saltMaster
+def pepperEnv = "pepperEnv"
 
 node("python") {
     try {
@@ -27,9 +27,8 @@
         //
         // Prepare connection
         //
-        stage ('Connect to salt master') {
-            // Connect to Salt master
-            saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+        stage('Setup virtualenv for Pepper') {
+            python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
         }
 
         //
@@ -37,14 +36,14 @@
         //
 
         stage('Run OpenStack Rally scenario') {
-            test.runRallyScenarios(saltMaster, IMAGE_LINK, TEST_TARGET, RALLY_SCENARIO, "/home/rally/rally_reports/",
+            test.runRallyScenarios(pepperEnv, IMAGE_LINK, TEST_TARGET, RALLY_SCENARIO, "/home/rally/rally_reports/",
                     DO_CLEANUP_RESOURCES)
         }
         stage('Copy test reports') {
-            test.copyTempestResults(saltMaster, TEST_TARGET)
+            test.copyTempestResults(pepperEnv, TEST_TARGET)
         }
         stage('Archiving test artifacts') {
-            test.archiveRallyArtifacts(saltMaster, TEST_TARGET)
+            test.archiveRallyArtifacts(pepperEnv, TEST_TARGET)
         }
     } catch (Throwable e) {
         currentBuild.result = 'FAILURE'
@@ -52,8 +51,8 @@
     } finally {
         if (CLEANUP_REPORTS_AND_CONTAINER.toBoolean()) {
             stage('Cleanup reports and container') {
-                test.removeReports(saltMaster, TEST_TARGET, "rally_reports", 'rally_reports.tar')
-                test.removeDockerContainer(saltMaster, TEST_TARGET, IMAGE_LINK)
+                test.removeReports(pepperEnv, TEST_TARGET, "rally_reports", 'rally_reports.tar')
+                test.removeDockerContainer(pepperEnv, TEST_TARGET, IMAGE_LINK)
             }
         }
     }
diff --git a/test-run-tempest.groovy b/test-run-tempest.groovy
index 4785992..0011daa 100644
--- a/test-run-tempest.groovy
+++ b/test-run-tempest.groovy
@@ -17,19 +17,15 @@
 common = new com.mirantis.mk.Common()
 salt = new com.mirantis.mk.Salt()
 test = new com.mirantis.mk.Test()
+def python = new com.mirantis.mk.Python()
 
-// Define global variables
-def saltMaster
+def pepperEnv = "pepperEnv"
 
 node("python") {
     try {
 
-        //
-        // Prepare connection
-        //
-        stage ('Connect to salt master') {
-            // Connect to Salt master
-            saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+        stage('Setup virtualenv for Pepper') {
+            python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
         }
 
         //
@@ -37,14 +33,14 @@
         //
 
         stage('Run OpenStack Tempest tests') {
-            test.runTempestTests(saltMaster, IMAGE_LINK, TEST_TARGET, TEST_TEMPEST_PATTERN, "/home/rally/rally_reports/",
+            test.runTempestTests(pepperEnv, IMAGE_LINK, TEST_TARGET, TEST_TEMPEST_PATTERN, "/home/rally/rally_reports/",
                     DO_CLEANUP_RESOURCES)
         }
         stage('Copy test reports') {
-            test.copyTempestResults(saltMaster, TEST_TARGET)
+            test.copyTempestResults(pepperEnv, TEST_TARGET)
         }
         stage('Archiving test artifacts') {
-            test.archiveRallyArtifacts(saltMaster, TEST_TARGET)
+            test.archiveRallyArtifacts(pepperEnv, TEST_TARGET)
         }
     } catch (Throwable e) {
         currentBuild.result = 'FAILURE'
@@ -52,8 +48,8 @@
     } finally {
         if (CLEANUP_REPORTS_AND_CONTAINER.toBoolean()) {
             stage('Cleanup reports and container') {
-                test.removeReports(saltMaster, TEST_TARGET, "rally_reports", 'rally_reports.tar')
-                test.removeDockerContainer(saltMaster, TEST_TARGET, IMAGE_LINK)
+                test.removeReports(pepperEnv, TEST_TARGET, "rally_reports", 'rally_reports.tar')
+                test.removeDockerContainer(pepperEnv, TEST_TARGET, IMAGE_LINK)
             }
         }
     }
diff --git a/test-salt-formulas-pipeline.groovy b/test-salt-formulas-pipeline.groovy
index 1494aca..fe7809c 100644
--- a/test-salt-formulas-pipeline.groovy
+++ b/test-salt-formulas-pipeline.groovy
@@ -3,8 +3,9 @@
  *  DEFAULT_GIT_REF
  *  DEFAULT_GIT_URL
  *  CREDENTIALS_ID
+ *  KITCHEN_TESTS_PARALLEL
  */
-def common = new com.mirantis.mk.Common()
+common = new com.mirantis.mk.Common()
 def gerrit = new com.mirantis.mk.Gerrit()
 def ruby = new com.mirantis.mk.Ruby()
 
@@ -26,6 +27,43 @@
 
 def checkouted = false
 
+futureFormulas = []
+failedFormulas = []
+
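+// Consume the shared futureFormulas queue with up to PARALLEL_GROUP_SIZE parallel runners;
+// runners stop picking up new work as soon as any formula test has failed.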
+def setupRunner(defaultGitRef, defaultGitUrl) {
+  def branches = [:]
+  for (int i = 0; i < PARALLEL_GROUP_SIZE.toInteger() && i < futureFormulas.size(); i++) {
+    branches["Runner ${i}"] = {
+      while (futureFormulas && !failedFormulas) {
+        def currentFormula = futureFormulas[0] ? futureFormulas[0] : null
+        if (!currentFormula) {
+          continue
+        }
+        futureFormulas.remove(currentFormula)
+        try {
+          triggerTestFormulaJob(currentFormula, defaultGitRef, defaultGitUrl)
+        } catch (Exception e) {
+          failedFormulas << currentFormula
+          common.warningMsg("Test of ${currentFormula} failed :  ${e}")
+        }
+      }
+    }
+  }
+  parallel branches
+}
+
+def triggerTestFormulaJob(testEnv, defaultGitRef, defaultGitUrl) {
+  common.infoMsg("Test of ${testEnv} starts")
+  build job: "test-salt-formulas-env", parameters: [
+    [$class: 'StringParameterValue', name: 'CREDENTIALS_ID', value: CREDENTIALS_ID],
+    [$class: 'StringParameterValue', name: 'KITCHEN_ENV', value: testEnv],
+    [$class: 'StringParameterValue', name: 'DEFAULT_GIT_REF', value: defaultGitRef],
+    [$class: 'StringParameterValue', name: 'DEFAULT_GIT_URL', value: defaultGitUrl],
+    [$class: 'StringParameterValue', name: 'SALT_OPTS', value: SALT_OPTS],
+    [$class: 'StringParameterValue', name: 'SALT_VERSION', value: SALT_VERSION]
+  ]
+}
+
 node("python") {
   try {
     stage("checkout") {
@@ -84,28 +122,22 @@
             common.infoMsg("CUSTOM_KITCHEN_ENVS not empty. Running with custom enviroments: ${kitchenEnvs}")
           }
           if (kitchenEnvs != null && kitchenEnvs != '') {
-            def kitchenTestRuns = [:]
+            def acc = 0
             common.infoMsg("Found " + kitchenEnvs.size() + " environment(s)")
             for (int i = 0; i < kitchenEnvs.size(); i++) {
-              def testEnv = kitchenEnvs[i]
-              kitchenTestRuns[testEnv] = {
-                build job: "test-salt-formulas-env", parameters: [
-                  [$class: 'StringParameterValue', name: 'CREDENTIALS_ID', value: CREDENTIALS_ID],
-                  [$class: 'StringParameterValue', name: 'KITCHEN_ENV', value: testEnv],
-                  [$class: 'StringParameterValue', name: 'DEFAULT_GIT_REF', value: defaultGitRef],
-                  [$class: 'StringParameterValue', name: 'DEFAULT_GIT_URL', value: defaultGitUrl],
-                  [$class: 'StringParameterValue', name: 'SALT_OPTS', value: SALT_OPTS],
-                  [$class: 'StringParameterValue', name: 'SALT_VERSION', value: SALT_VERSION]
-                ]
-              }
+              futureFormulas << kitchenEnvs[i]
             }
-            parallel kitchenTestRuns
+            setupRunner(defaultGitRef, defaultGitUrl)
           } else {
             common.warningMsg(".kitchen.yml file not found, no kitchen tests triggered.")
           }
         }
       }
     }
+    if (failedFormulas) {
+      currentBuild.result = "FAILURE"
+      common.warningMsg("The following tests failed: ${failedFormulas}")
+    }
   } catch (Throwable e) {
     // If there was an error or exception thrown, the build failed
     currentBuild.result = "FAILURE"
diff --git a/test-salt-model-node.groovy b/test-salt-model-node.groovy
index eaf0104..8e44bfc 100644
--- a/test-salt-model-node.groovy
+++ b/test-salt-model-node.groovy
@@ -11,6 +11,10 @@
  *  SYSTEM_GIT_REF
  *  FORMULAS_SOURCE
  *  MAX_CPU_PER_JOB
+ *  LEGACY_TEST_MODE
+ *  RECLASS_IGNORE_CLASS_NOTFOUND
+ *  APT_REPOSITORY
+ *  APT_REPOSITORY_GPG
  */
 
 def common = new com.mirantis.mk.Common()
@@ -56,7 +60,8 @@
       stage("test node") {
         if (checkouted) {
           def workspace = common.getWorkspace()
-          saltModelTesting.setupAndTestNode(NODE_TARGET, CLUSTER_NAME, EXTRA_FORMULAS, workspace, FORMULAS_SOURCE, FORMULAS_REVISION, MAX_CPU_PER_JOB.toInteger())
+          common.infoMsg("Running salt model test for node ${NODE_TARGET} in cluster ${CLUSTER_NAME}")
+          saltModelTesting.setupAndTestNode(NODE_TARGET, CLUSTER_NAME, EXTRA_FORMULAS, workspace, FORMULAS_SOURCE, FORMULAS_REVISION, MAX_CPU_PER_JOB.toInteger(), RECLASS_IGNORE_CLASS_NOTFOUND, LEGACY_TEST_MODE, APT_REPOSITORY, APT_REPOSITORY_GPG)
         }
       }
     } catch (Throwable e) {
diff --git a/test-salt-models-pipeline.groovy b/test-salt-models-pipeline.groovy
index 7099954..c931e60 100644
--- a/test-salt-models-pipeline.groovy
+++ b/test-salt-models-pipeline.groovy
@@ -8,9 +8,12 @@
  *  SYSTEM_GIT_URL
  *  SYSTEM_GIT_REF
  *  MAX_CPU_PER_JOB
+ *  LEGACY_TEST_MODE
+ *  RECLASS_IGNORE_CLASS_NOTFOUND
+ *  APT_REPOSITORY
+ *  APT_REPOSITORY_GPG
  */
 
-def common = new com.mirantis.mk.Common()
 def gerrit = new com.mirantis.mk.Gerrit()
 def ssh = new com.mirantis.mk.Ssh()
 def git = new com.mirantis.mk.Git()
@@ -44,7 +47,62 @@
     defaultGitRef = null
     defaultGitUrl = null
 }
+
 def checkouted = false
+futureNodes = []
+failedNodes = []
+common = new com.mirantis.mk.Common()
+
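+// Consume the shared futureNodes queue with up to PARALLEL_NODE_GROUP_SIZE parallel runners;
+// unlike the formula runner, the whole queue is drained and failures are collected in failedNodes.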
+def setupRunner() {
+
+  def branches = [:]
+  for (int i = 0; i < PARALLEL_NODE_GROUP_SIZE.toInteger() && i < futureNodes.size(); i++) {
+    branches["Runner ${i}"] = {
+      while (futureNodes) {
+        def currentNode = futureNodes[0] ? futureNodes[0] : null
+        if (!currentNode) {
+          continue
+        }
+
+        def clusterName = currentNode[2]
+        futureNodes.remove(currentNode)
+        try {
+            triggerTestNodeJob(currentNode[0], currentNode[1], currentNode[2], currentNode[3], currentNode[4])
+        } catch (Exception e) {
+          failedNodes << currentNode
+          common.warningMsg("Test of ${clusterName} failed :  ${e}")
+        }
+      }
+    }
+  }
+  failedNodes = []
+  if (branches) {
+    parallel branches
+  }
+}
+
+def triggerTestNodeJob(defaultGitUrl, defaultGitRef, clusterName, testTarget, formulasSource) {
+  common.infoMsg("Test of ${clusterName} starts")
+  build job: "test-salt-model-node", parameters: [
+    [$class: 'StringParameterValue', name: 'DEFAULT_GIT_URL', value: defaultGitUrl],
+    [$class: 'StringParameterValue', name: 'DEFAULT_GIT_REF', value: defaultGitRef],
+    [$class: 'StringParameterValue', name: 'CLUSTER_NAME', value: clusterName],
+    [$class: 'StringParameterValue', name: 'NODE_TARGET', value: testTarget],
+    [$class: 'StringParameterValue', name: 'FORMULAS_SOURCE', value: formulasSource],
+    [$class: 'StringParameterValue', name: 'EXTRA_FORMULAS', value: EXTRA_FORMULAS],
+    [$class: 'StringParameterValue', name: 'FORMULAS_REVISION', value: FORMULAS_REVISION],
+    [$class: 'StringParameterValue', name: 'CREDENTIALS_ID', value: CREDENTIALS_ID],
+    [$class: 'StringParameterValue', name: 'SYSTEM_GIT_URL', value: SYSTEM_GIT_URL],
+    [$class: 'StringParameterValue', name: 'MAX_CPU_PER_JOB', value: MAX_CPU_PER_JOB],
+    [$class: 'StringParameterValue', name: 'SYSTEM_GIT_REF', value: SYSTEM_GIT_REF],
+    [$class: 'BooleanParameterValue', name: 'LEGACY_TEST_MODE', value: LEGACY_TEST_MODE.toBoolean()],
+    [$class: 'BooleanParameterValue', name: 'RECLASS_IGNORE_CLASS_NOTFOUND', value: RECLASS_IGNORE_CLASS_NOTFOUND.toBoolean()],
+    [$class: 'StringParameterValue', name: 'APT_REPOSITORY', value: APT_REPOSITORY],
+    [$class: 'StringParameterValue', name: 'APT_REPOSITORY_GPG', value: APT_REPOSITORY_GPG]
+  ]
+}
+
+
 node("python") {
   try{
     stage("checkout") {
@@ -80,8 +138,37 @@
 
     stage("test-nodes") {
       if(checkouted) {
+        def modifiedClusters = null
+
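+        // For Gerrit changes that touch only classes/cluster/, limit the test run to the
+        // clusters actually modified by the change.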
+        if (gerritRef) {
+          checkChange = sh(script: "git diff-tree --no-commit-id --name-only -r HEAD | grep -v classes/cluster", returnStatus: true)
+          if (checkChange == 1) {
+            modifiedClusters = sh(script: "git diff-tree --no-commit-id --name-only -r HEAD | grep classes/cluster/ | awk -F/ '{print \$3}' | uniq", returnStdout: true).tokenize()
+          }
+        }
+
         def infraYMLs = sh(script: "find ./classes/ -regex '.*cluster/[-_a-zA-Z0-9]*/[infra/]*init\\.yml' -exec grep -il 'cluster_name' {} \\;", returnStdout: true).tokenize()
-        def branches = [:]
+        def clusterDirectories = sh(script: "ls -d ./classes/cluster/*/ | awk -F/ '{print \$4}'", returnStdout: true).tokenize()
+
+        // create a list of cluster names present in cluster folder
+        def infraList = []
+        for (elt in infraYMLs) {
+          infraList << elt.tokenize('/')[3]
+        }
+
+        // verify we have all valid clusters loaded
+        def commonList = infraList.intersect(clusterDirectories)
+        def differenceList = infraList.plus(clusterDirectories)
+        differenceList.removeAll(commonList)
+
+        if(!differenceList.isEmpty()){
+          common.warningMsg("The following clusters are not valid : ${differenceList} - That means we cannot found cluster_name in init.yml or infra/init.yml")
+        }
+        if (modifiedClusters) {
+          infraYMLs.removeAll { !modifiedClusters.contains(it.tokenize('/')[3]) }
+          common.infoMsg("Testing only modified clusters: ${infraYMLs}")
+        }
+
         for (int i = 0; i < infraYMLs.size(); i++) {
           def infraYMLConfig = readYaml(file: infraYMLs[i])
           if(!infraYMLConfig["parameters"].containsKey("_param")){
@@ -98,22 +185,24 @@
           def configHostname = infraParams["infra_config_hostname"]
           def testTarget = String.format("%s.%s", configHostname, clusterDomain)
 
-          branches[testTarget] = {
-            build job: "test-salt-model-node", parameters: [
-              [$class: 'StringParameterValue', name: 'DEFAULT_GIT_URL', value: defaultGitUrl],
-              [$class: 'StringParameterValue', name: 'DEFAULT_GIT_REF', value: defaultGitRef],
-              [$class: 'StringParameterValue', name: 'CLUSTER_NAME', value: clusterName],
-              [$class: 'StringParameterValue', name: 'NODE_TARGET', value: testTarget],
-              [$class: 'StringParameterValue', name: 'FORMULAS_SOURCE', value: formulasSource],
-              [$class: 'StringParameterValue', name: 'EXTRA_FORMULAS', value: EXTRA_FORMULAS],
-              [$class: 'StringParameterValue', name: 'FORMULAS_REVISION', value: FORMULAS_REVISION],
-              [$class: 'StringParameterValue', name: 'CREDENTIALS_ID', value: CREDENTIALS_ID],
-              [$class: 'StringParameterValue', name: 'SYSTEM_GIT_URL', value: SYSTEM_GIT_URL],
-              [$class: 'StringParameterValue', name: 'MAX_CPU_PER_JOB', value: MAX_CPU_PER_JOB],
-              [$class: 'StringParameterValue', name: 'SYSTEM_GIT_REF', value: SYSTEM_GIT_REF]
-            ]}
+          futureNodes << [defaultGitUrl, defaultGitRef, clusterName, testTarget, formulasSource]
         }
-        parallel branches
+
+        setupRunner()
+
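+        // Retry failed node tests (at most twice) only when the number of failures is small
+        // relative to the total, to avoid re-running an obviously broken change.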
+        def maxNodes = infraYMLs.size() > 10 ? infraYMLs.size() / 2 : 5
+        if (failedNodes && failedNodes.size() <= maxNodes) {
+          common.infoMsg("Some tests failed. They will be retriggered to make sure the failure is correct")
+          for (int retry = 0; retry < 2 && failedNodes; retry++) {
+            futureNodes = failedNodes
+            failedNodes = []
+            setupRunner()
+          }
+        }
+
+        if (failedNodes) {
+          currentBuild.result = "FAILURE"
+        }
       }
     }
   } catch (Throwable e) {
@@ -124,4 +213,3 @@
      common.sendNotification(currentBuild.result,"",["slack"])
   }
 }
-
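
The hunk above validates the model layout by building the symmetric difference between the cluster names declared via cluster_name in the init.yml files and the directories actually present under classes/cluster. A minimal standalone Groovy sketch of that set logic, with hypothetical sample lists in place of the find/ls shell output:

// hypothetical sample data; in the pipeline these come from the find/ls calls above
def infraList = ['cluster-a', 'cluster-b']            // names discovered from init.yml files
def clusterDirectories = ['cluster-a', 'cluster-c']   // directories under ./classes/cluster/

def commonList = infraList.intersect(clusterDirectories)   // clusters present in both lists
def differenceList = infraList.plus(clusterDirectories)    // concatenation of both lists
differenceList.removeAll(commonList)                       // keep only the mismatches

assert differenceList == ['cluster-b', 'cluster-c']        // these are reported as not valid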
diff --git a/test-service.groovy b/test-service.groovy
index f7cdd64..0b6da19 100644
--- a/test-service.groovy
+++ b/test-service.groovy
@@ -20,19 +20,15 @@
 git = new com.mirantis.mk.Git()
 salt = new com.mirantis.mk.Salt()
 test = new com.mirantis.mk.Test()
+def python = new com.mirantis.mk.Python()
 
-// Define global variables
-def saltMaster
+def pepperEnv = "pepperEnv"
 
 node("python") {
     try {
 
-        //
-        // Prepare connection
-        //
-        stage ('Connect to salt master') {
-            // Connect to Salt master
-            saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+        stage('Setup virtualenv for Pepper') {
+            python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
         }
 
         //
@@ -46,11 +42,11 @@
                 def output_file = image.replaceAll('/', '-') + '.output'
 
                 // run image
-                test.runConformanceTests(saltMaster, TEST_K8S_API_SERVER, image)
+                test.runConformanceTests(pepperEnv, 'ctl01*', TEST_K8S_API_SERVER, image)
 
                 // collect output
                 sh "mkdir -p ${artifacts_dir}"
-                file_content = salt.getFileContent(saltMaster, 'ctl01*', '/tmp/' + output_file)
+                file_content = salt.getFileContent(pepperEnv, 'ctl01*', '/tmp/' + output_file)
                 writeFile file: "${artifacts_dir}${output_file}", text: file_content
                 sh "cat ${artifacts_dir}${output_file}"
 
@@ -63,11 +59,11 @@
                 def output_file = image.replaceAll('/', '-') + '.output'
 
                 // run image
-                test.runConformanceTests(saltMaster, TEST_K8S_API_SERVER, image)
+                test.runConformanceTests(pepperEnv, 'ctl01*', TEST_K8S_API_SERVER, image)
 
                 // collect output
                 sh "mkdir -p ${artifacts_dir}"
-                file_content = salt.getFileContent(saltMaster, 'ctl01*', '/tmp/' + output_file)
+                file_content = salt.getFileContent(pepperEnv, 'ctl01*', '/tmp/' + output_file)
                 writeFile file: "${artifacts_dir}${output_file}", text: file_content
                 sh "cat ${artifacts_dir}${output_file}"
 
@@ -78,14 +74,14 @@
 
         if (common.checkContains('TEST_SERVICE', 'openstack')) {
             if (common.checkContains('TEST_DOCKER_INSTALL', 'true')) {
-                test.install_docker(saltMaster, TEST_TEMPEST_TARGET)
+                test.install_docker(pepperEnv, TEST_TEMPEST_TARGET)
             }
 
             stage('Run OpenStack tests') {
-                test.runTempestTests(saltMaster, TEST_TEMPEST_IMAGE, TEST_TEMPEST_TARGET, TEST_TEMPEST_PATTERN)
+                test.runTempestTests(pepperEnv, TEST_TEMPEST_IMAGE, TEST_TEMPEST_TARGET, TEST_TEMPEST_PATTERN)
             }
 
-            writeFile(file: 'report.xml', text: salt.getFileContent(saltMaster, TEST_TEMPEST_TARGET, '/root/report.xml'))
+            writeFile(file: 'report.xml', text: salt.getFileContent(pepperEnv, TEST_TEMPEST_TARGET, '/root/report.xml'))
             junit(keepLongStdio: true, testResults: 'report.xml', healthScaleFactor:  Double.parseDouble(TEST_JUNIT_RATIO))
             def testResults = test.collectJUnitResults(currentBuild.rawBuild.getAction(hudson.tasks.test.AbstractTestResultAction.class))
             if(testResults){
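
The hunk above applies the same conversion that runs through this change set: the salt.connection() object is replaced by a named Pepper virtualenv that is handed to the helper functions. A minimal sketch of the recurring pattern, built only from calls that appear in this diff (the output path below is a hypothetical example):

def python = new com.mirantis.mk.Python()
def salt = new com.mirantis.mk.Salt()
def pepperEnv = "pepperEnv"   // virtualenv name passed to the salt.* helpers instead of a connection

node("python") {
    // create the virtualenv and point Pepper at the Salt API
    python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)

    // subsequent helpers take the venv name as their first argument
    def content = salt.getFileContent(pepperEnv, 'ctl01*', '/tmp/example.output')  // hypothetical path
}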
diff --git a/test-system-reclass-pipeline.groovy b/test-system-reclass-pipeline.groovy
index 411edfc..03bd9fe 100644
--- a/test-system-reclass-pipeline.groovy
+++ b/test-system-reclass-pipeline.groovy
@@ -26,9 +26,9 @@
 def checkouted = false
 def merged = false
 def systemRefspec = "HEAD"
-try {
-  stage("Checkout") {
-    node() {
+node() {
+  try {
+    stage("Checkout") {
       if (gerritRef) {
         // job is triggered by Gerrit
         // test if change aren't already merged
@@ -46,38 +46,44 @@
           checkouted = gerrit.gerritPatchsetCheckout(defaultGitUrl, defaultGitRef, "HEAD", gerritCredentials)
       }
     }
-  }
 
-  stage("Test") {
-    if(merged){
-      common.successMsg("Gerrit change is already merged, no need to test them")
-    }else{
-      if(checkouted){
-        def branches = [:]
-        def testModels = TEST_MODELS.split(',')
-          for (int i = 0; i < testModels.size(); i++) {
-            def cluster = testModels[i]
-            def clusterGitUrl = defaultGitUrl.substring(0, defaultGitUrl.lastIndexOf("/") + 1) + cluster
-            branches["${cluster}"] = {
-              build job: "test-salt-model-${cluster}", parameters: [
-                [$class: 'StringParameterValue', name: 'DEFAULT_GIT_URL', value: clusterGitUrl],
-                [$class: 'StringParameterValue', name: 'DEFAULT_GIT_REF', value: "HEAD"],
-                [$class: 'StringParameterValue', name: 'SYSTEM_GIT_URL', value: defaultGitUrl],
-                [$class: 'StringParameterValue', name: 'SYSTEM_GIT_REF', value: systemRefspec]
-              ]
-            }
-          }
-        parallel branches
+    stage("Test") {
+      if(merged){
+        common.successMsg("Gerrit change is already merged, no need to test it")
       }else{
-         throw new Exception("Cannot checkout gerrit patchset, GERRIT_REFSPEC and DEFAULT_GIT_REF is null")
+        if(checkouted){
+
+          def documentationOnly = false
+          if (gerritRef) {
+            documentationOnly = sh(script: "git diff-tree --no-commit-id --name-only -r HEAD | grep -v .releasenotes", returnStatus: true) == 1
+          }
+
+          def branches = [:]
+          def testModels = documentationOnly ? [] : TEST_MODELS.split(',')
+            for (int i = 0; i < testModels.size(); i++) {
+              def cluster = testModels[i]
+              def clusterGitUrl = defaultGitUrl.substring(0, defaultGitUrl.lastIndexOf("/") + 1) + cluster
+              branches["${cluster}"] = {
+                build job: "test-salt-model-${cluster}", parameters: [
+                  [$class: 'StringParameterValue', name: 'DEFAULT_GIT_URL', value: clusterGitUrl],
+                  [$class: 'StringParameterValue', name: 'DEFAULT_GIT_REF', value: "HEAD"],
+                  [$class: 'StringParameterValue', name: 'SYSTEM_GIT_URL', value: defaultGitUrl],
+                  [$class: 'StringParameterValue', name: 'SYSTEM_GIT_REF', value: systemRefspec]
+                ]
+              }
+            }
+          parallel branches
+        }else{
+           throw new Exception("Cannot checkout gerrit patchset, GERRIT_REFSPEC and DEFAULT_GIT_REF are null")
+        }
       }
     }
+  } catch (Throwable e) {
+      // If there was an error or exception thrown, the build failed
+      currentBuild.result = "FAILURE"
+      currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+      throw e
+  } finally {
+      common.sendNotification(currentBuild.result,"",["slack"])
   }
-} catch (Throwable e) {
-    // If there was an error or exception thrown, the build failed
-    currentBuild.result = "FAILURE"
-    currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
-    throw e
-} finally {
-    common.sendNotification(currentBuild.result,"",["slack"])
 }
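
The hunk above skips the model tests when a Gerrit change touches only release notes: grep -v .releasenotes prints nothing and exits 1 when no other files were changed, and that exit code is what flips documentationOnly. A small sketch of the idiom as used here:

// documentationOnly is true when every changed file matches .releasenotes,
// i.e. `grep -v .releasenotes` filters everything out and returns exit code 1
def documentationOnly = false
if (gerritRef) {
    documentationOnly = sh(
        script: "git diff-tree --no-commit-id --name-only -r HEAD | grep -v .releasenotes",
        returnStatus: true
    ) == 1
}
def testModels = documentationOnly ? [] : TEST_MODELS.split(',')   // empty list => no model jobs spawned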
diff --git a/update-jenkins-master-jobs.groovy b/update-jenkins-master-jobs.groovy
index 56edb10..1361632 100644
--- a/update-jenkins-master-jobs.groovy
+++ b/update-jenkins-master-jobs.groovy
@@ -10,20 +10,21 @@
 
 def common = new com.mirantis.mk.Common()
 def salt = new com.mirantis.mk.Salt()
+def python = new com.mirantis.mk.Python()
 
-def saltMaster
+def pepperEnv = "pepperEnv"
 def target = ['expression': TARGET_SERVERS, 'type': 'compound']
 def result
 
 node("python") {
     try {
 
-        stage('Connect to Salt master') {
-            saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+        stage('Setup virtualenv for Pepper') {
+            python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
         }
 
         stage('Update Jenkins jobs') {
-            result = salt.runSaltCommand(saltMaster, 'local', target, 'state.apply', null, 'jenkins.client')
+            result = salt.runSaltCommand(pepperEnv, 'local', target, 'state.apply', null, 'jenkins.client')
             salt.checkResult(result)
         }
 
diff --git a/update-mirror-image.groovy b/update-mirror-image.groovy
new file mode 100644
index 0000000..0e28a4e
--- /dev/null
+++ b/update-mirror-image.groovy
@@ -0,0 +1,52 @@
+/**
+ * Update mirror image
+ *
+ * Expected parameters:
+ *   SALT_MASTER_CREDENTIALS    Credentials to the Salt API.
+ *   SALT_MASTER_URL            Full Salt API address [https://10.10.10.1:8000].
+ *
+**/
+
+def common = new com.mirantis.mk.Common()
+def salt = new com.mirantis.mk.Salt()
+def python = new com.mirantis.mk.Python()
+def venvPepper = "venvPepper"
+
+node() {
+    try {
+
+        python.setupPepperVirtualenv(venvPepper, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+
+        stage('Update Aptly packages'){
+            common.infoMsg("Updating Aptly packages.")
+            salt.enforceState(venvPepper, 'apt*', ['aptly'], true)
+            salt.runSaltProcessStep(venvPepper, 'apt*', 'cmd.run', ['/srv/scripts/aptly-update.sh'], null, true)
+        }
+
+        stage('Update Docker images'){
+            common.infoMsg("Updating Docker images.")
+            salt.enforceState(venvPepper, 'apt*', ['docker.client.registry'], true)
+        }
+
+        stage('Update PyPI packages'){
+            common.infoMsg("Updating PyPI packages.")
+            salt.runSaltProcessStep(venvPepper, 'apt*', 'cmd.run', ['pip2pi /srv/pypi_mirror/packages/ -r /srv/pypi_mirror/requirements.txt'], null, true)
+        }
+
+        stage('Update Git repositories'){
+            common.infoMsg("Updating Git repositories.")
+            salt.enforceState(venvPepper, 'apt*', ['git.server'], true)
+        }
+
+        stage('Update VM images'){
+            common.infoMsg("Updating VM images.")
+            salt.runSaltProcessStep(venvPepper, 'apt*', 'cmd.run', ['/srv/scripts/update-images.sh'], null, true)
+        }
+
+    } catch (Throwable e) {
+        // If there was an error or exception thrown, the build failed
+        currentBuild.result = "FAILURE"
+        currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+        throw e
+    }
+}
\ No newline at end of file
diff --git a/update-package.groovy b/update-package.groovy
index c946123..c71f598 100644
--- a/update-package.groovy
+++ b/update-package.groovy
@@ -14,8 +14,9 @@
 
 def common = new com.mirantis.mk.Common()
 def salt = new com.mirantis.mk.Salt()
+def python = new com.mirantis.mk.Python()
 
-def saltMaster
+def pepperEnv = "pepperEnv"
 def targetTestSubset
 def targetLiveSubset
 def targetLiveAll
@@ -28,12 +29,12 @@
 node() {
     try {
 
-        stage('Connect to Salt master') {
-            saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+        stage('Setup virtualenv for Pepper') {
+            python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
         }
 
         stage('List target servers') {
-            minions = salt.getMinions(saltMaster, TARGET_SERVERS)
+            minions = salt.getMinions(pepperEnv, TARGET_SERVERS)
 
             if (minions.isEmpty()) {
                 throw new Exception("No minion was targeted")
@@ -54,7 +55,7 @@
 
         stage("List package upgrades") {
             common.infoMsg("Listing all the packages that have a new update available on test nodes: ${targetTestSubset}")
-            salt.runSaltProcessStep(saltMaster, targetTestSubset, 'pkg.list_upgrades', [], null, true)
+            salt.runSaltProcessStep(pepperEnv, targetTestSubset, 'pkg.list_upgrades', [], null, true)
             if(TARGET_PACKAGES != "" && TARGET_PACKAGES != "*"){
                 common.infoMsg("Note that only the ${TARGET_PACKAGES} would be installed from the above list of available updates on the ${targetTestSubset}")
             }
@@ -88,7 +89,7 @@
         }
 
         stage('Apply package upgrades on sample') {
-            out = salt.runSaltCommand(saltMaster, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, packages, commandKwargs)
+            out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, packages, commandKwargs)
             salt.printSaltCommandResult(out)
         }
 
@@ -99,7 +100,7 @@
         }
 
         stage('Apply package upgrades on all nodes') {
-            out = salt.runSaltCommand(saltMaster, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, packages, commandKwargs)
+            out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, packages, commandKwargs)
             salt.printSaltCommandResult(out)
         }
 
diff --git a/update-reclass-metadata.groovy b/update-reclass-metadata.groovy
index 6fb539a..be695ca 100644
--- a/update-reclass-metadata.groovy
+++ b/update-reclass-metadata.groovy
@@ -10,21 +10,22 @@
 
 def common = new com.mirantis.mk.Common()
 def salt = new com.mirantis.mk.Salt()
+def python = new com.mirantis.mk.Python()
 
-def saltMaster
+def pepperEnv = "pepperEnv"
 def target = ['expression': TARGET_SERVERS, 'type': 'compound']
 def result
 
 node("python") {
     try {
 
-        stage('Connect to Salt master') {
-            saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+        stage('Setup virtualenv for Pepper') {
+            python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
         }
 
         stage('Update Reclass model') {
-            result = salt.runSaltCommand(saltMaster, 'local', target, 'state.apply', null, 'reclass.storage')
-            result = salt.runSaltCommand(saltMaster, 'local', target, 'state.apply', null, 'reclass.storage.node')
+            result = salt.runSaltCommand(pepperEnv, 'local', target, 'state.apply', null, 'reclass.storage')
+            result = salt.runSaltCommand(pepperEnv, 'local', target, 'state.apply', null, 'reclass.storage.node')
             salt.checkResult(result)
         }
 
diff --git a/update-salt-environment.groovy b/update-salt-environment.groovy
new file mode 100644
index 0000000..0b570fc
--- /dev/null
+++ b/update-salt-environment.groovy
@@ -0,0 +1,54 @@
+/**
+ *
+ * Update Salt environment pipeline
+ *
+ * Expected parameters:
+ *   SALT_MASTER_URL            Salt API server location
+ *   SALT_MASTER_CREDENTIALS    Credentials to the Salt API
+ *   UPDATE_FORMULAS            Boolean switch for enforcing updating formulas
+ */
+
+// Load shared libs
+def salt = new com.mirantis.mk.Salt()
+def common = new com.mirantis.mk.Common()
+def python = new com.mirantis.mk.Python()
+def venvPepper = "venvPepper"
+
+node() {
+    try {
+        python.setupPepperVirtualenv(venvPepper, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+
+        stage("Update formulas"){
+            if(UPDATE_FORMULAS.toBoolean()){
+                common.infoMsg("Updating salt formulas")
+                salt.cmdRun(
+                    venvPepper,
+                    "I@salt:master",
+                    'apt-get update && apt-get install -y salt-formula-*'
+                )
+                common.infoMsg("Running salt sync-all")
+                salt.runSaltProcessStep(venvPepper, 'jma*', 'saltutil.sync_all', [], null, true)
+            }
+        }
+        stage("Update Reclass") {
+            common.infoMsg("Updating reclass model")
+            salt.cmdRun(
+                venvPepper,
+                "I@salt:master",
+                'cd /srv/salt/reclass && git pull -r && git submodule update',
+                false
+            )
+
+            salt.enforceState(
+                venvPepper,
+                "I@salt:master",
+                'reclass',
+                true
+            )
+        }
+    } catch (Throwable e) {
+        // If there was an error or exception thrown, the build failed
+        currentBuild.result = "FAILURE"
+        throw e
+    }
+}
\ No newline at end of file
diff --git a/update-salt-master-formulas.groovy b/update-salt-master-formulas.groovy
index f3e7d1c..9d556f0 100644
--- a/update-salt-master-formulas.groovy
+++ b/update-salt-master-formulas.groovy
@@ -10,20 +10,21 @@
 
 def common = new com.mirantis.mk.Common()
 def salt = new com.mirantis.mk.Salt()
+def python = new com.mirantis.mk.Python()
 
-def saltMaster
+def pepperEnv = "pepperEnv"
 def target = ['expression': TARGET_SERVERS, 'type': 'compound']
 def result
 
 node("python") {
     try {
 
-        stage('Connect to Salt master') {
-            saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+        stage('Setup virtualenv for Pepper') {
+            python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
         }
 
         stage('Update Salt formulas') {
-            result = salt.runSaltCommand(saltMaster, 'local', target, 'state.apply', null, 'salt.master.env')
+            result = salt.runSaltCommand(pepperEnv, 'local', target, 'state.apply', null, 'salt.master.env')
             salt.checkResult(result)
         }
 
diff --git a/validate-cloud.groovy b/validate-cloud.groovy
index 5768f59..0ff946d 100644
--- a/validate-cloud.groovy
+++ b/validate-cloud.groovy
@@ -14,43 +14,46 @@
  *   RUN_K8S_TESTS               If not false, run Kubernetes tests
  *   RUN_SPT_TESTS               If not false, run SPT tests
  *   SPT_SSH_USER                The name of the user which should be used for ssh to nodes
- *   SPT_FLOATING_NETWORK        The name of the external(floating) network
  *   SPT_IMAGE                   The name of the image for SPT tests
- *   SPT_USER                    The name of the user for SPT image
+ *   SPT_IMAGE_USER              The name of the user for SPT image
  *   SPT_FLAVOR                  The name of the flavor for SPT image
- *   SPT_AVAILABILITY_ZONE       The name of availability zone
+ *   AVAILABILITY_ZONE           The name of the availability zone
+ *   FLOATING_NETWORK            The name of the external (floating) network
+ *   RALLY_IMAGE                 The name of the image for Rally tests
+ *   RALLY_FLAVOR                The name of the flavor for Rally image
  *   TEST_K8S_API_SERVER         Kubernetes API address
  *   TEST_K8S_CONFORMANCE_IMAGE  Path to docker image with conformance e2e tests
+ *   TEST_K8S_NODE               Kubernetes node to run tests from
+ *   GENERATE_REPORT             If not false, run report generation command
+ *   ACCUMULATE_RESULTS          If true, results from the previous build will be used
  *
  */
 
 common = new com.mirantis.mk.Common()
-salt = new com.mirantis.mk.Salt()
 test = new com.mirantis.mk.Test()
 validate = new com.mirantis.mcp.Validate()
+def python = new com.mirantis.mk.Python()
 
-def saltMaster
+def pepperEnv = "pepperEnv"
 def artifacts_dir = 'validation_artifacts/'
 
 node() {
     try{
-        stage('Initialization') {
-            saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+        stage('Setup virtualenv for Pepper') {
+            python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
         }
 
         stage('Configure') {
-            validate.installDocker(saltMaster, TARGET_NODE)
+            validate.installDocker(pepperEnv, TARGET_NODE)
+            if (ACCUMULATE_RESULTS.toBoolean() == false) {
+                sh "rm -r ${artifacts_dir}"
+            }
             sh "mkdir -p ${artifacts_dir}"
-            def spt_variables = "-e spt_ssh_user=${SPT_SSH_USER} " +
-                    "-e spt_floating_network=${SPT_FLOATING_NETWORK} " +
-                    "-e spt_image=${SPT_IMAGE} -e spt_user=${SPT_USER} " +
-                    "-e spt_flavor=${SPT_FLAVOR} -e spt_availability_zone=${SPT_AVAILABILITY_ZONE} "
-            validate.runContainerConfiguration(saltMaster, TEST_IMAGE, TARGET_NODE, artifacts_dir, spt_variables)
         }
 
         stage('Run Tempest tests') {
             if (RUN_TEMPEST_TESTS.toBoolean() == true) {
-                validate.runTempestTests(saltMaster, TARGET_NODE, artifacts_dir, TEMPEST_TEST_SET)
+                validate.runTempestTests(pepperEnv, TARGET_NODE, TEST_IMAGE, artifacts_dir, TEMPEST_TEST_SET)
             } else {
                 common.infoMsg("Skipping Tempest tests")
             }
@@ -58,7 +61,11 @@
 
         stage('Run Rally tests') {
             if (RUN_RALLY_TESTS.toBoolean() == true) {
-                validate.runRallyTests(saltMaster, TARGET_NODE, artifacts_dir)
+                def rally_variables = ["floating_network=${FLOATING_NETWORK}",
+                                       "rally_image=${RALLY_IMAGE}",
+                                       "rally_flavor=${RALLY_FLAVOR}",
+                                       "availability_zone=${AVAILABILITY_ZONE}"]
+                validate.runRallyTests(pepperEnv, TARGET_NODE, TEST_IMAGE, artifacts_dir, rally_variables)
             } else {
                 common.infoMsg("Skipping Rally tests")
             }
@@ -66,7 +73,13 @@
 
         stage('Run SPT tests') {
             if (RUN_SPT_TESTS.toBoolean() == true) {
-                validate.runSptTests(saltMaster, TARGET_NODE, artifacts_dir)
+                def spt_variables = ["spt_ssh_user=${SPT_SSH_USER}",
+                                     "spt_floating_network=${FLOATING_NETWORK}",
+                                     "spt_image=${SPT_IMAGE}",
+                                     "spt_user=${SPT_IMAGE_USER}",
+                                     "spt_flavor=${SPT_FLAVOR}",
+                                     "spt_availability_zone=${AVAILABILITY_ZONE}"]
+                validate.runSptTests(pepperEnv, TARGET_NODE, TEST_IMAGE, artifacts_dir, spt_variables)
             } else {
                 common.infoMsg("Skipping SPT tests")
             }
@@ -75,13 +88,11 @@
         stage('Run k8s bootstrap tests') {
             if (RUN_K8S_TESTS.toBoolean() == true) {
                 def image = 'tomkukral/k8s-scripts'
-                def output_file = image.replaceAll('/', '-') + '.output'
+                def output_file = 'k8s-bootstrap-tests.txt'
+                def outfile = "/tmp/" + image.replaceAll('/', '-') + '.output'
+                test.runConformanceTests(pepperEnv, TEST_K8S_NODE, TEST_K8S_API_SERVER, image)
 
-                // run image
-                test.runConformanceTests(saltMaster, TEST_K8S_API_SERVER, image)
-
-                // collect output
-                def file_content = salt.getFileContent(saltMaster, 'ctl01*', '/tmp/' + output_file)
+                def file_content = validate.getFileContent(pepperEnv, TEST_K8S_NODE, outfile)
                 writeFile file: "${artifacts_dir}${output_file}", text: file_content
             } else {
                 common.infoMsg("Skipping k8s bootstrap tests")
@@ -91,19 +102,24 @@
         stage('Run k8s conformance e2e tests') {
             if (RUN_K8S_TESTS.toBoolean() == true) {
                 def image = TEST_K8S_CONFORMANCE_IMAGE
-                def output_file = image.replaceAll('/', '-') + '.output'
+                def output_file = 'report-k8s-e2e-tests.txt'
+                def outfile = "/tmp/" + image.replaceAll('/', '-') + '.output'
+                test.runConformanceTests(pepperEnv, TEST_K8S_NODE, TEST_K8S_API_SERVER, image)
 
-                // run image
-                test.runConformanceTests(saltMaster, TEST_K8S_API_SERVER, image)
-
-                // collect output
-                def file_content = salt.getFileContent(saltMaster, 'ctl01*', '/tmp/' + output_file)
+                def file_content = validate.getFileContent(pepperEnv, TEST_K8S_NODE, outfile)
                 writeFile file: "${artifacts_dir}${output_file}", text: file_content
             } else {
                 common.infoMsg("Skipping k8s conformance e2e tests")
             }
         }
-
+        stage('Generate report') {
+            if (GENERATE_REPORT.toBoolean() == true) {
+                common.infoMsg("Generating HTML test report...")
+                validate.generateTestReport(pepperEnv, TARGET_NODE, TEST_IMAGE, artifacts_dir)
+            } else {
+                common.infoMsg("Skipping report generation")
+            }
+        }
         stage('Collect results') {
             archiveArtifacts artifacts: "${artifacts_dir}/*"
         }
@@ -112,7 +128,5 @@
         currentBuild.result = "FAILURE"
         currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
         throw e
-    } finally {
-        validate.runCleanup(saltMaster, TARGET_NODE, artifacts_dir)
     }
 }
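
In the k8s stages above, the conformance container writes its raw output to /tmp/<image with '/' replaced by '-'>.output on the test node, and the pipeline then stores it under a fixed, friendlier name in validation_artifacts/. A short sketch of that mapping, using the image name already present in the pipeline:

def image = 'tomkukral/k8s-scripts'                          // image used by the bootstrap test stage
def outfile = "/tmp/" + image.replaceAll('/', '-') + '.output'
assert outfile == '/tmp/tomkukral-k8s-scripts.output'        // file fetched via validate.getFileContent
def output_file = 'k8s-bootstrap-tests.txt'                  // name stored under validation_artifacts/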
diff --git a/xtrabackup-restore-mysql-db.groovy b/xtrabackup-restore-mysql-db.groovy
index 303c282..50b2bce 100644
--- a/xtrabackup-restore-mysql-db.groovy
+++ b/xtrabackup-restore-mysql-db.groovy
@@ -9,14 +9,14 @@
 
 def common = new com.mirantis.mk.Common()
 def salt = new com.mirantis.mk.Salt()
+def python = new com.mirantis.mk.Python()
 
-
-def saltMaster
+def pepperEnv = "pepperEnv"
 
 node() {
 
-    stage('Connect to Salt API') {
-        saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+    stage('Setup virtualenv for Pepper') {
+        python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
     }
 
     stage('Start restore') {
@@ -27,59 +27,59 @@
         }
         // database restore section
         try {
-            salt.runSaltProcessStep(saltMaster, 'I@galera:slave', 'service.stop', ['mysql'], null, true)
+            salt.runSaltProcessStep(pepperEnv, 'I@galera:slave', 'service.stop', ['mysql'], null, true)
         } catch (Exception er) {
             common.warningMsg('Mysql service already stopped')
         }
         try {
-            salt.runSaltProcessStep(saltMaster, 'I@galera:master', 'service.stop', ['mysql'], null, true)
+            salt.runSaltProcessStep(pepperEnv, 'I@galera:master', 'service.stop', ['mysql'], null, true)
         } catch (Exception er) {
             common.warningMsg('Mysql service already stopped')
         }
         try {
-            salt.cmdRun(saltMaster, 'I@galera:slave', "rm /var/lib/mysql/ib_logfile*")
+            salt.cmdRun(pepperEnv, 'I@galera:slave', "rm /var/lib/mysql/ib_logfile*")
         } catch (Exception er) {
             common.warningMsg('Files are not present')
         }
         try {
-            salt.cmdRun(saltMaster, 'I@galera:master', "mkdir -p /root/mysql/mysql.bak")
+            salt.cmdRun(pepperEnv, 'I@galera:master', "mkdir -p /root/mysql/mysql.bak")
         } catch (Exception er) {
             common.warningMsg('Directory already exists')
         }
         try {
-            salt.cmdRun(saltMaster, 'I@galera:master', "mv /var/lib/mysql/* /root/mysql/mysql.bak")
+            salt.cmdRun(pepperEnv, 'I@galera:master', "mv /var/lib/mysql/* /root/mysql/mysql.bak")
         } catch (Exception er) {
             common.warningMsg('Files were already moved')
         }
         try {
-            salt.cmdRun(saltMaster, 'I@galera:master', "rm -rf /var/lib/mysql/*")
+            salt.cmdRun(pepperEnv, 'I@galera:master', "rm -rf /var/lib/mysql/*")
         } catch (Exception er) {
             common.warningMsg('Directory already empty')
         }
         try {
-            salt.runSaltProcessStep(saltMaster, 'I@galera:master', 'file.remove', ["/var/lib/mysql/.galera_bootstrap"], null, true)
+            salt.runSaltProcessStep(pepperEnv, 'I@galera:master', 'file.remove', ["/var/lib/mysql/.galera_bootstrap"], null, true)
         } catch (Exception er) {
             common.warningMsg('File is not present')
         }
-        salt.cmdRun(saltMaster, 'I@galera:master', "sed -i '/gcomm/c\\wsrep_cluster_address=\"gcomm://\"' /etc/mysql/my.cnf")
-        _pillar = salt.getPillar(saltMaster, "I@galera:master", 'xtrabackup:client:backup_dir')
+        salt.cmdRun(pepperEnv, 'I@galera:master', "sed -i '/gcomm/c\\wsrep_cluster_address=\"gcomm://\"' /etc/mysql/my.cnf")
+        _pillar = salt.getPillar(pepperEnv, "I@galera:master", 'xtrabackup:client:backup_dir')
         backup_dir = _pillar['return'][0].values()[0]
         if(backup_dir == null || backup_dir.isEmpty()) { backup_dir='/var/backups/mysql/xtrabackup' }
         print(backup_dir)
-        salt.runSaltProcessStep(saltMaster, 'I@galera:master', 'file.remove', ["${backup_dir}/dbrestored"], null, true)
-        salt.cmdRun(saltMaster, 'I@xtrabackup:client', "su root -c 'salt-call state.sls xtrabackup'")
-        salt.runSaltProcessStep(saltMaster, 'I@galera:master', 'service.start', ['mysql'], null, true)
+        salt.runSaltProcessStep(pepperEnv, 'I@galera:master', 'file.remove', ["${backup_dir}/dbrestored"], null, true)
+        salt.cmdRun(pepperEnv, 'I@xtrabackup:client', "su root -c 'salt-call state.sls xtrabackup'")
+        salt.runSaltProcessStep(pepperEnv, 'I@galera:master', 'service.start', ['mysql'], null, true)
 
         // wait until mysql service on galera master is up
-        salt.commandStatus(saltMaster, 'I@galera:master', 'service mysql status', 'running')
+        salt.commandStatus(pepperEnv, 'I@galera:master', 'service mysql status', 'running')
 
-        salt.runSaltProcessStep(saltMaster, 'I@galera:slave', 'service.start', ['mysql'], null, true)
+        salt.runSaltProcessStep(pepperEnv, 'I@galera:slave', 'service.start', ['mysql'], null, true)
         try {
-            salt.commandStatus(saltMaster, 'I@galera:slave', 'service mysql status', 'running')
+            salt.commandStatus(pepperEnv, 'I@galera:slave', 'service mysql status', 'running')
         } catch (Exception er) {
             common.warningMsg('Either there are no galera slaves or something failed when starting mysql on galera slaves')
         }
         sleep(5)
-        salt.cmdRun(saltMaster, 'I@galera:master', "su root -c 'salt-call mysql.status | grep -A1 wsrep_cluster_size'")
+        salt.cmdRun(pepperEnv, 'I@galera:master', "su root -c 'salt-call mysql.status | grep -A1 wsrep_cluster_size'")
     }
 }
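
The restore above reads the xtrabackup backup directory from pillar and falls back to a default path when the key is not defined. A minimal sketch of that lookup-with-default pattern, using the same helpers and pillar key as the diff:

def _pillar = salt.getPillar(pepperEnv, "I@galera:master", 'xtrabackup:client:backup_dir')
def backup_dir = _pillar['return'][0].values()[0]
if (backup_dir == null || backup_dir.isEmpty()) {
    backup_dir = '/var/backups/mysql/xtrabackup'   // default when the pillar key is absent
}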