Merge "Update classes to new Galera.groovy class" into release/proposed/2019.2.0
diff --git a/backupninja-backup-pipeline.groovy b/backupninja-backup-pipeline.groovy
new file mode 100644
index 0000000..80467d4
--- /dev/null
+++ b/backupninja-backup-pipeline.groovy
@@ -0,0 +1,62 @@
+def common = new com.mirantis.mk.Common()
+def salt = new com.mirantis.mk.Salt()
+def python = new com.mirantis.mk.Python()
+def pepperEnv = "pepperEnv"
+
+timeout(time: 12, unit: 'HOURS') {
+    node() {
+        stage('Setup virtualenv for Pepper') {
+            python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+        }
+        stage('Check backup location') {
+            try{
+              backupNode = salt.getMinions(pepperEnv, "I@backupninja:client")[0]
+              salt.minionsReachable(pepperEnv, "I@salt:master", backupNode)
+            }
+            catch (Exception e) {
+                common.errorMsg(e.getMessage())
+                common.errorMsg("Pipeline wasn't able to detect backupninja:client pillar or the minion is not reachable")
+                currentBuild.result = "FAILURE"
+                return
+            }
+            try{
+              backupServer = salt.getMinions(pepperEnv, "I@backupninja:server")[0]
+              salt.minionsReachable(pepperEnv, "I@salt:master", backupServer)
+            }
+            catch (Exception e) {
+                common.errorMsg(e.getMessage())
+                common.errorMsg("Pipeline wasn't able to detect backupninja:server pillar or the minion is not reachable")
+                currentBuild.result = "FAILURE"
+                return
+            }
+        }
+        stage ('Prepare for backup') {
+                salt.enforceState(['saltId': pepperEnv, 'target': 'I@backupninja:server', 'state': 'backupninja'])
+                salt.enforceState(['saltId': pepperEnv, 'target': 'I@backupninja:client', 'state': 'backupninja'])
+        }
+        stage('Backup') {
+            def output = salt.getReturnValues(salt.cmdRun(pepperEnv, backupNode, "su root -c 'backupninja --now -d'")).readLines()[-2]
+            def outputPattern = java.util.regex.Pattern.compile("\\d+")
+            def outputMatcher = outputPattern.matcher(output)
+            if (outputMatcher.find()) {
+                try {
+                    result = outputMatcher.getAt([0, 1, 2, 3])
+                }
+                catch (Exception e) {
+                    common.errorMsg(e.getMessage())
+                    common.errorMsg("Parsing failed.")
+                    currentBuild.result = "FAILURE"
+                    return
+                }
+            }
+            if (result[1] != null && result[1] instanceof String && result[1].isInteger() && (result[1].toInteger() < 1)){
+              common.successMsg("Backup successfully finished " + result[1] + " fatals, " + result[2] + " errors " + result[3] +" warnings.")
+            }
+            else {
+                common.errorMsg("Backup failed. Found " + result[1] + " fatals, " + result[2] + " errors " + result[3] +" warnings.")
+                currentBuild.result = "FAILURE"
+                return
+            }
+        }
+    }
+}
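The Backup stage grades the run by pulling every integer out of the summary line that backupninja prints in debug mode (the second-to-last line of its output). A standalone sketch of that extraction; the sample line format is an assumption, not verbatim backupninja output:

    import java.util.regex.Pattern

    // Assumed shape of the summary line read via readLines()[-2] above
    def output = 'Info: FINISHED: 1 actions run. 0 fatal. 0 error. 2 warning'
    def matcher = Pattern.compile('\\d+').matcher(output)
    def nums = []
    while (matcher.find()) { nums << matcher.group() }   // same data getAt([0,1,2,3]) yields
    assert nums == ['1', '0', '0', '2']
    // nums[1..3] map to result[1..3] above: fatals, errors, warnings;
    // the success branch requires the fatals counter to be below 1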
diff --git a/backupninja-restore-pipeline.groovy b/backupninja-restore-pipeline.groovy
new file mode 100644
index 0000000..b38cd6a
--- /dev/null
+++ b/backupninja-restore-pipeline.groovy
@@ -0,0 +1,55 @@
+def common = new com.mirantis.mk.Common()
+def salt = new com.mirantis.mk.Salt()
+def python = new com.mirantis.mk.Python()
+def pepperEnv = "pepperEnv"
+
+timeout(time: 12, unit: 'HOURS') {
+    node() {
+        stage('Setup virtualenv for Pepper') {
+            python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+        }
+        stage('Salt-Master restore') {
+            common.infoMsg('Verify pillar for salt-master backups')
+            try {
+                def masterPillar = salt.getPillar(pepperEnv, "I@salt:master", 'salt:master:initial_data')
+                if(masterPillar['return'].isEmpty()) {
+                    throw new Exception('Problem with salt-master pillar.')
+                }
+                def minionPillar = salt.getPillar(pepperEnv, "I@salt:master", 'salt:minion:initial_data')
+                if(minionPillar['return'].isEmpty()) {
+                    throw new Exception('Problem with salt-minion pillar.')
+                }
+            }
+            catch (Exception e){
+                common.errorMsg(e.getMessage())
+                common.errorMsg('Please fix your pillar. For more information check docs: https://docs.mirantis.com/mcp/latest/mcp-operations-guide/backup-restore/salt-master/salt-master-restore.html')
+                return
+            }
+            common.infoMsg('Performing restore')
+            salt.enforceState(['saltId': pepperEnv, 'target': 'I@salt:master', 'state': 'salt.master.restore'])
+            salt.enforceState(['saltId': pepperEnv, 'target': 'I@salt:master', 'state': 'salt.minion.restore'])
+            salt.fullRefresh(pepperEnv, '*')
+
+            common.infoMsg('Validating output')
+            common.infoMsg('Salt-Keys')
+            salt.cmdRun(pepperEnv, 'I@salt:master', "salt-key")
+            common.infoMsg('Salt-master CA')
+            salt.cmdRun(pepperEnv, 'I@salt:master', "ls -la /etc/pki/ca/salt_master_ca/")
+        }
+        stage('MaaS Restore') {
+            common.infoMsg('Verify pillar for MaaS backup')
+            try {
+                def maaSPillar = salt.getPillar(pepperEnv, "I@maas:server", 'maas:region:database:initial_data')
+                if(maaSPillar['return'].isEmpty()) {
+                    throw new Exception('Problem with MaaS pillar.')
+                }
+            }
+            catch (Exception e){
+                common.errorMsg(e.getMessage())
+                common.errorMsg('Please fix your pillar. For more information check docs: https://docs.mirantis.com/mcp/latest/mcp-operations-guide/backup-restore/backupninja-postgresql/backupninja-postgresql-restore.html')
+                return
+            }
+            salt.enforceState(['saltId': pepperEnv, 'target': 'I@maas:region', 'state': 'maas.region'])
+        }
+    }
+}
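All three pillar checks in this pipeline share one shape: getPillar returns a map whose 'return' key holds a list of per-minion results, and an empty list means the pillar could not be rendered. A hypothetical helper capturing the pattern (assumed name, not part of the com.mirantis.mk.Salt API):

    // Sketch only; 'salt' is the com.mirantis.mk.Salt() instance from above
    def assertPillar(saltId, target, key) {
        def out = salt.getPillar(saltId, target, key)
        if (out['return'].isEmpty() || !out['return'][0].values()[0]) {
            throw new Exception("Pillar ${key} is missing or empty on ${target}.")
        }
    }
    // usage: assertPillar(pepperEnv, 'I@salt:master', 'salt:master:initial_data')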
diff --git a/ceph-remove-node.groovy b/ceph-remove-node.groovy
index e616a28..0fba6a0 100644
--- a/ceph-remove-node.groovy
+++ b/ceph-remove-node.groovy
@@ -90,6 +90,10 @@
             stage('Remove Ceph RGW') {
                 salt.enforceState(pepperEnv, 'I@ceph:radosgw', ['keepalived', 'haproxy'], true)
             }
+
+            stage('Purge Ceph RGW pkgs') {
+                salt.runSaltProcessStep(pepperEnv, HOST, 'pkg.purge', 'ceph-common,libcephfs2,python-cephfs,radosgw,python-rados,python-rbd,python-rgw')
+            }
         }
 
         if (HOST_TYPE.toLowerCase() != 'osd') {
@@ -222,7 +226,7 @@
 
             // purge Ceph pkgs
             stage('Purge Ceph OSD pkgs') {
-                runCephCommand(pepperEnv, HOST, 'apt purge ceph-base ceph-common ceph-fuse ceph-mds ceph-osd python-cephfs librados2 python-rados -y')
+                salt.runSaltProcessStep(pepperEnv, HOST, 'pkg.purge', 'ceph-base,ceph-common,ceph-fuse,ceph-mds,ceph-osd,python-cephfs,librados2,python-rados,python-rbd,python-rgw')
             }
 
             stage('Remove OSD host from crushmap') {
@@ -294,6 +298,10 @@
                     salt.enforceState(pepperEnv, tgt, 'ceph.common', true)
                 }
             }
+
+            stage('Purge Ceph MON pkgs') {
+                salt.runSaltProcessStep(pepperEnv, HOST, 'pkg.purge', 'ceph-base,ceph-common,ceph-mgr,ceph-mon,libcephfs2,python-cephfs,python-rbd,python-rgw')
+            }
         }
 
         if (HOST_TYPE.toLowerCase() == 'osd' && GENERATE_CRUSHMAP.toBoolean() == true) {
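The new purge stages hand the package set to pkg.purge as one comma-separated string, which Salt's pkg module splits into individual package names. A minimal sketch with an assumed minion ID:

    // Equivalent one-off from the Salt master CLI:
    //   salt 'osd001*' pkg.purge ceph-base,ceph-common
    salt.runSaltProcessStep(pepperEnv, 'osd001*', 'pkg.purge', 'ceph-base,ceph-common')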
diff --git a/ceph-upgrade.groovy b/ceph-upgrade.groovy
index 86a1f0f..c4881bc 100644
--- a/ceph-upgrade.groovy
+++ b/ceph-upgrade.groovy
@@ -16,6 +16,9 @@
  *  STAGE_UPGRADE_OSD               Set to True if Ceph osd nodes upgrade is desired
  *  STAGE_UPGRADE_RGW               Set to True if Ceph rgw nodes upgrade is desired
  *  STAGE_UPGRADE_CLIENT            Set to True if Ceph client nodes upgrade is desired (includes for example ctl/cmp nodes)
+ *  STAGE_FINALIZE                  Set to True to apply configs recommended for TARGET_RELEASE after the upgrade is done
+ *  BACKUP_ENABLED                  Select to copy the disks of Ceph VMs before the upgrade and back up Ceph directories on OSD nodes
+ *  BACKUP_DIR                      Target directory for the backups when BACKUP_ENABLED is set
  *
  */
 
@@ -71,12 +74,12 @@
 
                 waitForHealthy(master)
                 try {
-                    salt.cmdRun(master, "${minionProvider}", "[ ! -f /root/${minion_name}.${domain}.qcow2.bak ] && virsh destroy ${minion_name}.${domain}")
+                    salt.cmdRun(master, "${minionProvider}", "[ ! -f ${BACKUP_DIR}/${minion_name}.${domain}.qcow2.bak ] && virsh destroy ${minion_name}.${domain}")
                 } catch (Exception e) {
                     common.warningMsg('Backup already exists')
                 }
                 try {
-                    salt.cmdRun(master, "${minionProvider}", "[ ! -f /root/${minion_name}.${domain}.qcow2.bak ] && cp /var/lib/libvirt/images/${minion_name}.${domain}/system.qcow2 /root/${minion_name}.${domain}.qcow2.bak")
+                    salt.cmdRun(master, "${minionProvider}", "[ ! -f ${BACKUP_DIR}/${minion_name}.${domain}.qcow2.bak ] && cp /var/lib/libvirt/images/${minion_name}.${domain}/system.qcow2 ${BACKUP_DIR}/${minion_name}.${domain}.qcow2.bak")
                 } catch (Exception e) {
                     common.warningMsg('Backup already exists')
                 }
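The BACKUP_DIR change keeps the existing idempotency trick: the shell guard `[ ! -f <backup> ] && <action>` exits non-zero once the backup exists, cmdRun raises on the non-zero exit, and the catch block downgrades that to a warning. The same pattern in isolation (paths and targets assumed):

    // Re-running is safe: the copy happens only while no .bak file exists yet
    try {
        salt.cmdRun(master, 'kvm01*', "[ ! -f /srv/backup/ctl01.qcow2.bak ] && " +
            "cp /var/lib/libvirt/images/ctl01/system.qcow2 /srv/backup/ctl01.qcow2.bak")
    } catch (Exception e) {
        common.warningMsg('Backup already exists')
    }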
diff --git a/cloud-deploy-pipeline.groovy b/cloud-deploy-pipeline.groovy
index 6b6ec4e..08ebbd1 100644
--- a/cloud-deploy-pipeline.groovy
+++ b/cloud-deploy-pipeline.groovy
@@ -334,6 +334,9 @@
             // Install
             //
 
+            // Check if all minions are reachable and ready
+            salt.checkTargetMinionsReady(['saltId': venvPepper, 'target': '*'])
+
             if (common.checkContains('STACK_INSTALL', 'core')) {
                 stage('Install core infrastructure') {
                     def staticMgmtNetwork = false
diff --git a/cvp-func.groovy b/cvp-func.groovy
index 4a231dc..80160ab 100644
--- a/cvp-func.groovy
+++ b/cvp-func.groovy
@@ -36,6 +36,10 @@
               TARGET_NODE = "I@gerrit:client"
             }
             saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+            os_version=salt.getPillar(saltMaster, 'I@salt:master', '_param:openstack_version')['return'][0].values()[0]
+            if (!os_version) {
+                throw new Exception("Openstack is not found on this env. Exiting")
+            }
             salt.cmdRun(saltMaster, TARGET_NODE, "rm -rf ${remote_artifacts_dir}")
             salt.cmdRun(saltMaster, TARGET_NODE, "mkdir -p ${remote_artifacts_dir}")
             keystone_creds = validate._get_keystone_creds_v3(saltMaster)
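The chained lookup `['return'][0].values()[0]` used here (and in the identical checks added to cvp-ha, cvp-perf and cvp-tempest below) unpacks the Salt API envelope: 'return' holds a one-element list of {minion-id: value} maps. A sketch with an assumed payload:

    // Assumed shape of salt.getPillar(...) output
    def resp = ['return': [['cfg01.local': 'queens']]]
    def os_version = resp['return'][0].values()[0]
    assert os_version == 'queens'
    // an unset pillar renders as '' or null, so `if (!os_version)` catches both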
diff --git a/cvp-ha.groovy b/cvp-ha.groovy
index e96a34c..e933984 100644
--- a/cvp-ha.groovy
+++ b/cvp-ha.groovy
@@ -42,6 +42,10 @@
                   TEMPEST_TARGET_NODE = "I@gerrit:client"
                 }
                 saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+                os_version=salt.getPillar(saltMaster, 'I@salt:master', '_param:openstack_version')['return'][0].values()[0]
+                if (!os_version) {
+                    throw new Exception("Openstack is not found on this env. Exiting")
+                }
                 salt.cmdRun(saltMaster, TEMPEST_TARGET_NODE, "rm -rf ${remote_artifacts_dir}")
                 salt.cmdRun(saltMaster, TEMPEST_TARGET_NODE, "mkdir -p ${remote_artifacts_dir}")
                 keystone_creds = validate._get_keystone_creds_v3(saltMaster)
diff --git a/cvp-perf.groovy b/cvp-perf.groovy
index 62f5226..ebb7987 100644
--- a/cvp-perf.groovy
+++ b/cvp-perf.groovy
@@ -32,19 +32,24 @@
               TARGET_NODE = "I@gerrit:client"
             }
             saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+            os_version=salt.getPillar(saltMaster, 'I@salt:master', '_param:openstack_version')['return'][0].values()[0]
+            if (!os_version) {
+                throw new Exception("Openstack is not found on this env. Exiting")
+            }
+            container_name = "${env.JOB_NAME}"
             salt.cmdRun(saltMaster, TARGET_NODE, "rm -rf ${remote_artifacts_dir}")
             salt.cmdRun(saltMaster, TARGET_NODE, "mkdir -p ${remote_artifacts_dir}")
             keystone_creds = validate._get_keystone_creds_v3(saltMaster)
             if (!keystone_creds) {
                 keystone_creds = validate._get_keystone_creds_v2(saltMaster)
             }
-            validate.runContainer(saltMaster, TARGET_NODE, TEST_IMAGE, 'cvp', keystone_creds)
-            validate.configureContainer(saltMaster, TARGET_NODE, PROXY, TOOLS_REPO, "")
+            validate.runContainer(saltMaster, TARGET_NODE, TEST_IMAGE, container_name, keystone_creds)
+            validate.configureContainer(saltMaster, TARGET_NODE, PROXY, TOOLS_REPO, "", "internalURL", "", "", [], container_name)
         }
 
         stage('Run Rally tests') {
             sh "mkdir -p ${artifacts_dir}"
-            validate.runCVPrally(saltMaster, TARGET_NODE, RALLY_SCENARIO_FILE, remote_artifacts_dir)
+            validate.runCVPrally(saltMaster, TARGET_NODE, RALLY_SCENARIO_FILE, remote_artifacts_dir, "docker-rally", container_name)
         }
 
         stage('Collect results') {
@@ -59,7 +64,8 @@
         throw e
     } finally {
         if (DEBUG_MODE == 'false') {
-            validate.runCleanup(saltMaster, TARGET_NODE)
+            validate.openstack_cleanup(saltMaster, TARGET_NODE, container_name)
+            validate.runCleanup(saltMaster, TARGET_NODE, container_name)
             salt.cmdRun(saltMaster, TARGET_NODE, "rm -rf ${remote_artifacts_dir}")
         }
     }
diff --git a/cvp-runner.groovy b/cvp-runner.groovy
index 22aa9d0..b9649d5 100644
--- a/cvp-runner.groovy
+++ b/cvp-runner.groovy
@@ -5,148 +5,126 @@
  * Expected parameters:
  *   SALT_MASTER_URL                 URL of Salt master
  *   SALT_MASTER_CREDENTIALS         Credentials to the Salt API
+ *   IMAGE                           Docker image to use for running the container with the test framework.
+ *   EXTRA_PARAMS                    YAML context with additional settings for the job
  *
- *   TESTS_SET                       Leave empty for full run or choose a file (test)
- *   TESTS_REPO                      Repo to clone
- *   TESTS_SETTINGS                  Additional environment varibales to apply
- *   PROXY                           Proxy to use for cloning repo or for pip
- *   IMAGE                           Docker image to use for running container with test framework.
- *   DEBUG_MODE                      If you need to debug (keep container after test), please enabled this
- *  To launch tests from docker images need to set IMAGE and left TESTS_REPO empty
  */
 
 common = new com.mirantis.mk.Common()
 validate = new com.mirantis.mcp.Validate()
-salt = new com.mirantis.mk.Salt()
 salt_testing = new com.mirantis.mk.SaltModelTesting()
-def artifacts_dir = "validation_artifacts"
-def remote_dir = '/root/qa_results'
-def container_workdir = '/var/lib'
-def container_name = "${env.JOB_NAME}"
-def xml_file = "${container_name}_report.xml"
-def TARGET_NODE = "I@gerrit:client"
-def reinstall_env = false
 
-def saltMaster
-def settings
+def EXTRA_PARAMS = readYaml(text: env.getProperty('EXTRA_PARAMS')) ?: [:]
+def env_vars = EXTRA_PARAMS.get("envs") ?: []
 
-slaveNode = (env.getProperty('SLAVE_NODE')) ?: 'docker'
-imageName = (env.getProperty('IMAGE')) ?: 'docker-prod-local.docker.mirantis.net/mirantis/cvp/cvp-spt:stable'
+def IMAGE = (env.getProperty('IMAGE')) ?: 'docker-prod-local.docker.mirantis.net/mirantis/cvp/cvp-sanity-checks:stable'
+def SLAVE_NODE = (env.getProperty('SLAVE_NODE')) ?: 'docker'
 
-node(slaveNode) {
-    try{
-        stage('Initialization') {
-            sh "rm -rf ${artifacts_dir}"
-            // TODO collaps TESTS_SETTINGS flow into EXTRA variables map
-            if ( TESTS_SETTINGS != "" ) {
-                for (var in TESTS_SETTINGS.tokenize(";")) {
-                    key = var.tokenize("=")[0].trim()
-                    value = var.tokenize("=")[1].trim()
-                    if (key == 'TARGET_NODE') {
-                        TARGET_NODE = value
-                        common.infoMsg("Node for container is set to ${TARGET_NODE}")
-                    }
-                    if (key == 'REINSTALL_ENV') {
-                        reinstall_env = value.toBoolean()
-                    }
-                }
-            }
-            if ( IMAGE == "" ) {
-                common.infoMsg("Env for tests will be built on Jenkins slave")
-                TARGET_NODE = ""
-                validate.prepareVenv(TESTS_REPO, PROXY)
-            } else {
-                saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
-                salt.cmdRun(saltMaster, TARGET_NODE, "rm -rf ${remote_dir}/")
-                salt.cmdRun(saltMaster, TARGET_NODE, "mkdir -p ${remote_dir}/")
-                validate.runContainer(saltMaster, TARGET_NODE, IMAGE, container_name)
-                if ( TESTS_REPO != "") {
-                    salt.cmdRun(saltMaster, TARGET_NODE, "docker exec ${container_name} rm -rf ${container_workdir}/${container_name}")
-                    salt.cmdRun(saltMaster, TARGET_NODE, "docker exec ${container_name} git clone ${TESTS_REPO} ${container_workdir}/${container_name}")
-                    TESTS_SET = container_workdir + '/' + container_name + '/' + TESTS_SET
-                    if ( reinstall_env ) {
-                        common.infoMsg("Pip packages in container will be reinstalled based on requirements.txt from ${TESTS_REPO}")
-                        salt.cmdRun(saltMaster, TARGET_NODE, "docker exec ${container_name} pip install --force-reinstall -r ${container_workdir}/${container_name}/requirements.txt")
-                    }
-                }
-            }
-        }
+/*
+YAML example
+=====
+# commands is a map of commands which looks like step_name: shell_command
+commands:
+  001_prepare: rm /var/lib/g.txt
+  002_prepare: git clone http://repo_with_tests.git
+  003_test: cd repo_with_tests && pytest /var/lib/ --collect-only
+  004_collect: cp cvp-spt /var/lib/validation_artifacts/
+# envs is a list of new environment variables
+envs:
+  - SALT_USERNAME=admin
+  - SALT_PASSWORD=password
+  - drivetrain_version=testing
+*/
 
-        stage('Run Tests') {
-            def creds = common.getCredentials(SALT_MASTER_CREDENTIALS)
-            def username = creds.username
-            def password = creds.password
-            def script = "pytest --junitxml ${container_workdir}/${artifacts_dir}/${xml_file} --tb=short -sv ${container_workdir}/${TESTS_SET} -vv"
+node (SLAVE_NODE) {
+    def artifacts_dir = 'validation_artifacts'
+    def test_suite_name = "${env.JOB_NAME}"
+    def xml_file = "${test_suite_name}_report.xml"
 
-            sh "mkdir -p ${artifacts_dir}"
+    def configRun = [:]
+    try {
+        withEnv(env_vars) {
+            stage('Initialization') {
+                def container_workdir = '/var/lib'
+                def workdir = "${container_workdir}/${test_suite_name}"
+                def tests_set = (env.getProperty('tests_set')) ?: ''
+                def script = "pytest --junitxml ${container_workdir}/${artifacts_dir}/${xml_file} --tb=short -vv ${tests_set}"
 
-            def configRun = [
-                'image': imageName,
-                'baseRepoPreConfig': false,
-                'dockerMaxCpus': 2,
-                'dockerExtraOpts' : [
-                    "--network=host",
-                    "-v /root/qa_results/:/root/qa_results/",
-                    "-v ${env.WORKSPACE}/${artifacts_dir}/:${container_workdir}/${artifacts_dir}/",
-                    // TODO remove if all docker images with tests (like cvp-spt) will be transferred into new architucture (like cvp-sanity)
-                    "--entrypoint=''",  // to override ENTRYPOINT=/bin/bash in Dockerfile of image
-                ],
+                sh "mkdir -p ${artifacts_dir}"
 
-                'envOpts'         : [
-                    "SALT_USERNAME=${username}",
-                    "SALT_PASSWORD=${password}",
-                    "SALT_URL=${SALT_MASTER_URL}"
-                ] + TESTS_SETTINGS.replaceAll('\\"', '').tokenize(";"),
-                'runCommands'     : [
-                      '010_start_tests'    : {
-                          sh("cd ${container_workdir} && ${script}")
-                      }
-                  ]
+                // Enrichment for docker commands
+                def commands = EXTRA_PARAMS.get("commands") ?: ['010_start_tests': "cd ${workdir} && with_venv.sh ${script}"]
+                def commands_list = commands.collectEntries{ [ (it.key) : { sh("${it.value}") } ] }
+
+                // Enrichment for env variables
+                def creds = common.getCredentials(SALT_MASTER_CREDENTIALS)
+                def env_vars_list  =  [
+                    "SALT_USERNAME=${creds.username}",
+                    "SALT_PASSWORD=${creds.password}",
+                    "SALT_URL=${SALT_MASTER_URL}",
+                    "REQUESTS_CA_BUNDLE=/etc/ssl/certs/ca-certificates.crt"
+                    ] + env_vars
+
+                // Generating final config
+                configRun = [
+                    'image': IMAGE,
+                    'baseRepoPreConfig': false,
+                    'dockerMaxCpus': 2,
+                    'dockerExtraOpts' : [
+                        "--network=host",
+                        "-v /root/qa_results/:/root/qa_results/",
+                        "-v /etc/ssl/certs/:/etc/ssl/certs/:ro",
+                        "-v ${env.WORKSPACE}/${artifacts_dir}/:${container_workdir}/${artifacts_dir}/",
+                    ],
+                    'envOpts'         : env_vars_list,
+                    'runCommands'     : commands_list
                 ]
-            salt_testing.setupDockerAndTest(configRun)
-        }
+            }
 
-        stage ('Publish results') {
-            archiveArtifacts artifacts: "${artifacts_dir}/*"
-            junit "${artifacts_dir}/*.xml"
-            if (env.JOB_NAME.contains("cvp-spt")) {
-                plot csvFileName: 'plot-8634d2fe-dc48-4713-99f9-b69a381483aa.csv',
-                     group: 'SPT',
-                     style: 'line',
-                     title: 'SPT Glance results',
-                     xmlSeries: [[
-                     file: "${env.JOB_NAME}_report.xml",
-                     nodeType: 'NODESET',
-                     url: '',
-                     xpath: '/testsuite/testcase[@name="test_speed_glance"]/properties/property']]
-                plot csvFileName: 'plot-8634d2fe-dc48-4713-99f9-b69a381483bb.csv',
-                     group: 'SPT',
-                     style: 'line',
-                     title: 'SPT HW2HW results',
-                     xmlSeries: [[
-                     file: "${env.JOB_NAME}_report.xml",
-                     nodeType: 'NODESET',
-                     url: '',
-                     xpath: '/testsuite/testcase[@classname="cvp_spt.tests.test_hw2hw"]/properties/property']]
-                plot csvFileName: 'plot-8634d2fe-dc48-4713-99f9-b69a381483bc.csv',
-                     group: 'SPT',
-                     style: 'line',
-                     title: 'SPT VM2VM results',
-                     xmlSeries: [[
-                     file: "${env.JOB_NAME}_report.xml",
-                     nodeType: 'NODESET',
-                     url: '',
-                     xpath: '/testsuite/testcase[@classname="cvp_spt.tests.test_vm2vm"]/properties/property']]
+            stage('Run Tests') {
+                salt_testing.setupDockerAndTest(configRun)
+            }
+
+            stage ('Publish results') {
+                archiveArtifacts artifacts: "${artifacts_dir}/*"
+                junit "${artifacts_dir}/*.xml"
+                if (env.JOB_NAME.contains("cvp-spt")) {
+                    plot csvFileName: 'plot-glance.csv',
+                        group: 'SPT',
+                        style: 'line',
+                        title: 'SPT Glance results',
+                        xmlSeries: [[
+                        file: "${artifacts_dir}/${xml_file}",
+                        nodeType: 'NODESET',
+                        url: '',
+                        xpath: '/testsuite/testcase[@classname="tests.test_glance"]/properties/property']]
+                    plot csvFileName: 'plot-hw2hw.csv',
+                        group: 'SPT',
+                        style: 'line',
+                        title: 'SPT HW2HW results',
+                        xmlSeries: [[
+                        file: "${artifacts_dir}/${xml_file}",
+                        nodeType: 'NODESET',
+                        url: '',
+                        xpath: '/testsuite/testcase[@classname="tests.test_hw2hw"]/properties/property']]
+                    plot csvFileName: 'plot-vm2vm.csv',
+                        group: 'SPT',
+                        style: 'line',
+                        title: 'SPT VM2VM results',
+                        xmlSeries: [[
+                        file: "${artifacts_dir}/${xml_file}",
+                        nodeType: 'NODESET',
+                        url: '',
+                        xpath: '/testsuite/testcase[@classname="tests.test_vm2vm"]/properties/property']]
+                }
             }
         }
-    } catch (Throwable e) {
-        // If there was an error or exception thrown, the build failed
+    }
+    catch (Throwable e) {
         currentBuild.result = "FAILURE"
         throw e
-    } finally {
-        if (DEBUG_MODE == 'false') {
-            validate.runCleanup(saltMaster, TARGET_NODE, container_name)
-            salt.cmdRun(saltMaster, TARGET_NODE, "rm -rf ${remote_dir}")
-        }
+    }
+    finally {
+        sh "rm -rf ${artifacts_dir}"
     }
 }
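Each entry of the EXTRA_PARAMS 'commands' map becomes a named step that setupDockerAndTest presumably runs in key order, hence the numeric prefixes in the YAML example above. A minimal sketch of the same transformation (step names assumed); explicit k, v parameters keep the capture unambiguous inside nested closures:

    def commands = ['001_prepare': 'echo prepare', '002_test': 'echo run tests']
    // one closure per step, keyed by step name
    def commands_list = commands.collectEntries { k, v -> [(k): { sh(v) }] }
    // commands_list['001_prepare']() would execute sh('echo prepare')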
diff --git a/cvp-runner_old.groovy b/cvp-runner_old.groovy
new file mode 100644
index 0000000..22aa9d0
--- /dev/null
+++ b/cvp-runner_old.groovy
@@ -0,0 +1,152 @@
+/**
+ *
+ * Launch pytest frameworks in Jenkins
+ *
+ * Expected parameters:
+ *   SALT_MASTER_URL                 URL of Salt master
+ *   SALT_MASTER_CREDENTIALS         Credentials to the Salt API
+ *
+ *   TESTS_SET                       Leave empty for full run or choose a file (test)
+ *   TESTS_REPO                      Repo to clone
+ *   TESTS_SETTINGS                  Additional environment variables to apply
+ *   PROXY                           Proxy to use for cloning repo or for pip
+ *   IMAGE                           Docker image to use for running container with test framework.
+ *   DEBUG_MODE                      If you need to debug (keep the container after the test), please enable this
+ *  To launch tests from a docker image, set IMAGE and leave TESTS_REPO empty
+ */
+
+common = new com.mirantis.mk.Common()
+validate = new com.mirantis.mcp.Validate()
+salt = new com.mirantis.mk.Salt()
+salt_testing = new com.mirantis.mk.SaltModelTesting()
+def artifacts_dir = "validation_artifacts"
+def remote_dir = '/root/qa_results'
+def container_workdir = '/var/lib'
+def container_name = "${env.JOB_NAME}"
+def xml_file = "${container_name}_report.xml"
+def TARGET_NODE = "I@gerrit:client"
+def reinstall_env = false
+
+def saltMaster
+def settings
+
+slaveNode = (env.getProperty('SLAVE_NODE')) ?: 'docker'
+imageName = (env.getProperty('IMAGE')) ?: 'docker-prod-local.docker.mirantis.net/mirantis/cvp/cvp-spt:stable'
+
+node(slaveNode) {
+    try{
+        stage('Initialization') {
+            sh "rm -rf ${artifacts_dir}"
+            // TODO collapse TESTS_SETTINGS flow into EXTRA variables map
+            if ( TESTS_SETTINGS != "" ) {
+                for (var in TESTS_SETTINGS.tokenize(";")) {
+                    key = var.tokenize("=")[0].trim()
+                    value = var.tokenize("=")[1].trim()
+                    if (key == 'TARGET_NODE') {
+                        TARGET_NODE = value
+                        common.infoMsg("Node for container is set to ${TARGET_NODE}")
+                    }
+                    if (key == 'REINSTALL_ENV') {
+                        reinstall_env = value.toBoolean()
+                    }
+                }
+            }
+            if ( IMAGE == "" ) {
+                common.infoMsg("Env for tests will be built on Jenkins slave")
+                TARGET_NODE = ""
+                validate.prepareVenv(TESTS_REPO, PROXY)
+            } else {
+                saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+                salt.cmdRun(saltMaster, TARGET_NODE, "rm -rf ${remote_dir}/")
+                salt.cmdRun(saltMaster, TARGET_NODE, "mkdir -p ${remote_dir}/")
+                validate.runContainer(saltMaster, TARGET_NODE, IMAGE, container_name)
+                if ( TESTS_REPO != "") {
+                    salt.cmdRun(saltMaster, TARGET_NODE, "docker exec ${container_name} rm -rf ${container_workdir}/${container_name}")
+                    salt.cmdRun(saltMaster, TARGET_NODE, "docker exec ${container_name} git clone ${TESTS_REPO} ${container_workdir}/${container_name}")
+                    TESTS_SET = container_workdir + '/' + container_name + '/' + TESTS_SET
+                    if ( reinstall_env ) {
+                        common.infoMsg("Pip packages in container will be reinstalled based on requirements.txt from ${TESTS_REPO}")
+                        salt.cmdRun(saltMaster, TARGET_NODE, "docker exec ${container_name} pip install --force-reinstall -r ${container_workdir}/${container_name}/requirements.txt")
+                    }
+                }
+            }
+        }
+
+        stage('Run Tests') {
+            def creds = common.getCredentials(SALT_MASTER_CREDENTIALS)
+            def username = creds.username
+            def password = creds.password
+            def script = "pytest --junitxml ${container_workdir}/${artifacts_dir}/${xml_file} --tb=short -sv ${container_workdir}/${TESTS_SET} -vv"
+
+            sh "mkdir -p ${artifacts_dir}"
+
+            def configRun = [
+                'image': imageName,
+                'baseRepoPreConfig': false,
+                'dockerMaxCpus': 2,
+                'dockerExtraOpts' : [
+                    "--network=host",
+                    "-v /root/qa_results/:/root/qa_results/",
+                    "-v ${env.WORKSPACE}/${artifacts_dir}/:${container_workdir}/${artifacts_dir}/",
+                    // TODO remove once all docker images with tests (like cvp-spt) are transferred into the new architecture (like cvp-sanity)
+                    "--entrypoint=''",  // to override ENTRYPOINT=/bin/bash in Dockerfile of image
+                ],
+
+                'envOpts'         : [
+                    "SALT_USERNAME=${username}",
+                    "SALT_PASSWORD=${password}",
+                    "SALT_URL=${SALT_MASTER_URL}"
+                ] + TESTS_SETTINGS.replaceAll('\\"', '').tokenize(";"),
+                'runCommands'     : [
+                      '010_start_tests'    : {
+                          sh("cd ${container_workdir} && ${script}")
+                      }
+                  ]
+                ]
+            salt_testing.setupDockerAndTest(configRun)
+        }
+
+        stage ('Publish results') {
+            archiveArtifacts artifacts: "${artifacts_dir}/*"
+            junit "${artifacts_dir}/*.xml"
+            if (env.JOB_NAME.contains("cvp-spt")) {
+                plot csvFileName: 'plot-8634d2fe-dc48-4713-99f9-b69a381483aa.csv',
+                     group: 'SPT',
+                     style: 'line',
+                     title: 'SPT Glance results',
+                     xmlSeries: [[
+                     file: "${env.JOB_NAME}_report.xml",
+                     nodeType: 'NODESET',
+                     url: '',
+                     xpath: '/testsuite/testcase[@name="test_speed_glance"]/properties/property']]
+                plot csvFileName: 'plot-8634d2fe-dc48-4713-99f9-b69a381483bb.csv',
+                     group: 'SPT',
+                     style: 'line',
+                     title: 'SPT HW2HW results',
+                     xmlSeries: [[
+                     file: "${env.JOB_NAME}_report.xml",
+                     nodeType: 'NODESET',
+                     url: '',
+                     xpath: '/testsuite/testcase[@classname="cvp_spt.tests.test_hw2hw"]/properties/property']]
+                plot csvFileName: 'plot-8634d2fe-dc48-4713-99f9-b69a381483bc.csv',
+                     group: 'SPT',
+                     style: 'line',
+                     title: 'SPT VM2VM results',
+                     xmlSeries: [[
+                     file: "${env.JOB_NAME}_report.xml",
+                     nodeType: 'NODESET',
+                     url: '',
+                     xpath: '/testsuite/testcase[@classname="cvp_spt.tests.test_vm2vm"]/properties/property']]
+            }
+        }
+    } catch (Throwable e) {
+        // If there was an error or exception thrown, the build failed
+        currentBuild.result = "FAILURE"
+        throw e
+    } finally {
+        if (DEBUG_MODE == 'false') {
+            validate.runCleanup(saltMaster, TARGET_NODE, container_name)
+            salt.cmdRun(saltMaster, TARGET_NODE, "rm -rf ${remote_dir}")
+        }
+    }
+}
diff --git a/cvp-tempest.groovy b/cvp-tempest.groovy
new file mode 100644
index 0000000..c311186
--- /dev/null
+++ b/cvp-tempest.groovy
@@ -0,0 +1,178 @@
+/**
+ *
+ * Launch CVP Tempest verification of the cloud
+ *
+ * Expected parameters:
+
+ *   SALT_MASTER_URL             URL of Salt master
+ *   SALT_MASTER_CREDENTIALS     Credentials that are used in this Jenkins for accessing Salt master (usually "salt")
+ *   SERVICE_NODE                Node where the runtest formula and some other states will be executed
+ *   VERBOSE                     Show salt output in Jenkins console
+ *   DEBUG_MODE                  Keep the container after the test instead of removing it
+ *   STOP_ON_ERROR               Stop pipeline if error during salt run occurs
+ *   GENERATE_CONFIG             Run runtest formula / generate Tempest config
+ *   SKIP_LIST_PATH              Path to skip list (not in use right now)
+ *   TEST_IMAGE                  Docker image link to use for running container with testing tools.
+ *   TARGET_NODE                 Node to run the container with Tempest/Rally on
+ *   PREPARE_RESOURCES           Prepare OpenStack resources before the test run
+ *   TEMPEST_TEST_PATTERN        Tests to run
+ *   TEMPEST_ENDPOINT_TYPE       Type of OS endpoint to use during test run (not in use right now)
+ *   concurrency                 Number of threads to use for Tempest test run
+ *   remote_artifacts_dir        Folder to use for artifacts on remote node
+ *   runtest_tempest_cfg_dir     Folder to use to generate and store tempest.conf
+ *   runtest_tempest_cfg_name    Tempest config name
+ *   report_prefix               Prefix to prepend to the report name
+ *
+ */
+
+
+common = new com.mirantis.mk.Common()
+salt = new com.mirantis.mk.Salt()
+validate = new com.mirantis.mcp.Validate()
+
+def saltMaster
+extraYamlContext = env.getProperty('EXTRA_PARAMS')
+if (extraYamlContext) {
+    common.mergeEnv(env, extraYamlContext)
+}
+def SALT_MASTER_CREDENTIALS=(env.SALT_MASTER_CREDENTIALS) ?: 'salt'
+def VERBOSE = (env.VERBOSE) ? env.VERBOSE.toBoolean() : true
+def DEBUG_MODE = (env.DEBUG_MODE) ?: 'false'
+def STOP_ON_ERROR = (env.STOP_ON_ERROR) ? env.STOP_ON_ERROR.toBoolean() : false
+def GENERATE_CONFIG = (env.GENERATE_CONFIG) ?: true
+// do not change unless you know what you're doing
+def remote_artifacts_dir = (env.remote_artifacts_dir) ?: '/root/test/'
+def report_prefix = (env.report_prefix) ?: ''
+def args = ''
+def mounts = [:]
+node() {
+    stage('Initialization') {
+        deleteDir()
+        saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+        container_name = "${env.JOB_NAME}"
+        cluster_name=salt.getPillar(saltMaster, 'I@salt:master', '_param:cluster_name')['return'][0].values()[0]
+        os_version=salt.getPillar(saltMaster, 'I@salt:master', '_param:openstack_version')['return'][0].values()[0]
+        if (!os_version) {
+            throw new Exception("Openstack is not found on this env. Exiting")
+        }
+        TEST_IMAGE = (env.TEST_IMAGE) ?: "docker-prod-virtual.docker.mirantis.net/mirantis/cicd/ci-tempest:${os_version}"
+        runtest_node = salt.runSaltProcessStep(saltMaster, 'I@runtest:*', 'test.ping')['return'][0]
+        if (runtest_node.values()[0]) {
+            // Let's use Service node that was defined in reclass. If several nodes are defined
+            // we will use the first from salt output
+            common.infoMsg("Service node ${runtest_node.keySet()[0]} is defined in reclass")
+            SERVICE_NODE = runtest_node.keySet()[0]
+        }
+        else {
+            throw new Exception("Runtest config is not found in reclass. Please create runtest.yml and include it " +
+                                "into reclass. Check documentation for more details")
+        }
+        common.infoMsg('Refreshing pillars on service node')
+        salt.runSaltProcessStep(saltMaster, SERVICE_NODE, 'saltutil.refresh_pillar', [], null, VERBOSE)
+        // default node is cid01 (preferably) or cfg01
+        default_node=salt.getPillar(saltMaster, 'I@salt:master', '_param:cicd_control_node01_hostname')['return'][0].values()[0] ?: 'cfg01'
+        // fetch tempest_test_target from runtest.yaml, otherwise fallback to default_node
+        tempest_node=salt.getPillar(saltMaster, SERVICE_NODE, '_param:tempest_test_target')['return'][0].values()[0] ?: default_node+'*'
+        // TARGET_NODE will always override any settings above
+        TARGET_NODE = (env.TARGET_NODE) ?: tempest_node
+    }
+    stage('Preparing resources') {
+        if ( PREPARE_RESOURCES.toBoolean() ) {
+            common.infoMsg('Running salt.minion state on service node')
+            salt.enforceState(saltMaster, SERVICE_NODE, ['salt.minion'], VERBOSE, STOP_ON_ERROR, null, false, 300, 2, true, [], 60)
+            common.infoMsg('Running keystone.client on service node')
+            salt.enforceState(saltMaster, SERVICE_NODE, 'keystone.client', VERBOSE, STOP_ON_ERROR)
+            common.infoMsg('Running glance.client on service node')
+            salt.enforceState(saltMaster, SERVICE_NODE, 'glance.client', VERBOSE, STOP_ON_ERROR)
+            common.infoMsg('Running nova.client on service node')
+            salt.enforceState(saltMaster, SERVICE_NODE, 'nova.client', VERBOSE, STOP_ON_ERROR)
+        }
+        else {
+            common.infoMsg('Skipping resources preparation')
+        }
+    }
+    stage('Generate config') {
+        if ( GENERATE_CONFIG.toBoolean() ) {
+            // default is /root/test/
+            runtest_tempest_cfg_dir = (env.runtest_tempest_cfg_dir) ?: salt.getPillar(saltMaster, SERVICE_NODE, '_param:runtest_tempest_cfg_dir')['return'][0].values()[0]
+            // default is tempest_generated.conf
+            runtest_tempest_cfg_name = (env.runtest_tempest_cfg_name) ?: salt.getPillar(saltMaster, SERVICE_NODE, '_param:runtest_tempest_cfg_name')['return'][0].values()[0]
+            common.infoMsg("runtest_tempest_cfg is ${runtest_tempest_cfg_dir}/${runtest_tempest_cfg_name}")
+            salt.runSaltProcessStep(saltMaster, SERVICE_NODE, 'file.remove', ["${runtest_tempest_cfg_dir}"])
+            salt.runSaltProcessStep(saltMaster, SERVICE_NODE, 'file.mkdir', ["${runtest_tempest_cfg_dir}"])
+            fullnodename = salt.getMinions(saltMaster, SERVICE_NODE).get(0)
+            if (TARGET_NODE != tempest_node) {
+                common.infoMsg("TARGET_NODE is defined in Jenkins")
+                def params_to_update = ['tempest_test_target': "${TARGET_NODE}"]
+                common.infoMsg("Overriding default ${tempest_node} value of tempest_test_target parameter")
+                result = salt.runSaltCommand(saltMaster, 'local', ['expression': SERVICE_NODE, 'type': 'compound'], 'reclass.node_update',
+                                             null, null, ['name': fullnodename, 'parameters': ['tempest_test_target': "${TARGET_NODE}"]])
+                salt.checkResult(result)
+            }
+            common.infoMsg("TARGET_NODE is ${TARGET_NODE}")
+            salt.runSaltProcessStep(saltMaster, TARGET_NODE, 'file.remove', ["${remote_artifacts_dir}"])
+            salt.runSaltProcessStep(saltMaster, TARGET_NODE, 'file.mkdir', ["${remote_artifacts_dir}"])
+            // runtest state hangs if tempest_test_target is cfg01*
+            // let's run runtest.generate_tempest_config only for this case
+            if (TARGET_NODE == 'cfg01*') {
+                common.warningMsg("It is not recommended to run Tempest container on cfg node, but.. proceeding")
+                salt.enforceState(saltMaster, SERVICE_NODE, 'runtest.generate_tempest_config', VERBOSE, STOP_ON_ERROR)
+            } else {
+                salt.enforceState(saltMaster, SERVICE_NODE, 'runtest', VERBOSE, STOP_ON_ERROR)
+            }
+            // we need to refresh pillars on target node after runtest state
+            salt.runSaltProcessStep(saltMaster, TARGET_NODE, 'saltutil.refresh_pillar', [], null, VERBOSE)
+            if (TARGET_NODE != tempest_node) {
+                common.infoMsg("Reverting tempest_test_target parameter")
+                result = salt.runSaltCommand(saltMaster, 'local', ['expression': SERVICE_NODE, 'type': 'compound'], 'reclass.node_update',
+                                             null, null, ['name': fullnodename, 'parameters': ['tempest_test_target': "${tempest_node}"]])
+            }
+            SKIP_LIST_PATH = (env.SKIP_LIST_PATH) ?: salt.getPillar(saltMaster, SERVICE_NODE, '_param:tempest_skip_list_path')['return'][0].values()[0]
+            if (SKIP_LIST_PATH) {
+                mounts = ["${runtest_tempest_cfg_dir}/skip.list": "/root/tempest/skip.list"]
+                salt.cmdRun(saltMaster, SERVICE_NODE, "salt-cp ${TARGET_NODE} ${SKIP_LIST_PATH} ${runtest_tempest_cfg_dir}/skip.list")
+                args += ' --blacklist-file /root/tempest/skip.list '
+            }
+        }
+        else {
+            common.infoMsg('Skipping Tempest config generation')
+            salt.cmdRun(saltMaster, TARGET_NODE, "rm -rf ${remote_artifacts_dir}/reports")
+        }
+    }
+
+    try{
+        stage('Run Tempest tests') {
+            mounts = mounts + ["${runtest_tempest_cfg_dir}/${runtest_tempest_cfg_name}": "/etc/tempest/tempest.conf"]
+            validate.runContainer(master: saltMaster, target: TARGET_NODE, dockerImageLink: TEST_IMAGE,
+                                  mounts: mounts, name: container_name)
+            report_prefix += 'tempest_'
+            if (env.concurrency) {
+                args += ' -w ' + env.concurrency
+            }
+            if (TEMPEST_TEST_PATTERN == 'set=smoke') {
+                args += ' -s '
+                report_prefix += 'smoke'
+            }
+            else {
+                if (TEMPEST_TEST_PATTERN != 'set=full') {
+                    args += " -r ${TEMPEST_TEST_PATTERN} "
+                    report_prefix += 'custom'
+                }
+            }
+            salt.cmdRun(saltMaster, TARGET_NODE, "docker exec -e ARGS=\'${args}\' ${container_name} /bin/bash -c 'run-tempest'")
+        }
+        stage('Collect results') {
+            report_prefix += "_report_${env.BUILD_NUMBER}"
+            // will be removed after changing runtest-formula logic
+            salt.cmdRun(saltMaster, TARGET_NODE, "mkdir -p ${remote_artifacts_dir}/reports; mv ${remote_artifacts_dir}/report_* ${remote_artifacts_dir}/reports")
+            validate.addFiles(saltMaster, TARGET_NODE, "${remote_artifacts_dir}/reports", '')
+            sh "mv report_*.xml ${report_prefix}.xml"
+            sh "mv report_*.log ${report_prefix}.log"
+            archiveArtifacts artifacts: "${report_prefix}.*"
+            junit "${report_prefix}.xml"
+        }
+    } finally {
+        if (DEBUG_MODE == 'false') {
+            validate.runCleanup(saltMaster, TARGET_NODE, container_name)
+        }
+    }
+}
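The ARGS string handed to run-tempest is assembled incrementally from the job parameters. A standalone trace of the composition for a smoke run with concurrency 4 (parameter values assumed):

    def args = ''
    def concurrency = '4'              // env.concurrency
    def pattern = 'set=smoke'          // TEMPEST_TEST_PATTERN
    def report_prefix = 'tempest_'
    if (concurrency) { args += ' -w ' + concurrency }
    if (pattern == 'set=smoke') {
        args += ' -s '
        report_prefix += 'smoke'
    } else if (pattern != 'set=full') {
        args += " -r ${pattern} "
        report_prefix += 'custom'
    }
    assert args == ' -w 4 -s '
    assert report_prefix == 'tempest_smoke'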
diff --git a/generate-cookiecutter-products.groovy b/generate-cookiecutter-products.groovy
index 124f96b..62a6e00 100644
--- a/generate-cookiecutter-products.groovy
+++ b/generate-cookiecutter-products.groovy
@@ -140,11 +140,15 @@
                         def secretKeyID = RequesterEmail ?: "salt@${context['cluster_domain']}".toString()
                         if (!context.get('secrets_encryption_private_key')) {
                             def batchData = """
+                                %echo Generating a basic OpenPGP key for Salt-Master
+                                %no-protection
                                 Key-Type: 1
                                 Key-Length: 4096
                                 Expire-Date: 0
                                 Name-Real: ${context['salt_master_hostname']}.${context['cluster_domain']}
                                 Name-Email: ${secretKeyID}
+                                %commit
+                                %echo done
                             """.stripIndent()
                             writeFile file:'gpg-batch.txt', text:batchData
                             sh "gpg --gen-key --batch < gpg-batch.txt"
@@ -152,7 +156,7 @@
                         } else {
                             writeFile file:'gpgkey.asc', text:context['secrets_encryption_private_key']
                             sh "gpg --import gpgkey.asc"
-                            secretKeyID = sh(returnStdout: true, script: 'gpg --list-secret-keys --with-colons | awk -F: -e "/^sec/{print \\$5; exit}"').trim()
+                            secretKeyID = sh(returnStdout: true, script: 'gpg --list-secret-keys --with-colons | grep -E "^sec" | awk -F: \'{print \$5}\'').trim()
                         }
                         context['secrets_encryption_key_id'] = secretKeyID
                     }
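The reworked key-ID extraction relies on gpg's machine-readable --with-colons format, where field 5 of the `sec` record is the long key ID. An illustrative record (key ID and timestamp invented):

    //  1   2   3   4        5           6
    // sec: u :4096: 1 :0A46B4F2D3E85C91:1546300800:...
    def record = 'sec:u:4096:1:0A46B4F2D3E85C91:1546300800::::::scESC:::+:::23::0:'
    assert record.split(':')[4] == '0A46B4F2D3E85C91'   // awk's $5 is index 4 here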
diff --git a/git-mirror-pipeline.groovy b/git-mirror-pipeline.groovy
index fa49bbc..6f14866 100644
--- a/git-mirror-pipeline.groovy
+++ b/git-mirror-pipeline.groovy
@@ -5,6 +5,22 @@
     timeout(time: 12, unit: 'HOURS') {
         node() {
             try {
+                def sourceCreds = env.SOURCE_CREDENTIALS
+                if (sourceCreds && common.getCredentialsById(sourceCreds, 'password')) {
+                    withCredentials([
+                            [$class          : 'UsernamePasswordMultiBinding',
+                             credentialsId   : sourceCreds,
+                             passwordVariable: 'GIT_PASS',
+                             usernameVariable: 'GIT_USER']
+                    ]) {
+                        sh """
+                            set +x
+                            git config --global credential.${SOURCE_URL}.username \${GIT_USER}
+                            echo "echo \${GIT_PASS}" > askpass.sh && chmod +x askpass.sh
+                        """
+                        env.GIT_ASKPASS = "${env.WORKSPACE}/askpass.sh"
+                    }
+                }
                 if (BRANCHES == '*' || BRANCHES.contains('*')) {
                     branches = git.getBranchesForGitRepo(SOURCE_URL, BRANCHES)
                 } else {
@@ -18,7 +34,8 @@
                 dir('source') {
                     checkout changelog: true, poll: true,
                         scm: [$class    : 'GitSCM', branches: pollBranches, doGenerateSubmoduleConfigurations: false,
-                              extensions: [[$class: 'CleanCheckout']], submoduleCfg: [], userRemoteConfigs: [[credentialsId: CREDENTIALS_ID, url: SOURCE_URL]]]
+                              extensions: [[$class: 'CleanCheckout']], submoduleCfg: [],
+                              userRemoteConfigs: [[credentialsId: sourceCreds, url: SOURCE_URL]]]
                     git.mirrorGit(SOURCE_URL, TARGET_URL, CREDENTIALS_ID, branches, true)
                 }
             } catch (Throwable e) {
@@ -26,6 +43,9 @@
                 currentBuild.result = 'FAILURE'
                 currentBuild.description = currentBuild.description ? e.message + '' + currentBuild.description : e.message
                 throw e
+            } finally {
+                sh "git config --global --unset credential.${SOURCE_URL}.username || true"
+                deleteDir()
             }
         }
     }
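Git consults the GIT_ASKPASS program whenever it would otherwise prompt for a credential: it executes the script and reads the secret from its stdout, which is why the generated one-liner simply echoes the password. A sketch of what fetch time looks like with the setup above (repository URL assumed):

    // askpass.sh contains: echo <GIT_PASS>
    withEnv(["GIT_ASKPASS=${env.WORKSPACE}/askpass.sh"]) {
        sh 'git ls-remote https://example.org/mirrored/repo.git HEAD'
    }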
diff --git a/libvirt-live-snapshots.groovy b/libvirt-live-snapshots.groovy
index 59418b0..10cb4a1 100644
--- a/libvirt-live-snapshots.groovy
+++ b/libvirt-live-snapshots.groovy
@@ -11,7 +11,7 @@
  *   NODE_PROVIDER                      KVM node that hosts the VM (for ex. kvm02)
  *   TARGET                             Unique identification of the VM being snapshoted without domain name (for ex. ctl01)
  *   SNAPSHOT_NAME                      Snapshot name
- *   PATH                               Path where snapshot image and dumpxml are being put
+ *   LIBVIRT_IMAGES_PATH                Path where snapshot image and dumpxml are being put
  *   DISK_NAME                          Disk name of the snapshot
  *
 **/
@@ -31,27 +31,27 @@
 
         if (CREATE_LIVE_SNAPSHOT.toBoolean() == true) {
             stage('Create live snapshot') {
-                virsh.liveSnapshotPresent(pepperEnv, NODE_PROVIDER, TARGET, SNAPSHOT_NAME, PATH, DISK_NAME)
+                virsh.liveSnapshotPresent(pepperEnv, NODE_PROVIDER, TARGET, SNAPSHOT_NAME, LIBVIRT_IMAGES_PATH, DISK_NAME)
             }
         }
 
         if (REMOVE_LIVE_SNAPSHOT.toBoolean() == true) {
             stage('Remove live snapshot') {
-                virsh.liveSnapshotAbsent(pepperEnv, NODE_PROVIDER, TARGET, SNAPSHOT_NAME, PATH)
+                virsh.liveSnapshotAbsent(pepperEnv, NODE_PROVIDER, TARGET, SNAPSHOT_NAME, LIBVIRT_IMAGES_PATH)
             }
         }
 
         if (ROLLBACK_LIVE_SNAPSHOT.toBoolean() == true) {
             stage('Rollback live snapshot') {
                 sleep(30)
-                virsh.liveSnapshotRollback(pepperEnv, NODE_PROVIDER, TARGET, SNAPSHOT_NAME, PATH)
+                virsh.liveSnapshotRollback(pepperEnv, NODE_PROVIDER, TARGET, SNAPSHOT_NAME, LIBVIRT_IMAGES_PATH)
             }
         }
 
         if (MERGE_LIVE_SNAPSHOT.toBoolean() == true) {
             stage('Merge live snapshot') {
                 sleep(30)
-                virsh.liveSnapshotMerge(pepperEnv, NODE_PROVIDER, TARGET, SNAPSHOT_NAME, PATH, DISK_NAME)
+                virsh.liveSnapshotMerge(pepperEnv, NODE_PROVIDER, TARGET, SNAPSHOT_NAME, LIBVIRT_IMAGES_PATH, DISK_NAME)
             }
         }
     }
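Renaming the parameter away from PATH most likely avoids clobbering the shell's executable search path: Jenkins exposes job parameters as environment variables in sh steps, so a parameter literally named PATH would shadow the real one. A purely illustrative demonstration of the failure mode:

    // With a job parameter PATH=/srv/snapshots, every sh step inherits it:
    withEnv(['PATH=/srv/snapshots']) {
        sh 'virsh list'   // fails: 'virsh' can no longer be resolved
    }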
diff --git a/openstack-rabbitmq-upgrade.groovy b/openstack-rabbitmq-upgrade.groovy
new file mode 100644
index 0000000..aabdafc
--- /dev/null
+++ b/openstack-rabbitmq-upgrade.groovy
@@ -0,0 +1,155 @@
+/**
+ * Upgrade RabbitMQ packages on the given msg nodes.
+ *
+ * Expected parameters:
+ *   SALT_MASTER_CREDENTIALS            Credentials to the Salt API.
+ *   SALT_MASTER_URL                    Full Salt API address [http://10.10.10.15:6969].
+ *   OS_DIST_UPGRADE                    Upgrade system packages including kernel (apt-get dist-upgrade)
+ *   OS_UPGRADE                         Upgrade all installed applications (apt-get upgrade)
+ *   TARGET_SERVERS                     Comma-separated list of salt compound definitions to upgrade.
+ *   INTERACTIVE                        Ask interactive questions during pipeline run (bool).
+ *
+**/
+
+def common = new com.mirantis.mk.Common()
+def salt = new com.mirantis.mk.Salt()
+def python = new com.mirantis.mk.Python()
+def debian = new com.mirantis.mk.Debian()
+def openstack = new com.mirantis.mk.Openstack()
+
+def interactive = INTERACTIVE.toBoolean()
+def LinkedHashMap upgradeStageMap = [:]
+
+upgradeStageMap.put('Pre upgrade',
+  [
+    'Description': 'Only non-destructive actions will be applied during this phase. Basic service verification will be performed.',
+    'Status': 'NOT_LAUNCHED',
+    'Expected behaviors': '''
+ * No service downtime
+ * No workload downtime''',
+    'Launched actions': '''
+ * Verify API, perform basic CRUD operations for services.
+ * Verify rabbitmq is running and operational.''',
+    'State result': 'Basic checks around services API are passed.'
+  ])
+
+upgradeStageMap.put('Stop RabbitMQ service',
+  [
+    'Description': 'All RabbitMQ services will be stopped on all TARGET_SERVERS nodes.',
+    'Status': 'NOT_LAUNCHED',
+    'Expected behaviors': '''
+ * RabbitMQ services are stopped.
+ * OpenStack APIs are not accessible from this point.
+ * No workload downtime''',
+    'Launched actions': '''
+ * Stop RabbitMQ services''',
+    'State result': 'RabbitMQ service is stopped',
+  ])
+
+upgradeStageMap.put('Upgrade OS',
+  [
+    'Description': 'Optional step. OS packages will be upgraded during this phase; depending on the job parameters, dist-upgrade might be called and the node rebooted.',
+    'Status': 'NOT_LAUNCHED',
+    'Expected behaviors': '''
+ * No workload downtime
+ * The nodes might be rebooted''',
+    'Launched actions': '''
+ * Install new version of system packages
+ * If doing dist-upgrade, a new kernel might be installed and the node rebooted''',
+    'State result': '''
+ * System packages are updated
+ * Node might be rebooted
+'''
+  ])
+
+upgradeStageMap.put('Upgrade RabbitMQ server',
+   [
+    'Description': 'RabbitMQ and Erlang code will be upgraded during this stage. No workload downtime is expected.',
+    'Status': 'NOT_LAUNCHED',
+    'Expected behaviors': '''
+ * OpenStack services lose connection to rabbitmq-server
+ * No workload downtime''',
+    'Launched actions': '''
+ * Install new version of RabbitMQ and Erlang packages
+ * Render version of configs''',
+    'State result': '''
+ * RabbitMQ packages are upgraded''',
+  ])
+
+upgradeStageMap.put('Start RabbitMQ service',
+   [
+    'Description': 'All RabbitMQ services will be running on all TARGET_SERVERS nodes.',
+    'Status': 'NOT_LAUNCHED',
+    'Expected behaviors': '''
+ * RabbitMQ service is running.
+ * OpenStack APIs are accessible from this point.
+ * No workload downtime''',
+    'Launched actions': '''
+ * Start RabbitMQ service''',
+    'State result': 'RabbitMQ service is running',
+  ])
+
+def env = "env"
+timeout(time: 12, unit: 'HOURS') {
+  node() {
+
+    stage('Setup virtualenv for Pepper') {
+      python.setupPepperVirtualenv(env, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+    }
+
+    def upgradeTargets = salt.getMinionsSorted(env, TARGET_SERVERS)
+
+    if (upgradeTargets.isEmpty()) {
+      error("No servers for upgrade matched by ${TARGET_SERVERS}")
+    }
+
+    def stopTargets = upgradeTargets.reverse()
+
+    common.printStageMap(upgradeStageMap)
+    if (interactive){
+      input message: common.getColorizedString(
+        "Above you can find detailed info this pipeline will execute.\nThe info provides brief description of each stage, actions that will be performed and service/workload impact during each stage.\nPlease read it carefully.", "yellow")
+    }
+
+    for (target in upgradeTargets){
+      common.stageWrapper(upgradeStageMap, "Pre upgrade", target, interactive) {
+        openstack.runOpenStackUpgradePhase(env, target, 'pre')
+        openstack.runOpenStackUpgradePhase(env, target, 'verify')
+      }
+    }
+
+    for (target in stopTargets) {
+      common.stageWrapper(upgradeStageMap, "Stop RabbitMQ service", target, interactive) {
+        openstack.runOpenStackUpgradePhase(env, target, 'service_stopped')
+      }
+    }
+
+    for (target in upgradeTargets) {
+      common.stageWrapper(upgradeStageMap, "Upgrade OS", target, interactive) {
+        if (OS_DIST_UPGRADE.toBoolean() == true){
+          upgrade_mode = 'dist-upgrade'
+        } else if (OS_UPGRADE.toBoolean() == true){
+          upgrade_mode = 'upgrade'
+        }
+        if (OS_DIST_UPGRADE.toBoolean() == true || OS_UPGRADE.toBoolean() == true) {
+          debian.osUpgradeNode(env, target, upgrade_mode, false)
+        }
+      }
+    }
+
+    for (target in upgradeTargets) {
+      common.stageWrapper(upgradeStageMap, "Upgrade RabbitMQ server", target, interactive) {
+        openstack.runOpenStackUpgradePhase(env, target, 'pkgs_latest')
+        openstack.runOpenStackUpgradePhase(env, target, 'render_config')
+      }
+    }
+
+    for (target in upgradeTargets) {
+      common.stageWrapper(upgradeStageMap, "Start RabbitMQ service", target, interactive) {
+        openstack.runOpenStackUpgradePhase(env, target, 'service_running')
+        openstack.applyOpenstackAppsStates(env, target)
+        openstack.runOpenStackUpgradePhase(env, target, 'verify')
+      }
+    }
+  }
+}
diff --git a/stacklight-upgrade.groovy b/stacklight-upgrade.groovy
index 85b93e9..78765bb 100644
--- a/stacklight-upgrade.groovy
+++ b/stacklight-upgrade.groovy
@@ -49,24 +49,37 @@
 def verify_es_is_green(master) {
     common.infoMsg('Verify that the Elasticsearch cluster status is green')
     try {
-        def retries_wait = 20
-        def retries = 15
+        def retries_wait = 120
+        def retries = 60
+
         def elasticsearch_vip
-        def pillar = salt.getPillar(master, "I@elasticsearch:client", 'elasticsearch:client:server:host')
-        if(!pillar['return'].isEmpty()) {
-            elasticsearch_vip = pillar['return'][0].values()[0]
+        def pillar = salt.getReturnValues(salt.getPillar(master, "I@elasticsearch:client", 'elasticsearch:client:server:host'))
+        if(pillar) {
+            elasticsearch_vip = pillar
         } else {
             errorOccured = true
             common.errorMsg('[ERROR] Elasticsearch VIP address could not be retrieved')
         }
-        pillar = salt.getPillar(master, "I@elasticsearch:client", 'elasticsearch:client:server:port')
+
+        pillar = salt.getReturnValues(salt.getPillar(master, "I@elasticsearch:client", 'elasticsearch:client:server:port'))
         def elasticsearch_port
-        if(!pillar['return'].isEmpty()) {
-            elasticsearch_port = pillar['return'][0].values()[0]
+        if(pillar) {
+            elasticsearch_port = pillar
         } else {
             errorOccured = true
             common.errorMsg('[ERROR] Elasticsearch VIP port could not be retrieved')
         }
+
+        pillar = salt.getReturnValues(salt.getPillar(master, "I@elasticsearch:client", 'elasticsearch:client:server:scheme'))
+        def elasticsearch_scheme
+        if(pillar) {
+            elasticsearch_scheme = pillar
+            common.infoMsg("[INFO] Using elasticsearch scheme: ${elasticsearch_scheme}")
+        } else {
+            common.infoMsg('[INFO] No pillar with Elasticsearch server scheme, using scheme: http')
+            elasticsearch_scheme = "http"
+        }
+
         common.retry(retries,retries_wait) {
             common.infoMsg('Waiting for Elasticsearch to become green..')
             salt.cmdRun(master, "I@elasticsearch:client", "curl -sf ${elasticsearch_vip}:${elasticsearch_port}/_cat/health | awk '{print \$4}' | grep green")
@@ -204,8 +217,11 @@
                     common.infoMsg('Start the monitoring services')
                     salt.enforceState([saltId: pepperEnv, target: 'I@docker:swarm:role:master and I@prometheus:server', state: 'docker'])
                     salt.runSaltProcessStep(pepperEnv, '*', 'saltutil.sync_all', [], null, true)
+                    common.infoMsg("Waiting grafana service to start")
+                    sleep(120)
+
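+                    // grafana.client is retried (10 attempts, 30 seconds apart) in case Grafana is still starting up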
                     common.infoMsg('Refresh the Grafana dashboards')
-                    salt.enforceState([saltId: pepperEnv, target: 'I@grafana:client', state: 'grafana.client'])
+                    salt.enforceState([saltId: pepperEnv, target: 'I@grafana:client', state: 'grafana.client', retries: 10, retries_wait: 30])
                 } catch (Exception er) {
                     errorOccured = true
                     common.errorMsg("[ERROR] Upgrade of docker components failed. Please fix it manually.")
diff --git a/update-ceph.groovy b/update-ceph.groovy
index 59c616e..72ac2d5 100644
--- a/update-ceph.groovy
+++ b/update-ceph.groovy
@@ -24,18 +24,17 @@
     return salt.cmdRun(master, target, cmd)
 }
 
-def waitForHealthy(master, tgt, attempts=100, timeout=10) {
+def waitForHealthy(master, tgt, count=0, attempts=100) {
     // wait for healthy cluster
     common = new com.mirantis.mk.Common()
-    common.retry(attempts, timeout){
+    while (count<attempts) {
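+        // re-check 'ceph health' every 10 seconds, up to 'attempts' iterations; if the cluster never reports healthy, the loop exits without raising an error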
         def health = runCephCommand(master, tgt, 'ceph health')['return'][0].values()[0]
         if (health.contains('HEALTH_OK') || health.contains('HEALTH_WARN noout flag(s) set\n')) {
             common.infoMsg('Cluster is healthy')
-            return 0
-        } else {
-            common.infoMsg(health)
-            throw new Exception()
+            break;
         }
+        count++
+        sleep(10)
     }
 }
 
diff --git a/update-glusterfs-clients.groovy b/update-glusterfs-clients.groovy
new file mode 100644
index 0000000..02e889a
--- /dev/null
+++ b/update-glusterfs-clients.groovy
@@ -0,0 +1,119 @@
+/**
+ * Update glusterfs-client packages on given nodes
+ *
+ * Expected parameters:
+ *   DRIVE_TRAIN_PARAMS         Yaml, DriveTrain related params:
+ *     SALT_MASTER_CREDENTIALS              Credentials to the Salt API
+ *     SALT_MASTER_URL                      Full Salt API address [https://10.10.10.1:8000]
+ *     IGNORE_SERVER_STATUS                 Does not validate server availability/status before update
+ *     IGNORE_SERVER_VERSION                Does not validate that all servers have been updated
+ *     TARGET_SERVERS                       Salt compound target to match nodes to be updated [*, G@osfamily:debian]
+ */
+
+// Convert parameters from yaml to env variables
+params = readYaml text: env.DRIVE_TRAIN_PARAMS
+for (key in params.keySet()) {
+  value = params[key]
+  env.setProperty(key, value)
+}
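+
+// A hypothetical DRIVE_TRAIN_PARAMS value, for illustration only (the credential ID is an assumption):
+//   SALT_MASTER_URL: "https://10.10.10.1:8000"
+//   SALT_MASTER_CREDENTIALS: "salt-api-credentials"
+//   IGNORE_SERVER_STATUS: false
+//   IGNORE_SERVER_VERSION: false
+//   TARGET_SERVERS: "G@osfamily:debian"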
+
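+// Return the name of any Jenkins slave other than the one the current build runs on (env.SLAVE_NAME);
+// used at the bottom of this file to finish the update from a different host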
+@NonCPS
+def getNextNode() {
+  for (n in hudson.model.Hudson.instance.slaves) {
+    node_name = n.getNodeName()
+    if (node_name != env.SLAVE_NAME) {
+      return node_name
+    }
+  }
+}
+
+def update() {
+  def pEnv = "pepperEnv"
+  def salt = new com.mirantis.mk.Salt()
+  def common = new com.mirantis.mk.Common()
+  def python = new com.mirantis.mk.Python()
+  def pkg_name = 'glusterfs-client'
+
+  /**
+   * - choose only those hosts where an update is available; exclude the minion the job is running on
+   * - validate that all glusterfs servers are in a normal working state. Can be skipped with an option
+   * - validate that glusterfs on all servers has been updated, otherwise stop the update. Can be skipped with an option
+   * - run the update state on one client at a time
+   */
+
+  try {
+
+    stage('Setup virtualenv for Pepper') {
+      python.setupPepperVirtualenv(pEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+    }
+
+    stage('List target servers') {
+      all_minions = salt.getMinions(pEnv, TARGET_SERVERS)
+
+      if (all_minions.isEmpty()) {
+        throw new Exception("No minion was targeted")
+      }
+
+      minions = []
+      for (minion in all_minions) {
+        latest_version = salt.getReturnValues(salt.runSaltProcessStep(pEnv, minion, 'pkg.latest_version', [pkg_name, 'show_installed=True'])).split('\n')[0]
+        current_version = salt.getReturnValues(salt.runSaltProcessStep(pEnv, minion, 'pkg.version', [pkg_name])).split('\n')[0]
+        slave_container_id = salt.getReturnValues(salt.cmdRun(pEnv, minion, "which docker >/dev/null && docker ps --filter name=jenkins_${env.NODE_NAME} --filter status=running -q", false)).split('\n')[0]
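+        // if this minion hosts the Jenkins slave container running this job, postpone its update:
+        // it is finished later from another slave (see getNextNode and the bottom of this file)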
+        if (latest_version != current_version) {
+          if (!slave_container_id.isEmpty() && !minion.startsWith('cfg')) {
+            env.SLAVE_NAME = env.NODE_NAME
+            env.SLAVE_MINION = minion
+          } else {
+            minions.add(minion)
+          }
+        } else {
+          common.infoMsg("${pkg_name} has been already upgraded or newer version is not available on ${minion}. Skip upgrade")
+        }
+      }
+    }
+    if (!minions.isEmpty()) {
+      if (!IGNORE_SERVER_STATUS.toBoolean()){
+        stage('Validate servers availability') {
+          salt.commandStatus(pEnv, 'I@glusterfs:server', "gluster pool list | fgrep localhost", 'Connected', true, true, null, true, 1)
+          common.successMsg("All glusterfs servers are available")
+        }
+      } else {
+        common.warningMsg("Check of glusterfs servers availability has been disabled")
+      }
+      if (!IGNORE_SERVER_VERSION.toBoolean()){
+        stage('Check that all glusterfs servers have been updated') {
+          latest_version = salt.getReturnValues(salt.runSaltProcessStep(pEnv, minions[0], 'pkg.latest_version', [pkg_name, 'show_installed=True'])).split('\n')[0].split('-')[0]
+          salt.commandStatus(pEnv, 'I@glusterfs:server', "glusterfsd --version | head -n1 | awk '{print \$2}' | egrep '^${latest_version}' || echo none", latest_version, true, true, null, true, 1)
+          common.successMsg('All glusterfs servers have been updated to desired version')
+        }
+      } else {
+        common.warningMsg("Check of glusterfs servers' version has been disabled")
+      }
+      // Actual update
+      for (tgt in minions) {
+        stage("Update glusterfs on ${tgt}") {
+          salt.runSaltProcessStep(pEnv, tgt, 'state.apply', ['glusterfs.update.client'])
+        }
+      }
+    } else if (env.SLAVE_MINION == null) {
+      common.warningMsg("No hosts to update glusterfs on")
+    }
+  } catch (Throwable e) {
+    // If there was an error or exception thrown, the build failed
+    currentBuild.result = "FAILURE"
+    currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+    salt.runSaltProcessStep(pEnv, TARGET_SERVERS, 'state.apply', ['glusterfs'])
+    throw e
+  }
+}
+timeout(time: 12, unit: 'HOURS') {
+  node() {
+    update()
+  }
+  // Perform an update from another slave to finish update on previous slave host
+  if (env.SLAVE_NAME != null && !env.SLAVE_NAME.isEmpty()) {
+    node(getNextNode()) {
+      update()
+    }
+  }
+}
diff --git a/update-glusterfs-cluster-op-version.groovy b/update-glusterfs-cluster-op-version.groovy
new file mode 100644
index 0000000..9623481
--- /dev/null
+++ b/update-glusterfs-cluster-op-version.groovy
@@ -0,0 +1,110 @@
+/**
+ * Update the GlusterFS cluster.op-version
+ *
+ * Expected parameters:
+ *   DRIVE_TRAIN_PARAMS         Yaml, DriveTrain related params:
+ *     SALT_MASTER_CREDENTIALS              Credentials to the Salt API
+ *     SALT_MASTER_URL                      Full Salt API address [https://10.10.10.1:8000]
+ *     IGNORE_CLIENT_VERSION                Does not validate that all clients have been updated
+ *     IGNORE_SERVER_VERSION                Does not validate that all servers have been updated
+ *     CLUSTER_OP_VERSION                   GlusterFS cluster.op-version option to set. Defaults to the current cluster.max-op-version if available.
+ */
+
+def pEnv = "pepperEnv"
+def salt = new com.mirantis.mk.Salt()
+def common = new com.mirantis.mk.Common()
+def python = new com.mirantis.mk.Python()
+
+// Convert parameters from yaml to env variables
+params = readYaml text: env.DRIVE_TRAIN_PARAMS
+for (key in params.keySet()) {
+  value = params[key]
+  env.setProperty(key, value)
+}
+
+/**
+ * - ensure that cluster.op-version can be updated
+ * - check that all servers have been updated to a version no less than CLUSTER_OP_VERSION or cluster.max-op-version
+ * - check that all clients have been updated to a version no less than CLUSTER_OP_VERSION or cluster.max-op-version
+ * - set cluster.op-version
+ */
+
+/**
+ * Convert glusterfs' cluster.op-version to regular version string
+ *
+ * @param version string representing cluster.op-version, e.g. 50400
+ * @return string version number, e.g. 5.4.0
+ */
+def convertVersion(version) {
+    new_version = version[0]
+    for (i=1;i<version.length();i++) {
+        if (i%2 == 0) {
+            new_version += version[i]
+        } else if (version[i] == '0') {
+            new_version += '.'
+        } else {
+            new_version += '.' + version[i]
+        }
+    }
+    return new_version
+}
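+
+// Worked examples, derived from the loop above: convertVersion('50400') == '5.4.0',
+// convertVersion('31202') == '3.12.2'. Odd positions are either a '0' acting as a bare
+// separator or a digit that gets a '.' prepended; even positions are appended as-is.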
+
+timeout(time: 12, unit: 'HOURS') {
+  node() {
+    try {
+
+      stage('Setup virtualenv for Pepper') {
+        python.setupPepperVirtualenv(pEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+      }
+      stage('Get current cluster.op-version') {
+        volume = salt.getReturnValues(salt.cmdRun(pEnv, 'I@glusterfs:server:role:primary', "gluster volume list")).split('\n')[0]
+        currentOpVersion = salt.getReturnValues(salt.cmdRun(pEnv, 'I@glusterfs:server:role:primary', "gluster volume get ${volume} cluster.op-version | grep cluster.op-version | awk '{print \$2}'")).split('\n')[0]
+      }
+      if (CLUSTER_OP_VERSION.isEmpty()) {
+        stage('Get cluster.max-op-version') {
+          CLUSTER_OP_VERSION = salt.getReturnValues(salt.cmdRun(pEnv, 'I@glusterfs:server:role:primary', "gluster volume get all cluster.max-op-version 2>/dev/null | grep cluster.max-op-version | awk '{print \$2}'")).split('\n')[0]
+        }
+      }
+      if (CLUSTER_OP_VERSION.isEmpty() || CLUSTER_OP_VERSION.length() != 5) {
+        msg = 'No valid cluster.op-version specified to set'
+        common.errorMsg(msg)
+        currentBuild.result = "FAILURE"
+        currentBuild.description = msg
+      } else if (currentOpVersion == CLUSTER_OP_VERSION) {
+        common.warningMsg("cluster.op-version is already set to ${currentOpVersion}")
+      } else {
+        version = convertVersion(CLUSTER_OP_VERSION)
+        if (!IGNORE_SERVER_VERSION.toBoolean()){
+          stage('Check that all servers have been updated') {
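+            // 'good' is echoed only when the installed glusterfsd version compares greater than the converted version string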
+            salt.commandStatus(pEnv, 'I@glusterfs:server', "dpkg --compare-versions \$(glusterfsd --version | head -n1| awk '{print \$2}') gt ${version} && echo good", 'good', true, true, null, true, 1)
+            common.successMsg('All servers have been updated to desired version')
+          }
+        } else {
+          common.warningMsg("Check of servers' version has been disabled")
+        }
+        if (!IGNORE_CLIENT_VERSION.toBoolean()){
+          stage('Check that all clients have been updated') {
+            salt.commandStatus(pEnv, 'I@glusterfs:client', "dpkg --compare-versions \$(glusterfsd --version | head -n1| awk '{print \$2}') gt ${version} && echo good", 'good', true, true, null, true, 1)
+            common.successMsg('All clients have been updated to desired version')
+          }
+        } else {
+          common.warningMsg("Check of clients' version has been disabled")
+        }
+        stage("Update cluster.op-version") {
+          salt.cmdRun(pEnv, 'I@glusterfs:server:role:primary', "gluster volume set all cluster.op-version ${CLUSTER_OP_VERSION}")
+        }
+        stage("Validate cluster.op-version") {
+          newOpVersion = salt.getReturnValues(salt.cmdRun(pEnv, 'I@glusterfs:server:role:primary', "gluster volume get ${volume} cluster.op-version | grep cluster.op-version | awk '{print \$2}'")).split('\n')[0]
+          if (newOpVersion != CLUSTER_OP_VERSION) {
+            throw new Exception("cluster.op-version was not set to ${CLUSTER_OP_VERSION}")
+          }
+        }
+      }
+    } catch (Throwable e) {
+      // If there was an error or exception thrown, the build failed
+      currentBuild.result = "FAILURE"
+      currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+      throw e
+    }
+  }
+}
diff --git a/update-glusterfs-servers.groovy b/update-glusterfs-servers.groovy
new file mode 100644
index 0000000..23b280d
--- /dev/null
+++ b/update-glusterfs-servers.groovy
@@ -0,0 +1,92 @@
+/**
+ * Update packages on given server nodes
+ *
+ * Expected parameters:
+ *   DRIVE_TRAIN_PARAMS         Yaml, DriveTrain related params:
+ *     SALT_MASTER_CREDENTIALS              Credentials to the Salt API
+ *     SALT_MASTER_URL                      Full Salt API address [https://10.10.10.1:8000]
+ *     IGNORE_SERVER_STATUS                 Does not validate server availability/status before update
+ *     IGNORE_NON_REPLICATED_VOLUMES        Update GlusterFS even if there are non-replicated volumes
+ *     TARGET_SERVERS                       Salt compound target to match nodes to be updated [*, G@osfamily:debian]
+ */
+
+def pEnv = "pepperEnv"
+def salt = new com.mirantis.mk.Salt()
+def common = new com.mirantis.mk.Common()
+def python = new com.mirantis.mk.Python()
+def pkg_name = 'glusterfs-server'
+
+// Convert parameters from yaml to env variables
+params = readYaml text: env.DRIVE_TRAIN_PARAMS
+for (key in params.keySet()) {
+  value = params[key]
+  env.setProperty(key, value)
+}
+
+/**
+ * - choose only those hosts where an update is available
+ * - validate that all servers are in a normal working state. Can be skipped with an option
+ * - validate that all volumes are replicated. If there is a non-replicated volume, stop the update. Can be skipped with an option
+ * - run the update state on one server at a time
+ */
+
+timeout(time: 12, unit: 'HOURS') {
+  node() {
+    try {
+
+      stage('Setup virtualenv for Pepper') {
+        python.setupPepperVirtualenv(pEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+      }
+
+      stage('List target servers') {
+        all_minions = salt.getMinions(pEnv, TARGET_SERVERS)
+
+        if (all_minions.isEmpty()) {
+          throw new Exception("No minion was targeted")
+        }
+        minions = []
+        for (minion in all_minions) {
+          latest_version = salt.getReturnValues(salt.runSaltProcessStep(pEnv, minion, 'pkg.latest_version', [pkg_name, 'show_installed=True'])).split('\n')[0]
+          current_version = salt.getReturnValues(salt.runSaltProcessStep(pEnv, minion, 'pkg.version', [pkg_name])).split('\n')[0]
+          if (latest_version != current_version) {
+            minions.add(minion)
+          } else {
+            common.infoMsg("${pkg_name} has been already upgraded or newer version is not available on ${minion}. Skip upgrade")
+          }
+        }
+      }
+      if (!minions.isEmpty()) {
+        if (!IGNORE_SERVER_STATUS.toBoolean()){
+          stage('Validate servers availability') {
+            salt.commandStatus(pEnv, TARGET_SERVERS, "gluster pool list | fgrep localhost", 'Connected', true, true, null, true, 1)
+            common.successMsg("All servers are available")
+          }
+        } else {
+          common.warningMsg("Check of servers availability has been disabled")
+        }
+        if (!IGNORE_NON_REPLICATED_VOLUMES.toBoolean()){
+          stage('Check that all volumes are replicated') {
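+            // the check fails if 'gluster volume info' reports any volume whose Type is not Replicate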
+            salt.commandStatus(pEnv, TARGET_SERVERS, "gluster volume info | fgrep 'Type:' | fgrep -v Replicate", null, false, true, null, true, 1)
+            common.successMsg("All volumes are replicated")
+          }
+        } else {
+          common.warningMsg("Check of volumes' replication has been disabled. Be aware, you may lost data during update!")
+        }
+        // Actual update
+        for (tgt in minions) {
+          stage("Update glusterfs on ${tgt}") {
+            salt.runSaltProcessStep(pEnv, tgt, 'state.apply', ['glusterfs.update.server'])
+          }
+        }
+      } else {
+        common.warningMsg("No hosts to update glusterfs on")
+      }
+    } catch (Throwable e) {
+      // If there was an error or exception thrown, the build failed
+      currentBuild.result = "FAILURE"
+      currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+      salt.runSaltProcessStep(pEnv, TARGET_SERVERS, 'state.apply', ['glusterfs'])
+      throw e
+    }
+  }
+}
diff --git a/update-glusterfs.groovy b/update-glusterfs.groovy
new file mode 100644
index 0000000..67d3341
--- /dev/null
+++ b/update-glusterfs.groovy
@@ -0,0 +1,81 @@
+/**
+ * Complete glusterfs update pipeline
+ *
+ * Expected parameters:
+ *   DRIVE_TRAIN_PARAMS         Yaml, DriveTrain related params:
+ *     SALT_MASTER_CREDENTIALS              Credentials to the Salt API
+ *     SALT_MASTER_URL                      Full Salt API address [https://10.10.10.1:8000]
+ */
+
+// Convert parameters from yaml to env variables
+params = readYaml text: env.DRIVE_TRAIN_PARAMS
+for (key in params.keySet()) {
+  value = params[key]
+  env.setProperty(key, value)
+}
+
+def waitGerrit(salt_target, wait_timeout) {
+  def salt = new com.mirantis.mk.Salt()
+  def common = new com.mirantis.mk.Common()
+  def python = new com.mirantis.mk.Python()
+  def pEnv = "pepperEnv"
+  python.setupPepperVirtualenv(pEnv, env.SALT_MASTER_URL, env.SALT_MASTER_CREDENTIALS)
+
+  salt.fullRefresh(pEnv, salt_target)
+
+  def gerrit_master_url = salt.getPillar(pEnv, salt_target, '_param:gerrit_master_url')
+
+  if(!gerrit_master_url['return'].isEmpty()) {
+    gerrit_master_url = gerrit_master_url['return'][0].values()[0]
+  } else {
+    gerrit_master_url = ''
+  }
+
+  if (gerrit_master_url != '') {
+    common.infoMsg('Gerrit master url "' + gerrit_master_url + '" retrieved at _param:gerrit_master_url')
+  } else {
+    common.infoMsg('Gerrit master url could not be retrieved at _param:gerrit_master_url. Falling back to gerrit pillar')
+
+    def gerrit_host
+    def gerrit_http_port
+    def gerrit_http_scheme
+    def gerrit_http_prefix
+
+    def host_pillar = salt.getPillar(pEnv, salt_target, 'gerrit:client:server:host')
+    gerrit_host = salt.getReturnValues(host_pillar)
+
+    def port_pillar = salt.getPillar(pEnv, salt_target, 'gerrit:client:server:http_port')
+    gerrit_http_port = salt.getReturnValues(port_pillar)
+
+    def scheme_pillar = salt.getPillar(pEnv, salt_target, 'gerrit:client:server:protocol')
+    gerrit_http_scheme = salt.getReturnValues(scheme_pillar)
+
+    def prefix_pillar = salt.getPillar(pEnv, salt_target, 'gerrit:client:server:url_prefix')
+    gerrit_http_prefix = salt.getReturnValues(prefix_pillar)
+
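+    // compose the Gerrit URL from the individual gerrit:client:server pillar values, i.e. scheme://host:port + prefix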
+    gerrit_master_url = gerrit_http_scheme + '://' + gerrit_host + ':' + gerrit_http_port + gerrit_http_prefix
+
+  }
+
+  timeout(wait_timeout) {
+    common.infoMsg('Waiting for Gerrit to come up..')
+    def check_gerrit_cmd = 'while true; do curl -sI -m 3 -o /dev/null -w' + " '" + '%{http_code}' + "' " + gerrit_master_url + '/ | grep 200 && break || sleep 1; done'
+    salt.cmdRun(pEnv, salt_target, 'timeout ' + (wait_timeout*60+3) + ' /bin/sh -c -- ' + '"' + check_gerrit_cmd + '"')
+  }
+}
+
+node() {
+  stage('Update glusterfs servers') {
+    build(job: 'update-glusterfs-servers')
+  }
+  sleep 180
+  stage('Update glusterfs clients') {
+    build(job: 'update-glusterfs-clients')
+  }
+}
+node() {
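+  // Gerrit must answer HTTP 200 again before the final cluster.op-version job is scheduled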
+  waitGerrit('I@gerrit:client', 300)
+  stage('Update glusterfs cluster.op-version') {
+    build(job: 'update-glusterfs-cluster-op-version')
+  }
+}
diff --git a/upgrade-mcp-release.groovy b/upgrade-mcp-release.groovy
index c34215d..ac63e53 100644
--- a/upgrade-mcp-release.groovy
+++ b/upgrade-mcp-release.groovy
@@ -44,7 +44,7 @@
 }
 
 def updateSaltStack(target, pkgs) {
-    salt.cmdRun(venvPepper, "I@salt:master", "salt -C '${target}' --async pkg.install force_yes=True pkgs='$pkgs'")
+    salt.cmdRun(venvPepper, 'I@salt:master', "salt -C '${target}' --async pkg.install force_yes=True pkgs='$pkgs'")
     // can't use same function from pipeline lib, as at the moment of running upgrade pipeline Jenkins
     // still using pipeline lib from current old mcp-version
     common.retry(20, 60) {
@@ -219,6 +219,25 @@
     }
 }
 
+def checkDebsums() {
+    // check for salt-formulas consistency
+    try {
+        try {
+            salt.cmdRun(venvPepper, 'I@salt:master', "salt -C 'I@salt:master' pkg.install force_yes=True pkgs=[debsums]")
+        }
+        catch (Exception ex) {
+            common.warningMsg('Unable to install package "debsums" at cfg01. Salt-formulas integrity check skipped')
+        }
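+        // run debsums silently over every installed salt-formula-* package; any modified file is appended to the report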
+        salt.cmdRun(venvPepper, 'I@salt:master', '> /root/debdsums_report; for i in $(dpkg-query -W -f=\'${Package}\\n\' | sed "s/ //g" |grep \'salt-formula-\'); do debsums -s ${i} 2>> /root/debdsums_report; done')
+        salt.cmdRun(venvPepper, 'I@salt:master', 'if [ -s "/root/debdsums_report" ]; then exit 1 ; fi')
+    }
+    catch (Exception ex) {
+        common.errorMsg(salt.cmdRun(venvPepper, 'I@salt:master', 'cat /root/debdsums_report ', true, null, false).get('return')[0].values()[0].trim())
+        common.errorMsg(ex.toString())
+        error('You have unexpected changes in formulas. All of them would be overwritten by the update. Unable to continue automatically.')
+    }
+}
+
 if (common.validInputParam('PIPELINE_TIMEOUT')) {
     try {
         pipelineTimeout = env.PIPELINE_TIMEOUT.toInteger()
@@ -228,7 +247,7 @@
 }
 
 timeout(time: pipelineTimeout, unit: 'HOURS') {
-    node("python") {
+    node('python') {
         try {
             def inventoryBeforeFilename = "reclass-inventory-before.out"
             def inventoryAfterFilename = "reclass-inventory-after.out"
@@ -288,6 +307,10 @@
             }
             python.setupPepperVirtualenv(venvPepper, saltMastURL, saltMastCreds)
             def minions = salt.getMinions(venvPepper, '*')
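+            // an empty or 'null' cluster_name means the Salt master's pillar failed to render; stop before changing anything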
+            def cluster_name = salt.getPillar(venvPepper, 'I@salt:master', "_param:cluster_name").get("return")[0].values()[0]
+            if (cluster_name == '' || cluster_name == 'null' || cluster_name == null) {
+                error('Pillar data is broken for Salt master node! Please check it manually and re-run pipeline.')
+            }
 
             stage('Update Reclass and Salt-Formulas') {
                 common.infoMsg('Perform: Full salt sync')
@@ -298,13 +321,13 @@
                 common.infoMsg('Perform: archiveReclassInventory before upgrade')
                 archiveReclassInventory(inventoryBeforeFilename)
 
-                def cluster_name = salt.getPillar(venvPepper, 'I@salt:master', '_param:cluster_name').get('return')[0].values()[0]
                 try {
                     salt.cmdRun(venvPepper, 'I@salt:master', 'cd /srv/salt/reclass/ && git status && git diff-index --quiet HEAD --')
                 }
                 catch (Exception ex) {
                     error('You have uncommitted changes in your Reclass cluster model repository. Please commit or reset them and rerun the pipeline.')
                 }
+                checkDebsums()
                 if (updateClusterModel) {
                     common.infoMsg('Perform: UPDATE_CLUSTER_MODEL')
                     def dateTime = common.getDatetime()
@@ -362,9 +385,9 @@
                     salt.cmdRun(venvPepper, 'I@salt:master', "grep '^- system.defaults\$' /srv/salt/reclass/classes/cluster/$cluster_name/infra/init.yml || " +
                         "sed -i 's|^classes:|classes:\\n- system.defaults|' /srv/salt/reclass/classes/cluster/$cluster_name/infra/init.yml")
                     salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && " +
-                            "grep -r -l 'docker_image_jenkins: .*' cicd | xargs --no-run-if-empty sed -i 's|\\s*docker_image_jenkins: .*||g'")
+                        "grep -r -l 'docker_image_jenkins: .*' cicd | xargs --no-run-if-empty sed -i 's|\\s*docker_image_jenkins: .*||g'")
                     salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && " +
-                            "grep -r -l 'docker_image_jenkins_slave: .*' cicd | xargs --no-run-if-empty sed -i 's|\\s*docker_image_jenkins_slave: .*||g'")
+                        "grep -r -l 'docker_image_jenkins_slave: .*' cicd | xargs --no-run-if-empty sed -i 's|\\s*docker_image_jenkins_slave: .*||g'")
                     common.infoMsg("The following changes were made to the cluster model and will be commited. " +
                         "Please consider if you want to push them to the remote repository or not. You have to do this manually when the run is finished.")
                     salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && git diff")
@@ -374,7 +397,7 @@
                 try {
                     common.infoMsg('Perform: UPDATE Salt Formulas')
                     salt.fullRefresh(venvPepper, '*')
-                    salt.enforceState(venvPepper, 'I@salt:master', 'linux.system.repo')
+                    salt.enforceState(venvPepper, 'I@salt:master', 'linux.system.repo', true, true, null, false, 60, 2)
                     def saltEnv = salt.getPillar(venvPepper, 'I@salt:master', "_param:salt_master_base_environment").get("return")[0].values()[0]
                     salt.runSaltProcessStep(venvPepper, 'I@salt:master', 'state.sls_id', ["salt_master_${saltEnv}_pkg_formulas", 'salt.master.env'])
                     salt.fullRefresh(venvPepper, '*')
@@ -397,12 +420,13 @@
                 }
 
                 salt.fullRefresh(venvPepper, 'I@salt:master')
-                salt.enforceState(venvPepper, 'I@salt:master', 'reclass.storage', true)
+                salt.enforceState(venvPepper, 'I@salt:master', 'reclass.storage', true, true, null, false, 60, 2)
                 try {
-                    salt.enforceState(venvPepper, "I@salt:master", 'reclass', true)
+                    salt.enforceState(venvPepper, 'I@salt:master', 'reclass', true, true, null, false, 60, 2)
                 }
                 catch (Exception ex) {
-                    error("Reclass fails rendering. Pay attention to your cluster model.")
+                    common.errorMsg(ex.toString())
+                    error('Reclass fails rendering. Pay attention to your cluster model.')
                 }
 
                 salt.fullRefresh(venvPepper, '*')
@@ -466,10 +490,10 @@
 
             stage('Update Drivetrain') {
                 if (upgradeSaltStack) {
-                    updateSaltStack("I@salt:master", '["salt-master", "salt-common", "salt-api", "salt-minion"]')
+                    updateSaltStack('I@salt:master', '["salt-master", "salt-common", "salt-api", "salt-minion"]')
 
-                    salt.enforceState(venvPepper, "I@linux:system", 'linux.system.repo', true)
-                    updateSaltStack("I@salt:minion and not I@salt:master", '["salt-minion"]')
+                    salt.enforceState(venvPepper, 'I@linux:system', 'linux.system.repo', true, true, null, false, 60, 2)
+                    updateSaltStack('I@salt:minion and not I@salt:master', '["salt-minion"]')
                 }
 
                 if (updatePipelines) {
@@ -479,23 +503,37 @@
                 }
 
                 // update minions certs
-                salt.enforceState(venvPepper, "I@salt:minion", 'salt.minion.cert', true)
+                // call for `salt.minion.ca` state on related nodes to make sure
+                // mine was updated with required data after salt-minion/salt-master restart salt:minion:ca
+                salt.enforceState(venvPepper, 'I@salt:minion:ca', 'salt.minion.ca', true, true, null, false, 60, 2)
+                salt.enforceState(venvPepper, 'I@salt:minion', 'salt.minion.cert', true, true, null, false, 60, 2)
 
+                // run `salt.minion` to refresh all minion configs (for example _keystone.conf)
+                salt.enforceState(venvPepper, 'I@salt:minion', 'salt.minion', true, true, null, false, 60, 2)
                 // Retry needed only for rare race-condition in user appearance
                 common.infoMsg('Perform: updating users and keys')
-                salt.enforceState(venvPepper, "I@linux:system", 'linux.system.user', true)
+                salt.enforceState(venvPepper, 'I@linux:system', 'linux.system.user', true, true, null, false, 60, 2)
                 common.infoMsg('Perform: updating openssh')
-                salt.enforceState(venvPepper, "I@linux:system", 'openssh', true)
+                salt.enforceState(venvPepper, 'I@linux:system', 'openssh', true, true, null, false, 60, 2)
 
-                salt.enforceState(venvPepper, 'I@jenkins:client and not I@salt:master', 'jenkins.client', true)
-                salt.cmdRun(venvPepper, "I@salt:master", "salt -C 'I@jenkins:client and I@docker:client and not I@salt:master' state.sls docker.client --async")
+                // apply salt API TLS if needed
+                def nginxAtMaster = salt.getPillar(venvPepper, 'I@salt:master', 'nginx:server:enabled').get('return')[0].values()[0]
+                if (nginxAtMaster.toString().toLowerCase() == 'true') {
+                    salt.enforceState(venvPepper, 'I@salt:master', 'nginx', true, true, null, false, 60, 2)
+                }
+
+                // Apply changes for HaProxy on CI/CD nodes
+                salt.enforceState(venvPepper, 'I@keepalived:cluster:instance:cicd_control_vip and I@haproxy:proxy', 'haproxy.proxy', true)
+
+                salt.enforceState(venvPepper, 'I@jenkins:client and not I@salt:master', 'jenkins.client', true, true, null, false, 60, 2)
+                salt.cmdRun(venvPepper, 'I@salt:master', "salt -C 'I@jenkins:client and I@docker:client and not I@salt:master' state.sls docker.client --async")
 
                 sleep(180)
 
                 common.infoMsg('Perform: Checking if Docker containers are up')
 
                 try {
-                    common.retry(10, 30) {
+                    common.retry(20, 30) {
                         salt.cmdRun(venvPepper, 'I@jenkins:client and I@docker:client', "! docker service ls | tail -n +2 | grep -v -E '\\s([0-9])/\\1\\s'")
                     }
                 }