Merge "[CVP] Fixing path to report and xpath for cvp-spt" into release/proposed/2019.2.0
diff --git a/cvp-runner.groovy b/cvp-runner.groovy
index 6d099b7..b9649d5 100644
--- a/cvp-runner.groovy
+++ b/cvp-runner.groovy
@@ -61,7 +61,8 @@
                 def env_vars_list  =  [
                     "SALT_USERNAME=${creds.username}",
                     "SALT_PASSWORD=${creds.password}",
-                    "SALT_URL=${SALT_MASTER_URL}"
+                    "SALT_URL=${SALT_MASTER_URL}",
+                    "REQUESTS_CA_BUNDLE=/etc/ssl/certs/ca-certificates.crt"
                     ] + env_vars
 
                 // Generating final config
@@ -72,6 +73,7 @@
                     'dockerExtraOpts' : [
                         "--network=host",
                         "-v /root/qa_results/:/root/qa_results/",
+                        "-v /etc/ssl/certs/:/etc/ssl/certs/:ro",
                         "-v ${env.WORKSPACE}/${artifacts_dir}/:${container_workdir}/${artifacts_dir}/",
                     ],
                     'envOpts'         : env_vars_list,
diff --git a/cvp-tempest.groovy b/cvp-tempest.groovy
index b169ab5..c311186 100644
--- a/cvp-tempest.groovy
+++ b/cvp-tempest.groovy
@@ -19,6 +19,8 @@
  *   TEMPEST_ENDPOINT_TYPE       Type of OS endpoint to use during test run (not in use right now)
  *   concurrency                 Number of threads to use for Tempest test run
  *   remote_artifacts_dir        Folder to use for artifacts on remote node
+ *   runtest_tempest_cfg_dir     Folder to use to generate and store tempest.conf
+ *   runtest_tempest_cfg_name    Tempest config name
  *   report_prefix               Some prefix to put to report name
  *
  */
@@ -37,91 +39,109 @@
 def DEBUG_MODE = (env.DEBUG_MODE) ?: false
 def STOP_ON_ERROR = (env.STOP_ON_ERROR) ? env.STOP_ON_ERROR.toBoolean() : false
 def GENERATE_CONFIG = (env.GENERATE_CONFIG) ?: true
+// do not change unless you know what you're doing
 def remote_artifacts_dir = (env.remote_artifacts_dir) ?: '/root/test/'
 def report_prefix = (env.report_prefix) ?: ''
 def args = ''
+def mounts = [:]
 node() {
-    try{
-        stage('Initialization') {
-            deleteDir()
-            saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
-            container_name = "${env.JOB_NAME}"
-            cluster_name=salt.getPillar(saltMaster, 'I@salt:master', '_param:cluster_name')['return'][0].values()[0]
-            os_version=salt.getPillar(saltMaster, 'I@salt:master', '_param:openstack_version')['return'][0].values()[0]
-            if (!os_version) {
-                throw new Exception("Openstack is not found on this env. Exiting")
-            }
-            TEST_IMAGE = (env.TEST_IMAGE) ?: "docker-prod-virtual.docker.mirantis.net/mirantis/cicd/ci-tempest:${os_version}"
-            runtest_node = salt.runSaltProcessStep(saltMaster, 'I@runtest:*', 'test.ping')['return'][0]
-            if (runtest_node.values()[0]) {
-                // Let's use Service node that was defined in reclass. If several nodes are defined
-                // we will use the first from salt output
-                common.infoMsg("Service node ${runtest_node.keySet()[0]} is defined in reclass")
-                SERVICE_NODE = runtest_node.keySet()[0]
-            }
-            else {
-                throw new Exception("Runtest config is not found in reclass. Please create runtest.yml and include it " +
-                                    "into reclass. Check documentation for more details")
-            }
-            common.infoMsg('Refreshing pillars on service node')
-            salt.runSaltProcessStep(saltMaster, SERVICE_NODE, 'saltutil.refresh_pillar', [], null, VERBOSE)
-            tempest_node=salt.getPillar(saltMaster, SERVICE_NODE, '_param:tempest_test_target')['return'][0].values()[0] ?: 'I@gerrit:client'
+    stage('Initialization') {
+        deleteDir()
+        saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+        container_name = "${env.JOB_NAME}"
+        cluster_name=salt.getPillar(saltMaster, 'I@salt:master', '_param:cluster_name')['return'][0].values()[0]
+        os_version=salt.getPillar(saltMaster, 'I@salt:master', '_param:openstack_version')['return'][0].values()[0]
+        if (!os_version) {
+            throw new Exception("OpenStack is not found in this environment. Exiting")
         }
-        stage('Preparing resources') {
-            if ( PREPARE_RESOURCES.toBoolean() ) {
-                common.infoMsg('Running salt.minion state on service node')
-                salt.enforceState(saltMaster, SERVICE_NODE, ['salt.minion'], VERBOSE, STOP_ON_ERROR, null, false, 300, 2, true, [], 60)
-                common.infoMsg('Running keystone.client on service node')
-                salt.enforceState(saltMaster, SERVICE_NODE, 'keystone.client', VERBOSE, STOP_ON_ERROR)
-                common.infoMsg('Running glance.client on service node')
-                salt.enforceState(saltMaster, SERVICE_NODE, 'glance.client', VERBOSE, STOP_ON_ERROR)
-                common.infoMsg('Running nova.client on service node')
-                salt.enforceState(saltMaster, SERVICE_NODE, 'nova.client', VERBOSE, STOP_ON_ERROR)
-            }
-            else {
-                common.infoMsg('Skipping resources preparation')
-            }
+        TEST_IMAGE = (env.TEST_IMAGE) ?: "docker-prod-virtual.docker.mirantis.net/mirantis/cicd/ci-tempest:${os_version}"
+        runtest_node = salt.runSaltProcessStep(saltMaster, 'I@runtest:*', 'test.ping')['return'][0]
+        if (runtest_node.values()[0]) {
+            // Let's use the Service node defined in reclass. If several nodes are defined,
+            // we will use the first one from the salt output
+            common.infoMsg("Service node ${runtest_node.keySet()[0]} is defined in reclass")
+            SERVICE_NODE = runtest_node.keySet()[0]
         }
-        stage('Generate config') {
-            if ( GENERATE_CONFIG.toBoolean() ) {
-                salt.runSaltProcessStep(saltMaster, SERVICE_NODE, 'file.remove', ["${remote_artifacts_dir}"])
-                salt.runSaltProcessStep(saltMaster, SERVICE_NODE, 'file.mkdir', ["${remote_artifacts_dir}"])
-                fullnodename = salt.getMinions(saltMaster, SERVICE_NODE).get(0)
-                TARGET_NODE = (env.TARGET_NODE) ?: tempest_node
-                if (TARGET_NODE != tempest_node) {
-                    common.infoMsg("TARGET_NODE is defined in Jenkins")
-                    def params_to_update = ['tempest_test_target': "${TARGET_NODE}"]
-                    common.infoMsg("Overriding default ${tempest_node} value of tempest_test_target parameter")
-                    result = salt.runSaltCommand(saltMaster, 'local', ['expression': SERVICE_NODE, 'type': 'compound'], 'reclass.node_update',
-                                                 null, null, ['name': fullnodename, 'parameters': ['tempest_test_target': "${TARGET_NODE}"]])
-                    salt.checkResult(result)
-                }
-                common.infoMsg("TARGET_NODE is ${TARGET_NODE}")
-                salt.runSaltProcessStep(saltMaster, TARGET_NODE, 'file.remove', ["${remote_artifacts_dir}"])
-                salt.runSaltProcessStep(saltMaster, TARGET_NODE, 'file.mkdir', ["${remote_artifacts_dir}"])
+        else {
+            throw new Exception("Runtest config is not found in reclass. Please create runtest.yml and include it " +
+                                "in reclass. Check the documentation for more details")
+        }
+        common.infoMsg('Refreshing pillars on service node')
+        salt.runSaltProcessStep(saltMaster, SERVICE_NODE, 'saltutil.refresh_pillar', [], null, VERBOSE)
+        // default node is cid01 (preferably) or cfg01
+        default_node=salt.getPillar(saltMaster, 'I@salt:master', '_param:cicd_control_node01_hostname')['return'][0].values()[0] ?: 'cfg01'
+        // fetch tempest_test_target from runtest.yml, otherwise fall back to default_node
+        tempest_node=salt.getPillar(saltMaster, SERVICE_NODE, '_param:tempest_test_target')['return'][0].values()[0] ?: default_node+'*'
+        // TARGET_NODE will always override any settings above
+        TARGET_NODE = (env.TARGET_NODE) ?: tempest_node
+    }
+    stage('Preparing resources') {
+        if ( PREPARE_RESOURCES.toBoolean() ) {
+            common.infoMsg('Running salt.minion state on service node')
+            salt.enforceState(saltMaster, SERVICE_NODE, ['salt.minion'], VERBOSE, STOP_ON_ERROR, null, false, 300, 2, true, [], 60)
+            common.infoMsg('Running keystone.client on service node')
+            salt.enforceState(saltMaster, SERVICE_NODE, 'keystone.client', VERBOSE, STOP_ON_ERROR)
+            common.infoMsg('Running glance.client on service node')
+            salt.enforceState(saltMaster, SERVICE_NODE, 'glance.client', VERBOSE, STOP_ON_ERROR)
+            common.infoMsg('Running nova.client on service node')
+            salt.enforceState(saltMaster, SERVICE_NODE, 'nova.client', VERBOSE, STOP_ON_ERROR)
+        }
+        else {
+            common.infoMsg('Skipping resources preparation')
+        }
+    }
+    stage('Generate config') {
+        if ( GENERATE_CONFIG.toBoolean() ) {
+            // default is /root/test/
+            runtest_tempest_cfg_dir = (env.runtest_tempest_cfg_dir) ?: salt.getPillar(saltMaster, SERVICE_NODE, '_param:runtest_tempest_cfg_dir')['return'][0].values()[0]
+            // default is tempest_generated.conf
+            runtest_tempest_cfg_name = (env.runtest_tempest_cfg_name) ?: salt.getPillar(saltMaster, SERVICE_NODE, '_param:runtest_tempest_cfg_name')['return'][0].values()[0]
+            common.infoMsg("runtest_tempest_cfg is ${runtest_tempest_cfg_dir}/${runtest_tempest_cfg_name}")
+            salt.runSaltProcessStep(saltMaster, SERVICE_NODE, 'file.remove', ["${runtest_tempest_cfg_dir}"])
+            salt.runSaltProcessStep(saltMaster, SERVICE_NODE, 'file.mkdir', ["${runtest_tempest_cfg_dir}"])
+            fullnodename = salt.getMinions(saltMaster, SERVICE_NODE).get(0)
+            if (TARGET_NODE != tempest_node) {
+                common.infoMsg("TARGET_NODE is defined in Jenkins")
+                def params_to_update = ['tempest_test_target': "${TARGET_NODE}"]
+                common.infoMsg("Overriding default ${tempest_node} value of tempest_test_target parameter")
+                result = salt.runSaltCommand(saltMaster, 'local', ['expression': SERVICE_NODE, 'type': 'compound'], 'reclass.node_update',
+                                             null, null, ['name': fullnodename, 'parameters': ['tempest_test_target': "${TARGET_NODE}"]])
+                salt.checkResult(result)
+            }
+            common.infoMsg("TARGET_NODE is ${TARGET_NODE}")
+            salt.runSaltProcessStep(saltMaster, TARGET_NODE, 'file.remove', ["${remote_artifacts_dir}"])
+            salt.runSaltProcessStep(saltMaster, TARGET_NODE, 'file.mkdir', ["${remote_artifacts_dir}"])
+            // the runtest state hangs if tempest_test_target is cfg01*,
+            // so run only runtest.generate_tempest_config in that case
+            if (TARGET_NODE == 'cfg01*') {
+                common.warningMsg("It is not recommended to run Tempest container on cfg node, but.. proceeding")
+                salt.enforceState(saltMaster, SERVICE_NODE, 'runtest.generate_tempest_config', VERBOSE, STOP_ON_ERROR)
+            } else {
                 salt.enforceState(saltMaster, SERVICE_NODE, 'runtest', VERBOSE, STOP_ON_ERROR)
-                // we need to refresh pillars on target node after runtest state
-                salt.runSaltProcessStep(saltMaster, TARGET_NODE, 'saltutil.refresh_pillar', [], null, VERBOSE)
-                if (TARGET_NODE != tempest_node) {
-                    common.infoMsg("Reverting tempest_test_target parameter")
-                    result = salt.runSaltCommand(saltMaster, 'local', ['expression': SERVICE_NODE, 'type': 'compound'], 'reclass.node_update',
-                                                 null, null, ['name': fullnodename, 'parameters': ['tempest_test_target': "${tempest_node}"]])
-                }
-                SKIP_LIST_PATH = (env.SKIP_LIST_PATH) ?: salt.getPillar(saltMaster, SERVICE_NODE, '_param:tempest_skip_list_path')['return'][0].values()[0]
-                runtest_tempest_cfg_dir = salt.getPillar(saltMaster, SERVICE_NODE, '_param:runtest_tempest_cfg_dir')['return'][0].values()[0] ?: '/root/test/'
-                if (SKIP_LIST_PATH) {
-                    salt.cmdRun(saltMaster, SERVICE_NODE, "salt-cp ${TARGET_NODE} ${SKIP_LIST_PATH} ${runtest_tempest_cfg_dir}/skip.list")
-                    args += ' --blacklist-file /root/tempest/skip.list '
-                }
             }
-            else {
-                common.infoMsg('Skipping Tempest config generation')
-                salt.cmdRun(saltMaster, TARGET_NODE, "rm -rf ${remote_artifacts_dir}/reports")
+            // we need to refresh pillars on target node after runtest state
+            salt.runSaltProcessStep(saltMaster, TARGET_NODE, 'saltutil.refresh_pillar', [], null, VERBOSE)
+            if (TARGET_NODE != tempest_node) {
+                common.infoMsg("Reverting tempest_test_target parameter")
+                result = salt.runSaltCommand(saltMaster, 'local', ['expression': SERVICE_NODE, 'type': 'compound'], 'reclass.node_update',
+                                             null, null, ['name': fullnodename, 'parameters': ['tempest_test_target': "${tempest_node}"]])
+            }
+            SKIP_LIST_PATH = (env.SKIP_LIST_PATH) ?: salt.getPillar(saltMaster, SERVICE_NODE, '_param:tempest_skip_list_path')['return'][0].values()[0]
+            if (SKIP_LIST_PATH) {
+                mounts = ["${runtest_tempest_cfg_dir}/skip.list": "/root/tempest/skip.list"]
+                salt.cmdRun(saltMaster, SERVICE_NODE, "salt-cp ${TARGET_NODE} ${SKIP_LIST_PATH} ${runtest_tempest_cfg_dir}/skip.list")
+                args += ' --blacklist-file /root/tempest/skip.list '
             }
         }
+        else {
+            common.infoMsg('Skipping Tempest config generation')
+            salt.cmdRun(saltMaster, TARGET_NODE, "rm -rf ${remote_artifacts_dir}/reports")
+        }
+    }
 
+    try{
         stage('Run Tempest tests') {
-            mounts = ['/root/test/tempest_generated.conf': '/etc/tempest/tempest.conf']
+            mounts = mounts + ["${runtest_tempest_cfg_dir}/${runtest_tempest_cfg_name}": "/etc/tempest/tempest.conf"]
             validate.runContainer(master: saltMaster, target: TARGET_NODE, dockerImageLink: TEST_IMAGE,
                                   mounts: mounts, name: container_name)
             report_prefix += 'tempest_'
@@ -135,7 +155,7 @@
             else {
                 if (TEMPEST_TEST_PATTERN != 'set=full') {
                     args += " -r ${TEMPEST_TEST_PATTERN} "
-                    report_prefix += 'full'
+                    report_prefix += 'custom'
                 }
             }
             salt.cmdRun(saltMaster, TARGET_NODE, "docker exec -e ARGS=\'${args}\' ${container_name} /bin/bash -c 'run-tempest'")
@@ -150,10 +170,6 @@
             archiveArtifacts artifacts: "${report_prefix}.*"
             junit "${report_prefix}.xml"
         }
-    } catch (Throwable e) {
-        // If there was an error or exception thrown, the build failed
-        currentBuild.result = "FAILURE"
-        throw e
     } finally {
         if (DEBUG_MODE == 'false') {
             validate.runCleanup(saltMaster, TARGET_NODE, container_name)
diff --git a/stacklight-upgrade.groovy b/stacklight-upgrade.groovy
index 7c932d0..75e2e40 100644
--- a/stacklight-upgrade.groovy
+++ b/stacklight-upgrade.groovy
@@ -70,7 +70,7 @@
             common.errorMsg('[ERROR] Elasticsearch VIP port could not be retrieved')
         }
 
-        pillar = salt.getReturnValues(salt.getPillar(master, "I@elasticsearch:client ${extra_tgt}", 'elasticsearch:client:server:scheme'))
+        pillar = salt.getReturnValues(salt.getPillar(master, "I@elasticsearch:client", 'elasticsearch:client:server:scheme'))
         def elasticsearch_scheme
         if(pillar) {
             elasticsearch_scheme = pillar
diff --git a/update-ceph.groovy b/update-ceph.groovy
index cde1401..72ac2d5 100644
--- a/update-ceph.groovy
+++ b/update-ceph.groovy
@@ -24,11 +24,11 @@
     return salt.cmdRun(master, target, cmd)
 }
 
-def waitForHealthy(master, tgt, attempts=100, timeout=10) {
+def waitForHealthy(master, tgt, count=0, attempts=100) {
     // wait for healthy cluster
     common = new com.mirantis.mk.Common()
     while (count<attempts) {
-        def health = runCephCommand(master, ADMIN_HOST, 'ceph health')['return'][0].values()[0]
+        def health = runCephCommand(master, tgt, 'ceph health')['return'][0].values()[0]
         if (health.contains('HEALTH_OK') || health.contains('HEALTH_WARN noout flag(s) set\n')) {
             common.infoMsg('Cluster is healthy')
             break;
diff --git a/upgrade-mcp-release.groovy b/upgrade-mcp-release.groovy
index bff2589..b42ec7e 100644
--- a/upgrade-mcp-release.groovy
+++ b/upgrade-mcp-release.groovy
@@ -44,7 +44,7 @@
 }
 
 def updateSaltStack(target, pkgs) {
-    salt.cmdRun(venvPepper, "I@salt:master", "salt -C '${target}' --async pkg.install force_yes=True pkgs='$pkgs'")
+    salt.cmdRun(venvPepper, 'I@salt:master', "salt -C '${target}' --async pkg.install force_yes=True pkgs='$pkgs'")
     // can't use same function from pipeline lib, as at the moment of running upgrade pipeline Jenkins
     // still using pipeline lib from current old mcp-version
     common.retry(20, 60) {
@@ -219,6 +219,25 @@
     }
 }
 
+def checkDebsums() {
+    // check for salt-formulas consistency
+    try {
+        try {
+            salt.cmdRun(venvPepper, 'I@salt:master', "salt -C 'I@salt:master' pkg.install force_yes=True pkgs=[debsums]")
+        }
+        catch (Exception ex) {
+            common.warningMsg('Unable to install the "debsums" package on cfg01. Salt-formulas integrity check skipped')
+        }
+        salt.cmdRun(venvPepper, 'I@salt:master', '> /root/debdsums_report; for i in $(dpkg-query -W -f=\'${Package}\\n\' | sed "s/ //g" |grep \'salt-formula-\'); do debsums -s ${i} 2>> /root/debdsums_report; done')
+        salt.cmdRun(venvPepper, 'I@salt:master', 'if [ -s "/root/debdsums_report" ]; then exit 1 ; fi')
+    }
+    catch (Exception ex) {
+        common.errorMsg(salt.cmdRun(venvPepper, 'I@salt:master', 'cat /root/debdsums_report ', true, null, false).get('return')[0].values()[0].trim())
+        common.errorMsg(ex.toString())
+        error('You have unexpected changes in formulas. All of them will be overwritten by the update. Unable to continue automatically')
+    }
+}
+
 if (common.validInputParam('PIPELINE_TIMEOUT')) {
     try {
         pipelineTimeout = env.PIPELINE_TIMEOUT.toInteger()
@@ -228,7 +247,7 @@
 }
 
 timeout(time: pipelineTimeout, unit: 'HOURS') {
-    node("python") {
+    node('python') {
         try {
             def inventoryBeforeFilename = "reclass-inventory-before.out"
             def inventoryAfterFilename = "reclass-inventory-after.out"
@@ -305,6 +324,7 @@
                 catch (Exception ex) {
                     error('You have uncommitted changes in your Reclass cluster model repository. Please commit or reset them and rerun the pipeline.')
                 }
+                checkDebsums()
                 if (updateClusterModel) {
                     common.infoMsg('Perform: UPDATE_CLUSTER_MODEL')
                     def dateTime = common.getDatetime()
@@ -362,9 +382,9 @@
                     salt.cmdRun(venvPepper, 'I@salt:master', "grep '^- system.defaults\$' /srv/salt/reclass/classes/cluster/$cluster_name/infra/init.yml || " +
                         "sed -i 's|^classes:|classes:\\n- system.defaults|' /srv/salt/reclass/classes/cluster/$cluster_name/infra/init.yml")
                     salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && " +
-                            "grep -r -l 'docker_image_jenkins: .*' cicd | xargs --no-run-if-empty sed -i 's|\\s*docker_image_jenkins: .*||g'")
+                        "grep -r -l 'docker_image_jenkins: .*' cicd | xargs --no-run-if-empty sed -i 's|\\s*docker_image_jenkins: .*||g'")
                     salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && " +
-                            "grep -r -l 'docker_image_jenkins_slave: .*' cicd | xargs --no-run-if-empty sed -i 's|\\s*docker_image_jenkins_slave: .*||g'")
+                        "grep -r -l 'docker_image_jenkins_slave: .*' cicd | xargs --no-run-if-empty sed -i 's|\\s*docker_image_jenkins_slave: .*||g'")
                     common.infoMsg("The following changes were made to the cluster model and will be commited. " +
                         "Please consider if you want to push them to the remote repository or not. You have to do this manually when the run is finished.")
                     salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && git diff")
@@ -374,7 +394,7 @@
                 try {
                     common.infoMsg('Perform: UPDATE Salt Formulas')
                     salt.fullRefresh(venvPepper, '*')
-                    salt.enforceState(venvPepper, 'I@salt:master', 'linux.system.repo')
+                    salt.enforceState([saltId: venvPepper, target: 'I@salt:master', state: ['linux.system.repo'], read_timeout: 60, retries: 2])
                     def saltEnv = salt.getPillar(venvPepper, 'I@salt:master', "_param:salt_master_base_environment").get("return")[0].values()[0]
                     salt.runSaltProcessStep(venvPepper, 'I@salt:master', 'state.sls_id', ["salt_master_${saltEnv}_pkg_formulas", 'salt.master.env'])
                     salt.fullRefresh(venvPepper, '*')
@@ -397,12 +417,13 @@
                 }
 
                 salt.fullRefresh(venvPepper, 'I@salt:master')
-                salt.enforceState(venvPepper, 'I@salt:master', 'reclass.storage', true)
+                salt.enforceState([saltId: venvPepper, target: 'I@salt:master', state: ['reclass.storage'], read_timeout: 60, retries: 2])
                 try {
-                    salt.enforceState(venvPepper, "I@salt:master", 'reclass', true)
+                    salt.enforceState([saltId: venvPepper, target: 'I@salt:master', state: ['reclass'], read_timeout: 60, retries: 2])
                 }
                 catch (Exception ex) {
-                    error("Reclass fails rendering. Pay attention to your cluster model.")
+                    common.errorMsg(ex.toString())
+                    error('Reclass fails rendering. Pay attention to your cluster model.')
                 }
 
                 salt.fullRefresh(venvPepper, '*')
@@ -466,10 +487,10 @@
 
             stage('Update Drivetrain') {
                 if (upgradeSaltStack) {
-                    updateSaltStack("I@salt:master", '["salt-master", "salt-common", "salt-api", "salt-minion"]')
+                    updateSaltStack('I@salt:master', '["salt-master", "salt-common", "salt-api", "salt-minion"]')
 
-                    salt.enforceState(venvPepper, "I@linux:system", 'linux.system.repo', true)
-                    updateSaltStack("I@salt:minion and not I@salt:master", '["salt-minion"]')
+                    salt.enforceState([saltId: venvPepper, target: 'I@linux:system', state: ['linux.system.repo'], read_timeout: 60, retries: 2])
+                    updateSaltStack('I@salt:minion and not I@salt:master', '["salt-minion"]')
                 }
 
                 if (updatePipelines) {
@@ -481,26 +502,32 @@
                 // update minions certs
                 // call for `salt.minion.ca` state on related nodes to make sure
                 // mine was updated with required data after salt-minion/salt-master restart salt:minion:ca
-                salt.enforceState(venvPepper, "I@salt:minion:ca", 'salt.minion.ca', true)
-                salt.enforceState(venvPepper, "I@salt:minion", 'salt.minion.cert', true)
+                salt.enforceState([saltId: venvPepper, target: 'I@salt:minion:ca', state: ['salt.minion.ca'], read_timeout: 60, retries: 2])
+                salt.enforceState([saltId: venvPepper, target: 'I@salt:minion', state: ['salt.minion.cert'], read_timeout: 60, retries: 2])
 
                 // run `salt.minion` to refresh all minion configs (for example _keystone.conf)
-                salt.enforceState([saltId: venvPepper, target: "I@salt:minion ${extra_tgt}", state: ['salt.minion'], read_timeout: 60, retries: 2])
+                salt.enforceState([saltId: venvPepper, target: 'I@salt:minion', state: ['salt.minion'], read_timeout: 60, retries: 2])
                 // Retry needed only for rare race-condition in user appearance
                 common.infoMsg('Perform: updating users and keys')
-                salt.enforceState(venvPepper, "I@linux:system", 'linux.system.user', true)
+                salt.enforceState([saltId: venvPepper, target: 'I@linux:system', state: ['linux.system.user'], read_timeout: 60, retries: 2])
                 common.infoMsg('Perform: updating openssh')
-                salt.enforceState(venvPepper, "I@linux:system", 'openssh', true)
+                salt.enforceState([saltId: venvPepper, target: 'I@linux:system', state: ['openssh'], read_timeout: 60, retries: 2])
 
-                salt.enforceState(venvPepper, 'I@jenkins:client and not I@salt:master', 'jenkins.client', true)
-                salt.cmdRun(venvPepper, "I@salt:master", "salt -C 'I@jenkins:client and I@docker:client and not I@salt:master' state.sls docker.client --async")
+                // apply salt API TLS if needed
+                def nginxAtMaster = salt.getPillar(venvPepper, 'I@salt:master', 'nginx:server:enabled').get('return')[0].values()[0]
+                if (nginxAtMaster.toString().toLowerCase() == 'true') {
+                    salt.enforceState([saltId: venvPepper, target: 'I@salt:master', state: ['nginx'], read_timeout: 60, retries: 2])
+                }
+
+                salt.enforceState([saltId: venvPepper, target: 'I@jenkins:client and not I@salt:master', state: ['jenkins.client'], read_timeout: 60, retries: 2])
+                salt.cmdRun(venvPepper, 'I@salt:master', "salt -C 'I@jenkins:client and I@docker:client and not I@salt:master' state.sls docker.client --async")
 
                 sleep(180)
 
                 common.infoMsg('Perform: Checking if Docker containers are up')
 
                 try {
-                    common.retry(10, 30) {
+                    common.retry(20, 30) {
                         salt.cmdRun(venvPepper, 'I@jenkins:client and I@docker:client', "! docker service ls | tail -n +2 | grep -v -E '\\s([0-9])/\\1\\s'")
                     }
                 }