Merge the tip of origin/release/proposed/2019.2.0 into origin/release/2019.2.0
b8b7f92 Make runPepper more resistant against SaltReqTimeoutError
4541e95 Fix enforce salt.minion state to run twice
a2e599a Enforce salt.minion state to run twice
1c6254f Change Heka installation condition in SL pipeline
f59d91a Added retries to the grafana.client part in SL block
b445d53 Fix getting pillar values for ES health check
792966a Replace runSaltProcessStep by cmdRun
5140ad3 [CVP,Q4] Better handling of offline minion situation for cvp methods
36b34c7 Add elasticsearch scheme var to status check
d74bf1d [CVP,Q4] Backport runContainer method for new cvp-func job
00608bc Change way to get jenkins master url
93e3198 [CVP] Fix cleanup execution for cvp jobs
e0326f6 [CVP] Fix tools_repo offline usage
365eba6 [CVP] Fix cvp-perf job for xrally 0.11.2
d2bb3f2 [CVP] Replace hardcode in runCVPrally method
f1390b6 [CVP] Add common to configureContainer method
Change-Id: I937976e84dea6552e1e3d5f319e10ddb039916fd
diff --git a/src/com/mirantis/mcp/Validate.groovy b/src/com/mirantis/mcp/Validate.groovy
index 23d974b..25f4465 100644
--- a/src/com/mirantis/mcp/Validate.groovy
+++ b/src/com/mirantis/mcp/Validate.groovy
@@ -33,34 +33,55 @@
/**
* Run docker container with parameters
*
- * @param target Host to run container
- * @param dockerImageLink Docker image link. May be custom or default rally image
- * @param name Name for container
- * @param env_var Environment variables to set in container
- * @param entrypoint Set entrypoint to /bin/bash or leave default
+ * @param target Host to run container
+ * @param dockerImageLink Docker image link. May be custom or default rally image
+ * @param name Name for container
+ * @param env_var Environment variables to set in container
+ * @param entrypoint Set entrypoint to /bin/bash or leave default
+ * @param mounts Map with mounts for container
**/
-
-def runContainer(master, target, dockerImageLink, name='cvp', env_var=[], entrypoint=true){
- def salt = new com.mirantis.mk.Salt()
+def runContainer(Map params){
def common = new com.mirantis.mk.Common()
+ defaults = ["name": "cvp", "env_var": [], "entrypoint": true]
+ params = defaults + params
+ def salt = new com.mirantis.mk.Salt()
def variables = ''
def entry_point = ''
- def cluster_name = salt.getPillar(master, 'I@salt:master', '_param:cluster_name')['return'][0].values()[0]
- if ( salt.cmdRun(master, target, "docker ps -f name=${name} -q", false, null, false)['return'][0].values()[0] ) {
- salt.cmdRun(master, target, "docker rm -f ${name}")
+ def tempest_conf_mount = ''
+ def mounts = ''
+ def cluster_name = salt.getPillar(params.master, 'I@salt:master', '_param:cluster_name')['return'][0].values()[0]
+ default_mounts = ["/etc/ssl/certs/": "/etc/ssl/certs/",
+ "/srv/salt/pki/${cluster_name}/": "/etc/certs",
+ "/root/test/": "/root/tempest/",
+ "/tmp/": "/tmp/",
+ "/etc/hosts": "/etc/hosts"]
+ params.mounts = default_mounts + params.mounts
+ if ( salt.cmdRun(params.master, params.target, "docker ps -f name=${params.name} -q", false, null, false)['return'][0].values()[0] ) {
+ salt.cmdRun(params.master, params.target, "docker rm -f ${params.name}")
}
- if (env_var.size() > 0) {
- variables = ' -e ' + env_var.join(' -e ')
+ if (params.env_var.size() > 0) {
+ variables = ' -e ' + params.env_var.join(' -e ')
}
- if (entrypoint) {
+ if (params.entrypoint) {
entry_point = '--entrypoint /bin/bash'
}
- salt.cmdRun(master, target, "docker run -tid --net=host --name=${name} " +
- "-u root ${entry_point} ${variables} " +
- "-v /srv/salt/pki/${cluster_name}/:/etc/certs ${dockerImageLink}")
+ params.mounts.each { local, container ->
+ mounts = mounts + " -v ${local}:${container}"
+ }
+ salt.cmdRun(params.master, params.target, "docker run -tid --net=host --name=${params.name}" +
+ "${mounts} -u root ${entry_point} ${variables} ${params.dockerImageLink}")
}
+def runContainer(master, target, dockerImageLink, name='cvp', env_var=[], entrypoint=true, mounts=[:]){
+ def common = new com.mirantis.mk.Common()
+ common.infoMsg("This method will be deprecated. Convert you method call to use Map as input parameter")
+ // Convert to Map
+ params = ['master': master, 'target': target, 'dockerImageLink': dockerImageLink, 'name': name, 'env_var': env_var,
+ 'entrypoint': entrypoint, 'mounts': mounts]
+ // Call new method with Map as parameter
+ return runContainer(params)
+}
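
For reference, a minimal usage sketch of the new Map-based signature (pepperEnv, the target expression and the image link are hypothetical, not part of this change). Note that 'mounts' is absent from the defaults map above, so direct callers of the Map form should pass at least an empty map for it:

    def validate = new com.mirantis.mcp.Validate()
    validate.runContainer([
        'master'         : pepperEnv,                          // hypothetical Salt connection
        'target'         : 'I@salt:master',                    // hypothetical minion target
        'dockerImageLink': 'xrally/xrally-openstack:0.11.2',   // hypothetical image
        'env_var'        : ['OS_CLOUD=admin'],
        'mounts'         : ['/srv/volumes/rally/': '/home/rally/.rally/']
        // 'name' and 'entrypoint' fall back to 'cvp' and true via the defaults map
    ])
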
/**
* Get v2 Keystone credentials from pillars
@@ -70,15 +91,26 @@
def salt = new com.mirantis.mk.Salt()
def common = new com.mirantis.mk.Common()
def keystone = []
+ _pillar = false
common.infoMsg("Fetching Keystone v2 credentials")
- _pillar = salt.getPillar(master, 'I@keystone:server', 'keystone:server')['return'][0].values()[0]
- keystone.add("OS_USERNAME=${_pillar.admin_name}")
- keystone.add("OS_PASSWORD=${_pillar.admin_password}")
- keystone.add("OS_TENANT_NAME=${_pillar.admin_tenant}")
- keystone.add("OS_AUTH_URL=http://${_pillar.bind.private_address}:${_pillar.bind.private_port}/v2.0")
- keystone.add("OS_REGION_NAME=${_pillar.region}")
- keystone.add("OS_ENDPOINT_TYPE=admin")
- return keystone
+ _response = salt.runSaltProcessStep(master, 'I@keystone:server', 'pillar.get', 'keystone:server', null, false, 1)['return'][0]
+ for (i = 0; i < _response.keySet().size(); i++) {
+ if ( _response.values()[i] ) {
+ _pillar = _response.values()[i]
+ }
+ }
+ if (_pillar) {
+ keystone.add("OS_USERNAME=${_pillar.admin_name}")
+ keystone.add("OS_PASSWORD=${_pillar.admin_password}")
+ keystone.add("OS_TENANT_NAME=${_pillar.admin_tenant}")
+ keystone.add("OS_AUTH_URL=http://${_pillar.bind.private_address}:${_pillar.bind.private_port}/v2.0")
+ keystone.add("OS_REGION_NAME=${_pillar.region}")
+ keystone.add("OS_ENDPOINT_TYPE=admin")
+ return keystone
+ }
+ else {
+ throw new Exception("Cannot fetch Keystone v2 credentials. Response: ${_response}")
+ }
}
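
The loop above scans the per-minion response and keeps the last non-empty value, so any minion that actually returned the pillar wins over minions that returned nothing. A hypothetical response shape (minion ids and values are illustrative):

    // _response from pillar.get over 'I@keystone:server':
    // ['ctl01.example.local': ['admin_name': 'admin', 'admin_password': '...',
    //                          'admin_tenant': 'admin', 'region': 'RegionOne', ...],
    //  'ctl02.example.local': '']      // matched, but returned nothing
    // _pillar ends up holding the ctl01 map; if every value is empty it
    // stays false and the Exception branch above is taken.
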
/**
@@ -88,9 +120,15 @@
def _get_keystone_creds_v3(master){
def salt = new com.mirantis.mk.Salt()
def common = new com.mirantis.mk.Common()
+ _pillar = false
pillar_name = 'keystone:client:os_client_config:cfgs:root:content:clouds:admin_identity'
common.infoMsg("Fetching Keystone v3 credentials")
- def _pillar = salt.getPillar(master, 'I@keystone:client', pillar_name)['return'][0].values()[0]
+ _response = salt.runSaltProcessStep(master, 'I@keystone:server', 'pillar.get', pillar_name, null, false, 1)['return'][0]
+ for (i = 0; i < _response.keySet().size(); i++) {
+ if ( _response.values()[i] ) {
+ _pillar = _response.values()[i]
+ }
+ }
def keystone = []
if (_pillar) {
keystone.add("OS_USERNAME=${_pillar.auth.username}")
@@ -634,26 +672,30 @@
 * @param tempest_version Version of tempest to use. This value is simply passed to the configure.sh script (cvp-configuration repo).
* @param conf_script_path Path to configuration script.
* @param ext_variables Some custom extra variables to add into container
+ * @param container_name Name of container to use
*/
def configureContainer(master, target, proxy, testing_tools_repo, tempest_repo,
tempest_endpoint_type="internalURL", tempest_version="",
- conf_script_path="", ext_variables = []) {
+ conf_script_path="", ext_variables = [], container_name="cvp") {
def salt = new com.mirantis.mk.Salt()
+ def common = new com.mirantis.mk.Common()
if (testing_tools_repo != "" ) {
+ workdir = ''
if (testing_tools_repo.contains('http://') || testing_tools_repo.contains('https://')) {
- salt.cmdRun(master, target, "docker exec cvp git clone ${testing_tools_repo} cvp-configuration")
+ salt.cmdRun(master, target, "docker exec ${container_name} git clone ${testing_tools_repo} cvp-configuration")
configure_script = conf_script_path != "" ? conf_script_path : "cvp-configuration/configure.sh"
}
else {
configure_script = testing_tools_repo
+ workdir = ' -w /var/lib/'
}
ext_variables.addAll("PROXY=${proxy}", "TEMPEST_REPO=${tempest_repo}",
"TEMPEST_ENDPOINT_TYPE=${tempest_endpoint_type}",
"tempest_version=${tempest_version}")
- salt.cmdRun(master, target, "docker exec -e " + ext_variables.join(' -e ') + " cvp bash -c ${configure_script}")
+ salt.cmdRun(master, target, "docker exec -e " + ext_variables.join(' -e ') + " ${workdir} ${container_name} bash -c ${configure_script}")
}
else {
- common.infoMsg("TOOLS_REPO is empty, no confguration is needed for container")
+ common.infoMsg("TOOLS_REPO is empty, no configuration is needed for this container")
}
}
@@ -689,16 +731,17 @@
* @param test_pattern Test pattern to run
* @param scenarios_path Path to Rally scenarios
 * @param output_dir Directory on target host for storing results (a container is not a good place)
+ * @param container_name Name of container to use
*/
-def runCVPrally(master, target, scenarios_path, output_dir, output_filename="docker-rally") {
+def runCVPrally(master, target, scenarios_path, output_dir, output_filename="docker-rally", container_name="cvp") {
def salt = new com.mirantis.mk.Salt()
def xml_file = "${output_filename}.xml"
def html_file = "${output_filename}.html"
- salt.cmdRun(master, target, "docker exec cvp rally task start ${scenarios_path}")
- salt.cmdRun(master, target, "docker exec cvp rally task report --out ${html_file}")
- salt.cmdRun(master, target, "docker exec cvp rally task report --junit --out ${xml_file}")
- salt.cmdRun(master, target, "docker cp cvp:/home/rally/${xml_file} ${output_dir}")
- salt.cmdRun(master, target, "docker cp cvp:/home/rally/${html_file} ${output_dir}")
+ salt.cmdRun(master, target, "docker exec ${container_name} rally task start ${scenarios_path}", false)
+ salt.cmdRun(master, target, "docker exec ${container_name} rally task report --out /home/rally/${html_file}", false)
+ salt.cmdRun(master, target, "docker exec ${container_name} rally task report --junit --out /home/rally/${xml_file}", false)
+ salt.cmdRun(master, target, "docker cp ${container_name}:/home/rally/${xml_file} ${output_dir}")
+ salt.cmdRun(master, target, "docker cp ${container_name}:/home/rally/${html_file} ${output_dir}")
}
@@ -805,10 +848,12 @@
 * Run the OpenStack cleanup script inside the container
*
* @param target Host with cvp container
+ * @param container_name Name of container
+ * @param script_path Path to cleanup script (inside container)
*/
-def openstack_cleanup(master, target, script_path="/home/rally/cvp-configuration/cleanup.sh") {
+def openstack_cleanup(master, target, container_name="cvp", script_path="/home/rally/cleanup.sh") {
def salt = new com.mirantis.mk.Salt()
- salt.runSaltProcessStep(master, "${target}", 'cmd.run', ["docker exec cvp bash -c ${script_path}"])
+ salt.runSaltProcessStep(master, "${target}", 'cmd.run', ["docker exec ${container_name} bash -c ${script_path}"])
}
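
Taken together, the new container_name parameter lets one pipeline drive a non-default container through configure, test run and cleanup; a hypothetical sketch (pepperEnv and all URLs/paths are illustrative):

    def validate = new com.mirantis.mcp.Validate()
    def container = 'cvp-rally'    // hypothetical non-default container name
    validate.configureContainer(pepperEnv, 'I@salt:master', '',
        'https://example.org/cvp-configuration.git',           // testing_tools_repo
        'https://example.org/tempest.git',                     // tempest_repo
        'internalURL', '', '', [], container)
    validate.runCVPrally(pepperEnv, 'I@salt:master',
        '/home/rally/cvp-configuration/rally/', '/root/rally_reports',
        'docker-rally', container)
    validate.openstack_cleanup(pepperEnv, 'I@salt:master', container)
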
diff --git a/src/com/mirantis/mk/Common.groovy b/src/com/mirantis/mk/Common.groovy
index aecbc9b..296dd38 100644
--- a/src/com/mirantis/mk/Common.groovy
+++ b/src/com/mirantis/mk/Common.groovy
@@ -505,18 +505,19 @@
*/
def shCmdStatus(cmd) {
+ // Set +x to hide odd messages about temp file manipulations
def res = [:]
- def stderr = sh(script: 'mktemp', returnStdout: true).trim()
- def stdout = sh(script: 'mktemp', returnStdout: true).trim()
+ def stderr = sh(script: 'set +x ; mktemp', returnStdout: true).trim()
+ def stdout = sh(script: 'set +x ; mktemp', returnStdout: true).trim()
try {
def status = sh(script: "${cmd} 1>${stdout} 2>${stderr}", returnStatus: true)
- res['stderr'] = sh(script: "cat ${stderr}", returnStdout: true)
- res['stdout'] = sh(script: "cat ${stdout}", returnStdout: true)
+ res['stderr'] = sh(script: "set +x; cat ${stderr}", returnStdout: true).trim()
+ res['stdout'] = sh(script: "set +x; cat ${stdout}", returnStdout: true).trim()
res['status'] = status
} finally {
- sh(script: "rm ${stderr}", returnStdout: true)
- sh(script: "rm ${stdout}", returnStdout: true)
+ sh(script: "set +x; rm ${stderr}")
+ sh(script: "set +x; rm ${stdout}")
}
return res
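
A minimal usage sketch of the dict-style result (the failing command is illustrative):

    def common = new com.mirantis.mk.Common()
    def res = common.shCmdStatus('ls /nonexistent')
    if (res['status'] != 0) {
        // stdout/stderr arrive trimmed, so they drop straight into messages
        common.errorMsg("Command failed (${res['status']}): ${res['stderr']}")
    }
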
diff --git a/src/com/mirantis/mk/Debian.groovy b/src/com/mirantis/mk/Debian.groovy
index d6c82db..6bb1f50 100644
--- a/src/com/mirantis/mk/Debian.groovy
+++ b/src/com/mirantis/mk/Debian.groovy
@@ -265,8 +265,8 @@
common.infoMsg("Running upgrade on ${target}")
salt.runSaltProcessStep(env, target, 'pkg.refresh_db', [], null, true)
- def cmd = 'export DEBIAN_FRONTEND=noninteractive; apt-get -y -q --allow-downgrades -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confold\" upgrade;'
- salt.runSaltProcessStep(env, target, 'cmd.run', [cmd])
+ def cmd = 'export DEBIAN_FRONTEND=noninteractive; apt-get -y -q --allow-downgrades -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confold\" upgrade'
+ salt.cmdRun(env, target, cmd)
}
/**
@@ -281,8 +281,8 @@
common.infoMsg("Running dist-upgrade on ${target}")
salt.runSaltProcessStep(env, target, 'pkg.refresh_db', [], null, true)
- def cmd = 'export DEBIAN_FRONTEND=noninteractive; apt-get -y -q --allow-downgrades -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confold\" dist-upgrade;'
- salt.runSaltProcessStep(env, target, 'cmd.run', [cmd])
+ def cmd = 'export DEBIAN_FRONTEND=noninteractive; apt-get -y -q --allow-downgrades -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confold\" dist-upgrade'
+ salt.cmdRun(env, target, cmd)
}
/**
diff --git a/src/com/mirantis/mk/Orchestrate.groovy b/src/com/mirantis/mk/Orchestrate.groovy
index d0d0b1e..cd7d2f4 100644
--- a/src/com/mirantis/mk/Orchestrate.groovy
+++ b/src/com/mirantis/mk/Orchestrate.groovy
@@ -274,7 +274,9 @@
// Running minion states in a batch to avoid races related to certificates which are placed on glusterfs
// Details on races: https://mirantis.jira.com/browse/PROD-25796
// TODO: Run in parallel when glusterfs for certificates is dropped in cookiecutter
- salt.enforceStateWithTest([saltId: master, target: "I@nginx:server ${extra_tgt}", state: 'salt.minion', batch: 1])
+ salt.enforceStateWithTest([saltId: master, target: "I@nginx:server ${extra_tgt}", state: 'salt.minion', batch: 1, failOnError: false, retries: 2])
+ salt.enforceStateWithTest([saltId: master, target: "I@nginx:server ${extra_tgt}", state: 'salt.minion', batch: 1, failOnError: true, retries: 1])
+
salt.enforceStateWithTest([saltId: master, target: "I@nginx:server ${extra_tgt}", state: 'nginx'])
// setup keystone service
@@ -785,8 +787,10 @@
}
// Jenkins
- def jenkins_master_url_pillar = salt.getPillar(master, jenkins_compound, '_param:jenkins_master_url')
- jenkins_master_url = salt.getReturnValues(jenkins_master_url_pillar)
+ def jenkins_master_host = salt.getReturnValues(salt.getPillar(master, jenkins_compound, '_param:jenkins_master_host'))
+ def jenkins_master_port = salt.getReturnValues(salt.getPillar(master, jenkins_compound, '_param:jenkins_master_port'))
+ def jenkins_master_protocol = salt.getReturnValues(salt.getPillar(master, jenkins_compound, '_param:jenkins_master_protocol'))
+ jenkins_master_url = "${jenkins_master_protocol}://${jenkins_master_host}:${jenkins_master_port}"
timeout(wait_timeout) {
common.infoMsg('Waiting for Jenkins to come up..')
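
With hypothetical pillar values, the three-pillar form composes the same URL the single jenkins_master_url pillar used to carry:

    // _param:jenkins_master_protocol = 'http'
    // _param:jenkins_master_host     = '172.16.10.254'   // illustrative
    // _param:jenkins_master_port     = '8081'
    // => jenkins_master_url == 'http://172.16.10.254:8081'
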
@@ -867,23 +871,35 @@
salt.enforceStateWithTest([saltId: master, target: "I@kibana:server:enabled:true ${extra_tgt}", state: 'kibana.server'])
// Check ES health cluster status
- def pillar = salt.getPillar(master, "I@elasticsearch:client ${extra_tgt}", 'elasticsearch:client:server:host')
+ def pillar = salt.getReturnValues(salt.getPillar(master, "I@elasticsearch:client ${extra_tgt}", 'elasticsearch:client:server:host'))
def elasticsearch_vip
- if(!pillar['return'].isEmpty()) {
- elasticsearch_vip = pillar['return'][0].values()[0]
+ if(pillar) {
+ elasticsearch_vip = pillar
} else {
common.errorMsg('[ERROR] Elasticsearch VIP address could not be retrieved')
}
- pillar = salt.getPillar(master, "I@elasticsearch:client ${extra_tgt}", 'elasticsearch:client:server:port')
+
+ pillar = salt.getReturnValues(salt.getPillar(master, "I@elasticsearch:client ${extra_tgt}", 'elasticsearch:client:server:port'))
def elasticsearch_port
- if(!pillar['return'].isEmpty()) {
- elasticsearch_port = pillar['return'][0].values()[0]
+ if(pillar) {
+ elasticsearch_port = pillar
} else {
common.errorMsg('[ERROR] Elasticsearch VIP port could not be retrieved')
}
+
+ pillar = salt.getReturnValues(salt.getPillar(master, "I@elasticsearch:client ${extra_tgt}", 'elasticsearch:client:server:scheme'))
+ def elasticsearch_scheme
+ if(pillar) {
+ elasticsearch_scheme = pillar
+ common.infoMsg("[INFO] Using elasticsearch scheme: ${elasticsearch_scheme}")
+ } else {
+ common.infoMsg('[INFO] No pillar with Elasticsearch server scheme, using scheme: http')
+ elasticsearch_scheme = "http"
+ }
+
common.retry(step_retries,step_retries_wait) {
common.infoMsg('Waiting for Elasticsearch to become green..')
- salt.cmdRun(master, "I@elasticsearch:client ${extra_tgt}", "curl -sf ${elasticsearch_vip}:${elasticsearch_port}/_cat/health | awk '{print \$4}' | grep green")
+ salt.cmdRun(master, "I@elasticsearch:client ${extra_tgt}", "curl -skf ${elasticsearch_scheme}://${elasticsearch_vip}:${elasticsearch_port}/_cat/health | awk '{print \$4}' | grep green")
}
salt.enforceState([saltId: master, target: "I@elasticsearch:client ${extra_tgt}", state: 'elasticsearch.client', retries: step_retries, retries_wait: step_retries_wait])
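
With hypothetical pillar values, the scheme-aware check now reaches TLS endpoints as well ('-k' tolerates self-signed certificates):

    // elasticsearch:client:server:host   = 172.16.10.253    // illustrative
    // elasticsearch:client:server:port   = 9200
    // elasticsearch:client:server:scheme = https
    // resulting command:
    //   curl -skf https://172.16.10.253:9200/_cat/health | awk '{print $4}' | grep green
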
@@ -930,7 +946,7 @@
salt.enforceState([saltId: master, target: "I@docker:swarm and I@prometheus:server ${extra_tgt}", state: 'prometheus'])
//Configure Remote Collector in Docker Swarm for Openstack deployments
- if (!common.checkContains('STACK_INSTALL', 'k8s')) {
+ if (salt.testTarget(master, "I@heka:remote_collector ${extra_tgt}")) {
salt.enforceState([saltId: master, target: "I@docker:swarm and I@prometheus:server ${extra_tgt}", state: 'heka.remote_collector', failOnError: false])
}
@@ -964,7 +980,8 @@
common.infoMsg("Waiting for service on http://${stacklight_vip}:15013/ to start")
sleep(120)
- salt.enforceState([saltId: master, target: "I@grafana:client ${extra_tgt}", state: 'grafana.client'])
+
+ salt.enforceState([saltId: master, target: "I@grafana:client ${extra_tgt}", state: 'grafana.client', retries: step_retries, retries_wait: step_retries_wait])
}
def installStacklightv1Control(master, extra_tgt = '') {
@@ -1020,7 +1037,7 @@
// Install collectd, heka and sensu services on the nodes, this will also
// generate the metadata that goes into the grains and eventually into Salt Mine
salt.enforceState([saltId: master, target: "* ${extra_tgt}", state: 'collectd'])
- salt.enforceState([saltId: master, target: "* ${extra_tgt}", state: 'salt.minion'])
+ salt.enforceState([saltId: master, target: "* ${extra_tgt}", state: 'salt.minion', retries: 2])
salt.enforceState([saltId: master, target: "* ${extra_tgt}", state: 'heka'])
// Gather the Grafana metadata as grains
diff --git a/src/com/mirantis/mk/Python.groovy b/src/com/mirantis/mk/Python.groovy
index 6183f51..82e9ee3 100644
--- a/src/com/mirantis/mk/Python.groovy
+++ b/src/com/mirantis/mk/Python.groovy
@@ -56,19 +56,24 @@
* @param path Path to virtualenv
* @param cmd Command to be executed
* @param silent dont print any messages (optional, default false)
+ * @param flexAnswer Return the answer as a dict with format ['status': int, 'stderr': str, 'stdout': str]
*/
-def runVirtualenvCommand(path, cmd, silent=false) {
+def runVirtualenvCommand(path, cmd, silent = false, flexAnswer = false) {
def common = new com.mirantis.mk.Common()
-
- virtualenv_cmd = "set +x; . ${path}/bin/activate; ${cmd}"
- if(!silent){
+ def res
+ def virtualenv_cmd = "set +x; . ${path}/bin/activate; ${cmd}"
+ if (!silent) {
common.infoMsg("[Python ${path}] Run command ${cmd}")
}
- output = sh(
- returnStdout: true,
- script: virtualenv_cmd
- ).trim()
- return output
+ if (flexAnswer) {
+ res = common.shCmdStatus(virtualenv_cmd)
+ } else {
+ res = sh(
+ returnStdout: true,
+ script: virtualenv_cmd
+ ).trim()
+ }
+ return res
}
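
A usage sketch of the new flexAnswer mode (venv path and commands are illustrative):

    def python = new com.mirantis.mk.Python()
    // default mode: trimmed stdout as a plain string, as before
    def out = python.runVirtualenvCommand('/tmp/venv', 'pip --version')
    // flexAnswer mode: the shCmdStatus dict, so callers can branch on
    // the exit code instead of relying on an exception
    def res = python.runVirtualenvCommand('/tmp/venv', 'pepper --version', true, true)
    if (res['status'] != 0) {
        error("pepper is not available: ${res['stderr']}")
    }
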
diff --git a/src/com/mirantis/mk/Salt.groovy b/src/com/mirantis/mk/Salt.groovy
index 4e76538..7fd8ad0 100644
--- a/src/com/mirantis/mk/Salt.groovy
+++ b/src/com/mirantis/mk/Salt.groovy
@@ -1125,35 +1125,53 @@
* @param venv Path to virtualenv with
*/
-def runPepperCommand(data, venv) {
+def runPepperCommand(data, venv) {
def common = new com.mirantis.mk.Common()
def python = new com.mirantis.mk.Python()
def dataStr = new groovy.json.JsonBuilder(data).toString()
+ // TODO(alexz): parametrize?
+ int retry = 10
def pepperCmdFile = "${venv}/pepper-cmd.json"
writeFile file: pepperCmdFile, text: dataStr
def pepperCmd = "pepper -c ${venv}/pepperrc --make-token -x ${venv}/.peppercache --json-file ${pepperCmdFile}"
- if (venv) {
- output = python.runVirtualenvCommand(venv, pepperCmd, true)
- } else {
- echo("[Command]: ${pepperCmd}")
- output = sh (
- script: pepperCmd,
- returnStdout: true
- ).trim()
- }
-
+ int tries = 0
+ def FullOutput = ['status': 1]
def outputObj
+ while (tries++ < retry) {
+ try {
+ if (venv) {
+ FullOutput = python.runVirtualenvCommand(venv, pepperCmd, true, true)
+ } else {
+ FullOutput = common.shCmdStatus(pepperCmd)
+ }
+ if (FullOutput['status'] != 0) {
+ error()
+ }
+ break
+ } catch (e) {
+ // Check if we got a failed pepper HTTP call, and retry
+ common.errorMsg("Command: ${pepperCmd} failed to execute with error:\n${FullOutput['stderr']}")
+ if (FullOutput['stderr'].contains('Error with request: HTTP Error 50') || FullOutput['stderr'].contains('Pepper error: Server error')) {
+ common.errorMsg("Pepper HTTP Error detected. Most probably, " +
+ "master SaltReqTimeoutError in master zmq thread issue...lets retry ${tries}/${retry}")
+ sleep(5)
+ continue
+ }
+ }
+ }
+ // Try to parse the JSON output. No point checking the exit code, since we always expect a JSON answer.
try {
- outputObj = new groovy.json.JsonSlurperClassic().parseText(output)
- } catch(Exception e) {
- common.errorMsg("Parsing Salt API JSON response failed! Response: " + output)
- throw e
+ outputObj = new groovy.json.JsonSlurperClassic().parseText(FullOutput['stdout'])
+ } catch (Exception jsonE) {
+ common.errorMsg('Parsing Salt API JSON response failed! Response: ' + FullOutput)
+ throw jsonE
}
return outputObj
}
+
/**
* Check time settings on defined nodes, compares them
* and evaluates the results