Merge "Replace /etc/aptly-publisher.yaml -> /etc/aptly/publisher.yaml"
diff --git a/src/com/mirantis/mcp/Validate.groovy b/src/com/mirantis/mcp/Validate.groovy
index 670b101..e023b4c 100644
--- a/src/com/mirantis/mcp/Validate.groovy
+++ b/src/com/mirantis/mcp/Validate.groovy
@@ -245,60 +245,114 @@
*
* @param target Host to run tests
* @param dockerImageLink Docker image link
+ * @param platform Underlying platform the tests run against (openstack/k8s)
* @param output_dir Directory for results
* @param repository Git repository with files for Rally
* @param branch Git branch which will be used during the checkout
+ * @param scenarios File or directory inside the repo with specific scenarios
+ * @param tasks_args_file Path to the task arguments file used for throttling settings ('none' to skip)
* @param ext_variables External variables: a list of env variables for openstack, a map with plugins_repo/plugins_branch for k8s
* @param results The reports directory
*/
-def runRallyTests(master, target, dockerImageLink, output_dir, repository, branch, scenarios, tasks_args_file, ext_variables = [], results = '/root/qa_results') {
+def runRallyTests(master, target, dockerImageLink, platform, output_dir, repository, branch, scenarios = '', tasks_args_file = '', ext_variables = [], results = '/root/qa_results') {
def salt = new com.mirantis.mk.Salt()
def output_file = 'docker-rally.log'
def dest_folder = '/home/rally/qa_results'
+ def env_vars = []
+ def rally_extra_args = ''
+ def cmd_rally_init = ''
+ def cmd_rally_checkout = ''
+ def cmd_rally_start = ''
+ def cmd_rally_task_args = ''
+ def cmd_report = "rally task export --type junit-xml --to ${dest_folder}/report-rally.xml; " +
+ "rally task report --out ${dest_folder}/report-rally.html"
salt.runSaltProcessStep(master, target, 'file.remove', ["${results}"])
salt.runSaltProcessStep(master, target, 'file.mkdir', ["${results}", "mode=777"])
- def _pillar = salt.getPillar(master, 'I@keystone:server', 'keystone:server')
- def keystone = _pillar['return'][0].values()[0]
- def env_vars = ( ['tempest_version=15.0.0',
- "OS_USERNAME=${keystone.admin_name}",
- "OS_PASSWORD=${keystone.admin_password}",
- "OS_TENANT_NAME=${keystone.admin_tenant}",
- "OS_AUTH_URL=http://${keystone.bind.private_address}:${keystone.bind.private_port}/v2.0",
- "OS_REGION_NAME=${keystone.region}",
- 'OS_ENDPOINT_TYPE=admin'] + ext_variables ).join(' -e ')
- def cmd0 = ''
- def cmd = '/opt/devops-qa-tools/deployment/configure.sh; ' +
- 'rally task start combined_scenario.yaml ' +
- '--task-args-file /opt/devops-qa-tools/rally-scenarios/task_arguments.yaml; '
- if (repository != '' ) {
- cmd = 'rally db create; ' +
+ if (platform == 'openstack') {
+ def _pillar = salt.getPillar(master, 'I@keystone:server', 'keystone:server')
+ def keystone = _pillar['return'][0].values()[0]
+ env_vars = ( ['tempest_version=15.0.0',
+ "OS_USERNAME=${keystone.admin_name}",
+ "OS_PASSWORD=${keystone.admin_password}",
+ "OS_TENANT_NAME=${keystone.admin_tenant}",
+ "OS_AUTH_URL=http://${keystone.bind.private_address}:${keystone.bind.private_port}/v2.0",
+ "OS_REGION_NAME=${keystone.region}",
+ 'OS_ENDPOINT_TYPE=admin'] + ext_variables ).join(' -e ')
+ if (repository == '' ) {
+ cmd_rally_init = ''
+ cmd_rally_start = '/opt/devops-qa-tools/deployment/configure.sh; ' +
+ "rally $rally_extra_args task start combined_scenario.yaml " +
+ '--task-args-file /opt/devops-qa-tools/rally-scenarios/task_arguments.yaml; '
+ cmd_rally_checkout = ''
+ } else {
+ cmd_rally_init = 'rally db create; ' +
'rally deployment create --fromenv --name=existing; ' +
'rally deployment config; '
+ cmd_rally_checkout = "git clone -b ${branch ?: 'master'} ${repository} test_config; "
if (scenarios == '') {
- cmd += 'rally task start test_config/rally/scenario.yaml '
+ cmd_rally_start = "rally $rally_extra_args task start test_config/rally/scenario.yaml "
} else {
- cmd += "rally task start scenarios.yaml "
- cmd0 = "git clone -b ${branch ?: 'master'} ${repository} test_config; " +
- "if [ -f ${scenarios} ]; then cp ${scenarios} scenarios.yaml; " +
- "else " +
- "find -L ${scenarios} -name '*.yaml' -exec cat {} >> scenarios.yaml \\; ; " +
- "sed -i '/---/d' scenarios.yaml; fi; "
+ cmd_rally_start = "rally $rally_extra_args task start scenarios.yaml "
+ cmd_rally_checkout += "if [ -f ${scenarios} ]; then cp ${scenarios} scenarios.yaml; " +
+ "else " +
+ "find -L ${scenarios} -name '*.yaml' -exec cat {} >> scenarios.yaml \\; ; " +
+ "sed -i '/---/d' scenarios.yaml; fi; "
}
- switch(tasks_args_file) {
- case 'none':
- cmd += '; '
- break
- case '':
- cmd += '--task-args-file test_config/rally/task_arguments.yaml; '
- break
- default:
- cmd += "--task-args-file ${tasks_args_file}; "
- break
+ }
+ } else if (platform == 'k8s') {
+ rally_extra_args = "--debug --log-file ${dest_folder}/task.log"
+ env_vars = (['tempest_version=15.0.0', 'KUBE_CONF=local']).join(' -e ')
+ def plugins_repo = ext_variables.plugins_repo
+ def plugins_branch = ext_variables.plugins_branch
+ def kubespec = 'existing@kubernetes:\n config_file: ' +
+ "${dest_folder}/kube.config\n"
+ def kube_config = salt.getReturnValues(salt.runSaltProcessStep(master,
+ 'I@kubernetes:master and *01*', 'cmd.run',
+ ["cat /etc/kubernetes/admin-kube-config"]))
+ def tmp_dir = '/tmp/kube'
+ salt.runSaltProcessStep(master, target, 'file.mkdir', ["${tmp_dir}", "mode=777"])
+ writeFile file: "${tmp_dir}/kubespec.yaml", text: kubespec
+ writeFile file: "${tmp_dir}/kube.config", text: kube_config
+ salt.cmdRun(master, target, "mv ${tmp_dir}/* ${results}/")
+ salt.runSaltProcessStep(master, target, 'file.rmdir', ["${tmp_dir}"])
+ cmd_rally_init = 'set -x; if [ ! -w ~/.rally ]; then sudo chown rally:rally ~/.rally ; fi; cd /tmp/; ' +
+ "git clone -b ${plugins_branch ?: 'master'} ${plugins_repo} plugins; " +
+ "sudo pip install --upgrade ./plugins; " +
+ "rally env create --name k8s --spec ${dest_folder}/kubespec.yaml; " +
+ "rally env check k8s; "
+ if (repository == '' ) {
+ cmd_rally_start = "rally $rally_extra_args task start " +
+ "./plugins/samples/scenarios/kubernetes/run-namespaced-pod.yaml; "
+ cmd_rally_checkout = ''
+ } else {
+ cmd_rally_checkout = "git clone -b ${branch ?: 'master'} ${repository} test_config; "
+ if (scenarios == '') {
+ cmd_rally_start = "rally $rally_extra_args task start test_config/rally-k8s/run-namespaced-pod.yaml "
+ } else {
+ cmd_rally_start = "rally $rally_extra_args task start scenarios.yaml "
+ cmd_rally_checkout += "if [ -f ${scenarios} ]; then cp ${scenarios} scenarios.yaml; " +
+ "else " +
+ "find -L ${scenarios} -name '*.yaml' -exec cat {} >> scenarios.yaml \\; ; " +
+ "sed -i '/---/d' scenarios.yaml; fi; "
}
+ }
+ } else {
+ throw new Exception("Platform ${platform} is not supported yet")
}
- cmd += "rally task export --type junit-xml --to ${dest_folder}/report-rally.xml; " +
- "rally task report --out ${dest_folder}/report-rally.html"
- full_cmd = cmd0 + cmd
+ if (repository != '' ) {
+ switch(tasks_args_file) {
+ case 'none':
+ cmd_rally_task_args = '; '
+ break
+ case '':
+ cmd_rally_task_args = '--task-args-file test_config/job-params-light.yaml; '
+ break
+ default:
+ cmd_rally_task_args = "--task-args-file ${tasks_args_file}; "
+ break
+ }
+ }
+ full_cmd = cmd_rally_init + cmd_rally_checkout + cmd_rally_start + cmd_rally_task_args + cmd_report
salt.runSaltProcessStep(master, target, 'file.touch', ["${results}/rally.db"])
salt.cmdRun(master, target, "chmod 666 ${results}/rally.db")
salt.cmdRun(master, target, "docker run -i --rm --net=host -e ${env_vars} " +
diff --git a/src/com/mirantis/mk/Git.groovy b/src/com/mirantis/mk/Git.groovy
index 9e3c460..d20c159 100644
--- a/src/com/mirantis/mk/Git.groovy
+++ b/src/com/mirantis/mk/Git.groovy
@@ -198,10 +198,10 @@
}
}
if (followTags == true) {
- ssh.agentSh "git push target --tags"
+ ssh.agentSh "git push -f target --tags"
if (pushSourceTags == true) {
- ssh.agentSh "git push origin --tags"
+ ssh.agentSh "git push -f origin --tags"
}
}
sh "git remote rm target"
diff --git a/src/com/mirantis/mk/Openstack.groovy b/src/com/mirantis/mk/Openstack.groovy
index 04ce85e..eedfbd8 100644
--- a/src/com/mirantis/mk/Openstack.groovy
+++ b/src/com/mirantis/mk/Openstack.groovy
@@ -478,31 +478,3 @@
salt.runSaltProcessStep(env, 'I@galera:slave', 'service.start', ['mysql'])
}
-
-/**
- * Recovers
- * @param master Salt master
- * @param recoverHost Hostname of the node to be recovered
- * @param healthyHost Hostname of healthy node from the same cluster
- * @return output of salt commands
- */
-def recoverGluster(master, recoverHost, healthyHost) {
- def salt = new com.mirantis.mk.Salt()
-
- // Recover glusterfs
- if (salt.testTarget(master, 'I@glusterfs:server')) {
- salt.enforceState(master, 'I@glusterfs:server', 'glusterfs.server.service')
- if (healthyHost != 'none' && recoverHost != 'none') {
- salt.runSaltCommand(master, 'local', ['expression': "E@${healthyHost}", 'type': 'compound'], "cp.push /var/lib/glusterd/vols/ upload_path='/tmp/'")
- salt.runSaltCommand(master, 'local', ['expression': "E@${recoverHost}", 'type': 'compound'], "get_dir salt://tmp/vols/ /var/lib/glusterd/")
- }
- salt.enforceState(master, 'I@glusterfs:server and *01*', 'glusterfs.server.setup', true, true, null, false, -1, 5)
- sleep(10)
- salt.cmdRun(master, 'I@glusterfs:server', "gluster peer status; gluster volume status")
- }
-
- // Ensure glusterfs clusters is ready
- if (salt.testTarget(master, 'I@glusterfs:client')) {
- salt.enforceState(master, 'I@glusterfs:client', 'glusterfs.client')
- }
-}
\ No newline at end of file
diff --git a/src/com/mirantis/mk/Orchestrate.groovy b/src/com/mirantis/mk/Orchestrate.groovy
index b0ecd96..3f7c068 100644
--- a/src/com/mirantis/mk/Orchestrate.groovy
+++ b/src/com/mirantis/mk/Orchestrate.groovy
@@ -37,7 +37,9 @@
} catch (Throwable e) {
common.warningMsg('Salt state salt.minion.base is not present in the Salt-formula yet.')
}
- salt.enforceState(master, '*', ['linux.system'])
+ common.retry(2,5){
+ salt.enforceState(master, '*', ['linux.system'])
+ }
if (staticMgmtNet) {
salt.runSaltProcessStep(master, '*', 'cmd.shell', ["salt-call state.sls linux.network; salt-call service.restart salt-minion"], null, true, 60)
}
@@ -75,7 +77,9 @@
} catch (Throwable e) {
common.warningMsg('Salt state salt.minion.base is not present in the Salt-formula yet.')
}
- salt.enforceState(master, target, ['linux.system'])
+ common.retry(2,5){
+ salt.enforceState(master, target, ['linux.system'])
+ }
if (staticMgmtNet) {
salt.runSaltProcessStep(master, target, 'cmd.shell', ["salt-call state.sls linux.network; salt-call service.restart salt-minion"], null, true, 60)
}
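Both hunks above wrap the occasionally flaky linux.system run in common.retry(attempts, waitSeconds) { ... }. The real helper lives in com.mirantis.mk.Common; as an assumption about its shape only, a minimal sketch:

    // Minimal sketch -- an assumption about the helper's shape, not the
    // actual com.mirantis.mk.Common implementation.
    def retry(int times, int delaySeconds, Closure body) {
        Exception lastError = null
        for (int attempt = 1; attempt <= times; attempt++) {
            try {
                return body()         // success: return the closure's result
            } catch (Exception e) {
                lastError = e
                sleep(delaySeconds)   // Jenkins 'sleep' step counts in seconds
            }
        }
        throw lastError               // all attempts exhausted (times >= 1 assumed)
    }

So common.retry(2, 5) { salt.enforceState(master, '*', ['linux.system']) } re-runs the state once more after a 5-second pause before giving up.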
@@ -94,37 +98,35 @@
def installInfraKvm(master) {
def common = new com.mirantis.mk.Common()
def salt = new com.mirantis.mk.Salt()
- salt.fullRefresh(master, 'I@linux:system')
- def infra_conpund = 'I@salt:control'
+ def infra_compound = 'I@salt:control'
def minions = []
def wait_timeout = 10
def retries = wait_timeout * 30
+ salt.fullRefresh(master, 'I@linux:system')
salt.enforceState(master, 'I@salt:control', ['salt.minion'], true, false, null, false, 60, 2)
salt.enforceState(master, 'I@salt:control', ['linux.system', 'linux.network', 'ntp', 'rsyslog'])
salt.enforceState(master, 'I@salt:control', 'libvirt')
salt.enforceState(master, 'I@salt:control', 'salt.control')
- timeout(wait_timeout) {
- common.infoMsg("Waiting for minions to come up...")
- if (salt.testTarget(master, infra_conpund)) {
- // Gathering minions
- for ( infra_node in salt.getMinionsSorted(master, infra_conpund) ) {
- def pillar = salt.getPillar(master, infra_node, 'salt:control:cluster')
- if ( !pillar['return'].isEmpty() ) {
- for ( cluster in pillar['return'][0].values() ) {
- def engine = cluster.values()[0]['engine']
- def domain = cluster.values()[0]['domain']
- def node = cluster.values()[0]['node']
- if ( engine == "virt" ) {
- def nodes = node.values()
- if ( !nodes.isEmpty() ) {
- for ( vm in nodes ) {
- if ( vm['name'] != null ) {
- def vm_fqdn = vm['name'] + '.' + domain
- if ( !minions.contains(vm_fqdn) ) {
- minions.add(vm_fqdn)
- }
+ common.infoMsg("Building minions list...")
+ if (salt.testTarget(master, infra_compound)) {
+ // Gathering minions
+ for ( infra_node in salt.getMinionsSorted(master, infra_compound) ) {
+ def pillar = salt.getPillar(master, infra_node, 'salt:control:cluster')
+ if ( !pillar['return'].isEmpty() ) {
+ for ( cluster in pillar['return'][0].values() ) {
+ def engine = cluster.values()[0]['engine']
+ def domain = cluster.values()[0]['domain']
+ def node = cluster.values()[0]['node']
+ if ( engine == "virt" ) {
+ def nodes = node.values()
+ if ( !nodes.isEmpty() ) {
+ for ( vm in nodes ) {
+ if ( vm['name'] != null ) {
+ def vm_fqdn = vm['name'] + '.' + domain
+ if ( !minions.contains(vm_fqdn) ) {
+ minions.add(vm_fqdn)
}
}
}
@@ -133,13 +135,18 @@
}
}
}
+ }
- def minions_compound = minions.join(' or ')
- common.infoMsg('Waiting for next minions to register: ' + minions_compound,)
+ def minions_compound = minions.join(' or ')
+
+ common.infoMsg("Waiting for next minions to register within ${wait_timeout} minutes: " + minions_compound)
+ timeout(time: wait_timeout, unit: 'MINUTES') {
salt.minionsPresentFromList(master, 'I@salt:master', minions, true, null, true, retries, 1)
- common.infoMsg('Waiting for minions to respond')
- salt.minionsReachable(master, 'I@salt:master', minions_compound )
+ }
+ common.infoMsg('Waiting for minions to respond')
+ timeout(time: wait_timeout, unit: 'MINUTES') {
+ salt.minionsReachable(master, 'I@salt:master', minions_compound)
}
common.infoMsg("All minions are up.")
@@ -162,7 +169,7 @@
// Ensure glusterfs cluster is ready
if (salt.testTarget(master, 'I@glusterfs:client')) {
- salt.enforceState(master, 'I@glusterfs:client', 'glusterfs.client')
+ salt.enforceState(master, 'I@glusterfs:client', 'glusterfs.client', true, true, null, false, -1, 2)
}
// Install galera
@@ -771,6 +778,8 @@
def installStacklight(master) {
def common = new com.mirantis.mk.Common()
def salt = new com.mirantis.mk.Salt()
+ def retries_wait = 20
+ def retries = 15
// Install core services for K8S environments:
// HAProxy, Nginx and GlusterFS clients
@@ -790,18 +799,14 @@
// Install MongoDB for Alerta
if (salt.testTarget(master, 'I@mongodb:server')) {
- salt.enforceState(master, 'I@mongodb:server', 'mongodb')
- }
+ salt.enforceState(master, 'I@mongodb:server', 'mongodb.server')
- // Configure Alerta
- if (salt.testTarget(master, 'I@prometheus:alerta')) {
- salt.enforceState(master, 'I@docker:swarm and I@prometheus:alerta', 'prometheus.alerta')
+ // Initialize mongodb replica set
+ common.retry(5,20){
+ salt.enforceState(master, 'I@mongodb:server', 'mongodb.cluster')
+ }
}
- // Launch containers
- salt.enforceState(master, 'I@docker:swarm:role:master and I@prometheus:server', 'docker.client')
- salt.runSaltProcessStep(master, 'I@docker:swarm and I@prometheus:server', 'dockerng.ps')
-
//Install Telegraf
salt.enforceState(master, 'I@telegraf:agent or I@telegraf:remote_agent', 'telegraf')
@@ -815,8 +820,34 @@
salt.enforceState(master, 'I@elasticsearch:server', 'elasticsearch.server')
salt.enforceState(master, '*01* and I@kibana:server', 'kibana.server')
salt.enforceState(master, 'I@kibana:server', 'kibana.server')
- salt.enforceState(master, 'I@elasticsearch:client', 'elasticsearch.client')
- salt.enforceState(master, 'I@kibana:client', 'kibana.client')
+
+ // Check ES cluster health status
+ def pillar = salt.getPillar(master, 'I@elasticsearch:client', 'elasticsearch:client:server:host')
+ def elasticsearch_vip
+ if(!pillar['return'].isEmpty()) {
+ elasticsearch_vip = pillar['return'][0].values()[0]
+ } else {
+ common.errorMsg('[ERROR] Elasticsearch VIP address could not be retrieved')
+ }
+ pillar = salt.getPillar(master, 'I@elasticsearch:client', 'elasticsearch:client:server:port')
+ def elasticsearch_port
+ if(!pillar['return'].isEmpty()) {
+ elasticsearch_port = pillar['return'][0].values()[0]
+ } else {
+ common.errorMsg('[ERROR] Elasticsearch VIP port could not be retrieved')
+ }
+ common.retry(retries,retries_wait) {
+ common.infoMsg('Waiting for Elasticsearch to become green...')
+ salt.cmdRun(master, 'I@elasticsearch:client', "curl -sf ${elasticsearch_vip}:${elasticsearch_port}/_cat/health | awk '{print \$4}' | grep green")
+ }
+
+ common.retry(retries,retries_wait) {
+ salt.enforceState(master, 'I@elasticsearch:client', 'elasticsearch.client')
+ }
+
+ common.retry(retries,retries_wait) {
+ salt.enforceState(master, 'I@kibana:client', 'kibana.client')
+ }
//Install InfluxDB
if (salt.testTarget(master, 'I@influxdb:server')) {
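The Elasticsearch readiness probe added above keys off the fourth whitespace-separated column of _cat/health, which is the cluster status; grep green makes the pipeline exit non-zero, failing the retry iteration, until the cluster settles. A representative output line, assuming the classic column layout (exact columns vary by Elasticsearch version):

    1526460000 09:20:00 stacklight green 3 3 120 60 0 0 0 0 - 100.0%
                                   ^-- $4, the value the check greps for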
@@ -824,11 +855,6 @@
salt.enforceState(master, 'I@influxdb:server', 'influxdb')
}
- //Install Prometheus LTS
- if (salt.testTarget(master, 'I@prometheus:relay')) {
- salt.enforceState(master, 'I@prometheus:relay', 'prometheus')
- }
-
// Install service for the log collection
if (salt.testTarget(master, 'I@fluentd:agent')) {
salt.enforceState(master, 'I@fluentd:agent', 'fluentd')
@@ -866,13 +892,22 @@
salt.enforceState(master, 'I@docker:swarm and I@prometheus:server', 'heka.remote_collector', true, false)
}
+ // Launch containers
+ salt.enforceState(master, 'I@docker:swarm:role:master and I@prometheus:server', 'docker.client')
+ salt.runSaltProcessStep(master, 'I@docker:swarm and I@prometheus:server', 'dockerng.ps')
+
+ //Install Prometheus LTS
+ if (salt.testTarget(master, 'I@prometheus:relay')) {
+ salt.enforceState(master, 'I@prometheus:relay', 'prometheus')
+ }
+
// Install sphinx server
if (salt.testTarget(master, 'I@sphinx:server')) {
salt.enforceState(master, 'I@sphinx:server', 'sphinx')
}
//Configure Grafana
- def pillar = salt.getPillar(master, 'ctl01*', '_param:stacklight_monitor_address')
+ pillar = salt.getPillar(master, 'ctl01*', '_param:stacklight_monitor_address')
common.prettyPrint(pillar)
def stacklight_vip
diff --git a/src/com/mirantis/mk/Salt.groovy b/src/com/mirantis/mk/Salt.groovy
index 0f81287..188af61 100644
--- a/src/com/mirantis/mk/Salt.groovy
+++ b/src/com/mirantis/mk/Salt.groovy
@@ -162,9 +162,10 @@
* @param retries Retry count for salt state. (optional, default -1 - no retries)
* @param queue salt queue parameter for state.sls calls (optional, default true) - CANNOT BE USED WITH BATCH
* @param saltArgs additional salt args eq. ["runas=aptly", exclude="opencontrail.database"]
+ * @param minionRestartWaitTimeout Time in seconds to wait after a minion restart is detected (optional, default 10)
* @return output of salt command
*/
-def enforceState(saltId, target, state, output = true, failOnError = true, batch = null, optional = false, read_timeout=-1, retries=-1, queue=true, saltArgs = []) {
+def enforceState(saltId, target, state, output = true, failOnError = true, batch = null, optional = false, read_timeout=-1, retries=-1, queue=true, saltArgs = [], minionRestartWaitTimeout=10) {
def common = new com.mirantis.mk.Common()
// add state to salt args
if (state instanceof String) {
@@ -196,7 +197,7 @@
out = runSaltCommand(saltId, 'local', ['expression': target, 'type': 'compound'], 'state.sls', batch, saltArgs.reverse(), kwargs, -1, read_timeout)
checkResult(out, failOnError, output)
}
- waitForMinion(out)
+ waitForMinion(out, minionRestartWaitTimeout)
return out
} else {
common.infoMsg("No Minions matched the target given, but 'optional' param was set to true - Pipeline continues. ")
@@ -269,27 +270,35 @@
if (waitUntilPresent){
def count = 0
while(count < maxRetries) {
+ try {
+ def out = runSaltCommand(saltId, 'local', ['expression': target, 'type': 'compound'], 'cmd.shell', batch, [cmd], null, 5)
+ if (output) {
+ printSaltCommandResult(out)
+ }
+ def valueMap = out["return"][0]
+ def result = valueMap.get(valueMap.keySet()[0])
+ def resultsArray = result.tokenize("\n")
+ def size = resultsArray.size()
+ if (size >= answers) {
+ return out
+ }
+ count++
+ sleep(time: 1000, unit: 'MILLISECONDS')
+ common.infoMsg("Waiting for ${cmd} on ${target} to be in correct state")
+ } catch (Exception er) {
+ common.infoMsg('[WARNING]: runSaltCommand read timed out after 5 seconds. The environment is very slow or broken')
+ }
+ }
+ } else {
+ try {
def out = runSaltCommand(saltId, 'local', ['expression': target, 'type': 'compound'], 'cmd.shell', batch, [cmd], null, 5)
if (output) {
printSaltCommandResult(out)
}
- def valueMap = out["return"][0]
- def result = valueMap.get(valueMap.keySet()[0])
- def resultsArray = result.tokenize("\n")
- def size = resultsArray.size()
- if (size >= answers) {
- return out
- }
- count++
- sleep(time: 1000, unit: 'MILLISECONDS')
- common.infoMsg("Waiting for ${cmd} on ${target} to be in correct state")
+ return out
+ } catch (Exception er) {
+ common.infoMsg('[WARNING]: runSaltCommand read timed out after 5 seconds. The environment is very slow or broken')
}
- } else {
- def out = runSaltCommand(saltId, 'local', ['expression': target, 'type': 'compound'], 'cmd.shell', batch, [cmd], null, 5)
- if (output) {
- printSaltCommandResult(out)
- }
- return out
}
// otherwise throw exception
common.errorMsg("Status of command ${cmd} on ${target} failed, please check it.")
@@ -801,7 +810,7 @@
*
* @param result Parsed response of Salt API
*/
-def waitForMinion(result) {
+def waitForMinion(result, minionRestartWaitTimeout=10) {
def common = new com.mirantis.mk.Common()
//In order to prevent multiple sleeps use bool variable to catch restart for any minion.
def isMinionRestarted = false
@@ -843,8 +852,8 @@
}
}
if (isMinionRestarted){
- common.infoMsg("Salt minion service restart detected. Sleep 10 seconds to wait minion restart")
- sleep(10)
+ common.infoMsg("Salt minion service restart detected. Sleep ${minionRestartWaitTimeout} seconds to wait minion restart")
+ sleep(minionRestartWaitTimeout)
}
}
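Because minionRestartWaitTimeout trails ten other optional parameters, a caller that expects a long minion restart has to spell out the defaults before it. A hypothetical call (target and state are placeholders):

    // Hypothetical call passing the new trailing parameter; the positional
    // arguments before it keep the defaults from the signature above.
    salt.enforceState(saltId, 'I@salt:minion', ['salt.minion'],
        true,   // output
        true,   // failOnError
        null,   // batch
        false,  // optional
        -1,     // read_timeout
        -1,     // retries
        true,   // queue
        [],     // saltArgs
        30)     // minionRestartWaitTimeout: sleep 30s after a detected restart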