Merge "Add 'retries_wait' parameter for enforceState method - Fix usage of non-defiened retries_wait variable" into release/2019.2.0
diff --git a/src/com/mirantis/mcp/Validate.groovy b/src/com/mirantis/mcp/Validate.groovy
index 29402fe..23d974b 100644
--- a/src/com/mirantis/mcp/Validate.groovy
+++ b/src/com/mirantis/mcp/Validate.groovy
@@ -46,6 +46,7 @@
def common = new com.mirantis.mk.Common()
def variables = ''
def entry_point = ''
+ def cluster_name = salt.getPillar(master, 'I@salt:master', '_param:cluster_name')['return'][0].values()[0]
if ( salt.cmdRun(master, target, "docker ps -f name=${name} -q", false, null, false)['return'][0].values()[0] ) {
salt.cmdRun(master, target, "docker rm -f ${name}")
}
@@ -56,7 +57,8 @@
entry_point = '--entrypoint /bin/bash'
}
salt.cmdRun(master, target, "docker run -tid --net=host --name=${name} " +
- "-u root ${entry_point} ${variables} ${dockerImageLink}")
+ "-u root ${entry_point} ${variables} " +
+ "-v /srv/salt/pki/${cluster_name}/:/etc/certs ${dockerImageLink}")
}
@@ -98,9 +100,11 @@
keystone.add("OS_AUTH_URL=${_pillar.auth.auth_url}/v3")
keystone.add("OS_REGION_NAME=${_pillar.region_name}")
keystone.add("OS_IDENTITY_API_VERSION=${_pillar.identity_api_version}")
- keystone.add("OS_ENDPOINT_TYPE=admin")
+ keystone.add("OS_ENDPOINT_TYPE=internal")
keystone.add("OS_PROJECT_DOMAIN_NAME=${_pillar.auth.project_domain_name}")
keystone.add("OS_USER_DOMAIN_NAME=${_pillar.auth.user_domain_name}")
+ // we mount /srv/salt/pki/${cluster_name}/ into the container at /etc/certs to provide certificates for the cvp container
+ keystone.add("OS_CACERT='/etc/certs/proxy-with-chain.crt'")
return keystone
}
else {
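
Taken together, the two Validate.groovy hunks above mount the cluster PKI directory into the cvp container and point the OpenStack clients at the mounted CA bundle. A minimal sketch of the pieces involved, assuming a hypothetical pepperEnv 'master', target 'cmp01*', container name 'cvp' and image 'my-cvp-image:latest' (all illustrative):

    def salt = new com.mirantis.mk.Salt()
    // cluster_name is resolved from the _param:cluster_name pillar on the Salt master
    def cluster_name = salt.getPillar(master, 'I@salt:master', '_param:cluster_name')['return'][0].values()[0]
    // the cluster PKI directory ends up at /etc/certs inside the container,
    // which is where OS_CACERT='/etc/certs/proxy-with-chain.crt' points
    salt.cmdRun(master, 'cmp01*', "docker run -tid --net=host --name=cvp " +
        "-u root -v /srv/salt/pki/${cluster_name}/:/etc/certs my-cvp-image:latest")
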
diff --git a/src/com/mirantis/mk/Common.groovy b/src/com/mirantis/mk/Common.groovy
index 901b842..aecbc9b 100644
--- a/src/com/mirantis/mk/Common.groovy
+++ b/src/com/mirantis/mk/Common.groovy
@@ -525,6 +525,9 @@
/**
* Retry commands passed to body
*
+ * Do not use the common.retry method to retry the salt.enforceState method. Use the retries
+ * parameter built into the salt.enforceState method instead to ensure correct functionality.
+ *
* @param times Number of retries
* @param delay Delay between retries (in seconds)
* @param body Commands to be in retry block
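
This guidance is what the Orchestrate.groovy changes below apply: wrapping salt.enforceState in common.retry re-runs the whole closure from the outside, whereas the retries/retries_wait parameters let enforceState handle the retry itself. A minimal sketch of the two call styles, assuming a pepperEnv 'master' (target and counts are illustrative):

    // discouraged: external retry wrapped around enforceState
    common.retry(3, 5) {
        salt.enforceState([saltId: master, target: 'I@nginx:server', state: 'salt.minion.cert'])
    }
    // preferred: built-in retry handling inside enforceState
    salt.enforceState([saltId: master, target: 'I@nginx:server', state: 'salt.minion.cert',
                       retries: 3, retries_wait: 5])
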
diff --git a/src/com/mirantis/mk/Orchestrate.groovy b/src/com/mirantis/mk/Orchestrate.groovy
index a62503f..592650c 100644
--- a/src/com/mirantis/mk/Orchestrate.groovy
+++ b/src/com/mirantis/mk/Orchestrate.groovy
@@ -44,15 +44,11 @@
} catch (Throwable e) {
common.warningMsg('Salt state salt.minion.base is not present in the Salt-formula yet.')
}
- common.retry(2,5){
- salt.enforceState([saltId: master, target: "* ${extra_tgt}", state: ['linux.system']])
- }
+ salt.enforceState([saltId: master, target: "* ${extra_tgt}", state: ['linux.system'], retries: 2])
if (staticMgmtNet) {
salt.runSaltProcessStep(master, "* ${extra_tgt}", 'cmd.shell', ["salt-call state.sls linux.network; salt-call service.restart salt-minion"], null, true, 60)
}
- common.retry(2,5){
- salt.enforceState([saltId: master, target: "I@linux:network:interface ${extra_tgt}", state: ['linux.network.interface']])
- }
+ salt.enforceState([saltId: master, target: "I@linux:network:interface ${extra_tgt}", state: ['linux.network.interface'], retries: 2])
sleep(5)
salt.enforceState([saltId: master, target: "I@linux:system ${extra_tgt}", state: ['linux', 'openssh', 'ntp', 'rsyslog']])
@@ -91,9 +87,7 @@
} catch (Throwable e) {
common.warningMsg('Salt state salt.minion.base is not present in the Salt-formula yet.')
}
- common.retry(2,5){
- salt.enforceState([saltId: master, target: target, state: ['linux.system']])
- }
+ salt.enforceState([saltId: master, target: target, state: ['linux.system'], retries: 2])
if (staticMgmtNet) {
salt.runSaltProcessStep(master, target, 'cmd.shell', ["salt-call state.sls linux.network; salt-call service.restart salt-minion"], null, true, 60)
}
@@ -394,6 +388,16 @@
salt.enforceStateWithTest([saltId: master, target: "I@barbican:server:role:primary ${extra_tgt}", state: 'barbican.server', testTargetMatcher: "I@barbican:server ${extra_tgt}"])
salt.enforceStateWithTest([saltId: master, target: "I@barbican:server ${extra_tgt}", state: 'barbican.server'])
+ if (salt.testTarget(master, "I@barbican:server ${extra_tgt}")) {
+ // Restart apache to make sure we don't have races between barbican-api and barbican-worker on db init.
+ // For more info please see PROD-26988
+ // The permanent fix is prepared in the barbican formula, https://gerrit.mcp.mirantis.com/#/c/35097/, but due to the release rush
+ // we add this workaround here as well.
+ // TODO(vsaienko): cleanup once release passed in favor of permanent fix.
+ salt.runSaltProcessStep(master, "I@barbican:server ${extra_tgt}", 'service.restart', ['apache2'])
+ sleep(30)
+ }
+
// Install barbican client
salt.enforceStateWithTest([saltId: master, target: "I@barbican:client ${extra_tgt}", state: 'barbican.client'])
@@ -646,12 +650,12 @@
// Run k8s on first node without master.setup and master.kube-addons
salt.enforceStateWithExclude([saltId: master, target: "${first_target} ${extra_tgt}", state: "kubernetes.master", excludedStates: "kubernetes.master.setup,kubernetes.master.kube-addons"])
// Run k8s without master.setup and master.kube-addons
- salt.enforceStateWithExclude([saltId: master, target: "I@kubernetes:master ${extra_tgt}", state: "kubernetes", excludedStates: "kubernetes.master.setup,kubernetes.master.kube-addons"])
+ salt.enforceStateWithExclude([saltId: master, target: "I@kubernetes:master ${extra_tgt}", state: "kubernetes", excludedStates: "kubernetes.master.setup,kubernetes.master.kube-addons,kubernetes.client"])
} else {
// Run k8s on first node without master.setup and master.kube-addons
salt.enforceStateWithExclude([saltId: master, target: "${first_target} ${extra_tgt}", state: "kubernetes.master", excludedStates: "kubernetes.master.setup"])
// Run k8s without master.setup
- salt.enforceStateWithExclude([saltId: master, target: "I@kubernetes:master ${extra_tgt}", state: "kubernetes", excludedStates: "kubernetes.master.setup"])
+ salt.enforceStateWithExclude([saltId: master, target: "I@kubernetes:master ${extra_tgt}", state: "kubernetes", excludedStates: "kubernetes.master.setup,kubernetes.client"])
}
// Run k8s master setup
@@ -685,6 +689,13 @@
salt.runSaltProcessStep(master, "I@kubernetes:pool and not I@kubernetes:master ${extra_tgt}", 'service.restart', ['kubelet'])
}
+def installKubernetesClient(master, extra_tgt = '') {
+ def salt = new com.mirantis.mk.Salt()
+
+ // Install kubernetes client
+ salt.enforceStateWithTest([saltId: master, target: "I@kubernetes:client ${extra_tgt}", state: 'kubernetes.client'])
+}
+
def installDockerSwarm(master, extra_tgt = '') {
def salt = new com.mirantis.mk.Salt()
@@ -814,8 +825,8 @@
def installStacklight(master, extra_tgt = '') {
def common = new com.mirantis.mk.Common()
def salt = new com.mirantis.mk.Salt()
- def retries_wait = 20
- def retries = 15
+ def step_retries_wait = 20
+ def step_retries = 15
def first_target
// Install core services for K8S environments:
@@ -824,9 +835,7 @@
// In case of OpenStack, those are already installed
if (common.checkContains('STACK_INSTALL', 'k8s')) {
salt.enforceStateWithTest([saltId: master, target: "I@glusterfs:client ${extra_tgt}", state: 'glusterfs.client', retries: 2])
- common.retry(3, 5){
- salt.enforceState([saltId: master, target: "I@nginx:server ${extra_tgt}", state: 'salt.minion.cert'])
- }
+ salt.enforceState([saltId: master, target: "I@nginx:server ${extra_tgt}", state: 'salt.minion.cert', retries: 3])
salt.enforceState([saltId: master, target: "I@haproxy:proxy ${extra_tgt}", state: 'haproxy'])
salt.runSaltProcessStep(master, "I@haproxy:proxy ${extra_tgt}", 'service.status', ['haproxy'])
@@ -839,9 +848,7 @@
salt.enforceState([saltId: master, target: "I@mongodb:server ${extra_tgt}", state: 'mongodb.server'])
// Initialize mongodb replica set
- common.retry(5,20){
- salt.enforceState([saltId: master, target: "I@mongodb:server ${extra_tgt}", state: 'mongodb.cluster'])
- }
+ salt.enforceState([saltId: master, target: "I@mongodb:server ${extra_tgt}", state: 'mongodb.cluster', retries: 5, retries_wait: 20])
}
//Install Telegraf
@@ -877,18 +884,14 @@
} else {
common.errorMsg('[ERROR] Elasticsearch VIP port could not be retrieved')
}
- common.retry(retries,retries_wait) {
+ common.retry(step_retries,step_retries_wait) {
common.infoMsg('Waiting for Elasticsearch to become green..')
salt.cmdRun(master, "I@elasticsearch:client ${extra_tgt}", "curl -sf ${elasticsearch_vip}:${elasticsearch_port}/_cat/health | awk '{print \$4}' | grep green")
}
- common.retry(retries,retries_wait) {
- salt.enforceState([saltId: master, target: "I@elasticsearch:client ${extra_tgt}", state: 'elasticsearch.client'])
- }
+ salt.enforceState([saltId: master, target: "I@elasticsearch:client ${extra_tgt}", state: 'elasticsearch.client', retries: step_retries, retries_wait: step_retries_wait])
- common.retry(retries,retries_wait) {
- salt.enforceState([saltId: master, target: "I@kibana:client ${extra_tgt}", state: 'kibana.client'])
- }
+ salt.enforceState([saltId: master, target: "I@kibana:client ${extra_tgt}", state: 'kibana.client', retries: step_retries, retries_wait: step_retries_wait])
//Install InfluxDB
if (salt.testTarget(master, "I@influxdb:server ${extra_tgt}")) {
diff --git a/src/com/mirantis/mk/Salt.groovy b/src/com/mirantis/mk/Salt.groovy
index 3c44dfe..09e1199 100644
--- a/src/com/mirantis/mk/Salt.groovy
+++ b/src/com/mirantis/mk/Salt.groovy
@@ -485,6 +485,39 @@
}
/**
+ * Call this function when you need to check that all minions are available, free and ready for command execution
+ * @param config LinkedHashMap config parameter, which contains the following:
+ * @param saltId Salt Connection object or pepperEnv (the command will be sent using the selected method)
+ * @param target unique identification of a minion or group of salt minions
+ * @param target_reachable unique identification of a minion or group of salt minions to check availability
+ * @param wait delay between retries when checking target minions (default 30)
+ * @param retries finite number of iterations to check minions (default 10)
+ * @param timeout timeout for the salt command if minions do not return (default 5)
+ * @param availability check that minions are also available before checking readiness (default true)
+ */
+def checkTargetMinionsReady(LinkedHashMap config) {
+ def common = new com.mirantis.mk.Common()
+ def saltId = config.get('saltId')
+ def target = config.get('target')
+ def target_reachable = config.get('target_reachable', target)
+ def wait = config.get('wait', 30)
+ def retries = config.get('retries', 10)
+ def timeout = config.get('timeout', 5)
+ def checkAvailability = config.get('availability', true)
+ common.retry(retries, wait) {
+ if (checkAvailability) {
+ minionsReachable(saltId, 'I@salt:master', target_reachable)
+ }
+ def running = runSaltProcessStep(saltId, target, 'saltutil.running', [], null, true, timeout)
+ for (value in running.get("return")[0].values()) {
+ if (value != []) {
+ throw new Exception("Not all salt-minions are ready for execution")
+ }
+ }
+ }
+}
+
+/**
* Run command on salt minion (salt cmd.run wrapper)
* @param saltId Salt Connection object or pepperEnv (the command will be sent using the selected method)
* @param target Get pillar target
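
A minimal usage sketch for the new checkTargetMinionsReady helper, assuming a pepperEnv 'master' (targets and counts are illustrative). The call keeps retrying until saltutil.running returns an empty list on every targeted minion, optionally verifying reachability from the Salt master first:

    def salt = new com.mirantis.mk.Salt()
    // wait until compute minions are reachable and not busy with other Salt jobs
    salt.checkTargetMinionsReady([saltId: master, target: 'cmp*', wait: 60, retries: 15])
    // readiness check on ctl* while checking reachability of a wider target
    salt.checkTargetMinionsReady([saltId: master, target: 'ctl*', target_reachable: 'I@linux:system'])
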