Merge the tip of origin/release/proposed/2019.2.0 into origin/release/2019.2.0
99125a7 Add toString() method for package name for isPackageInstalled func
b2a6795 Add common function for getting Salt worker_threads value
4babe45 Update Galera functions for verify/restore
f119aa5 Update runSaltCommand command to support batch
9c56bc0 Get rid of hardcoded node names in pipeline library
Change-Id: Idac9bf2025b0ca7ba194079bf3c433f399f4c516
diff --git a/src/com/mirantis/mk/Galera.groovy b/src/com/mirantis/mk/Galera.groovy
index 3a10a1c..e5ffe12 100644
--- a/src/com/mirantis/mk/Galera.groovy
+++ b/src/com/mirantis/mk/Galera.groovy
@@ -50,50 +50,44 @@
* of the Salt mysql.status function. The result is then parsed, validated and output to the user.
*
* @param env Salt Connection object or pepperEnv
- * @param slave Boolean value to enable slave checking (if master in unreachable)
* @param checkTimeSync Boolean value to enable time sync check
* @return resultCode int values used to determine exit status in the calling function
*/
-def verifyGaleraStatus(env, slave=false, checkTimeSync=false) {
+def verifyGaleraStatus(env, checkTimeSync=false) {
def salt = new com.mirantis.mk.Salt()
def common = new com.mirantis.mk.Common()
- def out = ""
- def status = "unknown"
- def testNode = ""
- if (!slave) {
- try {
- galeraMaster = salt.getMinions(env, "I@galera:master")
- common.infoMsg("Current Galera master is: ${galeraMaster}")
- salt.minionsReachable(env, "I@salt:master", "I@galera:master")
- testNode = "I@galera:master"
- } catch (Exception e) {
- common.errorMsg('Galera master is not reachable.')
- common.errorMsg(e.getMessage())
- return 128
- }
- } else {
- try {
- galeraSlaves = salt.getMinions(env, "I@galera:slave")
- common.infoMsg("Testing Galera slave minions: ${galeraSlaves}")
- } catch (Exception e) {
- common.errorMsg("Cannot obtain Galera slave minions list.")
- common.errorMsg(e.getMessage())
- return 129
- }
- for (minion in galeraSlaves) {
+ def mysqlStatusReport = [
+ 'clusterMembersOnPower': [],
+ 'clusterMembersNotAvailable': [],
+ 'clusterMembersInClusterAlive': [],
+ 'clusterMembersNotAlive': [],
+ 'error': 0
+ ]
+
+ try {
+ def clusterMembers = salt.getMinions(env, "I@galera:master or I@galera:slave")
+ for (minion in clusterMembers) {
try {
salt.minionsReachable(env, "I@salt:master", minion)
- testNode = minion
- break
+ mysqlStatusReport['clusterMembersOnPower'] << minion
} catch (Exception e) {
common.warningMsg("Slave '${minion}' is not reachable.")
+ mysqlStatusReport['clusterMembersNotAvailable'] << minion
}
}
+ } catch (Exception e) {
+ common.errorMsg('Cannot obtain Galera minions list.')
+ common.errorMsg(e.getMessage())
+ mysqlStatusReport['error'] = 128
+ return mysqlStatusReport
}
- if (!testNode) {
- common.errorMsg("No Galera slave was reachable.")
- return 130
+
+ if (!mysqlStatusReport['clusterMembersOnPower']) {
+ common.errorMsg("No Galera member was reachable.")
+ mysqlStatusReport['error'] = 130
+ return mysqlStatusReport
}
+
def checkTargets = salt.getMinions(env, "I@xtrabackup:client or I@xtrabackup:server")
for (checkTarget in checkTargets) {
def nodeStatus = salt.minionsReachable(env, 'I@salt:master', checkTarget, null, 10, 5)
@@ -101,13 +95,15 @@
def iostatRes = salt.getIostatValues(['saltId': env, 'target': checkTarget, 'parameterName': "%util", 'output': true])
if (iostatRes == [:]) {
common.errorMsg("Recevived empty response from iostat call on ${checkTarget}. Maybe 'sysstat' package is not installed?")
- return 140
+ mysqlStatusReport['error'] = 140
+ return mysqlStatusReport
}
for (int i = 0; i < iostatRes.size(); i++) {
def diskKey = iostatRes.keySet()[i]
if (!(iostatRes[diskKey].toString().isBigDecimal() && (iostatRes[diskKey].toBigDecimal() < 50 ))) {
common.errorMsg("Disk ${diskKey} has to high i/o utilization. Maximum value is 50 and current value is ${iostatRes[diskKey]}.")
- return 141
+ mysqlStatusReport['error'] = 141
+ return mysqlStatusReport
}
}
}
@@ -115,36 +111,65 @@
common.infoMsg("Disk i/o utilization was checked and everything seems to be in order.")
if (checkTimeSync && !salt.checkClusterTimeSync(env, "I@galera:master or I@galera:slave")) {
common.errorMsg("Time in cluster is desynchronized or it couldn't be detemined. You should fix this issue manually before proceeding.")
- return 131
+ mysqlStatusReport['error'] = 131
+ return mysqlStatusReport
}
+
+ for(member in mysqlStatusReport['clusterMembersOnPower']) {
+ def clusterStatus = getWsrepParameters(env, member, 'wsrep_cluster_status')
+ if (clusterStatus['wsrep_cluster_status']) {
+ mysqlStatusReport['clusterMembersInClusterAlive'] << member
+ } else {
+ mysqlStatusReport['clusterMembersNotAlive'] << member
+ }
+ }
+ if (!mysqlStatusReport['clusterMembersInClusterAlive']) {
+ common.errorMsg("Could not determine mysql status, because all nodes are not connected to cluster.")
+ mysqlStatusReport['error'] = 256
+ return mysqlStatusReport
+ }
+ def testNode = mysqlStatusReport['clusterMembersInClusterAlive'].sort().first()
+
try {
- out = salt.runSaltProcessStep(env, "${testNode}", "mysql.status", [], null, false)
+ mysqlStatusReport['statusRaw'] = salt.runSaltProcessStep(env, testNode, "mysql.status", [], null, false)
} catch (Exception e) {
common.errorMsg('Could not determine mysql status.')
common.errorMsg(e.getMessage())
- return 256
+ mysqlStatusReport['error'] = 256
+ return mysqlStatusReport
}
- if (out) {
+
+ def status = "unknown"
+ def galeraMasterNode = salt.getReturnValues(salt.getPillar(env, testNode, "galera:master:enabled")) ? true : false
+
+ if (mysqlStatusReport['statusRaw']) {
try {
- status = validateAndPrintGaleraStatusReport(env, out, testNode)
+ status = validateAndPrintGaleraStatusReport(env, mysqlStatusReport['statusRaw'], testNode, galeraMasterNode)
} catch (Exception e) {
common.errorMsg('Could not parse the mysql status output. Check it manually.')
common.errorMsg(e.getMessage())
- return 1
}
} else {
- common.errorMsg("Mysql status response unrecognized or is empty. Response: ${out}")
- return 1024
+ common.errorMsg("Mysql status response unrecognized or is empty. Response: ${mysqlStatusReport['statusRaw']}")
}
+ if (mysqlStatusReport['clusterMembersNotAvailable']) {
+ common.errorMsg("Next nodes are unavailable: ${mysqlStatusReport['clusterMembersNotAvailable'].join(',')}")
+ }
+ if (mysqlStatusReport['clusterMembersNotAlive']) {
+ common.errorMsg("Next nodes are not connected to cluster: ${mysqlStatusReport['clusterMembersNotAlive'].join(',')}")
+ }
+
if (status == "OK") {
common.infoMsg("No errors found - MySQL status is ${status}.")
- return 0
+ return mysqlStatusReport
} else if (status == "unknown") {
common.warningMsg('MySQL status cannot be determined')
- return 1
+ mysqlStatusReport['error'] = 1
+ return mysqlStatusReport
} else {
common.errorMsg("Errors found.")
- return 2
+ mysqlStatusReport['error'] = 2
+ return mysqlStatusReport
}
}
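Note for callers: verifyGaleraStatus() now returns a status map instead of an int
exit code, so pipelines that compared the old return codes need to inspect the map.
A minimal caller-side sketch (the surrounding pipeline context and variable names
are illustrative, not part of this change):

    def galera = new com.mirantis.mk.Galera()
    def galeraStatus = galera.verifyGaleraStatus(pepperEnv)
    if (galeraStatus['error'] == 128 || galeraStatus['error'] == 130) {
        // members could not be listed, or none of them was reachable
        error("Galera members unreachable: ${galeraStatus['clusterMembersNotAvailable']}")
    } else if (galeraStatus['error'] != 0) {
        error("Galera cluster is not healthy, error code: ${galeraStatus['error']}")
    }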
@@ -154,13 +179,12 @@
@return status "OK", "ERROR" or "uknown" depending on result of validation
*/
-def validateAndPrintGaleraStatusReport(env, out, minion) {
+def validateAndPrintGaleraStatusReport(env, out, minion, nodeRoleMaster=false) {
def salt = new com.mirantis.mk.Salt()
def common = new com.mirantis.mk.Common()
- if (minion == "I@galera:master") {
- role = "master"
- } else {
- role = "slave"
+ def role = 'slave'
+ if (nodeRoleMaster) {
+ role = 'master'
}
sizeOut = salt.getReturnValues(salt.getPillar(env, minion, "galera:${role}:members"))
expected_cluster_size = sizeOut.size()
@@ -308,10 +332,10 @@
*/
def manageServiceMysql(env, targetNode, action, checkStatus=true, checkState='running') {
def salt = new com.mirantis.mk.Salt()
- salt.runSaltProcessStep(env, lastNodeTarget, "service.${action}", ['mysql'])
+ salt.runSaltProcessStep(env, targetNode, "service.${action}", ['mysql'])
if (checkStatus) {
try {
- salt.commandStatus(env, lastNodeTarget, 'service mysql status', checkState)
+ salt.commandStatus(env, targetNode, 'service mysql status', checkState)
} catch (Exception er) {
input message: "Database is not running please fix it first and only then click on PROCEED."
}
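With the fix above, manageServiceMysql() acts on the targetNode argument it was
given instead of silently relying on a lastNodeTarget variable from the caller's
scope. Usage stays as elsewhere in this file, e.g.:

    def galera = new com.mirantis.mk.Galera()
    // stop mysql on all slaves and wait until the service reports 'inactive'
    galera.manageServiceMysql(pepperEnv, 'I@galera:slave', 'stop', true, 'inactive')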
@@ -321,34 +345,62 @@
/**
* Restores Galera cluster
* @param env Salt Connection object or pepperEnv
- * @param runRestoreDb Boolean to determine if the restoration of DB should be run as well
+ * @param galeraStatus Map with the Galera cluster status, as returned by the verifyGaleraStatus function
+ * @param restoreDb Boolean to determine whether the DB restore procedure should be run as well
* @return output of salt commands
*/
-def restoreGaleraCluster(env, runRestoreDb=true) {
+def restoreGaleraCluster(env, galeraStatus, restoreDb=true) {
def salt = new com.mirantis.mk.Salt()
def common = new com.mirantis.mk.Common()
- lastNodeTarget = getGaleraLastShutdownNode(env)
- manageServiceMysql(env, lastNodeTarget, 'stop', false)
- if (runRestoreDb) {
- salt.cmdRun(env, lastNodeTarget, "mkdir -p /root/mysql/mysql.bak")
- salt.cmdRun(env, lastNodeTarget, "rm -rf /root/mysql/mysql.bak/*")
- salt.cmdRun(env, lastNodeTarget, "mv /var/lib/mysql/* /root/mysql/mysql.bak")
+ def nodesToRecover = []
+ def total = false // whole cluster
+ if (galeraStatus['clusterMembersNotAlive']) {
+ nodesToRecover = galeraStatus['clusterMembersNotAlive']
+ if (galeraStatus['clusterMembersInClusterAlive'].size() == 0) {
+ total = true
+ }
+ } else {
+ nodesToRecover = galeraStatus['clusterMembersInClusterAlive']
+ total = true
}
- salt.cmdRun(env, lastNodeTarget, "rm -f /var/lib/mysql/.galera_bootstrap")
- // make sure that gcom parameter is empty
- salt.cmdRun(env, lastNodeTarget, "sed -i '/gcomm/c\\wsrep_cluster_address=\"gcomm://\"' /etc/mysql/my.cnf")
+ def lastNodeTarget = ''
+ if (total) {
+ manageServiceMysql(env, 'I@galera:slave', 'stop', true, 'inactive')
+ manageServiceMysql(env, 'I@galera:master', 'stop', true, 'inactive')
+ lastNodeTarget = getGaleraLastShutdownNode(env) // in case if master was already down before
+ salt.cmdRun(env, "( I@galera:master or I@galera:slave ) and not ${lastNodeTarget}", "rm -f /var/lib/mysql/ib_logfile*")
+ salt.cmdRun(env, "( I@galera:master or I@galera:slave ) and not ${lastNodeTarget}", "rm -f /var/lib/mysql/grastate.dat")
+ } else {
+ lastNodeTarget = nodesToRecover.join(' or ')
+ manageServiceMysql(env, lastNodeTarget, 'stop', true, 'inactive')
+ }
- // run restore of DB
- if (runRestoreDb) {
+ if (restoreDb) {
+ def timestamp = common.getDatetime()
+ salt.cmdRun(env, lastNodeTarget, "mkdir -p /root/mysql")
+ def bakDir = salt.getReturnValues(salt.cmdRun(env, lastNodeTarget, "mktemp -d --suffix='_${timestamp}' /root/mysql/mysql.bak.XXXXXX", false))
+ salt.cmdRun(env, lastNodeTarget, "mv /var/lib/mysql/* ${bakDir} || echo 'Nothing to backup from directory /var/lib/mysql/'")
+ }
+ if (total) {
+ // make sure that the gcomm parameter is empty
+ salt.cmdRun(env, lastNodeTarget, "sed -i '/gcomm/c\\wsrep_cluster_address=\"gcomm://\"' /etc/mysql/my.cnf")
+ } else if (!restoreDb) {
+ // node rejoin
+ salt.cmdRun(env, lastNodeTarget, "rm -f /var/lib/mysql/ib_logfile*")
+ salt.cmdRun(env, lastNodeTarget, "rm -f /var/lib/mysql/grastate.dat")
+ }
+
+ if (restoreDb) {
restoreGaleraDb(env, lastNodeTarget)
}
manageServiceMysql(env, lastNodeTarget, 'start')
- // apply any changes in configuration and return value to gcom parameter and then restart mysql to catch
- salt.enforceState(['saltId': env, 'target': lastNodeTarget, 'state': 'galera'])
- manageServiceMysql(env, lastNodeTarget, 'restart')
+ if (total) {
+ manageServiceMysql(env, "( I@galera:master or I@galera:slave ) and not ${lastNodeTarget}", 'start')
+ salt.runSaltProcessStep(env, lastNodeTarget, 'state.sls_id', ['galera_config', 'galera'])
+ }
}
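A hedged sketch of how the reworked restore is meant to be driven (variable names
are illustrative): the status map from verifyGaleraStatus() selects between a
partial rejoin of dead members and a full-cluster restore.

    def galera = new com.mirantis.mk.Galera()
    def galeraStatus = galera.verifyGaleraStatus(pepperEnv)
    if (galeraStatus['error'] != 0) {
        // recovers only clusterMembersNotAlive, or the whole cluster if none are alive;
        // pass restoreDb=true to also move /var/lib/mysql aside and restore the DB
        galera.restoreGaleraCluster(pepperEnv, galeraStatus, false)
    }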
/**
diff --git a/src/com/mirantis/mk/Orchestrate.groovy b/src/com/mirantis/mk/Orchestrate.groovy
index 055b70b..1c66d8a 100644
--- a/src/com/mirantis/mk/Orchestrate.groovy
+++ b/src/com/mirantis/mk/Orchestrate.groovy
@@ -19,7 +19,7 @@
salt.runSaltProcessStep(master, "I@salt:minion ${extra_tgt}", 'state.show_top')
}
-def installFoundationInfra(master, staticMgmtNet=false, extra_tgt = '') {
+def installFoundationInfra(master, staticMgmtNet=false, extra_tgt = '', batch=20) {
def salt = new com.mirantis.mk.Salt()
def common = new com.mirantis.mk.Common()
@@ -30,48 +30,48 @@
salt.enforceState([saltId: master, target: "I@salt:master ${extra_tgt}", state: ['linux.system']])
salt.enforceState([saltId: master, target: "I@salt:master ${extra_tgt}", state: ['salt.master'], failOnError: false, read_timeout: 120, retries: 2])
- salt.fullRefresh(master, "* ${extra_tgt}")
+ salt.fullRefresh(master, "* ${extra_tgt}", batch)
salt.enforceState([saltId: master, target: "I@salt:master ${extra_tgt}", state: ['salt.minion'], failOnError: false, read_timeout: 60, retries: 2])
salt.enforceState([saltId: master, target: "I@salt:master ${extra_tgt}", state: ['salt.minion']])
- salt.fullRefresh(master, "* ${extra_tgt}")
- salt.enforceState([saltId: master, target: "* ${extra_tgt}", state: ['linux.network.proxy'], failOnError: false, read_timeout: 60, retries: 2])
+ salt.fullRefresh(master, "* ${extra_tgt}", batch)
+ salt.enforceState([saltId: master, target: "* ${extra_tgt}", state: ['linux.network.proxy'], batch: batch, failOnError: false, read_timeout: 180, retries: 2])
// Make sure all repositories are in place before proceeding with package installation from other states
- salt.enforceState([saltId: master, target: "* ${extra_tgt}", state: ['linux.system.repo'], failOnError: false, read_timeout: 60, retries: 2])
+ salt.enforceState([saltId: master, target: "* ${extra_tgt}", state: ['linux.system.repo'], batch: batch, failOnError: false, read_timeout: 180, retries: 2])
try {
- salt.enforceState([saltId: master, target: "* ${extra_tgt}", state: ['salt.minion.base'], failOnError: false, read_timeout: 60, retries: 2])
+ salt.enforceState([saltId: master, target: "* ${extra_tgt}", state: ['salt.minion.base'], batch: batch, failOnError: false, read_timeout: 180, retries: 2])
sleep(5)
} catch (Throwable e) {
common.warningMsg('Salt state salt.minion.base is not present in the Salt-formula yet.')
}
- salt.enforceState([saltId: master, target: "* ${extra_tgt}", state: ['linux.system'], retries: 2])
+ salt.enforceState([saltId: master, target: "* ${extra_tgt}", state: ['linux.system'], batch: batch, retries: 2])
if (staticMgmtNet) {
- salt.runSaltProcessStep(master, "* ${extra_tgt}", 'cmd.shell', ["salt-call state.sls linux.network; salt-call service.restart salt-minion"], null, true, 60)
+ salt.runSaltProcessStep(master, "* ${extra_tgt}", 'cmd.shell', ["salt-call state.sls linux.network; salt-call service.restart salt-minion"], batch, true, 180)
}
- salt.enforceState([saltId: master, target: "I@linux:network:interface ${extra_tgt}", state: ['linux.network.interface'], retries: 2])
+ salt.enforceState([saltId: master, target: "I@linux:network:interface ${extra_tgt}", state: ['linux.network.interface'], batch: batch, retries: 2])
sleep(5)
- salt.enforceState([saltId: master, target: "I@linux:system ${extra_tgt}", state: ['linux', 'openssh', 'ntp', 'rsyslog']])
+ salt.enforceState([saltId: master, target: "I@linux:system ${extra_tgt}", state: ['linux', 'openssh', 'ntp', 'rsyslog'], batch: batch])
- salt.enforceState([saltId: master, target: "* ${extra_tgt}", state: ['salt.minion'], failOnError: false, read_timeout: 60, retries: 2])
+ salt.enforceState([saltId: master, target: "* ${extra_tgt}", state: ['salt.minion'], failOnError: false, batch: batch, read_timeout: 180, retries: 2])
sleep(5)
- salt.fullRefresh(master, "* ${extra_tgt}")
- salt.runSaltProcessStep(master, "* ${extra_tgt}", 'mine.update', [], null, true)
- salt.enforceState([saltId: master, target: "* ${extra_tgt}", state: ['linux.network.host']])
+ salt.fullRefresh(master, "* ${extra_tgt}", batch)
+ salt.runSaltProcessStep(master, "* ${extra_tgt}", 'mine.update', [], batch, true)
+ salt.enforceState([saltId: master, target: "* ${extra_tgt}", state: ['linux.network.host'], batch: batch])
// Install and configure iptables
- salt.enforceStateWithTest([saltId: master, target: "I@iptables:service ${extra_tgt}", state: 'iptables'])
+ salt.enforceStateWithTest([saltId: master, target: "I@iptables:service ${extra_tgt}", state: 'iptables', batch: batch])
// Install and configure logrotate
- salt.enforceStateWithTest([saltId: master, target: "I@logrotate:server ${extra_tgt}", state: 'logrotate'])
+ salt.enforceStateWithTest([saltId: master, target: "I@logrotate:server ${extra_tgt}", state: 'logrotate', batch: batch])
// Install and configure auditd
- salt.enforceStateWithTest([saltId: master, target: "I@auditd:service ${extra_tgt}", state: 'auditd'])
+ salt.enforceStateWithTest([saltId: master, target: "I@auditd:service ${extra_tgt}", state: 'auditd', batch: batch])
// Install and configure openscap
- salt.enforceStateWithTest([saltId: master, target: "I@openscap:service ${extra_tgt}", state: 'openscap'])
+ salt.enforceStateWithTest([saltId: master, target: "I@openscap:service ${extra_tgt}", state: 'openscap', batch: batch])
}
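The new batch parameter defaults to 20 and is threaded through all of the wide
'*'-targeted states above. Call-side sketch (the values are illustrative):

    def orchestrate = new com.mirantis.mk.Orchestrate()
    orchestrate.installFoundationInfra(pepperEnv)                 // batches of 20 minions
    orchestrate.installFoundationInfra(pepperEnv, false, '', 50)  // bigger batches on large clouds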
def installFoundationInfraOnTarget(master, target, staticMgmtNet=false, extra_tgt = '') {
@@ -158,7 +158,7 @@
}
common.infoMsg("All minions are up.")
- salt.fullRefresh(master, "* and not kvm* ${extra_tgt}")
+ salt.fullRefresh(master, "* and not I@salt:control ${extra_tgt}")
}
@@ -498,20 +498,20 @@
}
-def installOpenstackCompute(master, extra_tgt = '') {
+def installOpenstackCompute(master, extra_tgt = '', batch=20) {
def salt = new com.mirantis.mk.Salt()
def common = new com.mirantis.mk.Common()
// Configure compute nodes
def compute_compound = "I@nova:compute ${extra_tgt}"
- if (salt.testTarget(master, compute_compound)) {
+ if (salt.testTarget(master, compute_compound, batch)) {
// In case if infrastructure nodes are used as nova computes too
def gluster_compound = "I@glusterfs:server ${extra_tgt}"
def salt_ca_compound = "I@salt:minion:ca:salt_master_ca ${extra_tgt}"
// Enforce highstate asynchronous only on compute nodes which are not glusterfs and not salt ca servers
def hightstateTarget = "${compute_compound} and not ${gluster_compound} and not ${salt_ca_compound}"
- if (salt.testTarget(master, hightstateTarget)) {
+ if (salt.testTarget(master, hightstateTarget, batch)) {
retry(2) {
- salt.enforceHighstate(master, hightstateTarget)
+ salt.enforceHighstate(master, hightstateTarget, false, true, batch)
}
} else {
common.infoMsg("No minions matching highstate target found for target ${hightstateTarget}")
@@ -519,8 +519,8 @@
// Iterate through salt ca servers and check if they have compute role
// TODO: switch to batch once salt 2017.7+ would be used
common.infoMsg("Checking whether ${salt_ca_compound} minions have ${compute_compound} compound")
- for ( target in salt.getMinionsSorted(master, salt_ca_compound) ) {
- for ( cmp_target in salt.getMinionsSorted(master, compute_compound) ) {
+ for ( target in salt.getMinionsSorted(master, salt_ca_compound, batch) ) {
+ for ( cmp_target in salt.getMinionsSorted(master, compute_compound, batch) ) {
if ( target == cmp_target ) {
// Enforce highstate one by one on salt ca servers which are compute nodes
retry(2) {
@@ -532,8 +532,8 @@
// Iterate through glusterfs servers and check if they have compute role
// TODO: switch to batch once salt 2017.7+ would be used
common.infoMsg("Checking whether ${gluster_compound} minions have ${compute_compound} compound")
- for ( target in salt.getMinionsSorted(master, gluster_compound) ) {
- for ( cmp_target in salt.getMinionsSorted(master, compute_compound) ) {
+ for ( target in salt.getMinionsSorted(master, gluster_compound, batch) ) {
+ for ( cmp_target in salt.getMinionsSorted(master, compute_compound, batch) ) {
if ( target == cmp_target ) {
// Enforce highstate one by one on glusterfs servers which are compute nodes
retry(2) {
@@ -729,8 +729,8 @@
def installCicd(master, extra_tgt = '') {
def salt = new com.mirantis.mk.Salt()
def common = new com.mirantis.mk.Common()
- def gerrit_compound = "I@gerrit:client and ci* ${extra_tgt}"
- def jenkins_compound = "I@jenkins:client and ci* ${extra_tgt}"
+ def gerrit_compound = "I@gerrit:client and I@_param:drivetrain_role:cicd ${extra_tgt}"
+ def jenkins_compound = "I@jenkins:client and I@_param:drivetrain_role:cicd ${extra_tgt}"
salt.fullRefresh(master, gerrit_compound)
salt.fullRefresh(master, jenkins_compound)
@@ -738,7 +738,7 @@
// Temporary exclude cfg node from docker.client state (PROD-24934)
def dockerClientExclude = !salt.getPillar(master, 'I@salt:master', 'docker:client:stack:jenkins').isEmpty() ? 'and not I@salt:master' : ''
// Pull images first if any
- def listCIMinions = salt.getMinions(master, "ci* ${dockerClientExclude} ${extra_tgt}")
+ def listCIMinions = salt.getMinions(master, "I@_param:drivetrain_role:cicd ${dockerClientExclude} ${extra_tgt}")
for (int i = 0; i < listCIMinions.size(); i++) {
if (!salt.getReturnValues(salt.getPillar(master, listCIMinions[i], 'docker:client:images')).isEmpty()) {
salt.enforceState([saltId: master, target: listCIMinions[i], state: 'docker.client.images', retries: 2])
@@ -969,7 +969,7 @@
salt.enforceStateWithTest([saltId: master, target: "I@sphinx:server ${extra_tgt}", state: 'sphinx'])
//Configure Grafana
- pillar = salt.getPillar(master, "ctl01* ${extra_tgt}", '_param:stacklight_monitor_address')
+ pillar = salt.getPillar(master, "I@keystone:server:role:primary ${extra_tgt}", '_param:stacklight_monitor_address')
common.prettyPrint(pillar)
def stacklight_vip
@@ -1264,7 +1264,7 @@
def salt = new com.mirantis.mk.Salt()
//Get oss VIP address
- def pillar = salt.getPillar(master, "cfg01* ${extra_tgt}", '_param:stacklight_monitor_address')
+ def pillar = salt.getPillar(master, "I@salt:master ${extra_tgt}", '_param:stacklight_monitor_address')
common.prettyPrint(pillar)
def oss_vip
diff --git a/src/com/mirantis/mk/Salt.groovy b/src/com/mirantis/mk/Salt.groovy
index 0a9c78e..8c6384c 100644
--- a/src/com/mirantis/mk/Salt.groovy
+++ b/src/com/mirantis/mk/Salt.groovy
@@ -1,6 +1,5 @@
package com.mirantis.mk
-import com.cloudbees.groovy.cps.NonCPS
import java.util.stream.Collectors
/**
* Salt functions
@@ -50,14 +49,16 @@
* data: ['expression': 'I@openssh:server', 'type': 'compound'])
* @param function Function to execute (eg. "state.sls")
* @param batch Batch param to salt (integer or string with percents)
+ * - null - automatic decision (use the SALT_MASTER_OPT_WORKER_THREADS env var if set, otherwise no batch at all)
+ * - int - fixed size of batch
+ * - 'str%' - percentage of the targeted minions in one batch
* @param args Additional arguments to function
* @param kwargs Additional key-value arguments to function
* @param timeout Additional argument salt api timeout
* @param read_timeout http session read timeout
*/
-@NonCPS
-def runSaltCommand(saltId, client, target, function, batch = null, args = null, kwargs = null, timeout = -1, read_timeout = -1) {
+def runSaltCommand(saltId, client, target, function, batch = null, args = null, kwargs = null, timeout = -1, read_timeout = -1) {
data = [
'tgt': target.expression,
'fun': function,
@@ -65,9 +66,14 @@
'expr_form': target.type,
]
- if(batch != null){
+ if (batch) {
batch = batch.toString()
- if( (batch.isInteger() && batch.toInteger() > 0) || (batch.contains("%"))){
+ } else if (env.getEnvironment().containsKey('SALT_MASTER_OPT_WORKER_THREADS')) {
+ batch = env['SALT_MASTER_OPT_WORKER_THREADS'].toString()
+ }
+
+ if (batch instanceof String) {
+ if ((batch.isInteger() && batch.toInteger() > 0) || (batch.matches(/(\d){1,2}%/))){
data['client']= "local_batch"
data['batch'] = batch
}
@@ -85,6 +91,7 @@
data['timeout'] = timeout
}
+ def result = [:]
// Command will be sent using HttpRequest
if (saltId instanceof HashMap && saltId.containsKey("authToken") ) {
@@ -93,13 +100,22 @@
]
def http = new com.mirantis.mk.Http()
- return http.sendHttpPostRequest("${saltId.url}/", data, headers, read_timeout)
+ result = http.sendHttpPostRequest("${saltId.url}/", data, headers, read_timeout)
} else if (saltId instanceof HashMap) {
throw new Exception("Invalid saltId")
+ } else {
+ // Command will be sent using Pepper
+ result = runPepperCommand(data, saltId)
}
- // Command will be sent using Pepper
- return runPepperCommand(data, saltId)
+ // Convert the returned object to the same structure the 'local' client returns, to keep compatibility
+ if (data['client'].equals('local_batch')) {
+ def resultMap = ['return': [[:]]]
+ result['return'].each { it -> resultMap['return'][0] = it + resultMap['return'][0] }
+ return resultMap
+ } else {
+ return result
+ }
}
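For reference, the batch resolution above behaves roughly as follows (a schematic
summary of the code, not new behaviour):

    // explicit value wins; a positive int or 'NN%' switches the client to 'local_batch'
    runSaltCommand(saltId, 'local', ['expression': '*', 'type': 'compound'], 'test.ping', 20)
    runSaltCommand(saltId, 'local', ['expression': '*', 'type': 'compound'], 'test.ping', '10%')
    // no value given: SALT_MASTER_OPT_WORKER_THREADS is used when set, otherwise no batching
    runSaltCommand(saltId, 'local', ['expression': '*', 'type': 'compound'], 'test.ping')

Since 'local_batch' answers with one map per batch, the merge loop at the end folds
them back into the single-map 'return' structure that 'local' callers expect.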
/**
@@ -107,13 +123,14 @@
* @param saltId Salt Connection object or pepperEnv (the command will be sent using the selected method)
* @param target Get pillar target
* @param pillar pillar name (optional)
+ * @param batch Batch param to salt (integer or string with percents)
* @return output of salt command
*/
-def getPillar(saltId, target, pillar = null) {
+def getPillar(saltId, target, pillar = null, batch = null) {
if (pillar != null) {
- return runSaltCommand(saltId, 'local', ['expression': target, 'type': 'compound'], 'pillar.get', null, [pillar.replace('.', ':')])
+ return runSaltCommand(saltId, 'local', ['expression': target, 'type': 'compound'], 'pillar.get', batch, [pillar.replace('.', ':')])
} else {
- return runSaltCommand(saltId, 'local', ['expression': target, 'type': 'compound'], 'pillar.data')
+ return runSaltCommand(saltId, 'local', ['expression': target, 'type': 'compound'], 'pillar.data', batch)
}
}
@@ -122,13 +139,14 @@
* @param saltId Salt Connection object or pepperEnv (the command will be sent using the selected method)
* @param target Get grain target
* @param grain grain name (optional)
+ * @param batch Batch param to salt (integer or string with percents)
* @return output of salt command
*/
-def getGrain(saltId, target, grain = null) {
+def getGrain(saltId, target, grain = null, batch = null) {
if(grain != null) {
- return runSaltCommand(saltId, 'local', ['expression': target, 'type': 'compound'], 'grains.item', null, [grain])
+ return runSaltCommand(saltId, 'local', ['expression': target, 'type': 'compound'], 'grains.item', batch, [grain])
} else {
- return runSaltCommand(saltId, 'local', ['expression': target, 'type': 'compound'], 'grains.items')
+ return runSaltCommand(saltId, 'local', ['expression': target, 'type': 'compound'], 'grains.items', batch)
}
}
@@ -137,10 +155,11 @@
* @param saltId Salt Connection object or pepperEnv (the command will be sent using the selected method)
* @param target Get grain target
* @param config grain name (optional)
+ * @param batch Batch param to salt (integer or string with percents)
* @return output of salt command
*/
-def getConfig(saltId, target, config) {
- return runSaltCommand(saltId, 'local', ['expression': target, 'type': 'compound'], 'config.get', null, [config.replace('.', ':')], '--out=json')
+def getConfig(saltId, target, config, batch = null) {
+ return runSaltCommand(saltId, 'local', ['expression': target, 'type': 'compound'], 'config.get', batch, [config.replace('.', ':')], '--out=json')
}
/**
@@ -207,7 +226,7 @@
if (!params.testTargetMatcher) {
params.testTargetMatcher = params.target
}
- if (testTarget(params.saltId, params.testTargetMatcher)) {
+ if (testTarget(params.saltId, params.testTargetMatcher, params.batch)) {
return enforceState(params)
} else {
if (!params.optional) {
@@ -268,7 +287,7 @@
kwargs["queue"] = true
}
- if (params.optional == false || testTarget(params.saltId, params.target)){
+ if (params.optional == false || testTarget(params.saltId, params.target, params.batch)){
if (params.retries > 0){
def retriesCounter = 0
retry(params.retries){
@@ -426,7 +445,7 @@
* @return output of salt command
*/
def minionsPresent(saltId, target = 'I@salt:master', target_minions = '', waitUntilPresent = true, batch=null, output = true, maxRetries = 200, answers = 1) {
- def target_hosts = getMinionsSorted(saltId, target_minions)
+ def target_hosts = getMinionsSorted(saltId, target_minions, batch)
for (t in target_hosts) {
def tgt = stripDomainName(t)
minionPresent(saltId, target, tgt, waitUntilPresent, batch, output, maxRetries, answers)
@@ -517,11 +536,12 @@
def retries = config.get('retries', 10)
def timeout = config.get('timeout', 5)
def checkAvailability = config.get('availability', true)
+ def batch = config.get('batch', null)
common.retry(retries, wait) {
if (checkAvailability) {
- minionsReachable(saltId, 'I@salt:master', target_reachable)
+ minionsReachable(saltId, 'I@salt:master', target_reachable, batch)
}
- def running = runSaltProcessStep(saltId, target, 'saltutil.running', [], null, true, timeout)
+ def running = runSaltProcessStep(saltId, target, 'saltutil.running', [], batch, true, timeout)
for (value in running.get("return")[0].values()) {
if (value != []) {
throw new Exception("Not all salt-minions are ready for execution")
@@ -653,10 +673,11 @@
* Perform complete salt sync between master and target
* @param saltId Salt Connection object or pepperEnv (the command will be sent using the selected method)
* @param target Get pillar target
+ * @param batch Batch param to salt (integer or string with percents)
* @return output of salt command
*/
-def syncAll(saltId, target) {
- return runSaltCommand(saltId, 'local', ['expression': target, 'type': 'compound'], 'saltutil.sync_all')
+def syncAll(saltId, target, batch = null) {
+ return runSaltCommand(saltId, 'local', ['expression': target, 'type': 'compound'], 'saltutil.sync_all', batch)
}
/**
@@ -664,12 +685,13 @@
* Method will call saltutil.refresh_pillar, saltutil.refresh_grains and saltutil.sync_all
* @param saltId Salt Connection object or pepperEnv (the command will be sent using the selected method)
* @param target Get pillar target
+ * @param batch Batch param to salt (integer or string with percents)
* @return output of salt command
*/
-def fullRefresh(saltId, target){
- runSaltProcessStep(saltId, target, 'saltutil.refresh_pillar', [], null, true)
- runSaltProcessStep(saltId, target, 'saltutil.refresh_grains', [], null, true)
- runSaltProcessStep(saltId, target, 'saltutil.sync_all', [], null, true)
+def fullRefresh(saltId, target, batch=20){
+ runSaltProcessStep(saltId, target, 'saltutil.refresh_pillar', [], batch, true)
+ runSaltProcessStep(saltId, target, 'saltutil.refresh_grains', [], batch, true)
+ runSaltProcessStep(saltId, target, 'saltutil.sync_all', [], batch, true)
}
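fullRefresh() now batches by default (20), which matters on large environments where
an unbatched '*' sync could exhaust the master's worker threads. A short sketch
(targets are illustrative):

    def salt = new com.mirantis.mk.Salt()
    salt.fullRefresh(pepperEnv, '*')                      // refresh + sync_all in batches of 20
    salt.fullRefresh(pepperEnv, 'I@nova:compute', '25%')  // percentage batches also work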
/**
@@ -710,10 +732,11 @@
* Get running minions IDs according to the target
* @param saltId Salt Connection object or pepperEnv (the command will be sent using the selected method)
* @param target Get minions target
+ * @param batch Batch param to salt (integer or string with percents)
* @return list of active minions fitting the target
*/
-def getMinions(saltId, target) {
- def minionsRaw = runSaltCommand(saltId, 'local', ['expression': target, 'type': 'compound'], 'test.ping')
+def getMinions(saltId, target, batch = null) {
+ def minionsRaw = runSaltCommand(saltId, 'local', ['expression': target, 'type': 'compound'], 'test.ping', batch)
return new ArrayList<String>(minionsRaw['return'][0].keySet())
}
@@ -721,20 +744,22 @@
* Get sorted running minions IDs according to the target
* @param saltId Salt Connection object or pepperEnv
* @param target Get minions target
+ * @param batch Batch param to salt (integer or string with percents)
* @return list of sorted active minions fitting the target
*/
-def getMinionsSorted(saltId, target) {
- return getMinions(saltId, target).sort()
+def getMinionsSorted(saltId, target, batch = null) {
+ return getMinions(saltId, target, batch).sort()
}
/**
* Get first out of running minions IDs according to the target
* @param saltId Salt Connection object or pepperEnv
* @param target Get minions target
+ * @param batch Batch param to salt (integer or string with percents)
* @return first of the active minions fitting the target
*/
-def getFirstMinion(saltId, target) {
- def minionsSorted = getMinionsSorted(saltId, target)
+def getFirstMinion(saltId, target, batch = null) {
+ def minionsSorted = getMinionsSorted(saltId, target, batch)
return minionsSorted[0]
}
@@ -742,10 +767,11 @@
* Get running salt minions IDs without their domain name part and numbering identifications
* @param saltId Salt Connection object or pepperEnv
* @param target Get minions target
+ * @param batch Batch param to salt (integer or string with percents)
* @return general name of the active minions without the domain name part and trailing numbering
*/
-def getMinionsGeneralName(saltId, target) {
- def minionsSorted = getMinionsSorted(saltId, target)
+def getMinionsGeneralName(saltId, target, batch = null) {
+ def minionsSorted = getMinionsSorted(saltId, target, batch)
return stripDomainName(minionsSorted[0]).replaceAll('\\d+$', "")
}
@@ -773,7 +799,7 @@
* @return Return values of a salt command
*/
def getReturnValues(output) {
- if(output.containsKey("return") && !output.get("return").isEmpty()) {
+ if(output && output.containsKey("return") && !output.get("return").isEmpty()) {
return output['return'][0].values()[0]
}
def common = new com.mirantis.mk.Common()
@@ -816,11 +842,12 @@
* Test if there are any minions to target
* @param saltId Salt Connection object or pepperEnv (the command will be sent using the selected method)
* @param target Target to test
+ * @param batch Batch param to salt (integer or string with percents)
* @return bool indicating if the target was successful
*/
-def testTarget(saltId, target) {
- return getMinions(saltId, target).size() > 0
+def testTarget(saltId, target, batch = null) {
+ return getMinions(saltId, target, batch).size() > 0
}
/**
@@ -829,10 +856,11 @@
* @param target Key generating target
* @param host Key generating host
* @param keysize generated key size (optional, default 4096)
+ * @param batch Batch param to salt (integer or string with percents)
* @return output of salt command
*/
-def generateNodeKey(saltId, target, host, keysize = 4096) {
- return runSaltCommand(saltId, 'wheel', target, 'key.gen_accept', [host], ['keysize': keysize])
+def generateNodeKey(saltId, target, host, keysize = 4096, batch = null) {
+ return runSaltCommand(saltId, 'wheel', target, 'key.gen_accept', batch, [host], ['keysize': keysize])
}
/**
@@ -842,10 +870,11 @@
* @param host Metadata generating host
* @param classes Reclass classes
* @param parameters Reclass parameters
+ * @param batch Batch param to salt (integer or string with percents)
* @return output of salt command
*/
-def generateNodeMetadata(saltId, target, host, classes, parameters) {
- return runSaltCommand(saltId, 'local', target, 'reclass.node_create', [host, '_generated'], ['classes': classes, 'parameters': parameters])
+def generateNodeMetadata(saltId, target, host, classes, parameters, batch = null) {
+ return runSaltCommand(saltId, 'local', target, 'reclass.node_create', batch, [host, '_generated'], ['classes': classes, 'parameters': parameters])
}
/**
@@ -854,14 +883,15 @@
* @param target Orchestration target
* @param orchestrate Salt orchestrate params
* @param kwargs Salt orchestrate params
+ * @param batch Batch param to salt (integer or string with percents)
* @return output of salt command
*/
-def orchestrateSystem(saltId, target, orchestrate=[], kwargs = null) {
+def orchestrateSystem(saltId, target, orchestrate=[], kwargs = null, batch = null) {
//Since the runSaltCommand uses "arg" (singular) for "runner" client this won`t work correctly on old salt 2016
//cause this version of salt used "args" (plural) for "runner" client, see following link for reference:
//https://github.com/saltstack/salt/pull/32938
def common = new com.mirantis.mk.Common()
- def result = runSaltCommand(saltId, 'runner', target, 'state.orchestrate', true, orchestrate, kwargs, 7200, 7200)
+ def result = runSaltCommand(saltId, 'runner', target, 'state.orchestrate', batch, orchestrate, kwargs, 7200, 7200)
if(result != null){
if(result['return']){
def retcode = result['return'][0].get('retcode')
@@ -884,21 +914,21 @@
* @param saltId Salt Connection object or pepperEnv (the command will be sent using the selected method)
* @param pillar_tree Reclass pillar that has orchestrate pillar for desired stage
* @param extra_tgt Extra targets for compound
- *
+ * @param batch Batch param to salt (integer or string with percents)
* @return output of salt command
*/
-def orchestratePrePost(saltId, pillar_tree, extra_tgt = '') {
+def orchestratePrePost(saltId, pillar_tree, extra_tgt = '', batch = null) {
def common = new com.mirantis.mk.Common()
def salt = new com.mirantis.mk.Salt()
def compound = 'I@' + pillar_tree + " " + extra_tgt
common.infoMsg("Refreshing pillars")
- runSaltProcessStep(saltId, '*', 'saltutil.refresh_pillar', [], null, true)
+ runSaltProcessStep(saltId, '*', 'saltutil.refresh_pillar', [], batch, true)
common.infoMsg("Looking for orchestrate pillars")
- if (salt.testTarget(saltId, compound)) {
- for ( node in salt.getMinionsSorted(saltId, compound) ) {
+ if (salt.testTarget(saltId, compound, batch)) {
+ for ( node in salt.getMinionsSorted(saltId, compound, batch) ) {
def pillar = salt.getPillar(saltId, node, pillar_tree)
if ( !pillar['return'].isEmpty() ) {
for ( orch_id in pillar['return'][0].values() ) {
@@ -906,7 +936,7 @@
def orch_enabled = orch_id.values()['enabled']
if ( orch_enabled ) {
common.infoMsg("Orchestrating: ${orchestrator}")
- salt.printSaltCommandResult(salt.orchestrateSystem(saltId, ['expression': node], [orchestrator]))
+ salt.printSaltCommandResult(salt.orchestrateSystem(saltId, ['expression': node], [orchestrator], null, batch))
}
}
}
@@ -932,11 +962,8 @@
def out
common.infoMsg("Running step ${fun} ${arg} on ${tgt}")
-
- if (batch == true) {
- out = runSaltCommand(saltId, 'local_batch', ['expression': tgt, 'type': 'compound'], fun, String.valueOf(batch), arg, kwargs, timeout)
- } else if (async == true) {
- out = runSaltCommand(saltId, 'local_async', ['expression': tgt, 'type': 'compound'], fun, batch, arg, kwargs, timeout)
+ if (async == true) {
+ out = runSaltCommand(saltId, 'local_async', ['expression': tgt, 'type': 'compound'], fun, null, arg, kwargs, timeout)
} else {
out = runSaltCommand(saltId, 'local', ['expression': tgt, 'type': 'compound'], fun, batch, arg, kwargs, timeout)
}
@@ -1232,10 +1259,11 @@
* @param saltId Salt Connection object or pepperEnv (the command will be sent using the selected method)
* @param target Targeted nodes to be checked
* @param diff Maximum time difference (in seconds) to be accepted during time sync check
+* @param batch Batch param to salt (integer or string with percents)
* @return bool Return true if time difference is <= diff and returns false if time difference is > diff
*/
-def checkClusterTimeSync(saltId, target) {
+def checkClusterTimeSync(saltId, target, batch = null) {
def common = new com.mirantis.mk.Common()
def salt = new com.mirantis.mk.Salt()
@@ -1247,7 +1275,7 @@
} else {
diff = 5
}
- out = salt.runSaltProcessStep(saltId, target, 'status.time', '%s')
+ out = salt.runSaltProcessStep(saltId, target, 'status.time', '%s', batch)
outParsed = out['return'][0]
def outKeySet = outParsed.keySet()
for (key in outKeySet) {
@@ -1355,7 +1383,7 @@
if (res) {
for (int i = 0; i < res.size(); i++) {
def key = res.keySet()[i]
- if (!(res[key] instanceof Map && res[key].get(params.packageName, false))) {
+ if (!(res[key] instanceof Map && res[key].get(params.packageName.toString(), false))) {
return false
}
}
@@ -1364,3 +1392,18 @@
return false
}
}
+
+/**
+* Returns the number of worker_threads set for the Salt Master
+*
+* @param saltId Salt Connection object or pepperEnv
+* @return String with the configured worker_threads value
+*/
+def getWorkerThreads(saltId) {
+ if (env.getEnvironment().containsKey('SALT_MASTER_OPT_WORKER_THREADS')) {
+ return env['SALT_MASTER_OPT_WORKER_THREADS'].toString()
+ }
+ def threads = cmdRun(saltId, "I@salt:master", "cat /etc/salt/master.d/master.conf | grep worker_threads | cut -f 2 -d ':'", true, null, true)
+ return threads['return'][0].values()[0].replaceAll('Salt command execution success','').trim()
+}
+
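A hedged example of wiring the new helper into a batched call (the surrounding
pipeline context is illustrative):

    def salt = new com.mirantis.mk.Salt()
    // prefer the master's worker_threads value as the batch size
    def batch = salt.getWorkerThreads(pepperEnv)
    salt.fullRefresh(pepperEnv, 'I@galera:master or I@galera:slave', batch)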