Merge the tip of origin/release/proposed/2019.2.0 into origin/release/2019.2.0
7c3bd95 Update scripts for restoring failed node of Mysql in Galera cluster
1944851 Make safe condition for resource.changes
bcf56fb Call xtrabackup.client.restore to trigger restore
8a9e593 Don't remove mysql data in case of RESTART_CLUSTER
81a0b8c Use pkg.list_pkgs function to determine package state
3870245 Run salt minion restart and do not wait for response
1480c7e Fix isPackageInstalled method when salt return is empty
108da66 Add retries to gnocchi.client
ac8bcce Fix getWsrepParameters behavior and Update getGaleraLastShutdownNode and getWsrepParameters
f206346 Add new startFirstNode function
0c4dc2c Add condition for docker pull operation
82d0545 Update git mirror to work with http creds
d8dd2c9 Add restartSaltMinion and upgradePackageAndRestartSaltMinion
fb026be Consider null an error value during mysql verification
f48bb10 Add check for disks i/o utilization to verifyGaleraStatus method
e48741b Split restoreGaleraDb function and add restoreType param support
7c8ac9a Improve error logging for galera recovery pipeline
f89f9b4 Move Galera method to new separate class
0c1e278 Add output of not responding minions to minionsReachable function
ad8e95b Add isPackageInstalled and getIostatValues methods to Salt.groovy class
Change-Id: I161d773461b4f203dd48c39d32645e5aac70acd8
diff --git a/src/com/mirantis/mk/Galera.groovy b/src/com/mirantis/mk/Galera.groovy
new file mode 100644
index 0000000..3a10a1c
--- /dev/null
+++ b/src/com/mirantis/mk/Galera.groovy
@@ -0,0 +1,405 @@
+package com.mirantis.mk
+
+/**
+ *
+ * Galera functions
+ *
+ */
+
+
+/**
+ * Returns parameters from mysql.status output on given target node
+ *
+ * @param env Salt Connection object or pepperEnv
+ * @param target Targeted node
+ * @param parameters Parameters to be returned (String or list of Strings). If no parameters are provided or the list is empty, all of them are returned.
+ * @return result Map of parameters with their values
+ */
+
+def getWsrepParameters(env, target, parameters=[], print=false) {
+ def salt = new com.mirantis.mk.Salt()
+ def common = new com.mirantis.mk.Common()
+ result = [:]
+ out = salt.runSaltProcessStep(env, "${target}", "mysql.status", [], null, false)
+ outlist = out['return'][0]
+ resultYaml = outlist.get(outlist.keySet()[0]).sort()
+ if (print) {
+ common.prettyPrint(resultYaml)
+ }
+ if (parameters instanceof String) {
+ parameters = [parameters]
+ }
+ if (parameters == [] || parameters == ['']) {
+ result = resultYaml
+ } else {
+ for (String param in parameters) {
+ value = resultYaml[param]
+ if (value instanceof String && value.isBigDecimal()) {
+ value = value.toBigDecimal()
+ }
+ result[param] = value
+ }
+ }
+ return result
+}
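+
+// Illustrative usage sketch (not part of the library API; assumes a configured 'pepperEnv'
+// Salt connection and a reachable 'ctl01*' Galera minion, which are example names):
+//   def galera = new com.mirantis.mk.Galera()
+//   def params = galera.getWsrepParameters(pepperEnv, 'ctl01*', ['wsrep_cluster_size', 'wsrep_ready'], true)
+//   if (params['wsrep_cluster_size'] < 3) { echo "Galera cluster is degraded: ${params}" }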
+
+/**
+ * Verifies Galera database
+ *
+ * This function checks for the Galera master, tests the connection and, if it is reachable, obtains the result
+ * of the Salt mysql.status function. The result is then parsed, validated and output to the user.
+ *
+ * @param env Salt Connection object or pepperEnv
+ * @param slave Boolean value to enable slave checking (if master is unreachable)
+ * @param checkTimeSync Boolean value to enable time sync check
+ * @return resultCode Int value used to determine the exit status in the calling function
+ */
+def verifyGaleraStatus(env, slave=false, checkTimeSync=false) {
+ def salt = new com.mirantis.mk.Salt()
+ def common = new com.mirantis.mk.Common()
+ def out = ""
+ def status = "unknown"
+ def testNode = ""
+ if (!slave) {
+ try {
+ galeraMaster = salt.getMinions(env, "I@galera:master")
+ common.infoMsg("Current Galera master is: ${galeraMaster}")
+ salt.minionsReachable(env, "I@salt:master", "I@galera:master")
+ testNode = "I@galera:master"
+ } catch (Exception e) {
+ common.errorMsg('Galera master is not reachable.')
+ common.errorMsg(e.getMessage())
+ return 128
+ }
+ } else {
+ try {
+ galeraSlaves = salt.getMinions(env, "I@galera:slave")
+ common.infoMsg("Testing Galera slave minions: ${galeraSlaves}")
+ } catch (Exception e) {
+ common.errorMsg("Cannot obtain Galera slave minions list.")
+ common.errorMsg(e.getMessage())
+ return 129
+ }
+ for (minion in galeraSlaves) {
+ try {
+ salt.minionsReachable(env, "I@salt:master", minion)
+ testNode = minion
+ break
+ } catch (Exception e) {
+ common.warningMsg("Slave '${minion}' is not reachable.")
+ }
+ }
+ }
+ if (!testNode) {
+ common.errorMsg("No Galera slave was reachable.")
+ return 130
+ }
+ def checkTargets = salt.getMinions(env, "I@xtrabackup:client or I@xtrabackup:server")
+ for (checkTarget in checkTargets) {
+ def nodeStatus = salt.minionsReachable(env, 'I@salt:master', checkTarget, null, 10, 5)
+ if (nodeStatus != null) {
+ def iostatRes = salt.getIostatValues(['saltId': env, 'target': checkTarget, 'parameterName': "%util", 'output': true])
+ if (iostatRes == [:]) {
+ common.errorMsg("Received an empty response from the iostat call on ${checkTarget}. Maybe the 'sysstat' package is not installed?")
+ return 140
+ }
+ for (int i = 0; i < iostatRes.size(); i++) {
+ def diskKey = iostatRes.keySet()[i]
+ if (!(iostatRes[diskKey].toString().isBigDecimal() && (iostatRes[diskKey].toBigDecimal() < 50 ))) {
+ common.errorMsg("Disk ${diskKey} has too high i/o utilization. The maximum allowed value is 50 and the current value is ${iostatRes[diskKey]}.")
+ return 141
+ }
+ }
+ }
+ }
+ common.infoMsg("Disk i/o utilization was checked and everything seems to be in order.")
+ if (checkTimeSync && !salt.checkClusterTimeSync(env, "I@galera:master or I@galera:slave")) {
+ common.errorMsg("Time in the cluster is desynchronized or it couldn't be determined. You should fix this issue manually before proceeding.")
+ return 131
+ }
+ try {
+ out = salt.runSaltProcessStep(env, "${testNode}", "mysql.status", [], null, false)
+ } catch (Exception e) {
+ common.errorMsg('Could not determine mysql status.')
+ common.errorMsg(e.getMessage())
+ return 256
+ }
+ if (out) {
+ try {
+ status = validateAndPrintGaleraStatusReport(env, out, testNode)
+ } catch (Exception e) {
+ common.errorMsg('Could not parse the mysql status output. Check it manually.')
+ common.errorMsg(e.getMessage())
+ return 1
+ }
+ } else {
+ common.errorMsg("MySQL status response is unrecognized or empty. Response: ${out}")
+ return 1024
+ }
+ if (status == "OK") {
+ common.infoMsg("No errors found - MySQL status is ${status}.")
+ return 0
+ } else if (status == "unknown") {
+ common.warningMsg('MySQL status cannot be determined.')
+ return 1
+ } else {
+ common.errorMsg("Errors found.")
+ return 2
+ }
+}
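+
+// Illustrative usage sketch (assumes a 'pepperEnv' Salt connection; the exit-code handling is an
+// example of how a calling pipeline might react, not behaviour prescribed by this library):
+//   def galera = new com.mirantis.mk.Galera()
+//   def resultCode = galera.verifyGaleraStatus(pepperEnv, false, true)
+//   if (resultCode == 128) { error('Galera master is not reachable, aborting.') }
+//   else if (resultCode != 0) { currentBuild.result = 'UNSTABLE' }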
+
+/** Validates and prints result of verifyGaleraStatus function
+@param env Salt Connection object or pepperEnv
+@param out Output of the mysql.status Salt function
+@param minion Minion target from which the status output was obtained
+@return status "OK", "ERROR" or "unknown" depending on result of validation
+*/
+
+def validateAndPrintGaleraStatusReport(env, out, minion) {
+ def salt = new com.mirantis.mk.Salt()
+ def common = new com.mirantis.mk.Common()
+ if (minion == "I@galera:master") {
+ role = "master"
+ } else {
+ role = "slave"
+ }
+ sizeOut = salt.getReturnValues(salt.getPillar(env, minion, "galera:${role}:members"))
+ expected_cluster_size = sizeOut.size()
+ outlist = out['return'][0]
+ resultYaml = outlist.get(outlist.keySet()[0]).sort()
+ common.prettyPrint(resultYaml)
+ parameters = [
+ wsrep_cluster_status: [title: 'Cluster status', expectedValues: ['Primary'], description: ''],
+ wsrep_cluster_size: [title: 'Current cluster size', expectedValues: [expected_cluster_size], description: ''],
+ wsrep_ready: [title: 'Node status', expectedValues: ['ON', true], description: ''],
+ wsrep_local_state_comment: [title: 'Node status comment', expectedValues: ['Joining', 'Waiting on SST', 'Joined', 'Synced', 'Donor'], description: ''],
+ wsrep_connected: [title: 'Node connectivity', expectedValues: ['ON', true], description: ''],
+ wsrep_local_recv_queue_avg: [title: 'Average size of local received queue', expectedThreshold: [warn: 0.5, error: 1.0], description: '(Value above 0 means that the node cannot apply write-sets as fast as it receives them, which can lead to replication throttling)'],
+ wsrep_local_send_queue_avg: [title: 'Average size of local send queue', expectedThreshold: [warn: 0.5, error: 1.0], description: '(Values above 0 indicate replication throttling or network throughput issues, such as a bottleneck on the network link.)']
+ ]
+ for (key in parameters.keySet()) {
+ value = resultYaml[key]
+ if (value instanceof String && value.isBigDecimal()) {
+ value = value.toBigDecimal()
+ }
+ parameters.get(key) << [actualValue: value]
+ }
+ for (key in parameters.keySet()) {
+ param = parameters.get(key)
+ if (key == 'wsrep_local_recv_queue_avg' || key == 'wsrep_local_send_queue_avg') {
+ if (param.get('actualValue') == null || (param.get('actualValue') > param.get('expectedThreshold').get('error'))) {
+ param << [match: 'error']
+ } else if (param.get('actualValue') > param.get('expectedThreshold').get('warn')) {
+ param << [match: 'warn']
+ } else {
+ param << [match: 'ok']
+ }
+ } else {
+ for (expValue in param.get('expectedValues')) {
+ if (expValue == param.get('actualValue')) {
+ param << [match: 'ok']
+ break
+ } else {
+ param << [match: 'error']
+ }
+ }
+ }
+ }
+ cluster_info_report = []
+ cluster_warning_report = []
+ cluster_error_report = []
+ for (key in parameters.keySet()) {
+ param = parameters.get(key)
+ if (param.containsKey('expectedThreshold')) {
+ expValues = "below ${param.get('expectedThreshold').get('warn')}"
+ } else {
+ if (param.get('expectedValues').size() > 1) {
+ expValues = param.get('expectedValues').join(' or ')
+ } else {
+ expValues = param.get('expectedValues')[0]
+ }
+ }
+ reportString = "${param.title}: ${param.actualValue} (Expected: ${expValues}) ${param.description}"
+ if (param.get('match').equals('ok')) {
+ cluster_info_report.add("[OK ] ${reportString}")
+ } else if (param.get('match').equals('warn')) {
+ cluster_warning_report.add("[WARNING] ${reportString}")
+ } else {
+ cluster_error_report.add("[ ERROR] ${reportString}")
+ }
+ }
+ common.infoMsg("CLUSTER STATUS REPORT: ${cluster_info_report.size()} expected values, ${cluster_warning_report.size()} warnings and ${cluster_error_report.size()} errors found:")
+ if (cluster_info_report.size() > 0) {
+ common.infoMsg(cluster_info_report.join('\n'))
+ }
+ if (cluster_warning_report.size() > 0) {
+ common.warningMsg(cluster_warning_report.join('\n'))
+ }
+ if (cluster_error_report.size() > 0) {
+ common.errorMsg(cluster_error_report.join('\n'))
+ return "ERROR"
+ } else {
+ return "OK"
+ }
+}
+
+/** Returns last shutdown node of Galera cluster
+@param env Salt Connection object or pepperEnv
+@param nodes List of nodes to check only (defaults to []). If not provided, it will check all nodes.
+ Use this parameter if the cluster splits into several components and you only want to check one of them.
+@return status ip address or hostname of last shutdown node
+*/
+
+def getGaleraLastShutdownNode(env, nodes = []) {
+ def salt = new com.mirantis.mk.Salt()
+ def common = new com.mirantis.mk.Common()
+ members = []
+ lastNode = [ip: '', seqno: -2]
+ try {
+ if (nodes) {
+ nodes = salt.getIPAddressesForNodenames(env, nodes)
+ for (node in nodes) {
+ members = [host: "${node.get(node.keySet()[0])}"] + members
+ }
+ } else {
+ members = salt.getReturnValues(salt.getPillar(env, "I@galera:master", "galera:master:members"))
+ }
+ } catch (Exception e) {
+ common.errorMsg('Could not retrieve members list')
+ common.errorMsg(e.getMessage())
+ return 'I@galera:master'
+ }
+ if (members) {
+ for (member in members) {
+ try {
+ salt.minionsReachable(env, 'I@salt:master', "S@${member.host}")
+ out = salt.getReturnValues(salt.cmdRun(env, "S@${member.host}", 'cat /var/lib/mysql/grastate.dat | grep "seqno" | cut -d ":" -f2', true, null, false))
+ seqno = out.tokenize('\n')[0].trim()
+ if (seqno.isNumber()) {
+ seqno = seqno.toInteger()
+ } else {
+ // in case /var/lib/mysql/grastate.dat contains no seqno, set it to 0
+ // so this node will be recovered if no other failed node is found
+ seqno = 0
+ }
+ } catch (Exception e) {
+ common.warningMsg("Could not determine 'seqno' value for node ${member.host} ")
+ common.warningMsg(e.getMessage())
+ seqno = 0
+ }
+ highestSeqno = lastNode.get('seqno')
+ if (seqno > highestSeqno) {
+ lastNode << [ip: "${member.host}", seqno: seqno]
+ }
+ }
+ }
+ if (lastNode.get('ip') != '') {
+ return "S@${lastNode.ip}"
+ } else {
+ return "I@galera:master"
+ }
+}
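+
+// Illustrative usage sketch (assumes a 'pepperEnv' Salt connection; node names are examples):
+//   def galera = new com.mirantis.mk.Galera()
+//   def lastNodeTarget = galera.getGaleraLastShutdownNode(pepperEnv)                    // check all members
+//   def partialTarget = galera.getGaleraLastShutdownNode(pepperEnv, ['ctl01', 'ctl02']) // check a subset only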
+
+/**
+ * Wrapper around the MySQL systemd service
+ * @param env Salt Connection object or pepperEnv
+ * @param targetNode Node to apply changes
+ * @param action Service action to perform (start, stop, restart, ...)
+ * @param checkStatus Whether to check the status of MySQL
+ * @param checkState State of service to check
+*/
+def manageServiceMysql(env, targetNode, action, checkStatus=true, checkState='running') {
+ def salt = new com.mirantis.mk.Salt()
+ salt.runSaltProcessStep(env, targetNode, "service.${action}", ['mysql'])
+ if (checkStatus) {
+ try {
+ salt.commandStatus(env, targetNode, 'service mysql status', checkState)
+ } catch (Exception er) {
+ input message: "Database is not running. Please fix it first and only then click PROCEED."
+ }
+ }
+}
+
+/**
+ * Restores Galera cluster
+ * @param env Salt Connection object or pepperEnv
+ * @param runRestoreDb Boolean to determine if the restoration of DB should be run as well
+ * @return output of salt commands
+ */
+def restoreGaleraCluster(env, runRestoreDb=true) {
+ def salt = new com.mirantis.mk.Salt()
+ def common = new com.mirantis.mk.Common()
+ lastNodeTarget = getGaleraLastShutdownNode(env)
+ manageServiceMysql(env, lastNodeTarget, 'stop', false)
+ if (runRestoreDb) {
+ salt.cmdRun(env, lastNodeTarget, "mkdir -p /root/mysql/mysql.bak")
+ salt.cmdRun(env, lastNodeTarget, "rm -rf /root/mysql/mysql.bak/*")
+ salt.cmdRun(env, lastNodeTarget, "mv /var/lib/mysql/* /root/mysql/mysql.bak")
+ }
+ salt.cmdRun(env, lastNodeTarget, "rm -f /var/lib/mysql/.galera_bootstrap")
+
+ // make sure that the gcomm parameter is empty
+ salt.cmdRun(env, lastNodeTarget, "sed -i '/gcomm/c\\wsrep_cluster_address=\"gcomm://\"' /etc/mysql/my.cnf")
+
+ // run restore of DB
+ if (runRestoreDb) {
+ restoreGaleraDb(env, lastNodeTarget)
+ }
+
+ manageServiceMysql(env, lastNodeTarget, 'start')
+
+ // apply any configuration changes, restore the original value of the gcomm parameter and then restart mysql to pick up the changes
+ salt.enforceState(['saltId': env, 'target': lastNodeTarget, 'state': 'galera'])
+ manageServiceMysql(env, lastNodeTarget, 'restart')
+}
+
+/**
+ * Restores Galera database
+ * @param env Salt Connection object or pepperEnv
+ * @param targetNode Node to be targeted
+ */
+def restoreGaleraDb(env, targetNode) {
+ def salt = new com.mirantis.mk.Salt()
+ def backup_dir = salt.getReturnValues(salt.getPillar(env, targetNode, 'xtrabackup:client:backup_dir'))
+ if(backup_dir == null || backup_dir.isEmpty()) { backup_dir='/var/backups/mysql/xtrabackup' }
+ salt.runSaltProcessStep(env, targetNode, 'file.remove', ["${backup_dir}/dbrestored"])
+ salt.enforceState(['saltId': env, 'target': targetNode, 'state': 'xtrabackup.client'])
+ salt.enforceState(['saltId': env, 'target': targetNode, 'state': 'xtrabackup.client.restore'])
+}
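+
+// Illustrative usage sketch (assumes a 'pepperEnv' Salt connection; whether the data directory is
+// wiped and restored from backup is controlled by the 'runRestoreDb' flag of restoreGaleraCluster):
+//   def galera = new com.mirantis.mk.Galera()
+//   galera.restoreGaleraCluster(pepperEnv, true)   // full recovery including database restore
+//   galera.restoreGaleraCluster(pepperEnv, false)  // restart the cluster without touching the data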
+
+def restoreGaleraDb(env) {
+ def common = new com.mirantis.mk.Common()
+ common.warningMsg("This method was renamed to 'restoreGaleraCluster'. Please change your pipeline to use this call instead! If you really wanted to call 'restoreGaleraDb', you may be missing the 'targetNode' parameter in your call.")
+ return restoreGaleraCluster(env)
+}
+
+/**
+ * Start the first node in the MySQL cluster. Cluster members stay removed from the MySQL config; an additional service restart is needed once all nodes are up.
+ * https://docs.mirantis.com/mcp/q4-18/mcp-operations-guide/tshooting/
+ * tshoot-mcp-openstack/tshoot-galera/restore-galera-cluster/
+ * restore-galera-manually.html#restore-galera-manually
+ *
+ * @param env Salt Connection object or pepperEnv
+ * @param target last stopped Galera node
+ * @return output of salt commands
+ */
+def startFirstNode(env, target) {
+ def salt = new com.mirantis.mk.Salt()
+ def common = new com.mirantis.mk.Common()
+
+ // make sure that the gcomm parameter is empty
+ salt.cmdRun(env, target, "sed -i '/wsrep_cluster_address/ s/^#*/#/' /etc/mysql/my.cnf")
+ salt.cmdRun(env, target, "sed -i '/wsrep_cluster_address/a wsrep_cluster_address=\"gcomm://\"' /etc/mysql/my.cnf")
+
+ // start mysql service on the last node
+ salt.runSaltProcessStep(env, target, 'service.start', ['mysql'])
+
+ // wait until mysql service on the last node is up
+
+ common.retry(30, 10) {
+ value = getWsrepParameters(env, target, 'wsrep_evs_state')
+ if (value['wsrep_evs_state'] == 'OPERATIONAL') {
+ common.infoMsg('WSREP state: OPERATIONAL')
+ } else {
+ throw new Exception("MySQL service is not running, please fix it.")
+ }
+ }
+}
\ No newline at end of file
diff --git a/src/com/mirantis/mk/Git.groovy b/src/com/mirantis/mk/Git.groovy
index 919acb9..51caed9 100644
--- a/src/com/mirantis/mk/Git.groovy
+++ b/src/com/mirantis/mk/Git.groovy
@@ -147,8 +147,7 @@
*
* @param sourceUrl Source git repository
* @param targetUrl Target git repository
- * @param credentialsId Credentials id to use for accessing source/target
- * repositories
+ * @param credentialsId Credentials id to use for accessing target repositories
* @param branches List or comma-separated string of branches to sync
* @param followTags Mirror tags
* @param pushSource Push back into source branch, resulting in 2-way sync
@@ -156,25 +155,54 @@
* @param gitEmail Email for creation of merge commits
* @param gitName Name for creation of merge commits
*/
-def mirrorGit(sourceUrl, targetUrl, credentialsId, branches, followTags = false, pushSource = false, pushSourceTags = false, gitEmail = 'jenkins@localhost', gitName = 'Jenkins') {
+def mirrorGit(sourceUrl, targetUrl, credentialsId, branches, followTags = false, pushSource = false, pushSourceTags = false, gitEmail = 'jenkins@localhost', gitName = 'Jenkins', sourceRemote = 'origin') {
def common = new com.mirantis.mk.Common()
def ssh = new com.mirantis.mk.Ssh()
if (branches instanceof String) {
branches = branches.tokenize(',')
}
+ // If both source and target repos are secured and accessible via http/https,
+ // we need to switch GIT_ASKPASS value when running git commands
+ def sourceAskPass
+ def targetAskPass
- ssh.prepareSshAgentKey(credentialsId)
- ssh.ensureKnownHosts(targetUrl)
+ def sshCreds = common.getCredentialsById(credentialsId, 'sshKey') // True if found
+ if (sshCreds) {
+ ssh.prepareSshAgentKey(credentialsId)
+ ssh.ensureKnownHosts(targetUrl)
+ sh "git config user.name '${gitName}'"
+ } else {
+ withCredentials([[$class : 'UsernamePasswordMultiBinding',
+ credentialsId : credentialsId,
+ passwordVariable: 'GIT_PASSWORD',
+ usernameVariable: 'GIT_USERNAME']]) {
+ sh """
+ set +x
+ git config --global credential.${targetUrl}.username \${GIT_USERNAME}
+ echo "echo \${GIT_PASSWORD}" > ${WORKSPACE}/${credentialsId}_askpass.sh
+ chmod +x ${WORKSPACE}/${credentialsId}_askpass.sh
+ git config user.name \${GIT_USERNAME}
+ """
+ sourceAskPass = env.GIT_ASKPASS ?: ''
+ targetAskPass = "${WORKSPACE}/${credentialsId}_askpass.sh"
+ }
+ }
sh "git config user.email '${gitEmail}'"
- sh "git config user.name '${gitName}'"
def remoteExistence = sh(script: "git remote -v | grep ${TARGET_URL} | grep target", returnStatus: true)
- if(remoteExistence != 0){
- // silently try to remove target
- sh(script:"git remote remove target", returnStatus: true)
- sh("git remote add target ${TARGET_URL}")
+ if(remoteExistence == 0) {
+ // silently try to remove target
+ sh(script: "git remote remove target", returnStatus: true)
}
- ssh.agentSh "git remote update --prune"
+ sh("git remote add target ${TARGET_URL}")
+ if (sshCreds) {
+ ssh.agentSh "git remote update --prune"
+ } else {
+ env.GIT_ASKPASS = sourceAskPass
+ sh "git remote update ${sourceRemote} --prune"
+ env.GIT_ASKPASS = targetAskPass
+ sh "git remote update target --prune"
+ }
for (i=0; i < branches.size; i++) {
branch = branches[i]
@@ -190,21 +218,41 @@
sh "git ls-tree target/${branch} && git merge --no-edit --ff target/${branch} || echo 'Target repository is empty, skipping merge'"
followTagsArg = followTags ? "--follow-tags" : ""
- ssh.agentSh "git push ${followTagsArg} target HEAD:${branch}"
+ if (sshCreds) {
+ ssh.agentSh "git push ${followTagsArg} target HEAD:${branch}"
+ } else {
+ sh "git push ${followTagsArg} target HEAD:${branch}"
+ }
if (pushSource == true) {
followTagsArg = followTags && pushSourceTags ? "--follow-tags" : ""
- ssh.agentSh "git push ${followTagsArg} origin HEAD:${branch}"
+ if (sshCreds) {
+ ssh.agentSh "git push ${followTagsArg} origin HEAD:${branch}"
+ } else {
+ sh "git push ${followTagsArg} origin HEAD:${branch}"
+ }
}
}
if (followTags == true) {
- ssh.agentSh "git push -f target --tags"
+ if (sshCreds) {
+ ssh.agentSh "git push -f target --tags"
+ } else {
+ sh "git push -f target --tags"
+ }
if (pushSourceTags == true) {
- ssh.agentSh "git push -f origin --tags"
+ if (sshCreds) {
+ ssh.agentSh "git push -f origin --tags"
+ } else {
+ sh "git push -f origin --tags"
+ }
}
}
sh "git remote rm target"
+ if (!sshCreds) {
+ sh "set +x; rm -f ${targetAskPass}"
+ sh "git config --global --unset credential.${targetUrl}.username"
+ }
}
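+
+// Illustrative usage sketch (URLs, branches and credentials id are examples; with username/password
+// credentials the http/https flow above is used, with an ssh key the original ssh-agent flow is kept):
+//   def git = new com.mirantis.mk.Git()
+//   git.mirrorGit('https://gerrit.example.com/project', 'https://github.com/example/project',
+//                 'http-mirror-creds', 'master,release/2019.2.0', true)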
diff --git a/src/com/mirantis/mk/Openstack.groovy b/src/com/mirantis/mk/Openstack.groovy
index c9e74fd..b11f628 100644
--- a/src/com/mirantis/mk/Openstack.groovy
+++ b/src/com/mirantis/mk/Openstack.groovy
@@ -526,280 +526,30 @@
}
}
-/**
- * Verifies Galera database
- *
- * This function checks for Galera master, tests connection and if reachable, it obtains the result
- * of Salt mysql.status function. The result is then parsed, validated and outputed to the user.
- *
- * @param env Salt Connection object or pepperEnv
- * @param slave Boolean value to enable slave checking (if master in unreachable)
- * @param checkTimeSync Boolean value to enable time sync check
- * @return resultCode int values used to determine exit status in the calling function
- */
def verifyGaleraStatus(env, slave=false, checkTimeSync=false) {
- def salt = new com.mirantis.mk.Salt()
def common = new com.mirantis.mk.Common()
- def out = ""
- def status = "unknown"
- def testNode = ""
- if (!slave) {
- try {
- galeraMaster = salt.getMinions(env, "I@galera:master")
- common.infoMsg("Current Galera master is: ${galeraMaster}")
- salt.minionsReachable(env, "I@salt:master", "I@galera:master")
- testNode = "I@galera:master"
- } catch (Exception e) {
- common.errorMsg('Galera master is not reachable.')
- return 128
- }
- } else {
- try {
- galeraMinions = salt.getMinions(env, "I@galera:slave")
- common.infoMsg("Testing Galera slave minions: ${galeraMinions}")
- } catch (Exception e) {
- common.errorMsg("Cannot obtain Galera slave minions list.")
- return 129
- }
- for (minion in galeraMinions) {
- try {
- salt.minionsReachable(env, "I@salt:master", minion)
- testNode = minion
- break
- } catch (Exception e) {
- common.warningMsg("Slave '${minion}' is not reachable.")
- }
- }
- }
- if (!testNode) {
- common.errorMsg("No Galera slave was reachable.")
- return 130
- }
- if (checkTimeSync && !salt.checkClusterTimeSync(env, "I@galera:master or I@galera:slave")) {
- common.errorMsg("Time in cluster is desynchronized or it couldn't be detemined. You should fix this issue manually before proceeding.")
- return 131
- }
- try {
- out = salt.cmdRun(env, "I@salt:master", "salt -C '${testNode}' mysql.status")
- } catch (Exception e) {
- common.errorMsg('Could not determine mysql status.')
- return 256
- }
- if (out) {
- try {
- status = validateAndPrintGaleraStatusReport(env, out, testNode)
- } catch (Exception e) {
- common.errorMsg('Could not parse the mysql status output. Check it manually.')
- return 1
- }
- } else {
- common.errorMsg("Mysql status response unrecognized or is empty. Response: ${out}")
- return 1024
- }
- if (status == "OK") {
- common.infoMsg("No errors found - MySQL status is ${status}.")
- return 0
- } else if (status == "unknown") {
- common.warningMsg('MySQL status cannot be detemined')
- return 1
- } else {
- common.errorMsg("Errors found.")
- return 2
- }
+ def galera = new com.mirantis.mk.Galera()
+ common.warningMsg("verifyGaleraStatus method was moved to Galera class. Please change your calls accordingly.")
+ return galera.verifyGaleraStatus(env, slave, checkTimeSync)
}
-/** Validates and prints result of verifyGaleraStatus function
-@param env Salt Connection object or pepperEnv
-@param out Output of the mysql.status Salt function
-@return status "OK", "ERROR" or "uknown" depending on result of validation
-*/
-
def validateAndPrintGaleraStatusReport(env, out, minion) {
- def salt = new com.mirantis.mk.Salt()
def common = new com.mirantis.mk.Common()
- if (minion == "I@galera:master") {
- role = "master"
- } else {
- role = "slave"
- }
- sizeOut = salt.getReturnValues(salt.getPillar(env, minion, "galera:${role}:members"))
- expected_cluster_size = sizeOut.size()
- outlist = out['return'][0]
- resultString = outlist.get(outlist.keySet()[0]).replace("\n ", " ").replace(" ", "").replace("Salt command execution success", "").replace("----------", "").replace(": \n", ": no value\n")
- resultYaml = readYaml text: resultString
- parameters = [
- wsrep_cluster_status: [title: 'Cluster status', expectedValues: ['Primary'], description: ''],
- wsrep_cluster_size: [title: 'Current cluster size', expectedValues: [expected_cluster_size], description: ''],
- wsrep_ready: [title: 'Node status', expectedValues: ['ON', true], description: ''],
- wsrep_local_state_comment: [title: 'Node status comment', expectedValues: ['Joining', 'Waiting on SST', 'Joined', 'Synced', 'Donor'], description: ''],
- wsrep_connected: [title: 'Node connectivity', expectedValues: ['ON', true], description: ''],
- wsrep_local_recv_queue_avg: [title: 'Average size of local reveived queue', expectedThreshold: [warn: 0.5, error: 1.0], description: '(Value above 0 means that the node cannot apply write-sets as fast as it receives them, which can lead to replication throttling)'],
- wsrep_local_send_queue_avg: [title: 'Average size of local send queue', expectedThreshold: [warn: 0.5, error: 1.0], description: '(Value above 0 indicate replication throttling or network throughput issues, such as a bottleneck on the network link.)']
- ]
- for (key in parameters.keySet()) {
- value = resultYaml[key]
- parameters.get(key) << [actualValue: value]
- }
- for (key in parameters.keySet()) {
- param = parameters.get(key)
- if (key == 'wsrep_local_recv_queue_avg' || key == 'wsrep_local_send_queue_avg') {
- if (param.get('actualValue') > param.get('expectedThreshold').get('error')) {
- param << [match: 'error']
- } else if (param.get('actualValue') > param.get('expectedThreshold').get('warn')) {
- param << [match: 'warn']
- } else {
- param << [match: 'ok']
- }
- } else {
- for (expValue in param.get('expectedValues')) {
- if (expValue == param.get('actualValue')) {
- param << [match: 'ok']
- break
- } else {
- param << [match: 'error']
- }
- }
- }
- }
- cluster_info_report = []
- cluster_warning_report = []
- cluster_error_report = []
- for (key in parameters.keySet()) {
- param = parameters.get(key)
- if (param.containsKey('expectedThreshold')) {
- expValues = "below ${param.get('expectedThreshold').get('warn')}"
- } else {
- if (param.get('expectedValues').size() > 1) {
- expValues = param.get('expectedValues').join(' or ')
- } else {
- expValues = param.get('expectedValues')[0]
- }
- }
- reportString = "${param.title}: ${param.actualValue} (Expected: ${expValues}) ${param.description}"
- if (param.get('match').equals('ok')) {
- cluster_info_report.add("[OK ] ${reportString}")
- } else if (param.get('match').equals('warn')) {
- cluster_warning_report.add("[WARNING] ${reportString}")
- } else {
- cluster_error_report.add("[ ERROR] ${reportString})")
- }
- }
- common.infoMsg("CLUSTER STATUS REPORT: ${cluster_info_report.size()} expected values, ${cluster_warning_report.size()} warnings and ${cluster_error_report.size()} error found:")
- if (cluster_info_report.size() > 0) {
- common.infoMsg(cluster_info_report.join('\n'))
- }
- if (cluster_warning_report.size() > 0) {
- common.warningMsg(cluster_warning_report.join('\n'))
- }
- if (cluster_error_report.size() > 0) {
- common.errorMsg(cluster_error_report.join('\n'))
- return "ERROR"
- } else {
- return "OK"
- }
+ def galera = new com.mirantis.mk.Galera()
+ common.warningMsg("validateAndPrintGaleraStatusReport method was moved to Galera class. Please change your calls accordingly.")
+ return galera.validateAndPrintGaleraStatusReport(env, out, minion)
}
def getGaleraLastShutdownNode(env) {
- def salt = new com.mirantis.mk.Salt()
def common = new com.mirantis.mk.Common()
- members = ''
- lastNode = [ip: '', seqno: -2]
- try {
- members = salt.getReturnValues(salt.getPillar(env, "I@galera:master", "galera:master:members"))
- } catch (Exception er) {
- common.errorMsg('Could not retrieve members list')
- return 'I@galera:master'
- }
- if (members) {
- for (member in members) {
- try {
- salt.minionsReachable(env, 'I@salt:master', "S@${member.host}")
- out = salt.getReturnValues(salt.cmdRun(env, "S@${member.host}", 'cat /var/lib/mysql/grastate.dat | grep "seqno" | cut -d ":" -f2', true, null, false))
- seqno = out.tokenize('\n')[0].trim()
- if (seqno.isNumber()) {
- seqno = seqno.toInteger()
- } else {
- seqno = -2
- }
- highestSeqno = lastNode.get('seqno')
- if (seqno > highestSeqno) {
- lastNode << [ip: "${member.host}", seqno: seqno]
- }
- } catch (Exception er) {
- common.warningMsg("Could not determine 'seqno' value for node ${member.host} ")
- }
- }
- }
- if (lastNode.get('ip') != '') {
- return "S@${lastNode.ip}"
- } else {
- return "I@galera:master"
- }
+ def galera = new com.mirantis.mk.Galera()
+ common.warningMsg("getGaleraLastShutdownNode method was moved to Galera class. Please change your calls accordingly.")
+ return galera.getGaleraLastShutdownNode(env)
}
-/**
- * Restores Galera database
- * @param env Salt Connection object or pepperEnv
- * @return output of salt commands
- */
def restoreGaleraDb(env) {
- def salt = new com.mirantis.mk.Salt()
def common = new com.mirantis.mk.Common()
- try {
- salt.runSaltProcessStep(env, 'I@galera:slave', 'service.stop', ['mysql'])
- } catch (Exception er) {
- common.warningMsg('Mysql service already stopped')
- }
- try {
- salt.runSaltProcessStep(env, 'I@galera:master', 'service.stop', ['mysql'])
- } catch (Exception er) {
- common.warningMsg('Mysql service already stopped')
- }
- lastNodeTarget = getGaleraLastShutdownNode(env)
- try {
- salt.cmdRun(env, 'I@galera:slave', "rm /var/lib/mysql/ib_logfile*")
- } catch (Exception er) {
- common.warningMsg('Files are not present')
- }
- try {
- salt.cmdRun(env, 'I@galera:slave', "rm /var/lib/mysql/grastate.dat")
- } catch (Exception er) {
- common.warningMsg('Files are not present')
- }
- try {
- salt.cmdRun(env, lastNodeTarget, "mkdir /root/mysql/mysql.bak")
- } catch (Exception er) {
- common.warningMsg('Directory already exists')
- }
- try {
- salt.cmdRun(env, lastNodeTarget, "rm -rf /root/mysql/mysql.bak/*")
- } catch (Exception er) {
- common.warningMsg('Directory already empty')
- }
- try {
- salt.cmdRun(env, lastNodeTarget, "mv /var/lib/mysql/* /root/mysql/mysql.bak")
- } catch (Exception er) {
- common.warningMsg('Files were already moved')
- }
- try {
- salt.runSaltProcessStep(env, lastNodeTarget, 'file.remove', ["/var/lib/mysql/.galera_bootstrap"])
- } catch (Exception er) {
- common.warningMsg('File is not present')
- }
- salt.cmdRun(env, lastNodeTarget, "sed -i '/gcomm/c\\wsrep_cluster_address=\"gcomm://\"' /etc/mysql/my.cnf")
- def backup_dir = salt.getReturnValues(salt.getPillar(env, lastNodeTarget, 'xtrabackup:client:backup_dir'))
- if(backup_dir == null || backup_dir.isEmpty()) { backup_dir='/var/backups/mysql/xtrabackup' }
- salt.runSaltProcessStep(env, lastNodeTarget, 'file.remove', ["${backup_dir}/dbrestored"])
- salt.cmdRun(env, 'I@xtrabackup:client', "su root -c 'salt-call state.sls xtrabackup'")
- salt.runSaltProcessStep(env, lastNodeTarget, 'service.start', ['mysql'])
-
- // wait until mysql service on galera master is up
- try {
- salt.commandStatus(env, lastNodeTarget, 'service mysql status', 'running')
- } catch (Exception er) {
- input message: "Database is not running please fix it first and only then click on PROCEED."
- }
-
- salt.runSaltProcessStep(env, "I@galera:master and not ${lastNodeTarget}", 'service.start', ['mysql'])
- salt.runSaltProcessStep(env, "I@galera:slave and not ${lastNodeTarget}", 'service.start', ['mysql'])
+ def galera = new com.mirantis.mk.Galera()
+ common.warningMsg("restoreGaleraDb method was moved to Galera class. Please change your calls accordingly.")
+ return galera.restoreGaleraDb(env)
}
\ No newline at end of file
diff --git a/src/com/mirantis/mk/Orchestrate.groovy b/src/com/mirantis/mk/Orchestrate.groovy
index cd7d2f4..055b70b 100644
--- a/src/com/mirantis/mk/Orchestrate.groovy
+++ b/src/com/mirantis/mk/Orchestrate.groovy
@@ -411,8 +411,9 @@
// races, apply on the first node initially
if (salt.testTarget(master, "I@gnocchi:client ${extra_tgt}")) {
first_target = salt.getFirstMinion(master, "I@gnocchi:client ${extra_tgt}")
- salt.enforceState([saltId: master, target: "${first_target} ${extra_tgt}", state: 'gnocchi.client'])
- salt.enforceState([saltId: master, target: "I@gnocchi:client ${extra_tgt}", state: 'gnocchi.client'])
+ // TODO(vsaienko) remove retries when they are moved to gnocchiv1 salt module. Related-Prod: PROD-32186
+ salt.enforceState([saltId: master, target: "${first_target} ${extra_tgt}", state: 'gnocchi.client', retries: 3])
+ salt.enforceState([saltId: master, target: "I@gnocchi:client ${extra_tgt}", state: 'gnocchi.client', retries: 3])
}
// Install gnocchi statsd
diff --git a/src/com/mirantis/mk/Salt.groovy b/src/com/mirantis/mk/Salt.groovy
index 7fd8ad0..0a9c78e 100644
--- a/src/com/mirantis/mk/Salt.groovy
+++ b/src/com/mirantis/mk/Salt.groovy
@@ -458,32 +458,45 @@
* You can call this function when salt-master already contains salt keys of the target_nodes
* @param saltId Salt Connection object or pepperEnv (the command will be sent using the selected method)
* @param target Should always be salt-master
- * @param target_nodes unique identification of a minion or group of salt minions
+ * @param targetNodes unique identification of a minion or group of salt minions
* @param batch salt batch parameter integer or string with percents (optional, default null - disable batch)
- * @param wait timeout for the salt command if minions do not return (default 10)
+ * @param cmdTimeout timeout for the salt command if minions do not return (default 10)
* @param maxRetries finite number of iterations to check status of a command (default 200)
* @return output of salt command
*/
-def minionsReachable(saltId, target, target_nodes, batch=null, wait = 10, maxRetries = 200) {
+
+def minionsReachable(saltId, target, targetNodes, batch=null, cmdTimeout = 10, maxRetries = 200) {
def common = new com.mirantis.mk.Common()
- def cmd = "salt -t${wait} -C '${target_nodes}' test.ping"
- common.infoMsg("Checking if all ${target_nodes} minions are reachable")
- def count = 0
- while(count < maxRetries) {
+ def cmd = "salt -t${cmdTimeout} -C '${targetNodes}' test.ping"
+ common.infoMsg("Checking if all ${targetNodes} minions are reachable")
+ def retriesCount = 0
+ while(retriesCount < maxRetries) {
Calendar timeout = Calendar.getInstance();
- timeout.add(Calendar.SECOND, wait);
- def out = runSaltCommand(saltId, 'local', ['expression': target, 'type': 'compound'], 'cmd.shell', batch, [cmd], null, wait)
+ timeout.add(Calendar.SECOND, cmdTimeout);
+ def out = runSaltCommand(saltId, 'local', ['expression': target, 'type': 'compound'], 'cmd.shell', batch, [cmd], null, cmdTimeout)
Calendar current = Calendar.getInstance();
if (current.getTime().before(timeout.getTime())) {
- printSaltCommandResult(out)
- return out
+ common.infoMsg("Successful response received from all targeted nodes.")
+ printSaltCommandResult(out)
+ return out
}
- common.infoMsg("Not all of the targeted '${target_nodes}' minions returned yet. Waiting ...")
- count++
+ def outYaml = readYaml text: getReturnValues(out)
+ def successfulNodes = []
+ def failedNodes = []
+ for (node in outYaml.keySet()) {
+ if (outYaml[node] == true || outYaml[node].toString().toLowerCase() == 'true') {
+ successfulNodes.add(node)
+ } else {
+ failedNodes.add(node)
+ }
+ }
+ common.infoMsg("Not all of the targeted minions returned yet. Successful response from ${successfulNodes}. Still waiting for ${failedNodes}.")
+ retriesCount++
sleep(time: 500, unit: 'MILLISECONDS')
}
}
+
/**
* You can call this function when need to check that all minions are available, free and ready for command execution
* @param config LinkedHashMap config parameter, which contains next:
@@ -518,6 +531,43 @@
}
/**
+ * Restart and wait for salt-minions on target nodes.
+ * @param saltId Salt Connection object or pepperEnv (the command will be sent using the selected method)
+ * @param target unique identification of a minion or group of salt minions
+ * @param wait timeout for the salt command if minions do not return (default 10)
+ * @param maxRetries finite number of iterations to check status of a command (default 15)
+ * @param async Run salt minion restart and do not wait for response
+ * @return output of salt command
+ */
+def restartSaltMinion(saltId, target, wait = 10, maxRetries = 15, async = true) {
+ def common = new com.mirantis.mk.Common()
+ common.infoMsg("Restarting salt-minion on ${target} and waiting until the minions are reachable.")
+ runSaltProcessStep(saltId, target, 'cmd.shell', ['salt-call service.restart salt-minion'], null, true, 60, null, async)
+ checkTargetMinionsReady(['saltId': saltId, 'target': target, timeout: wait, retries: maxRetries])
+ common.infoMsg("All ${target} minions are alive...")
+}
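+
+// Illustrative usage sketch (assumes a 'pepperEnv' Salt connection; target is an example):
+//   def salt = new com.mirantis.mk.Salt()
+//   salt.restartSaltMinion(pepperEnv, 'I@galera:master', 10, 15)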
+
+/**
+ * Upgrade package and restart salt minion.
+ * @param saltId Salt Connection object or pepperEnv (the command will be sent using the selected method)
+ * @param target unique identification of a minion or group of salt minions
+ * @param pkg_name Name of the package to upgrade
+ * @param wait timeout for the salt command if minions do not return (default 5)
+ * @param maxRetries finite number of iterations to check status of a command (default 10)
+ * @return output of salt command
+ */
+def upgradePackageAndRestartSaltMinion(saltId, target, pkg_name, wait = 5, maxRetries = 10) {
+ def common = new com.mirantis.mk.Common()
+ def latest_version = getReturnValues(runSaltProcessStep(saltId, target, 'pkg.latest_version', [pkg_name, 'show_installed=True'])).split('\n')[0]
+ def current_version = getReturnValues(runSaltProcessStep(saltId, target, 'pkg.version', [pkg_name])).split('\n')[0]
+ if (current_version && latest_version != current_version) {
+ common.infoMsg("Upgrading current ${pkg_name}: ${current_version} to ${latest_version}")
+ runSaltProcessStep(saltId, target, 'pkg.install', [pkg_name], 'only_upgrade=True')
+ restartSaltMinion(saltId, target, wait, maxRetries)
+ }
+}
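+
+// Illustrative usage sketch (assumes a 'pepperEnv' Salt connection; target and package name are examples):
+//   def salt = new com.mirantis.mk.Salt()
+//   salt.upgradePackageAndRestartSaltMinion(pepperEnv, 'I@galera:master', 'salt-minion')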
+
+/**
* Run command on salt minion (salt cmd.run wrapper)
* @param saltId Salt Connection object or pepperEnv (the command will be sent using the selected method)
* @param target Get pillar target
@@ -870,12 +920,13 @@
* @param tgt Salt process step target
* @param fun Salt process step function
* @param arg process step arguments (optional, default [])
- * @param batch salt batch parameter integer or string with percents (optional, default null - disable batch)
+ * @param batch salt batch parameter integer or string with percents (optional, default null - disable batch). Can't be used with async
* @param output print output (optional, default true)
* @param timeout Additional argument salt api timeout
+ * @param async Run the salt command but don't wait for a reply. Can't be used with batch
* @return output of salt command
*/
-def runSaltProcessStep(saltId, tgt, fun, arg = [], batch = null, output = true, timeout = -1, kwargs = null) {
+def runSaltProcessStep(saltId, tgt, fun, arg = [], batch = null, output = true, timeout = -1, kwargs = null, async = false) {
def common = new com.mirantis.mk.Common()
def salt = new com.mirantis.mk.Salt()
def out
@@ -884,6 +935,8 @@
if (batch == true) {
out = runSaltCommand(saltId, 'local_batch', ['expression': tgt, 'type': 'compound'], fun, String.valueOf(batch), arg, kwargs, timeout)
+ } else if (async == true) {
+ out = runSaltCommand(saltId, 'local_async', ['expression': tgt, 'type': 'compound'], fun, batch, arg, kwargs, timeout)
} else {
out = runSaltCommand(saltId, 'local', ['expression': tgt, 'type': 'compound'], fun, batch, arg, kwargs, timeout)
}
@@ -955,7 +1008,7 @@
outputResources.add(String.format("Resource: %s\n\u001B[33m%s\u001B[0m", resKey, common.prettify(resource)))
}
}else{
- if(!printOnlyChanges || resource.changes.size() > 0){
+ if(!printOnlyChanges || (resource.changes && resource.changes.size() > 0)) {
outputResources.add(String.format("Resource: %s\n\u001B[32m%s\u001B[0m", resKey, common.prettify(resource)))
}
}
@@ -1214,3 +1267,100 @@
return false
}
}
+
+/**
+* Finds out IP address of the given node or a list of nodes
+*
+* @param saltId Salt Connection object or pepperEnv (the command will be sent using the selected method)
+* @param nodes Targeted node hostnames to be checked (String or List of strings)
+* @param useGrains If true, the value will be taken from grains. If false, it will be taken from the 'hostname' command.
+* @return Map Result Map in format ['nodeName1': 'ipAddress1', 'nodeName2': 'ipAddress2', ...]
+*/
+
+def getIPAddressesForNodenames(saltId, nodes = [], useGrains = true) {
+ result = [:]
+
+ if (nodes instanceof String) {
+ nodes = [nodes]
+ }
+
+ if (useGrains) {
+ for (String node in nodes) {
+ ip = getReturnValues(getGrain(saltId, node, "fqdn_ip4"))["fqdn_ip4"][0]
+ result[node] = ip
+ }
+ } else {
+ for (String node in nodes) {
+ ip = getReturnValues(cmdRun(saltId, node, "hostname -i")).readLines()[0]
+ result[node] = ip
+ }
+ }
+ return result
+}
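+
+// Illustrative usage sketch (assumes a 'pepperEnv' Salt connection; node names and addresses are examples):
+//   def salt = new com.mirantis.mk.Salt()
+//   def ips = salt.getIPAddressesForNodenames(pepperEnv, ['ctl01', 'ctl02'])
+//   // ips might look like ['ctl01': '10.0.0.11', 'ctl02': '10.0.0.12']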
+
+/**
+* Checks if required package is installed and returns averaged IO stats for selected disks.
+* Allows getting averaged values of a specific parameter for all disks or for a specified disk.
+* The interval between checks and the number of checks are configurable.
+*
+* @param saltId Salt Connection object or pepperEnv (the command will be sent using the selected method)
+* @param target Node to be targeted (Should only match 1 node)
+* @param parameterName Name of parameter from 'iostat' output (default = '' -- returns all variables)
+* @param interval Interval between checks (default = 1)
+* @param count Number of checks (default = 5)
+* @param disks Disks to be checked (default = '' -- returns all disks)
+* @param output Print Salt command return (default = true)
+* @return Map Map containing desired values in format ['disk':'value']
+*/
+
+def getIostatValues(Map params) {
+ def common = new com.mirantis.mk.Common()
+ def ret = [:]
+ if (isPackageInstalled(['saltId': params.saltId, 'target': params.target, 'packageName': 'sysstat', 'output': false])) {
+ def arg = [params.get('interval', 1), params.get('count', 5), params.get('disks', '')]
+ def res = getReturnValues(runSaltProcessStep(params.saltId, params.target, 'disk.iostat', arg, null, params.output))
+ if (res instanceof Map) {
+ for (int i = 0; i < res.size(); i++) {
+ def key = res.keySet()[i]
+ if (params.containsKey('parameterName')) {
+ if (res[key].containsKey(params.parameterName)){
+ ret[key] = res[key][params.parameterName]
+ } else {
+ common.errorMsg("Parameter '${params.parameterName}' not found for disk '${key}'. Valid parameters for this disk are: '${res[key].keySet()}'")
+ }
+ } else {
+ return res // If no parameterName is defined, return all of them.
+ }
+ }
+ }
+ } else {
+ common.errorMsg("Package 'sysstat' seems not to be installed on at least one of the targeted nodes: ${params.target}. Please fix this to be able to check 'iostat' values. Find more in the docs TODO:<Add docs link>")
+ }
+ return ret
+}
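+
+// Illustrative usage sketch (assumes a 'pepperEnv' Salt connection; target and the 50% threshold are examples):
+//   def salt = new com.mirantis.mk.Salt()
+//   def util = salt.getIostatValues(['saltId': pepperEnv, 'target': 'ctl01*', 'parameterName': '%util', 'output': false])
+//   util.each { disk, value ->
+//       if (value.toString().isBigDecimal() && value.toBigDecimal() > 50) { echo "Disk ${disk} is busy: ${value}" }
+//   }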
+
+/**
+* Checks if defined package is installed on all nodes defined by target parameter.
+*
+* @param saltId Salt Connection object or pepperEnv (the command will be sent using the selected method)
+* @param target Node or nodes to be targeted
+* @param packageName Name of package to be checked
+* @param output Print Salt command return (default = true)
+* @return boolean True if package is installed on all defined nodes. False if not found on at least one of defined nodes.
+*/
+
+def isPackageInstalled(Map params) {
+ def output = params.get('output', true)
+ def res = runSaltProcessStep(params.saltId, params.target, "pkg.list_pkgs", [], null, output)['return'][0]
+ if (res) {
+ for (int i = 0; i < res.size(); i++) {
+ def key = res.keySet()[i]
+ if (!(res[key] instanceof Map && res[key].get(params.packageName, false))) {
+ return false
+ }
+ }
+ return true
+ } else {
+ return false
+ }
+}
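+
+// Illustrative usage sketch (assumes a 'pepperEnv' Salt connection; target is an example):
+//   def salt = new com.mirantis.mk.Salt()
+//   if (!salt.isPackageInstalled(['saltId': pepperEnv, 'target': 'I@xtrabackup:client', 'packageName': 'sysstat', 'output': false])) {
+//       salt.runSaltProcessStep(pepperEnv, 'I@xtrabackup:client', 'pkg.install', ['sysstat'])
+//   }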
diff --git a/src/com/mirantis/mk/SaltModelTesting.groovy b/src/com/mirantis/mk/SaltModelTesting.groovy
index 16e469c..7952980 100644
--- a/src/com/mirantis/mk/SaltModelTesting.groovy
+++ b/src/com/mirantis/mk/SaltModelTesting.groovy
@@ -104,7 +104,11 @@
}
def img = docker.image(dockerImageName)
- img.pull()
+ def pull_enabled = config.get('dockerPull', true)
+
+ if ( pull_enabled ) {
+ img.pull()
+ }
try {
img.inside(dockerOptsFinal) {