Merge "Add executeMachineScpCommand method"
diff --git a/src/com/mirantis/mcp/MCPArtifactory.groovy b/src/com/mirantis/mcp/MCPArtifactory.groovy
index 2065332..6f3780a 100644
--- a/src/com/mirantis/mcp/MCPArtifactory.groovy
+++ b/src/com/mirantis/mcp/MCPArtifactory.groovy
@@ -110,6 +110,64 @@
}
/**
+ * Create an empty directory in Artifactory repo
+ *
+ * @param artifactoryURL String, a URL to Artifactory
+ * @param path String, a path to the desired directory including repository name
+ * @param dir String, desired directory name
+ */
+def createDir (String artifactoryURL, String path, String dir) {
+ def url = "${artifactoryURL}/${path}/${dir}/"
+ withCredentials([
+ [$class : 'UsernamePasswordMultiBinding',
+ credentialsId : 'artifactory',
+ passwordVariable: 'ARTIFACTORY_PASSWORD',
+ usernameVariable: 'ARTIFACTORY_LOGIN']
+ ]) {
+ sh "bash -c \"curl -X PUT -u ${ARTIFACTORY_LOGIN}:${ARTIFACTORY_PASSWORD} \'${url}\'\""
+ }
+}
+
+/**
+ * Move/copy an artifact or a folder to the specified destination
+ *
+ * @param artifactoryURL String, a URL to Artifactory
+ * @param sourcePath String, a source path to the artifact including repository name
+ * @param dstPath String, a destination path to the artifact including repository name
+ * @param copy boolean, whether to copy or move the item, default is move
+ * @param dryRun boolean, whether to perform a dry run or not, default is false
+ */
+def moveItem (String artifactoryURL, String sourcePath, String dstPath, boolean copy = false, boolean dryRun = false) {
+ def url = "${artifactoryURL}/api/${copy ? 'copy' : 'move'}/${sourcePath}?to=/${dstPath}&dry=${dryRun ? '1' : '0'}"
+ withCredentials([
+ [$class : 'UsernamePasswordMultiBinding',
+ credentialsId : 'artifactory',
+ passwordVariable: 'ARTIFACTORY_PASSWORD',
+ usernameVariable: 'ARTIFACTORY_LOGIN']
+ ]) {
+ sh "bash -c \"curl -X POST -u ${ARTIFACTORY_LOGIN}:${ARTIFACTORY_PASSWORD} \'${url}\'\""
+ }
+}
+
+/**
+ * Recursively delete the specified artifact or a folder
+ *
+ * @param artifactoryURL String, a URL to Artifactory
+ * @param itemPath String, a source path to the item including repository name
+ */
+def deleteItem (String artifactoryURL, String itemPath) {
+ def url = "${artifactoryURL}/${itemPath}"
+ withCredentials([
+ [$class : 'UsernamePasswordMultiBinding',
+ credentialsId : 'artifactory',
+ passwordVariable: 'ARTIFACTORY_PASSWORD',
+ usernameVariable: 'ARTIFACTORY_LOGIN']
+ ]) {
+ sh "bash -c \"curl -X DELETE -u ${ARTIFACTORY_LOGIN}:${ARTIFACTORY_PASSWORD} \'${url}\'\""
+ }
+}
+
+/**
* Get properties for specified artifact in Artifactory
* Returns LinkedHashMap of properties
*
@@ -132,6 +190,31 @@
}
/**
+ * Get checksums of artifact
+ *
+ * @param artifactoryUrl String, a URL of the Artifactory repo
+ * @param repoName Artifact repository name
+ * @param artifactName Artifactory object name
+ * @param checksumType Type of checksum (default md5)
+ */
+
+def getArtifactChecksum(artifactoryUrl, repoName, artifactName, checksumType = 'md5'){
+ def url = "${artifactoryUrl}/api/storage/${repoName}/${artifactName}"
+ withCredentials([
+ [$class : 'UsernamePasswordMultiBinding',
+ credentialsId : 'artifactory',
+ passwordVariable: 'ARTIFACTORY_PASSWORD',
+ usernameVariable: 'ARTIFACTORY_LOGIN']
+ ]) {
+ def result = sh(script: "bash -c \"curl -X GET -u ${ARTIFACTORY_LOGIN}:${ARTIFACTORY_PASSWORD} \'${url}\'\"",
+ returnStdout: true).trim()
+ }
+
+ def properties = new groovy.json.JsonSlurperClassic().parseText(result)
+ return properties['checksums'][checksumType]
+}
+
+/**
* Check if image with tag exist by provided path
* Returns true or false
*
@@ -141,7 +224,7 @@
* @param artifactoryCreds String, artifactory creds to use. Optional, default is 'artifactory'
*/
def imageExists(String artifactoryURL, String imageRepo, String tag, String artifactoryCreds = 'artifactory') {
- def url = artifactoryURL + '/v2/' + imageRepo + '/manifest/' + tag
+ def url = artifactoryURL + '/v2/' + imageRepo + '/manifests/' + tag
def result
withCredentials([
[$class : 'UsernamePasswordMultiBinding',
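The new MCPArtifactory helpers are thin curl wrappers around the Artifactory REST API. A minimal usage sketch, assuming the shared library is loaded in the pipeline and the 'artifactory' credentials exist; the Artifactory URL, repositories and paths below are hypothetical placeholders:

```groovy
def artifactory = new com.mirantis.mcp.MCPArtifactory()
def artifactoryURL = 'https://artifactory.example.com/artifactory'   // hypothetical URL

node('docker') {
    // create an empty promotion folder in the production repo
    artifactory.createDir(artifactoryURL, 'binary-prod-local/project', 'release-1.0')
    // dry-run the copy first, then do the real copy (copy=true, dryRun flag)
    artifactory.moveItem(artifactoryURL, 'binary-dev-local/project/app.tar.gz',
                         'binary-prod-local/project/release-1.0/app.tar.gz', true, true)
    artifactory.moveItem(artifactoryURL, 'binary-dev-local/project/app.tar.gz',
                         'binary-prod-local/project/release-1.0/app.tar.gz', true, false)
    // verify the promoted artifact by its md5 checksum
    def md5 = artifactory.getArtifactChecksum(artifactoryURL, 'binary-prod-local',
                                              'project/release-1.0/app.tar.gz')
    echo "md5 of promoted artifact: ${md5}"
    // finally drop the development copy
    artifactory.deleteItem(artifactoryURL, 'binary-dev-local/project/app.tar.gz')
}
```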
diff --git a/src/com/mirantis/mcp/Validate.groovy b/src/com/mirantis/mcp/Validate.groovy
index 4d74114..b1755b2 100644
--- a/src/com/mirantis/mcp/Validate.groovy
+++ b/src/com/mirantis/mcp/Validate.groovy
@@ -41,11 +41,12 @@
* @param env_var Environment variables to set in container
* @param entrypoint Set entrypoint to /bin/bash or leave default
* @param mounts Map with mounts for container
+ * @param output_replacing List of regexes whose matches should be hidden from the output (passwords, etc.)
**/
def runContainer(Map params){
def common = new com.mirantis.mk.Common()
- defaults = ["name": "cvp", "env_var": [], "entrypoint": true]
+ defaults = ["name": "cvp", "env_var": [], "entrypoint": true, "mounts": [:], "output_replacing": []]
params = defaults + params
def salt = new com.mirantis.mk.Salt()
def variables = ''
@@ -71,8 +72,11 @@
params.mounts.each { local, container ->
mounts = mounts + " -v ${local}:${container}"
}
- salt.cmdRun(params.master, params.target, "docker run -tid --net=host --name=${params.name}" +
- "${mounts} -u root ${entry_point} ${variables} ${params.dockerImageLink}")
+ salt.cmdRun(params.master, params.target,
+ "docker run -tid --net=host --name=${params.name}" +
+ "${mounts} -u root ${entry_point} ${variables} ${params.dockerImageLink}",
+ true, null, true, [],
+ params.output_replacing)
}
def runContainer(master, target, dockerImageLink, name='cvp', env_var=[], entrypoint=true, mounts=[:]){
@@ -291,7 +295,7 @@
}
}
def script = ". ${env.WORKSPACE}/venv/bin/activate; ${settings}" +
- "pytest --junitxml ${output_dir}cvp_sanity.xml --tb=short -sv ${env.WORKSPACE}/cvp-sanity-checks/cvp_checks/tests/${test_set}"
+ "pytest --junitxml ${output_dir}cvp_sanity.xml --tb=short -rs -sv ${env.WORKSPACE}/cvp-sanity-checks/cvp_checks/tests/${test_set}"
withEnv(["SALT_USERNAME=${username}", "SALT_PASSWORD=${password}", "SALT_URL=${salt_url}"]) {
def statusCode = sh script:script, returnStatus:true
}
@@ -362,7 +366,7 @@
}
}
def script = ". ${env.WORKSPACE}/venv/bin/activate; ${settings}" +
- "pytest --junitxml ${output_dir}report.xml --tb=short -sv ${env.WORKSPACE}/${test_set}"
+ "pytest --junitxml ${output_dir}report.xml --tb=short -rs -sv ${env.WORKSPACE}/${test_set}"
withEnv(["SALT_USERNAME=${username}", "SALT_PASSWORD=${password}", "SALT_URL=${salt_url}"]) {
def statusCode = sh script:script, returnStatus:true
}
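With the new output_replacing parameter the docker run command can be logged with credentials masked. A hedged sketch, assuming a prepared Salt connection (saltMaster) and a hypothetical image link; the regex follows the format documented in Common.printSensitivityMsg:

```groovy
def validate = new com.mirantis.mcp.Validate()
// saltMaster is a Salt/pepper connection object prepared by the calling pipeline
validate.runContainer([
    'master'          : saltMaster,
    'target'          : 'I@salt:master',
    'dockerImageLink' : 'docker-prod-local.example.com/mirantis/cvp-sanity:latest',  // placeholder
    'name'            : 'cvp',
    'env_var'         : ['OS_PASSWORD=SuperSecret123'],
    'output_replacing': [/ (OS_PASSWORD=)(.*?)+ /]   // mask the password in the logged docker command
])
```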
diff --git a/src/com/mirantis/mk/Artifactory.groovy b/src/com/mirantis/mk/Artifactory.groovy
index 84eb143..824ba4f 100644
--- a/src/com/mirantis/mk/Artifactory.groovy
+++ b/src/com/mirantis/mk/Artifactory.groovy
@@ -429,6 +429,22 @@
}
/**
+ * Get the checksum of an artifact
+ *
+ * @param art Artifactory connection object
+ * @param repoName Artifact repository name
+ * @param artifactName Artifactory object name
+ * @param checksum Type of checksum (default md5)
+ */
+
+def getArtifactChecksum(art, repoName, artifactName, checksum = 'md5'){
+ def artifactory = new com.mirantis.mk.Artifactory()
+ def uri = "/storage/${repoName}/${artifactName}"
+ def output = artifactory.restGet(art, uri)
+ return output['checksums']["${checksum}"]
+}
+
+/**
* Create Helm repo for Artifactory
*
* @param art Artifactory connection object
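Unlike the MCPArtifactory variant above, this helper reuses the class's REST client and an existing Artifactory connection object. A brief sketch; how `art` is obtained is left to the calling pipeline, and the repo and path values are placeholders:

```groovy
def artifactoryLib = new com.mirantis.mk.Artifactory()
// `art` is the connection object already used with restGet()/restPost() elsewhere in the pipeline
def sha1 = artifactoryLib.getArtifactChecksum(art, 'binary-prod-local', 'project/app.tar.gz', 'sha1')
echo "sha1: ${sha1}"
```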
diff --git a/src/com/mirantis/mk/Ceph.groovy b/src/com/mirantis/mk/Ceph.groovy
new file mode 100644
index 0000000..bb837b2
--- /dev/null
+++ b/src/com/mirantis/mk/Ceph.groovy
@@ -0,0 +1,92 @@
+package com.mirantis.mk
+
+/**
+ *
+ * Ceph functions
+ *
+ */
+
+/**
+ * Wait until the Ceph cluster reports a healthy state
+ *
+ * @param master Salt connection object
+ * @param target Salt target of a node able to run 'ceph health'
+ * @param flags List of cluster flags (e.g. noout) that are acceptable while waiting
+ * @param count Initial attempt counter
+ * @param attempts Maximum number of attempts, with a 10s pause between them
+ *
+ */
+def waitForHealthy(master, target, flags=[], count=0, attempts=300) {
+ def common = new com.mirantis.mk.Common()
+ def salt = new com.mirantis.mk.Salt()
+ // wait for healthy cluster
+ while (count < attempts) {
+ def health = salt.cmdRun(master, target, 'ceph health')['return'][0].values()[0]
+ if (health.contains('HEALTH_OK')) {
+ common.infoMsg('Cluster is healthy')
+ break
+ } else {
+ for (flag in flags) {
+ if (health.contains(flag + ' flag(s) set') && !(health.contains('down'))) {
+ common.infoMsg('Cluster is healthy')
+ return
+ }
+ }
+ }
+ common.infoMsg("Ceph health status: ${health}")
+ count++
+ sleep(10)
+ }
+}
+
+/**
+ * Remove a partition (by UUID) used by a Ceph OSD
+ *
+ * @param master Salt connection object
+ * @param target Salt target of the node holding the partition
+ * @param partition_uuid UUID of the partition to remove
+ * @param type Partition type: 'lockbox', 'data' or empty for a plain blkid lookup
+ * @param id OSD id, used to locate the mounted data partition
+ *
+ */
+def removePartition(master, target, partition_uuid, type='', id=-1) {
+ def salt = new com.mirantis.mk.Salt()
+ def common = new com.mirantis.mk.Common()
+ def partition = ""
+ if (type == 'lockbox') {
+ try {
+ // umount - partition = /dev/sdi2
+ partition = salt.cmdRun(master, target, "lsblk -rp | grep -v mapper | grep ${partition_uuid} ")['return'][0].values()[0].split()[0]
+ salt.cmdRun(master, target, "umount ${partition}")
+ } catch (Exception e) {
+ common.warningMsg(e)
+ }
+ } else if (type == 'data') {
+ try {
+ // umount - partition = /dev/sdi2
+ partition = salt.cmdRun(master, target, "df | grep /var/lib/ceph/osd/ceph-${id}")['return'][0].values()[0].split()[0]
+ salt.cmdRun(master, target, "umount ${partition}")
+ } catch (Exception e) {
+ common.warningMsg(e)
+ }
+ try {
+ // partition = /dev/sdi2
+ partition = salt.cmdRun(master, target, "blkid | grep ${partition_uuid} ")['return'][0].values()[0].split(":")[0]
+ } catch (Exception e) {
+ common.warningMsg(e)
+ }
+ } else {
+ try {
+ // partition = /dev/sdi2
+ partition = salt.cmdRun(master, target, "blkid | grep ${partition_uuid} ")['return'][0].values()[0].split(":")[0]
+ } catch (Exception e) {
+ common.warningMsg(e)
+ }
+ }
+ if (partition?.trim()) {
+ // declare outside of the branches so both are visible to the parted call below
+ def dev = ""
+ def part_id = ""
+ if (partition.contains("nvme")) {
+ // partition = /dev/nvme1n1p2
+ // dev = /dev/nvme1n1
+ dev = partition.replaceAll('p\\d+$', "")
+ // part_id = 2
+ part_id = partition.substring(partition.lastIndexOf("p") + 1).replaceAll("[^0-9]+", "")
+
+ } else {
+ // partition = /dev/sdi2
+ // dev = /dev/sdi
+ dev = partition.replaceAll('\\d+$', "")
+ // part_id = 2
+ part_id = partition.substring(partition.lastIndexOf("/") + 1).replaceAll("[^0-9]+", "")
+ }
+ salt.cmdRun(master, target, "Ignore | parted ${dev} rm ${part_id}")
+ }
+ return
+}
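A short usage sketch of the new Ceph helpers, assuming a prepared Salt connection (saltMaster); the targets, OSD id and partition UUID are hypothetical placeholders:

```groovy
def ceph = new com.mirantis.mk.Ceph()
// wait for the cluster to settle, tolerating the 'noout' flag while it is set
ceph.waitForHealthy(saltMaster, 'I@ceph:mon and mon01*', ['noout'])
// remove the data partition that belonged to OSD 3 on a storage node
def partitionUuid = 'c7f3a2b4-1111-2222-3333-444455556666'   // hypothetical UUID
ceph.removePartition(saltMaster, 'osd001*', partitionUuid, 'data', 3)
```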
diff --git a/src/com/mirantis/mk/Common.groovy b/src/com/mirantis/mk/Common.groovy
index c0059bb..401f4f0 100644
--- a/src/com/mirantis/mk/Common.groovy
+++ b/src/com/mirantis/mk/Common.groovy
@@ -147,6 +147,16 @@
}
/**
+ * Print informational message with sensitive data masked
+ *
+ * @param msg Message to be printed
+ * @param color Colorful output or not
+ * @param replacing List of regexes whose matches should be masked (see printSensitivityMsg)
+ */
+def infoSensitivityMsg(msg, color = true, replacing = []) {
+ printSensitivityMsg(msg, "cyan", replacing)
+}
+
+/**
* Print error message
*
* @param msg
@@ -215,6 +225,25 @@
}
/**
+ * Print message with sensitive data masked
+ *
+ * @param msg Message to be printed
+ * @param color Color to use for output
+ * @param replacing List of regexes whose matches are masked (passwords, logins, etc).
+ * The first capture group () is mandatory; it is kept in the output!
+ * Example:
+ * [/ (OS_PASSWORD=)(.*?)+ /,
+ * / (password = )(.*?)+ /,
+ * / (password )(.*?) / ]
+ */
+def printSensitivityMsg(msg, color, replacing = []) {
+ for (i in replacing) {
+ msg = msg.replaceAll(i, ' $1XXXXXX ')
+ }
+ printMsg(msg, color)
+}
+
+/**
* Traverse directory structure and return list of files
*
* @param path Path to search
@@ -1023,3 +1052,14 @@
Random rand = new Random(System.currentTimeMillis())
return (1..n).collect { pool[rand.nextInt(pool.size())] }.join()
}
+
+/**
+ * Checks whether a string is a semver2-compliant version
+ * @param version String version to check
+*/
+
+def isSemVer(version){
+ // Official regex for Semver2 (https://semver.org/#is-there-a-suggested-regular-expression-regex-to-check-a-semver-string)
+ String semVerRegex = /^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$/
+ return version ==~ semVerRegex
+}
\ No newline at end of file
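The new Common helpers can be exercised as follows; a minimal sketch, with the secret value of course made up:

```groovy
def common = new com.mirantis.mk.Common()

// mask credentials before a command line reaches the console log
def cmd = 'export OS_PASSWORD=SuperSecret123 && openstack token issue'
common.infoSensitivityMsg(cmd, true, [/ (OS_PASSWORD=)(.*?)+ /])   // prints ... OS_PASSWORD=XXXXXX ...

// semver2 validation
assert common.isSemVer('1.2.3-rc.1+build.5')
assert !common.isSemVer('v1.2')
```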
diff --git a/src/com/mirantis/mk/Galera.groovy b/src/com/mirantis/mk/Galera.groovy
index e6a34c1..e5ffe12 100644
--- a/src/com/mirantis/mk/Galera.groovy
+++ b/src/com/mirantis/mk/Galera.groovy
@@ -50,50 +50,44 @@
* of Salt mysql.status function. The result is then parsed, validated and outputed to the user.
*
* @param env Salt Connection object or pepperEnv
- * @param slave Boolean value to enable slave checking (if master in unreachable)
* @param checkTimeSync Boolean value to enable time sync check
* @return resultCode int values used to determine exit status in the calling function
*/
-def verifyGaleraStatus(env, slave=false, checkTimeSync=false) {
+def verifyGaleraStatus(env, checkTimeSync=false) {
def salt = new com.mirantis.mk.Salt()
def common = new com.mirantis.mk.Common()
- def out = ""
- def status = "unknown"
- def testNode = ""
- if (!slave) {
- try {
- galeraMaster = salt.getMinions(env, "I@galera:master")
- common.infoMsg("Current Galera master is: ${galeraMaster}")
- salt.minionsReachable(env, "I@salt:master", "I@galera:master")
- testNode = "I@galera:master"
- } catch (Exception e) {
- common.errorMsg('Galera master is not reachable.')
- common.errorMsg(e.getMessage())
- return 128
- }
- } else {
- try {
- galeraSlaves = salt.getMinions(env, "I@galera:slave")
- common.infoMsg("Testing Galera slave minions: ${galeraSlaves}")
- } catch (Exception e) {
- common.errorMsg("Cannot obtain Galera slave minions list.")
- common.errorMsg(e.getMessage())
- return 129
- }
- for (minion in galeraSlaves) {
+ def mysqlStatusReport = [
+ 'clusterMembersOnPower': [],
+ 'clusterMembersNotAvailable': [],
+ 'clusterMembersInClusterAlive': [],
+ 'clusterMembersNotAlive': [],
+ 'error': 0
+ ]
+
+ try {
+ def clusterMembers = salt.getMinions(env, "I@galera:master or I@galera:slave")
+ for (minion in clusterMembers) {
try {
salt.minionsReachable(env, "I@salt:master", minion)
- testNode = minion
- break
+ mysqlStatusReport['clusterMembersOnPower'] << minion
} catch (Exception e) {
common.warningMsg("Slave '${minion}' is not reachable.")
+ mysqlStatusReport['clusterMembersNotAvailable'] << minion
}
}
+ } catch (Exception e) {
+ common.errorMsg('Cannot obtain Galera minions list.')
+ common.errorMsg(e.getMessage())
+ mysqlStatusReport['error'] = 128
+ return mysqlStatusReport
}
- if (!testNode) {
- common.errorMsg("No Galera slave was reachable.")
- return 130
+
+ if (!mysqlStatusReport['clusterMembersOnPower']) {
+ common.errorMsg("No Galera member was reachable.")
+ mysqlStatusReport['error'] = 130
+ return mysqlStatusReport
}
+
def checkTargets = salt.getMinions(env, "I@xtrabackup:client or I@xtrabackup:server")
for (checkTarget in checkTargets) {
def nodeStatus = salt.minionsReachable(env, 'I@salt:master', checkTarget, null, 10, 5)
@@ -101,13 +95,15 @@
def iostatRes = salt.getIostatValues(['saltId': env, 'target': checkTarget, 'parameterName': "%util", 'output': true])
if (iostatRes == [:]) {
common.errorMsg("Recevived empty response from iostat call on ${checkTarget}. Maybe 'sysstat' package is not installed?")
- return 140
+ mysqlStatusReport['error'] = 140
+ return mysqlStatusReport
}
for (int i = 0; i < iostatRes.size(); i++) {
def diskKey = iostatRes.keySet()[i]
if (!(iostatRes[diskKey].toString().isBigDecimal() && (iostatRes[diskKey].toBigDecimal() < 50 ))) {
common.errorMsg("Disk ${diskKey} has to high i/o utilization. Maximum value is 50 and current value is ${iostatRes[diskKey]}.")
- return 141
+ mysqlStatusReport['error'] = 141
+ return mysqlStatusReport
}
}
}
@@ -115,36 +111,65 @@
common.infoMsg("Disk i/o utilization was checked and everything seems to be in order.")
if (checkTimeSync && !salt.checkClusterTimeSync(env, "I@galera:master or I@galera:slave")) {
common.errorMsg("Time in cluster is desynchronized or it couldn't be detemined. You should fix this issue manually before proceeding.")
- return 131
+ mysqlStatusReport['error'] = 131
+ return mysqlStatusReport
}
+
+ for(member in mysqlStatusReport['clusterMembersOnPower']) {
+ def clusterStatus = getWsrepParameters(env, member, 'wsrep_cluster_status')
+ if (clusterStatus['wsrep_cluster_status']) {
+ mysqlStatusReport['clusterMembersInClusterAlive'] << member
+ } else {
+ mysqlStatusReport['clusterMembersNotAlive'] << member
+ }
+ }
+ if (!mysqlStatusReport['clusterMembersInClusterAlive']) {
+ common.errorMsg("Could not determine mysql status, because all nodes are not connected to cluster.")
+ mysqlStatusReport['error'] = 256
+ return mysqlStatusReport
+ }
+ def testNode = mysqlStatusReport['clusterMembersInClusterAlive'].sort().first()
+
try {
- out = salt.runSaltProcessStep(env, "${testNode}", "mysql.status", [], null, false)
+ mysqlStatusReport['statusRaw'] = salt.runSaltProcessStep(env, testNode, "mysql.status", [], null, false)
} catch (Exception e) {
common.errorMsg('Could not determine mysql status.')
common.errorMsg(e.getMessage())
- return 256
+ mysqlStatusReport['error'] = 256
+ return mysqlStatusReport
}
- if (out) {
+
+ def status = "unknown"
+ def galeraMasterNode = salt.getReturnValues(salt.getPillar(env, testNode, "galera:master:enabled")) ? true : false
+
+ if (mysqlStatusReport['statusRaw']) {
try {
- status = validateAndPrintGaleraStatusReport(env, out, testNode)
+ status = validateAndPrintGaleraStatusReport(env, mysqlStatusReport['statusRaw'], testNode, galeraMasterNode)
} catch (Exception e) {
common.errorMsg('Could not parse the mysql status output. Check it manually.')
common.errorMsg(e.getMessage())
- return 1
}
} else {
- common.errorMsg("Mysql status response unrecognized or is empty. Response: ${out}")
- return 1024
+ common.errorMsg("Mysql status response unrecognized or is empty. Response: ${mysqlStatusReport['statusRaw']}")
}
+ if (mysqlStatusReport['clusterMembersNotAvailable']) {
+ common.errorMsg("Next nodes are unavailable: ${mysqlStatusReport['clusterMembersNotAvailable'].join(',')}")
+ }
+ if (mysqlStatusReport['clusterMembersNotAlive']) {
+ common.errorMsg("Next nodes are not connected to cluster: ${mysqlStatusReport['clusterMembersNotAlive'].join(',')}")
+ }
+
if (status == "OK") {
common.infoMsg("No errors found - MySQL status is ${status}.")
- return 0
+ return mysqlStatusReport
} else if (status == "unknown") {
common.warningMsg('MySQL status cannot be detemined')
- return 1
+ mysqlStatusReport['error'] = 1
+ return mysqlStatusReport
} else {
common.errorMsg("Errors found.")
- return 2
+ mysqlStatusReport['error'] = 2
+ return mysqlStatusReport
}
}
@@ -154,13 +179,12 @@
@return status "OK", "ERROR" or "uknown" depending on result of validation
*/
-def validateAndPrintGaleraStatusReport(env, out, minion) {
+def validateAndPrintGaleraStatusReport(env, out, minion, nodeRoleMaster=false) {
def salt = new com.mirantis.mk.Salt()
def common = new com.mirantis.mk.Common()
- if (minion == "I@galera:master") {
- role = "master"
- } else {
- role = "slave"
+ def role = 'slave'
+ if (nodeRoleMaster) {
+ role = 'master'
}
sizeOut = salt.getReturnValues(salt.getPillar(env, minion, "galera:${role}:members"))
expected_cluster_size = sizeOut.size()
@@ -277,15 +301,18 @@
if (seqno.isNumber()) {
seqno = seqno.toInteger()
} else {
- seqno = -2
- }
- highestSeqno = lastNode.get('seqno')
- if (seqno > highestSeqno) {
- lastNode << [ip: "${member.host}", seqno: seqno]
+ // in case /var/lib/mysql/grastate.dat has no seqno at all - set it to 0,
+ // so the node will be recovered if no other failed node is found
+ seqno = 0
}
} catch (Exception e) {
common.warningMsg("Could not determine 'seqno' value for node ${member.host} ")
common.warningMsg(e.getMessage())
+ seqno = 0
+ }
+ highestSeqno = lastNode.get('seqno')
+ if (seqno > highestSeqno) {
+ lastNode << [ip: "${member.host}", seqno: seqno]
}
}
}
@@ -297,88 +324,83 @@
}
/**
+ * Wrapper around the Mysql systemd service
+ * @param env Salt Connection object or pepperEnv
+ * @param targetNode Node to apply changes
+ * @param action Service action to perform (e.g. start, stop, restart)
+ * @param checkStatus Whether to check the status of Mysql afterwards
+ * @param checkState State of service to check
+*/
+def manageServiceMysql(env, targetNode, action, checkStatus=true, checkState='running') {
+ def salt = new com.mirantis.mk.Salt()
+ salt.runSaltProcessStep(env, targetNode, "service.${action}", ['mysql'])
+ if (checkStatus) {
+ try {
+ salt.commandStatus(env, targetNode, 'service mysql status', checkState)
+ } catch (Exception er) {
+ input message: "Database is not running please fix it first and only then click on PROCEED."
+ }
+ }
+}
+
+/**
* Restores Galera cluster
* @param env Salt Connection object or pepperEnv
- * @param runRestoreDb Boolean to determine if the restoration of DB should be run as well
+ * @param galeraStatus Map, Status of Galera cluster output from verifyGaleraStatus func
+ * @param restoreDb Run restore DB procedure
* @return output of salt commands
*/
-def restoreGaleraCluster(env, runRestoreDb=true) {
+def restoreGaleraCluster(env, galeraStatus, restoreDb=true) {
def salt = new com.mirantis.mk.Salt()
def common = new com.mirantis.mk.Common()
- try {
- salt.runSaltProcessStep(env, 'I@galera:slave', 'service.stop', ['mysql'])
- } catch (Exception er) {
- common.warningMsg('Mysql service already stopped')
- }
- try {
- salt.runSaltProcessStep(env, 'I@galera:master', 'service.stop', ['mysql'])
- } catch (Exception er) {
- common.warningMsg('Mysql service already stopped')
- }
- lastNodeTarget = getGaleraLastShutdownNode(env)
- try {
- salt.cmdRun(env, 'I@galera:slave', "rm /var/lib/mysql/ib_logfile*")
- } catch (Exception er) {
- common.warningMsg('Files are not present')
- }
- try {
- salt.cmdRun(env, 'I@galera:slave', "rm /var/lib/mysql/grastate.dat")
- } catch (Exception er) {
- common.warningMsg('Files are not present')
- }
- try {
- salt.cmdRun(env, lastNodeTarget, "mkdir /root/mysql/mysql.bak")
- } catch (Exception er) {
- common.warningMsg('Directory already exists')
- }
- try {
- salt.cmdRun(env, lastNodeTarget, "rm -rf /root/mysql/mysql.bak/*")
- } catch (Exception er) {
- common.warningMsg('Directory already empty')
- }
- try {
- salt.cmdRun(env, lastNodeTarget, "mv /var/lib/mysql/* /root/mysql/mysql.bak")
- } catch (Exception er) {
- common.warningMsg('Files were already moved')
- }
- try {
- salt.runSaltProcessStep(env, lastNodeTarget, 'file.remove', ["/var/lib/mysql/.galera_bootstrap"])
- } catch (Exception er) {
- common.warningMsg('File is not present')
+ def nodesToRecover = []
+ def total = false // whole cluster
+ if (galeraStatus['clusterMembersNotAlive']) {
+ nodesToRecover = galeraStatus['clusterMembersNotAlive']
+ if (galeraStatus['clusterMembersInClusterAlive'].size() == 0) {
+ total = true
+ }
+ } else {
+ nodesToRecover = galeraStatus['clusterMembersInClusterAlive']
+ total = true
}
- // make sure that gcom parameter is empty
- salt.cmdRun(env, lastNodeTarget, "sed -i '/gcomm/c\\wsrep_cluster_address=\"gcomm://\"' /etc/mysql/my.cnf")
+ def lastNodeTarget = ''
+ if (total) {
+ manageServiceMysql(env, 'I@galera:slave', 'stop', true, 'inactive')
+ manageServiceMysql(env, 'I@galera:master', 'stop', true, 'inactive')
+ lastNodeTarget = getGaleraLastShutdownNode(env) // in case the master was already down before
+ salt.cmdRun(env, "( I@galera:master or I@galera:slave ) and not ${lastNodeTarget}", "rm -f /var/lib/mysql/ib_logfile*")
+ salt.cmdRun(env, "( I@galera:master or I@galera:slave ) and not ${lastNodeTarget}", "rm -f /var/lib/mysql/grastate.dat")
+ } else {
+ lastNodeTarget = nodesToRecover.join(' or ')
+ manageServiceMysql(env, lastNodeTarget, 'stop', true, 'inactive')
+ }
- // run restore of DB
- if (runRestoreDb) {
+ if (restoreDb) {
+ def timestamp = common.getDatetime()
+ salt.cmdRun(env, lastNodeTarget, "mkdir -p /root/mysql")
+ def bakDir = salt.getReturnValues(salt.cmdRun(env, lastNodeTarget, "mktemp -d --suffix='_${timestamp}' /root/mysql/mysql.bak.XXXXXX", false))
+ salt.cmdRun(env, lastNodeTarget, "mv /var/lib/mysql/* ${bakDir} || echo 'Nothing to backup from directory /var/lib/mysql/'")
+ }
+ if (total) {
+ // make sure that the gcomm parameter is empty
+ salt.cmdRun(env, lastNodeTarget, "sed -i '/gcomm/c\\wsrep_cluster_address=\"gcomm://\"' /etc/mysql/my.cnf")
+ } else if (!restoreDb) {
+ // node rejoin
+ salt.cmdRun(env, lastNodeTarget, "rm -f /var/lib/mysql/ib_logfile*")
+ salt.cmdRun(env, lastNodeTarget, "rm -f /var/lib/mysql/grastate.dat")
+ }
+
+ if (restoreDb) {
restoreGaleraDb(env, lastNodeTarget)
}
- // start mysql service on the last node
- salt.runSaltProcessStep(env, lastNodeTarget, 'service.start', ['mysql'])
+ manageServiceMysql(env, lastNodeTarget, 'start')
- // wait until mysql service on the last node is up
- try {
- salt.commandStatus(env, lastNodeTarget, 'service mysql status', 'running')
- } catch (Exception er) {
- input message: "Database is not running please fix it first and only then click on PROCEED."
+ if (total) {
+ manageServiceMysql(env, "( I@galera:master or I@galera:slave ) and not ${lastNodeTarget}", 'start')
+ salt.runSaltProcessStep(env, lastNodeTarget, 'state.sls_id', ['galera_config', 'galera'])
}
-
- // start mysql services on the rest of the nodes
- salt.runSaltProcessStep(env, "I@galera:master and not ${lastNodeTarget}", 'service.start', ['mysql'])
- salt.runSaltProcessStep(env, "I@galera:slave and not ${lastNodeTarget}", 'service.start', ['mysql'])
-
- // wait until mysql service on the rest of the nodes is up
- try {
- salt.commandStatus(env, "( I@galera:master or I@galera:slave ) and not ${lastNodeTarget}", 'service mysql status', 'running')
- } catch (Exception er) {
- input message: "Database is not running please fix it first and only then click on PROCEED."
- }
-
- // apply any changes in configuration
- salt.enforceState(env, lastNodeTarget, 'galera')
-
}
/**
@@ -387,13 +409,16 @@
* @param targetNode Node to be targeted
*/
def restoreGaleraDb(env, targetNode) {
+ def salt = new com.mirantis.mk.Salt()
def backup_dir = salt.getReturnValues(salt.getPillar(env, targetNode, 'xtrabackup:client:backup_dir'))
if(backup_dir == null || backup_dir.isEmpty()) { backup_dir='/var/backups/mysql/xtrabackup' }
salt.runSaltProcessStep(env, targetNode, 'file.remove', ["${backup_dir}/dbrestored"])
- salt.cmdRun(env, 'I@xtrabackup:client', "su root -c 'salt-call state.sls xtrabackup'")
+ salt.enforceState(['saltId': env, 'target': targetNode, 'state': 'xtrabackup.client'])
+ salt.enforceState(['saltId': env, 'target': targetNode, 'state': 'xtrabackup.client.restore'])
}
def restoreGaleraDb(env) {
+ def common = new com.mirantis.mk.Common()
common.warningMsg("This method was renamed to 'restoreGaleraCluster'. Please change your pipeline to use this call instead! If you think that you really wanted to call 'restoreGaleraDb' you may be missing 'targetNode' parameter in you call.")
return restoreGaleraCluster(env)
}
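Because verifyGaleraStatus now returns a status map instead of an integer exit code, calling pipelines need to inspect the 'error' key and pass the whole map on to restoreGaleraCluster. A hedged sketch of the adapted call, assuming a prepared pepperEnv connection:

```groovy
def common = new com.mirantis.mk.Common()
def galera = new com.mirantis.mk.Galera()

def galeraStatus = galera.verifyGaleraStatus(pepperEnv, false)   // checkTimeSync disabled
if (galeraStatus['error'] != 0) {
    common.errorMsg("Galera verification failed with code ${galeraStatus['error']}")
    // recover the reported members without restoring the database from backup
    galera.restoreGaleraCluster(pepperEnv, galeraStatus, false)
}
```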
diff --git a/src/com/mirantis/mk/Gerrit.groovy b/src/com/mirantis/mk/Gerrit.groovy
index b65432d..954600b 100644
--- a/src/com/mirantis/mk/Gerrit.groovy
+++ b/src/com/mirantis/mk/Gerrit.groovy
@@ -383,3 +383,78 @@
}
}
}
+
+/**
+ * Prepare and upload Gerrit commit from prepared repo
+ * @param LinkedHashMap params dict with parameters
+ * venvDir - Absolute path to virtualenv dir
+ * gerritCredentials - credentialsId
+ * gerritHost - gerrit host
+ * gerritPort - gerrit port
+ * repoDir - path to repo dir
+ * repoProject - repo name
+ * repoBranch - repo branch
+ * changeCommitComment - comment for commit message
+ * changeAuthorName - change author
+ * changeAuthorEmail - author email
+ * changeTopic - change topic
+ * gitRemote - git remote
+ * returnChangeInfo - whether to return info about uploaded change
+ *
+ * @return map with change info if returnChangeInfo set to true
+*/
+def prepareGerritAutoCommit(LinkedHashMap params) {
+ def common = new com.mirantis.mk.Common()
+ def git = new com.mirantis.mk.Git()
+ String venvDir = params.get('venvDir')
+ String gerritCredentials = params.get('gerritCredentials')
+ String gerritHost = params.get('gerritHost', 'gerrit.mcp.mirantis.net')
+ String gerritPort = params.get('gerritPort', '29418')
+ String gerritUser = common.getCredentialsById(gerritCredentials, 'sshKey').username
+ String repoDir = params.get('repoDir')
+ String repoProject = params.get('repoProject')
+ String repoBranch = params.get('repoBranch', 'master')
+ String changeCommitComment = params.get('changeCommitComment')
+ String changeAuthorName = params.get('changeAuthorName', 'MCP-CI')
+ String changeAuthorEmail = params.get('changeAuthorEmail', 'mcp-ci-jenkins@ci.mcp.mirantis.net')
+ String changeTopic = params.get('changeTopic', 'auto_ci')
+ Boolean returnChangeInfo = params.get('returnChangeInfo', false)
+ String gitRemote = params.get('gitRemote', '')
+ if (! gitRemote) {
+ dir(repoDir) {
+ gitRemote = sh(
+ script:
+ 'git remote -v | head -n1 | cut -f1',
+ returnStdout: true,
+ ).trim()
+ }
+ }
+ def gerritAuth = ['PORT': gerritPort, 'USER': gerritUser, 'HOST': gerritHost ]
+ def changeParams = ['owner': gerritUser, 'status': 'open', 'project': repoProject, 'branch': repoBranch, 'topic': changeTopic]
+ // find if there is old commit present
+ def gerritChange = findGerritChange(gerritCredentials, gerritAuth, changeParams)
+ def changeId = ''
+ if (gerritChange) {
+ try {
+ def jsonChange = readJSON text: gerritChange
+ changeId = "Change-Id: ${jsonChange['id']}".toString()
+ } catch (Exception error) {
+ common.errorMsg("Can't parse ouput from Gerrit. Check that user ${changeAuthorName} does not have several \
+ open commits to ${repoProject} repo and ${repoBranch} branch with topic ${changeTopic}")
+ throw error
+ }
+ }
+ def commitMessage =
+ """${changeCommitComment}
+
+ |${changeId}
+ """.stripMargin()
+ git.commitGitChanges(repoDir, commitMessage, changeAuthorEmail, changeAuthorName, false)
+ //post change
+ postGerritReview(gerritCredentials, venvDir, repoDir, changeAuthorName, changeAuthorEmail, gitRemote, changeTopic, repoBranch)
+ if (returnChangeInfo) {
+ gerritChange = findGerritChange(gerritCredentials, gerritAuth, changeParams)
+ jsonChange = readJSON text: gerritChange
+ return getGerritChange(gerritUser, gerritHost, jsonChange['number'], gerritCredentials, true)
+ }
+}
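A minimal sketch of driving prepareGerritAutoCommit from a pipeline; the virtualenv is expected to contain git-review, and the credentials id, project and comment below are placeholders:

```groovy
def gerrit = new com.mirantis.mk.Gerrit()
def changeInfo = gerrit.prepareGerritAutoCommit([
    'venvDir'            : "${env.WORKSPACE}/gitreview-venv",
    'gerritCredentials'  : 'mcp-ci-gerrit',          // placeholder credentials id
    'repoDir'            : "${env.WORKSPACE}/releases",
    'repoProject'        : 'kaas/releases',
    'repoBranch'         : 'master',
    'changeCommitComment': 'Bump component versions',
    'changeTopic'        : 'auto_ci',
    'returnChangeInfo'   : true,
])
echo "Uploaded change: ${changeInfo}"
```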
diff --git a/src/com/mirantis/mk/Git.groovy b/src/com/mirantis/mk/Git.groovy
index 5743a61..94eb55b 100644
--- a/src/com/mirantis/mk/Git.groovy
+++ b/src/com/mirantis/mk/Git.groovy
@@ -16,21 +16,23 @@
* @param poll Enable git polling (default true)
* @param timeout Set checkout timeout (default 10)
* @param depth Git depth param (default 0 means no depth)
+ * @param reference Git reference param to checkout (default empty, i.e. no reference)
*/
-def checkoutGitRepository(path, url, branch, credentialsId = null, poll = true, timeout = 10, depth = 0){
+def checkoutGitRepository(path, url, branch, credentialsId = null, poll = true, timeout = 10, depth = 0, reference = ''){
+ def branch_name = reference ? 'FETCH_HEAD' : "*/${branch}"
dir(path) {
checkout(
changelog:true,
poll: poll,
scm: [
$class: 'GitSCM',
- branches: [[name: "*/${branch}"]],
+ branches: [[name: branch_name]],
doGenerateSubmoduleConfigurations: false,
extensions: [
[$class: 'CheckoutOption', timeout: timeout],
- [$class: 'CloneOption', depth: depth, noTags: false, reference: '', shallow: depth > 0, timeout: timeout]],
+ [$class: 'CloneOption', depth: depth, noTags: false, shallow: depth > 0, timeout: timeout]],
submoduleCfg: [],
- userRemoteConfigs: [[url: url, credentialsId: credentialsId]]]
+ userRemoteConfigs: [[url: url, credentialsId: credentialsId, refspec: reference]]]
)
sh(returnStdout: true, script: 'git rev-parse HEAD').trim()
}
@@ -406,3 +408,116 @@
return "${next_version}-${pre_release_meta.join('.')}-${commit_sha}"
}
}
+
+
+/**
+ * Method for uploading a change request
+ *
+ * @param repo String which contains path to directory with git repository
+ * @param credentialsId Credentials id to use for accessing target repositories
+ * @param commit Id of commit which should be uploaded
+ * @param branch Name of the branch for uploading
+ * @param topic Topic of the change
+ * @param remote Name of the git remote to push to (default 'origin')
+ *
+ */
+def pushForReview(repo, credentialsId, commit, branch, topic='', remote='origin') {
+ def common = new com.mirantis.mk.Common()
+ def ssh = new com.mirantis.mk.Ssh()
+ common.infoMsg("Uploading commit ${commit} to ${branch} for review...")
+
+ def pushArg = "${commit}:refs/for/${branch}"
+ def process = [:]
+ if (topic){
+ pushArg += '%topic=' + topic
+ }
+ dir(repo){
+ ssh.prepareSshAgentKey(credentialsId)
+ ssh.runSshAgentCommand("git push ${remote} ${pushArg}")
+ }
+}
+
+/**
+ * Generates a commit message with a predefined or auto-generated change id. If the change
+ * id isn't provided, changeIdSeed and the current sha of the git head are used to
+ * generate the commit change id.
+ *
+ * @param repo String which contains path to directory with git repository
+ * @param message Commit message main part
+ * @param changeId User defined change-id usually sha1 hash
+ * @param changeIdSeed Custom part of change id which can be added during change id generation
+ *
+ *
+ * @return commitMessage Multiline String with generated commit message
+ */
+def genCommitMessage(repo, message, changeId = '', changeIdSeed = ''){
+ def git = new com.mirantis.mk.Git()
+ def common = new com.mirantis.mk.Common()
+ def commitMessage
+ def id = changeId
+ def seed = changeIdSeed
+ if (!id) {
+ if (!seed){
+ seed = common.generateRandomHashString(32)
+ }
+ def head_sha
+ dir(repo){
+ head_sha = git.getGitCommit()
+ }
+ id = 'I' + sh(script: 'echo -n ' + seed + head_sha + ' | sha1sum | awk \'{print $1}\'', returnStdout: true)
+ }
+ commitMessage =
+ """${message}
+
+ |Change-Id: ${id}
+ """.stripMargin()
+
+ return commitMessage
+}
+
+/**
+ * Update (or create if cannot find) gerrit change request
+ *
+ * @param params Map of parameters to customize commit
+ * - gerritAuth A map containing information about Gerrit. Should include HOST, PORT and USER
+ * - credentialsId Jenkins credentials id for gerrit
+ * - repo Local directory with repository
+ * - comment Commit comment
+ * - change_id_seed Custom part of change id which can be added during change id generation
+ * - branch Name of the branch for uploading
+ * - topic Topic of the change
+ * - project Gerrit project to search in for gerrit change request
+ * - status Change request's status to search for
+ * - changeAuthorEmail Author's email of the change
+ * - changeAuthorName Author's name of the change
+ */
+def updateChangeRequest(Map params) {
+ def gerrit = new com.mirantis.mk.Gerrit()
+
+ def commitMessage
+ def auth = params['gerritAuth']
+ def creds = params['credentialsId']
+ def repo = params['repo']
+ def comment = params['comment']
+ def change_id_seed = params.get('change_id_seed', JOB_NAME)
+ def branch = params['branch']
+ def topic = params['topic']
+ def project = params['project']
+ def status = params.get('status', 'open')
+ def changeAuthorEmail = params['changeAuthorEmail']
+ def changeAuthorName = params['changeAuthorName']
+
+ def changeParams = ['owner': auth['USER'], 'status': status, 'project': project, 'branch': branch, 'topic': topic]
+ def gerritChange = gerrit.findGerritChange(creds, auth, changeParams)
+ def changeId
+ def commit
+ if (gerritChange) {
+ def jsonChange = readJSON text: gerritChange
+ changeId = jsonChange['id']
+ }
+ commitMessage = genCommitMessage(repo, comment, changeId, change_id_seed)
+ commitGitChanges(repo, commitMessage, changeAuthorEmail, changeAuthorName, false, false)
+ dir(repo){
+ commit = getGitCommit()
+ }
+ pushForReview(repo, creds, commit, branch, topic)
+}
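The new Git helpers let a pipeline create or update a Gerrit change request without shelling out manually. A sketch of updateChangeRequest, with all Gerrit coordinates as placeholders:

```groovy
def git = new com.mirantis.mk.Git()
git.updateChangeRequest([
    'gerritAuth'       : ['HOST': 'gerrit.mcp.mirantis.net', 'PORT': '29418', 'USER': 'mcp-ci'],
    'credentialsId'    : 'mcp-ci-gerrit',            // placeholder credentials id
    'repo'             : "${env.WORKSPACE}/reclass-model",
    'comment'          : 'Update component versions',
    'branch'           : 'master',
    'topic'            : 'auto_ci',
    'project'          : 'salt-models/example',      // placeholder project
    'changeAuthorEmail': 'mcp-ci-jenkins@ci.mcp.mirantis.net',
    'changeAuthorName' : 'MCP-CI',
])
```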
diff --git a/src/com/mirantis/mk/GoogleCloudStorage.groovy b/src/com/mirantis/mk/GoogleCloudStorage.groovy
index 5bf5ad7..7206715 100644
--- a/src/com/mirantis/mk/GoogleCloudStorage.groovy
+++ b/src/com/mirantis/mk/GoogleCloudStorage.groovy
@@ -97,30 +97,46 @@
* @param dest Destination path in Google Storage, in format: gs://<path>
* @param acls ACLs for uploaded files
* @param entireTree Copy entire directory to bucket
+ * @param useDifferentGcloudSDKDir Allow using different SDK config dirs/accounts in parallel
+ * @param gcloudSDKDir Actual path to SDK config dir, defaults to gcloud default: /home/<user>/.gcloud
+ * or if HOME is unset to /tmp/.gcloud
+ * @param revokeAccount Revoke account after actions
*
* Returns URLs list of uploaded files
*/
def uploadArtifactToGoogleStorageBucket(Map config) {
def gcloudDir = config.get('gcloudDir', '/tmp/gcloud')
+ def gcloudSDKDir = config.get('gcloudSDKDir', "${env.HOME ?: '/tmp'}/.gcloud")
def creds = config.get('creds')
def project = config.get('project')
def acls = config.get('acls', ['AllUsers:R'])
def sources = config.get('sources')
def dest = config.get('dest')
def entireTree = config.get('entireTree', false)
+ def useDifferentGcloudSDKDir = config.get('useDifferentGcloudSDKDir', true)
+ def revokeAccount = config.get('revokeAccount', true)
def fileURLs = []
if (!checkGcloudBinary(gcloudDir)) {
downloadGcloudUtil(gcloudDir)
}
- try {
- authGcloud(gcloudDir, creds, project)
- for(String src in sources) {
- def fileURL = cpFile(gcloudDir, src, dest, entireTree)
- setAcl(gcloudDir, fileURL, acls)
- fileURLs << fileURL
+ if (useDifferentGcloudSDKDir) {
+ gcloudSDKDir = "${gcloudSDKDir}_" + UUID.randomUUID().toString()
+ sh "mkdir -p ${gcloudSDKDir}"
+ }
+ withEnv(["CLOUDSDK_CONFIG=${gcloudSDKDir}"]) {
+ try {
+ authGcloud(gcloudDir, creds, project)
+ for(String src in sources) {
+ def fileURL = cpFile(gcloudDir, src, dest, entireTree)
+ setAcl(gcloudDir, fileURL, acls)
+ fileURLs << fileURL
+ }
+ } finally {
+ if (revokeAccount) {
+ revokeGcloud(gcloudDir)
+ sh "rm -rf ${gcloudSDKDir}"
+ }
}
- } finally {
- revokeGcloud(gcloudDir)
}
return fileURLs
}
\ No newline at end of file
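With the new parameters, each parallel upload can get its own isolated CLOUDSDK_CONFIG directory that is revoked and removed afterwards. A sketch with placeholder credentials, project and bucket:

```groovy
def gcs = new com.mirantis.mk.GoogleCloudStorage()
def urls = gcs.uploadArtifactToGoogleStorageBucket([
    'creds'                   : 'gcp-service-account-key',             // placeholder credentials id
    'project'                 : 'my-gcp-project',                      // placeholder project
    'sources'                 : ["${env.WORKSPACE}/artifacts/image.qcow2"],
    'dest'                    : 'gs://my-bucket/images',               // placeholder bucket
    'useDifferentGcloudSDKDir': true,    // isolate CLOUDSDK_CONFIG for this run
    'revokeAccount'           : true,    // revoke and clean up the SDK dir afterwards
])
echo "Uploaded files: ${urls}"
```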
diff --git a/src/com/mirantis/mk/Helm.groovy b/src/com/mirantis/mk/Helm.groovy
new file mode 100644
index 0000000..72d95cb
--- /dev/null
+++ b/src/com/mirantis/mk/Helm.groovy
@@ -0,0 +1,148 @@
+package com.mirantis.mk
+
+/**
+ *
+ * Functions to work with Helm
+ *
+ */
+
+/**
+ * Build index file for helm chart
+ * @param extra_params additional params, e.g. --url repository_URL
+ * @param charts_dir path to the directory with packaged charts
+ */
+
+def helmRepoIndex(extra_params='', charts_dir='.'){
+ sh("helm repo index ${extra_params} ${charts_dir}")
+}
+
+/**
+ * Rebuild index file for helm chart repo
+ * @param helmRepoUrl repository with helm charts
+ * @param md5Remote expected md5 sum of the upstream index.yaml, used for validation
+ */
+
+def helmMergeRepoIndex(helmRepoUrl, md5Remote='') {
+ def common = new com.mirantis.mk.Common()
+
+ def helmRepoDir = '.'
+ def helmExtraParams = "--url ${helmRepoUrl}"
+
+ def indexRes = common.shCmdStatus("wget -O index-upstream.yaml ${helmRepoUrl}/index.yaml")
+ if (indexRes['status']){
+ if (indexRes['status'] == 8 && indexRes['stderr'].contains('ERROR 404') && !md5Remote) {
+ common.warningMsg("Index.yaml not found in ${helmRepoUrl} and will be fully regenerated")
+ } else {
+ error("Something went wrong during index.yaml download: ${indexRes['stderr']}")
+ }
+ } else {
+ if (md5Remote) {
+ def md5Local = sh(script: "md5sum index-upstream.yaml | cut -d ' ' -f 1", returnStdout: true).readLines()[0]
+ if (md5Local != md5Remote) {
+ error 'Target repository already exists, but the upstream index.yaml is broken or not found'
+ }
+ }
+ helmExtraParams += " --merge index-upstream.yaml"
+ }
+ helmRepoIndex(helmExtraParams, helmRepoDir)
+}
+
+/**
+ * Generates version for helm chart based on information from git repository. Tries to search
+ * first parent git tag using pattern '[0-9]*-{tagSuffix}', if found that tag will be used
+ * in final version, if not found - version will be formed as '{defaultVersion}-{tagSuffix}'. Number
+ * of commits since last tag or sha of current commit can be added to version.
+ *
+ * @param repoDir string, path to a directory with git repository of helm charts
+ * @param devVersion Boolean, if set to true development version will be calculated e.g 0.1.0-mcp-{sha of current commit}
+ * @param increment Boolean, if set to true patch version will be incremented (e.g 0.1.0 -> 0.1.1)
+ * @param defaultVersion string, value of version which will be used in case no tags found. should be semver2 compatible
+ * @param tagSuffix string, suffix which will be used for finding tags in the git repository; if no tag is found,
+ * it will be added to {defaultVersion}, e.g. {defaultVersion}-{tagSuffix}
+ */
+
+def generateChartVersionFromGit(repoDir, devVersion = true, increment = false, defaultVersion = '0.1.0', tagSuffix = 'mcp') {
+ def common = new com.mirantis.mk.Common()
+ def git = new com.mirantis.mk.Git()
+ String initialVersion = "${defaultVersion}-${tagSuffix}"
+ String countRange
+ String versionData
+ String tagPattern = "[0-9]*-${tagSuffix}"
+ dir(repoDir){
+ Map cmd = common.shCmdStatus("git describe --tags --first-parent --abbrev=0 --match ${tagPattern}")
+ String lastTag = cmd['stdout'].trim()
+
+ if (cmd['status'] != 0){
+ if (cmd['stderr'].contains('fatal: No names found, cannot describe anything')){
+ common.warningMsg("No parent git tag found, using initial version ${initialVersion}")
+ versionData = initialVersion
+ countRange = 'HEAD'
+ } else {
+ error("Something went wrong, cannot find git information ${cmd['stderr']}")
+ }
+ } else {
+ versionData = lastTag
+ countRange = "${lastTag}..HEAD"
+ }
+ List versionParts = versionData.tokenize('-')
+
+ if (versionParts.size() == 2 && common.isSemVer(versionData) && versionParts[1] == tagSuffix){
+ String commitsSinceTag = sh(script: "git rev-list --count ${countRange}", returnStdout: true).trim()
+ String commitSha = sh(script: 'git rev-parse --short=7 HEAD', returnStdout: true).trim()
+
+ if (commitsSinceTag == '0'){
+ return versionData
+ }
+
+ if (devVersion){
+ versionParts.add(commitSha)
+ } else {
+ versionParts.add(commitsSinceTag)
+ }
+ // Patch version will be incremented e.g. 0.1.0 -> 0.1.1
+ if (increment) {
+ versionParts[0] = git.incrementVersion(versionParts[0])
+ }
+ return versionParts.join('-')
+ }
+ error "Version ${versionData} doesn't contain required suffix ${tagSuffix} or not in semver2 format"
+ }
+}
+
+/**
+ * Takes a list of dependencies with versions and sets the version for each dependency in requirements.yaml. If a dependency isn't
+ * found in requirements.yaml or requirements.yaml does not exist - does nothing.
+ *
+ * @param chartPath string, path to a directory with helm chart
+ * @param dependencies list of hashes with names and versions of dependencies in format:
+ * [['name': 'chart-name1', 'version': '0.1.0-myversion'], ['name': 'chart-name2', 'version': '0.2.0-myversion']]
+ */
+
+def setChartDependenciesVersion(chartPath, List dependencies){
+ def common = new com.mirantis.mk.Common()
+ if (!dependencies){
+ error 'No list of target dependencies is specified'
+ }
+ def reqsFilePath = "${chartPath}/requirements.yaml"
+ def chartYaml = readYaml file: "${chartPath}/Chart.yaml"
+ def reqsUpdateNeeded = false
+ def reqsMap = [:]
+ if (fileExists(reqsFilePath)){
+ reqsMap = readYaml file: reqsFilePath
+ for (i in dependencies) {
+ for (item in reqsMap.get('dependencies', [])){
+ if (item['name'] == i['name']){
+ common.infoMsg("Set version ${i['version']} for dependency ${i['name']} in chart ${chartYaml['name']}")
+ item['version'] = i['version']
+ reqsUpdateNeeded = true
+ }
+ }
+ }
+ }
+ if (reqsUpdateNeeded){
+ sh "rm ${reqsFilePath}"
+ writeYaml file: reqsFilePath, data: reqsMap
+ } else {
+ common.warningMsg("requirements.yaml doesn't exist at path ${reqsFilePath} or chart doesn't contain ${dependencies}, nothing to set")
+ }
+}
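A sketch of how the Helm helpers fit together in a chart build: derive a version from git, pin it into the chart's dependencies, then rebuild the repository index. Paths, chart names and the repo URL are placeholders:

```groovy
def helm = new com.mirantis.mk.Helm()
def chartDir = "${env.WORKSPACE}/charts/mychart"                      // placeholder chart path

// e.g. '0.2.0-mcp' on a tagged commit, or '0.2.0-mcp-<short sha>' for a dev build
def chartVersion = helm.generateChartVersionFromGit(chartDir, true)
helm.setChartDependenciesVersion(chartDir, [['name': 'common-lib', 'version': chartVersion]])

// regenerate index.yaml, merging with the already published one when it exists
helm.helmMergeRepoIndex('https://artifactory.example.com/artifactory/helm-local')
```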
diff --git a/src/com/mirantis/mk/JenkinsUtils.groovy b/src/com/mirantis/mk/JenkinsUtils.groovy
index 4513f2d..780ccab 100644
--- a/src/com/mirantis/mk/JenkinsUtils.groovy
+++ b/src/com/mirantis/mk/JenkinsUtils.groovy
@@ -231,3 +231,43 @@
}
return buildsMap
}
+
+/**
+ * Check that dependency jobs passed successfully
+
+ * @param block (bool) Block child jobs in case of parent dependencies failed
+ * @return (map)[
+ * status: (bool) True if there are no failed dependencies
+ * log: (string) Verbose description
+ * ]
+ */
+def checkDependencyJobs(block = true) {
+ def common = new com.mirantis.mk.Common()
+ depList = []
+ if (env.TRIGGER_DEPENDENCY_KEYS){
+ common.infoMsg('Job may depend on parent jobs, checking if dependency jobs exist...')
+ depKeys = env.TRIGGER_DEPENDENCY_KEYS.toString()
+ depList = depKeys.split()
+ if (depList){
+ common.infoMsg('Dependency jobs list: ' + depList)
+ for (String item : depList) {
+ prjName = item.replaceAll('[^a-zA-Z0-9]+', '_')
+ triggerResult = 'TRIGGER_' + prjName.toUpperCase() + '_BUILD_RESULT'
+ triggerJobName = 'TRIGGER_' + prjName.toUpperCase() + '_BUILD_NAME'
+ triggerJobBuild = 'TRIGGER_' + prjName.toUpperCase() + '_BUILD_NUMBER'
+ if (env.getProperty(triggerResult) != 'SUCCESS'){
+ msg = "Dependency job ${env.getProperty(triggerJobName)} #${env.getProperty(triggerJobBuild)} is ${env.getProperty(triggerResult)}"
+ common.warningMsg(msg)
+ if (block){
+ currentBuild.result = 'NOT_BUILT'
+ currentBuild.description = msg
+ }
+ return [status: false, log: msg, jobs: depList]
+ }
+ }
+ }
+ } else {
+ common.infoMsg('There are no job dependencies')
+ }
+ return [status: true, log: '', jobs: depList]
+}
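A sketch of guarding a child job with the new dependency check; TRIGGER_DEPENDENCY_KEYS and the TRIGGER_*_BUILD_RESULT variables are expected to be injected by the upstream trigger:

```groovy
def jenkinsUtils = new com.mirantis.mk.JenkinsUtils()
// block this build if any parent dependency job did not finish with SUCCESS
def depCheck = jenkinsUtils.checkDependencyJobs(true)
if (!depCheck['status']) {
    error("Aborting: ${depCheck['log']}")
}
```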
diff --git a/src/com/mirantis/mk/KaasReleaseWorkflow.groovy b/src/com/mirantis/mk/KaasReleaseWorkflow.groovy
new file mode 100644
index 0000000..6cc39e3
--- /dev/null
+++ b/src/com/mirantis/mk/KaasReleaseWorkflow.groovy
@@ -0,0 +1,160 @@
+package com.mirantis.mk
+/**
+ * Checkout the KaaS release metadata repo, either cloning it from scratch or reusing an
+ * existing checkout, depending on the cloneRepo parameter
+ *
+ * @param params map with expected parameters:
+ * - metadataCredentialsId
+ * - metadataGitRepoUrl
+ * - metadataGitRepoBranch
+ * - metadataGitRepoRef
+ * - repoDir
+ * - cloneRepo
+ */
+def checkoutKaasReleaseMetadataRepo(Map params = [:]) {
+ def git = new com.mirantis.mk.Git()
+
+ String gitCredentialsId = params.get('metadataCredentialsId', 'mcp-ci-gerrit')
+ String gitUrl = params.get('metadataGitRepoUrl', "ssh://${gitCredentialsId}@gerrit.mcp.mirantis.net:29418/kaas/releases")
+ String gitBranch = params.get('metadataGitRepoBranch', 'master')
+ String gitRef = params.get('metadataGitRepoRef', 'HEAD')
+ String repoDir = params.get('repoDir', 'releases')
+ Boolean cloneRepo = params.get('cloneRepo', true)
+ if (cloneRepo) {
+ stage('Cleanup repo dir') {
+ dir(repoDir) {
+ deleteDir()
+ }
+ }
+ stage('Cloning release metadata repository') {
+ git.checkoutGitRepository(repoDir, gitUrl, gitBranch, gitCredentialsId, true, 10, 0, gitRef)
+ }
+ } else {
+ git.changeGitBranch(repoDir, gitRef ?: gitBranch)
+ }
+}
+
+
+/**
+ * Update KaaS release metadata value and upload CR to release metadata repository
+ *
+ * @param key metadata key (Several keys could be passed joined by ';' character)
+ * @param value metadata value (Several values could be passed joined by ';' character)
+ * @param params map with expected parameters:
+ * - metadataCredentialsId
+ * - metadataGitRepoUrl
+ * - metadataGitRepoBranch
+ * - repoDir
+ * - comment
+ * - crTopic
+ * - crAuthorName
+ * - crAuthorEmail
+ * - kaasMetadataFileToUpdate
+ */
+
+def updateKaasReleaseMetadata(String key, String value, Map params) {
+ String gitCredentialsId = params.get('metadataCredentialsId', 'mcp-ci-gerrit')
+ String metadataRepoUrl = params.get('metadataGitRepoUrl', "ssh://${gitCredentialsId}@gerrit.mcp.mirantis.net:29418/kaas/releases")
+ String metadataGerritBranch = params.get('metadataGitRepoBranch', 'master')
+ String repoDir = params.get('repoDir', 'releases')
+ String comment = params.get('comment', '')
+ String crTopic = params.get('crTopic', '')
+ String changeAuthorName = params.get('crAuthorName', 'MCP-CI')
+ String changeAuthorEmail = params.get('crAuthorEmail', 'mcp-ci-jenkins@ci.mcp.mirantis.net')
+ String fileToUpdatePath = params.get('kaasMetadataFileToUpdatePath', '')
+ String updateChartVersion = params.get('kaasMetadataUpdateChartVersion', '1')
+ String updateTagVersion = params.get('kaasMetadataUpdateTagVersion', '1')
+
+ def common = new com.mirantis.mk.Common()
+ def python = new com.mirantis.mk.Python()
+ def gerrit = new com.mirantis.mk.Gerrit()
+ def git = new com.mirantis.mk.Git()
+
+ def cred = common.getCredentials(gitCredentialsId, 'key')
+ String gerritUser = cred.username
+ String gerritHost = metadataRepoUrl.tokenize('@')[-1].tokenize(':')[0]
+ String metadataProject = metadataRepoUrl.tokenize('/')[-2..-1].join('/')
+ String gerritPort = metadataRepoUrl.tokenize(':')[-1].tokenize('/')[0]
+ String workspace = common.getWorkspace()
+ String venvDir = "${workspace}/gitreview-venv"
+ String ChangeId
+ String commitMessage
+ String gitRemote
+ stage("Installing virtualenv") {
+ python.setupVirtualenv(venvDir, 'python3', ['git-review'])
+ }
+ checkoutKaasReleaseMetadataRepo(params)
+ dir(repoDir) {
+ gitRemote = sh(
+ script:
+ 'git remote -v | head -n1 | cut -f1',
+ returnStdout: true,
+ ).trim()
+ }
+
+ stage('Creating CR') {
+ def gerritAuth = ['PORT': gerritPort, 'USER': gerritUser, 'HOST': gerritHost]
+ def changeParams = ['owner': gerritUser, 'status': 'open', 'project': metadataProject, 'branch': metadataGerritBranch, 'topic': crTopic]
+ def gerritChange = gerrit.findGerritChange(gitCredentialsId, gerritAuth, changeParams)
+ git.changeGitBranch(repoDir, metadataGerritBranch)
+ if (gerritChange) {
+ def jsonChange = readJSON text: gerritChange
+ changeNum = jsonChange['number']
+ ChangeId = 'Change-Id: '
+ ChangeId += jsonChange['id']
+ } else {
+ ChangeId = ''
+ git.createGitBranch(repoDir, crTopic)
+ }
+
+ def keyArr = key.split(';')
+ def valueArr = value.split(';')
+ if (keyArr.size() == valueArr.size()) {
+ for (i in 0..keyArr.size()-1) {
+ common.infoMsg("Setting ${keyArr[i]} version to: ${valueArr[i]}")
+ sh '''set +x
+ tmpfile=\$(mktemp kaas_tmp_file.XXXXXX)
+ trap "rm -f \$tmpfile" 1 2 3 6
+
+ awk \
+ -v key_name='''+keyArr[i]+''' \
+ -v key_ver='''+valueArr[i]+''' \
+ -v update_chart_version='''+updateChartVersion+''' \
+ -v update_tag_version='''+updateTagVersion+''' '
+BEGIN {
+ match_found = 0;
+}
+
+// {
+if ($0 ~ "^\\\\s+- name: "key_name) {
+ match_found = 1;
+ print $0;
+ next;
+}
+if ($0 ~ /^\\s+- name: /) {
+ match_found = 0;
+}
+if (update_chart_version && match_found && $0 ~ /^\\s+version: /) {
+ print gensub(/(\\s+version:).*/,"\\\\1 "key_ver,1,$0);
+ next;
+}
+if (update_tag_version && match_found && $0 ~ /^\\s+tag: /) {
+ print gensub(/(\\s+tag:).*/,"\\\\1 "key_ver,1,$0);
+ next;
+}
+print $0
+}' '''+repoDir+'/'+fileToUpdatePath+''' > "\$tmpfile"
+ mv "\${tmpfile}" '''+repoDir+'/'+fileToUpdatePath+'''
+ '''
+ }
+ }
+
+ commitMessage =
+ """${comment}
+
+ |${ChangeId}
+ """.stripMargin()
+ //commit change
+ git.commitGitChanges(repoDir, commitMessage, changeAuthorEmail, changeAuthorName, false)
+ //post change
+ gerrit.postGerritReview(gitCredentialsId, venvDir, repoDir, changeAuthorName, changeAuthorEmail, gitRemote, crTopic, metadataGerritBranch)
+ }
+}
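A sketch of updating several component versions in a KaaS release metadata file and pushing the result as a Gerrit change request; the keys, versions and file path are placeholders:

```groovy
def kaasWorkflow = new com.mirantis.mk.KaasReleaseWorkflow()
kaasWorkflow.updateKaasReleaseMetadata(
    'iam;kaas-ui',        // several keys joined by ';'
    '1.1.10;1.2.3',       // matching versions joined by ';'
    [
        'comment'                     : 'Update iam and kaas-ui versions',
        'crTopic'                     : 'update-kaas-charts',
        'kaasMetadataFileToUpdatePath': 'kaas/2.0.0.yaml',   // placeholder path inside the repo
    ]
)
```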
diff --git a/src/com/mirantis/mk/Orchestrate.groovy b/src/com/mirantis/mk/Orchestrate.groovy
index 509fe87..3f1ffb4 100644
--- a/src/com/mirantis/mk/Orchestrate.groovy
+++ b/src/com/mirantis/mk/Orchestrate.groovy
@@ -19,7 +19,7 @@
salt.runSaltProcessStep(master, "I@salt:minion ${extra_tgt}", 'state.show_top')
}
-def installFoundationInfra(master, staticMgmtNet=false, extra_tgt = '') {
+def installFoundationInfra(master, staticMgmtNet=false, extra_tgt = '', batch=20) {
def salt = new com.mirantis.mk.Salt()
def common = new com.mirantis.mk.Common()
@@ -30,48 +30,50 @@
salt.enforceState([saltId: master, target: "I@salt:master ${extra_tgt}", state: ['linux.system']])
salt.enforceState([saltId: master, target: "I@salt:master ${extra_tgt}", state: ['salt.master'], failOnError: false, read_timeout: 120, retries: 2])
- salt.fullRefresh(master, "* ${extra_tgt}")
+ salt.fullRefresh(master, "* ${extra_tgt}", batch)
salt.enforceState([saltId: master, target: "I@salt:master ${extra_tgt}", state: ['salt.minion'], failOnError: false, read_timeout: 60, retries: 2])
salt.enforceState([saltId: master, target: "I@salt:master ${extra_tgt}", state: ['salt.minion']])
- salt.fullRefresh(master, "* ${extra_tgt}")
- salt.enforceState([saltId: master, target: "* ${extra_tgt}", state: ['linux.network.proxy'], failOnError: false, read_timeout: 60, retries: 2])
+ salt.fullRefresh(master, "* ${extra_tgt}", batch)
+ salt.enforceState([saltId: master, target: "* ${extra_tgt}", state: ['linux.network.proxy'], batch: batch, failOnError: false, read_timeout: 180, retries: 2])
// Make sure all repositories are in place before proceeding with package installation from other states
- salt.enforceState([saltId: master, target: "* ${extra_tgt}", state: ['linux.system.repo'], failOnError: false, read_timeout: 60, retries: 2])
+ salt.enforceState([saltId: master, target: "* ${extra_tgt}", state: ['linux.system.repo'], batch: batch, failOnError: false, read_timeout: 180, retries: 2])
try {
- salt.enforceState([saltId: master, target: "* ${extra_tgt}", state: ['salt.minion.base'], failOnError: false, read_timeout: 60, retries: 2])
+ salt.enforceState([saltId: master, target: "* ${extra_tgt}", state: ['salt.minion.base'], batch: batch, failOnError: false, read_timeout: 180, retries: 2])
sleep(5)
} catch (Throwable e) {
common.warningMsg('Salt state salt.minion.base is not present in the Salt-formula yet.')
}
- salt.enforceState([saltId: master, target: "* ${extra_tgt}", state: ['linux.system'], retries: 2])
+ salt.enforceState([saltId: master, target: "* ${extra_tgt}", state: ['linux.system'], batch: batch, retries: 2])
if (staticMgmtNet) {
- salt.runSaltProcessStep(master, "* ${extra_tgt}", 'cmd.shell', ["salt-call state.sls linux.network; salt-call service.restart salt-minion"], null, true, 60)
+ salt.runSaltProcessStep(master, "* ${extra_tgt}", 'cmd.shell', ["salt-call state.sls linux.network; salt-call service.restart salt-minion"], batch, true, 180)
}
- salt.enforceState([saltId: master, target: "I@linux:network:interface ${extra_tgt}", state: ['linux.network.interface'], retries: 2])
+ salt.enforceState([saltId: master, target: "I@linux:network:interface ${extra_tgt}", state: ['linux.network.interface'], batch: batch, retries: 2])
sleep(5)
- salt.enforceState([saltId: master, target: "I@linux:system ${extra_tgt}", state: ['linux', 'openssh', 'ntp', 'rsyslog']])
+ salt.enforceState([saltId: master, target: "I@linux:system ${extra_tgt}", state: ['linux', 'openssh', 'ntp', 'rsyslog'], batch: batch])
- salt.enforceState([saltId: master, target: "* ${extra_tgt}", state: ['salt.minion'], failOnError: false, read_timeout: 60, retries: 2])
+ salt.enforceState([saltId: master, target: "* ${extra_tgt}", state: ['salt.minion'], failOnError: false, batch: batch, read_timeout: 180, retries: 2])
sleep(5)
- salt.fullRefresh(master, "* ${extra_tgt}")
- salt.runSaltProcessStep(master, "* ${extra_tgt}", 'mine.update', [], null, true)
- salt.enforceState([saltId: master, target: "* ${extra_tgt}", state: ['linux.network.host']])
+ salt.fullRefresh(master, "* ${extra_tgt}", batch)
+ salt.runSaltProcessStep(master, "* ${extra_tgt}", 'mine.update', [], batch, true)
+ salt.enforceState([saltId: master, target: "* ${extra_tgt}", state: ['linux.network.host'], batch: batch])
+ // WA for PROD-33911
+ salt.enforceState([saltId: master, target: "I@linux:system ${extra_tgt}", state: ['openssh'], batch: batch])
// Install and configure iptables
- salt.enforceStateWithTest([saltId: master, target: "I@iptables:service ${extra_tgt}", state: 'iptables'])
+ salt.enforceStateWithTest([saltId: master, target: "I@iptables:service ${extra_tgt}", state: 'iptables', batch: batch])
// Install and configure logrotate
- salt.enforceStateWithTest([saltId: master, target: "I@logrotate:server ${extra_tgt}", state: 'logrotate'])
+ salt.enforceStateWithTest([saltId: master, target: "I@logrotate:server ${extra_tgt}", state: 'logrotate', batch: batch])
// Install and configure auditd
- salt.enforceStateWithTest([saltId: master, target: "I@auditd:service ${extra_tgt}", state: 'auditd'])
+ salt.enforceStateWithTest([saltId: master, target: "I@auditd:service ${extra_tgt}", state: 'auditd', batch: batch])
// Install and configure openscap
- salt.enforceStateWithTest([saltId: master, target: "I@openscap:service ${extra_tgt}", state: 'openscap'])
+ salt.enforceStateWithTest([saltId: master, target: "I@openscap:service ${extra_tgt}", state: 'openscap', batch: batch])
}
def installFoundationInfraOnTarget(master, target, staticMgmtNet=false, extra_tgt = '') {
@@ -158,7 +160,7 @@
}
common.infoMsg("All minions are up.")
- salt.fullRefresh(master, "* and not kvm* ${extra_tgt}")
+ salt.fullRefresh(master, "* and not I@salt:control ${extra_tgt}")
}
@@ -498,20 +500,20 @@
}
-def installOpenstackCompute(master, extra_tgt = '') {
+def installOpenstackCompute(master, extra_tgt = '', batch=20) {
def salt = new com.mirantis.mk.Salt()
def common = new com.mirantis.mk.Common()
// Configure compute nodes
def compute_compound = "I@nova:compute ${extra_tgt}"
- if (salt.testTarget(master, compute_compound)) {
+ if (salt.testTarget(master, compute_compound, batch)) {
// In case if infrastructure nodes are used as nova computes too
def gluster_compound = "I@glusterfs:server ${extra_tgt}"
def salt_ca_compound = "I@salt:minion:ca:salt_master_ca ${extra_tgt}"
// Enforce highstate asynchronous only on compute nodes which are not glusterfs and not salt ca servers
def hightstateTarget = "${compute_compound} and not ${gluster_compound} and not ${salt_ca_compound}"
- if (salt.testTarget(master, hightstateTarget)) {
+ if (salt.testTarget(master, hightstateTarget, batch)) {
retry(2) {
- salt.enforceHighstate(master, hightstateTarget)
+ salt.enforceHighstate(master, hightstateTarget, false, true, batch)
}
} else {
common.infoMsg("No minions matching highstate target found for target ${hightstateTarget}")
@@ -519,8 +521,8 @@
// Iterate through salt ca servers and check if they have compute role
// TODO: switch to batch once salt 2017.7+ would be used
common.infoMsg("Checking whether ${salt_ca_compound} minions have ${compute_compound} compound")
- for ( target in salt.getMinionsSorted(master, salt_ca_compound) ) {
- for ( cmp_target in salt.getMinionsSorted(master, compute_compound) ) {
+ for ( target in salt.getMinionsSorted(master, salt_ca_compound, batch) ) {
+ for ( cmp_target in salt.getMinionsSorted(master, compute_compound, batch) ) {
if ( target == cmp_target ) {
// Enforce highstate one by one on salt ca servers which are compute nodes
retry(2) {
@@ -532,8 +534,8 @@
// Iterate through glusterfs servers and check if they have compute role
// TODO: switch to batch once salt 2017.7+ would be used
common.infoMsg("Checking whether ${gluster_compound} minions have ${compute_compound} compound")
- for ( target in salt.getMinionsSorted(master, gluster_compound) ) {
- for ( cmp_target in salt.getMinionsSorted(master, compute_compound) ) {
+ for ( target in salt.getMinionsSorted(master, gluster_compound, batch) ) {
+ for ( cmp_target in salt.getMinionsSorted(master, compute_compound, batch) ) {
if ( target == cmp_target ) {
// Enforce highstate one by one on glusterfs servers which are compute nodes
retry(2) {
@@ -976,7 +978,7 @@
salt.enforceStateWithTest([saltId: master, target: "I@sphinx:server ${extra_tgt}", state: 'sphinx'])
//Configure Grafana
- pillar = salt.getPillar(master, "ctl01* ${extra_tgt}", '_param:stacklight_monitor_address')
+ pillar = salt.getPillar(master, "I@keystone:server:role:primary ${extra_tgt}", '_param:stacklight_monitor_address')
common.prettyPrint(pillar)
def stacklight_vip
@@ -1271,7 +1273,7 @@
def salt = new com.mirantis.mk.Salt()
//Get oss VIP address
- def pillar = salt.getPillar(master, "cfg01* ${extra_tgt}", '_param:stacklight_monitor_address')
+ def pillar = salt.getPillar(master, "I@salt:master ${extra_tgt}", '_param:stacklight_monitor_address')
common.prettyPrint(pillar)
def oss_vip
diff --git a/src/com/mirantis/mk/Python.groovy b/src/com/mirantis/mk/Python.groovy
index 8d9d19b..d82f68a 100644
--- a/src/com/mirantis/mk/Python.groovy
+++ b/src/com/mirantis/mk/Python.groovy
@@ -34,7 +34,12 @@
sh(returnStdout: true, script: virtualenv_cmd)
if (!offlineDeployment) {
try {
- runVirtualenvCommand(path, "pip install -U setuptools pip")
+ def pipPackage = 'pip'
+ if (python == 'python2') {
+ pipPackage = "\"pip<=19.3.1\""
+ common.infoMsg("Pinning pip package due to end of life of Python2 to ${pipPackage} version.")
+ }
+ runVirtualenvCommand(path, "pip install -U setuptools ${pipPackage}")
} catch (Exception e) {
common.warningMsg("Setuptools and pip cannot be updated, you might be offline but OFFLINE_DEPLOYMENT global property not initialized!")
}
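A hedged usage sketch of the surrounding setupVirtualenv() helper (its signature is taken from the call used later in this patch; the paths are illustrative only): the pip pin only takes effect for python2 virtualenvs.

    def python = new com.mirantis.mk.Python()
    // python2 venv: pip is pinned to <=19.3.1 because of the Python2 end of life
    python.setupVirtualenv('/tmp/venv-py2', 'python2', ['PyYaml'])
    // python3 venv: the latest pip is installed as before
    python.setupVirtualenv('/tmp/venv-py3', 'python3', ['PyYaml'])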
@@ -77,6 +82,76 @@
}
/**
+ * Another command runner that gives control over the outputs and the exit code
+ *
+ * - always prints the command being executed, to keep the pipeline execution traceable
+ * - always provides stdout/stderr/status in the result, even with console output enabled
+ * - throws an exception with the stderr content, so it can be read from the job status and processed
+ *
+ * @param cmd String, command to be executed
+ * @param virtualenv String, path to Python virtualenv (optional, default: '')
+ * @param verbose Boolean, true: (default) mirror stdout to console and to the result['stdout'] at the same time,
+ * false: store stdout only to result['stdout']
+ * @param check_status Boolean, true: (default) throw an exception which contains result['stderr'] if exit code is not 0,
+ * false: only print stderr if not empty, and return the result
+ * @return Map, ['status' : int, 'stderr' : str, 'stdout' : str ]
+ */
+def runCmd(String cmd, String virtualenv='', Boolean verbose=true, Boolean check_status=true) {
+ def common = new com.mirantis.mk.Common()
+
+ def script
+ def redirect_output
+ def result = [:]
+ def stdout_path = sh(script: '#!/bin/bash +x\nmktemp', returnStdout: true).trim()
+ def stderr_path = sh(script: '#!/bin/bash +x\nmktemp', returnStdout: true).trim()
+
+ if (verbose) {
+ // show stdout to console and store to stdout_path
+ redirect_output = " 1> >(tee -a ${stdout_path}) 2>${stderr_path}"
+ } else {
+ // only store stdout to stdout_path
+ redirect_output = " 1>${stdout_path} 2>${stderr_path}"
+ }
+
+ if (virtualenv) {
+ common.infoMsg("Run shell command in Python virtualenv [${virtualenv}]:\n" + cmd)
+ script = """#!/bin/bash +x
+ . ${virtualenv}/bin/activate
+ ( ${cmd.stripIndent()} ) ${redirect_output}
+ """
+ } else {
+ common.infoMsg('Run shell command:\n' + cmd)
+ script = """#!/bin/bash +x
+ ( ${cmd.stripIndent()} ) ${redirect_output}
+ """
+ }
+
+ result['status'] = sh(script: script, returnStatus: true)
+ result['stdout'] = readFile(stdout_path)
+ result['stderr'] = readFile(stderr_path)
+ def cleanup_script = """#!/bin/bash +x
+ rm ${stdout_path} || true
+ rm ${stderr_path} || true
+ """
+ sh(script: cleanup_script)
+
+ if (result['status'] != 0 && check_status) {
+ def error_message = '\nScript returned exit code: ' + result['status'] + '\n<<<<<< STDERR: >>>>>>\n' + result['stderr']
+ common.errorMsg(error_message)
+ common.printMsg('', 'reset')
+ throw new Exception(error_message)
+ }
+
+ if (result['stderr'] && verbose) {
+ def warning_message = '\nScript returned exit code: ' + result['status'] + '\n<<<<<< STDERR: >>>>>>\n' + result['stderr']
+ common.warningMsg(warning_message)
+ common.printMsg('', 'reset')
+ }
+
+ return result
+}
+
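A minimal usage sketch of runCmd(); the command, virtualenv path and error handling shown here are illustrative only.

    def python = new com.mirantis.mk.Python()
    // run quietly and do not fail the build on a non-zero exit code
    def res = python.runCmd('pip list --outdated', '/tmp/venv-py3', false, false)
    if (res['status'] != 0) {
        echo "Command failed, stderr was:\n${res['stderr']}"
    }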
+/**
* Install docutils in isolated environment
*
* @param path Path where virtualenv is created
diff --git a/src/com/mirantis/mk/ReleaseWorkflow.groovy b/src/com/mirantis/mk/ReleaseWorkflow.groovy
index a81f0d1..c27fa8a 100644
--- a/src/com/mirantis/mk/ReleaseWorkflow.groovy
+++ b/src/com/mirantis/mk/ReleaseWorkflow.groovy
@@ -1,8 +1,36 @@
package com.mirantis.mk
/**
- * ReleaseWorkflow functions
+ * Checkout the release metadata repo, either cloning it from scratch or reusing an existing checkout, depending on the cloneRepo parameter
*
+ * @param params map with expected parameters:
+ * - metadataCredentialsId
+ * - metadataGitRepoUrl
+ * - metadataGitRepoBranch
+ * - repoDir
+ * - cloneRepo
*/
+def checkoutReleaseMetadataRepo(Map params = [:]) {
+ def git = new com.mirantis.mk.Git()
+
+ String gitCredentialsId = params.get('metadataCredentialsId', 'mcp-ci-gerrit')
+ String gitUrl = params.get('metadataGitRepoUrl', "ssh://${gitCredentialsId}@gerrit.mcp.mirantis.net:29418/mcp/artifact-metadata")
+ String gitBranch = params.get('metadataGitRepoBranch', 'master')
+ String gitRef = params.get('metadataGitRepoRef', '')
+ String repoDir = params.get('repoDir', 'artifact-metadata')
+ Boolean cloneRepo = params.get('cloneRepo', true)
+ if (cloneRepo) {
+ stage('Cleanup repo dir') {
+ dir(repoDir) {
+ deleteDir()
+ }
+ }
+ stage('Cloning artifact-metadata repository') {
+ git.checkoutGitRepository(repoDir, gitUrl, gitBranch, gitCredentialsId, true, 10, 0, gitRef)
+ }
+ } else {
+ git.changeGitBranch(repoDir, gitRef ?: gitBranch)
+ }
+}
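A hedged usage sketch of checkoutReleaseMetadataRepo(); the branch name is illustrative only.

    def releaseWorkflow = new com.mirantis.mk.ReleaseWorkflow()
    // fresh clone into the default 'artifact-metadata' directory
    releaseWorkflow.checkoutReleaseMetadataRepo()
    // or reuse an existing checkout and only switch it to another branch
    releaseWorkflow.checkoutReleaseMetadataRepo([cloneRepo: false, metadataGitRepoBranch: 'release/2019.2.0'])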
/**
* Get release metadata value for given key
@@ -10,116 +38,145 @@
* @param key metadata key
* @param params map with expected parameters:
* - toxDockerImage
- * - metadataCredentialsId
- * - metadataGitRepoUrl
- * - metadataGitRepoBranch
+ * - outputFormat
* - repoDir
*/
def getReleaseMetadataValue(String key, Map params = [:]) {
String result
// Get params
String toxDockerImage = params.get('toxDockerImage', 'docker-prod-virtual.docker.mirantis.net/mirantis/external/tox')
- String gitCredentialsId = params.get('metadataCredentialsId', 'mcp-ci-gerrit')
- String gitUrl = params.get('metadataGitRepoUrl', "ssh://${gitCredentialsId}@gerrit.mcp.mirantis.net:29418/mcp/release-metadata")
- String gitBranch = params.get('metadataGitRepoBranch', 'master')
- String repoDir = params.get('repoDir', 'release-metadata')
String outputFormat = params.get('outputFormat', 'json')
+ String repoDir = params.get('repoDir', 'artifact-metadata')
// Libs
- def git = new com.mirantis.mk.Git()
+ def common = new com.mirantis.mk.Common()
String opts = ''
if (outputFormat && !outputFormat.isEmpty()) {
opts += " --${outputFormat}"
}
- // TODO cache it somehow to not checkout it all the time
- git.checkoutGitRepository(repoDir, gitUrl, gitBranch, gitCredentialsId, true, 10, 0)
+
+ checkoutReleaseMetadataRepo(params)
+
docker.image(toxDockerImage).inside {
result = sh(script: "cd ${repoDir} && tox -qq -e metadata -- ${opts} get --key ${key}", returnStdout: true).trim()
}
+ common.infoMsg("""
+ Release metadata key ${key} has value:
+ ${result}
+ """)
return result
}
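A hedged usage sketch of getReleaseMetadataValue(); the key name is illustrative only.

    def releaseWorkflow = new com.mirantis.mk.ReleaseWorkflow()
    def value = releaseWorkflow.getReleaseMetadataValue('images:openstack:nova:version', [outputFormat: 'json'])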
/**
- * Update release metadata after image build
+ * Precreate the directory structure and an empty YAML file for the given metadata key
*
* @param key metadata key
- * @param value metadata value
- * @param params string map with credentialsID, metadataRepoUrl, metadataGerritBranch and crTopic
+ * @param metadataDir metadata directory
+ * @param dirdepth the level at which YAML file should be created
*/
-def updateReleaseMetadata(String key, String value, Map params) {
- credentialsID = params['credentialsID'] ?: "mcp-ci-gerrit"
- metadataRepoUrl = params['metadataRepoUrl'] ?: "ssh://mcp-ci-gerrit@gerrit.mcp.mirantis.net:29418/mcp/release-metadata"
- metadataGerritBranch = params['metadataGerritBranch'] ?: "master"
- comment = params['comment'] ?: ""
- crTopic = params['crTopic'] ?: ""
+def precreateKeyReleaseMetadataFile(String key, String metadataDir, Integer dirdepth = 0) {
+ def keySize = key.split(':').size() - 1
+ if (dirdepth > 0 && dirdepth - 1 <= keySize) {
+ def dirPath = metadataDir + '/' + key.split(':')[0..dirdepth - 1].join('/')
+ sh "if ! test -d \"${dirPath}\" ; then mkdir -p \"${dirPath}\"; fi"
+ if (dirdepth - 1 != keySize) {
+ def pathToDummyFile = dirPath + '/' + key.split(':')[dirdepth] + '.yml'
+ sh "if ! test -f \"${pathToDummyFile}\" ; then touch \"${pathToDummyFile}\"; fi"
+ }
+ }
+}
+
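A worked example of the dirdepth handling, derived from the code above (key and paths are illustrative): with a ':'-separated key and dirdepth = 2, the first two key segments become directories and the third segment becomes an empty YAML file.

    // creates artifact-metadata/metadata/images/openstack/ if missing
    // and touches artifact-metadata/metadata/images/openstack/nova.yml
    def releaseWorkflow = new com.mirantis.mk.ReleaseWorkflow()
    releaseWorkflow.precreateKeyReleaseMetadataFile('images:openstack:nova:version', 'artifact-metadata/metadata', 2)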
+/**
+ * Update release metadata value and upload CR to release metadata repository
+ *
+ * @param key metadata key (several keys can be passed, joined by the ';' character)
+ * @param value metadata value (several values can be passed, joined by the ';' character)
+ * @param params map with expected parameters:
+ * - metadataCredentialsId
+ * - metadataGitRepoUrl
+ * - metadataGitRepoBranch
+ * - repoDir
+ * - comment
+ * - crTopic
+ * - crAuthorName
+ * - crAuthorEmail
+ * @param dirdepth depth of the directory tree to create from the key segments
+ */
+
+def updateReleaseMetadata(String key, String value, Map params, Integer dirdepth = 0) {
+ String gitCredentialsId = params.get('metadataCredentialsId', 'mcp-ci-gerrit')
+ String metadataRepoUrl = params.get('metadataGitRepoUrl', "ssh://${gitCredentialsId}@gerrit.mcp.mirantis.net:29418/mcp/artifact-metadata")
+ String metadataGerritBranch = params.get('metadataGitRepoBranch', 'master')
+ String repoDir = params.get('repoDir', 'artifact-metadata')
+ String comment = params.get('comment', '')
+ String crTopic = params.get('crTopic', '')
+ String changeAuthorName = params.get('crAuthorName', 'MCP-CI')
+ String changeAuthorEmail = params.get('crAuthorEmail', 'mcp-ci-jenkins@ci.mcp.mirantis.net')
+
def common = new com.mirantis.mk.Common()
def python = new com.mirantis.mk.Python()
def gerrit = new com.mirantis.mk.Gerrit()
- def git = new com.mirantis.mk.Git()
- def changeAuthorName = "MCP-CI"
- def changeAuthorEmail = "mcp-ci-jenkins@ci.mcp.mirantis.net"
- def cred = common.getCredentials(credentialsID, 'key')
+ def git = new com.mirantis.mk.Git()
+
+ def cred = common.getCredentials(gitCredentialsId, 'key')
String gerritUser = cred.username
- def gerritHost = metadataRepoUrl.tokenize('@')[-1].tokenize(':')[0]
- def metadataProject = metadataRepoUrl.tokenize('/')[-2..-1].join('/')
- def gerritPort = metadataRepoUrl.tokenize(':')[-1].tokenize('/')[0]
- def workspace = common.getWorkspace()
- def venvDir = "${workspace}/gitreview-venv"
- def repoDir = "${venvDir}/repo"
- def metadataDir = "${repoDir}/metadata"
- def ChangeId
- def commitMessage
- def gitRemote
+ String gerritHost = metadataRepoUrl.tokenize('@')[-1].tokenize(':')[0]
+ String metadataProject = metadataRepoUrl.tokenize('/')[-2..-1].join('/')
+ String gerritPort = metadataRepoUrl.tokenize(':')[-1].tokenize('/')[0]
+ String workspace = common.getWorkspace()
+ String venvDir = "${workspace}/gitreview-venv"
+ String metadataDir = "${repoDir}/metadata"
+ String ChangeId
+ String commitMessage
+ String gitRemote
stage("Installing virtualenv") {
python.setupVirtualenv(venvDir, 'python3', ['git-review', 'PyYaml'])
}
- stage('Cleanup repo dir') {
- dir(repoDir) {
- deleteDir()
- }
+ checkoutReleaseMetadataRepo(params)
+ dir(repoDir) {
+ gitRemote = sh(
+ script:
+ 'git remote -v | head -n1 | cut -f1',
+ returnStdout: true,
+ ).trim()
}
- stage('Cloning release-metadata repository') {
- git.checkoutGitRepository(repoDir, metadataRepoUrl, metadataGerritBranch, credentialsID, true, 10, 0)
- dir(repoDir) {
- gitRemote = sh(
- script:
- 'git remote -v | head -n1 | cut -f1',
- returnStdout: true,
- ).trim()
- }
- }
+
stage('Creating CR') {
def gerritAuth = ['PORT': gerritPort, 'USER': gerritUser, 'HOST': gerritHost]
def changeParams = ['owner': gerritUser, 'status': 'open', 'project': metadataProject, 'branch': metadataGerritBranch, 'topic': crTopic]
- def gerritChange = gerrit.findGerritChange(credentialsID, gerritAuth, changeParams)
+ def gerritChange = gerrit.findGerritChange(gitCredentialsId, gerritAuth, changeParams)
git.changeGitBranch(repoDir, metadataGerritBranch)
if (gerritChange) {
def jsonChange = readJSON text: gerritChange
changeNum = jsonChange['number']
ChangeId = 'Change-Id: '
ChangeId += jsonChange['id']
- //get existent change from gerrit
- gerrit.getGerritChangeByNum(credentialsID, venvDir, repoDir, gitRemote, changeNum)
} else {
ChangeId = ''
git.createGitBranch(repoDir, crTopic)
}
- cmdText = "python '${repoDir}/utils/app.py' --path '${metadataDir}' update --key '${key}' --value '${value}'"
- python.runVirtualenvCommand(venvDir, cmdText)
+
+ def keyArr = key.split(';')
+ def valueArr = value.split(';')
+ if (keyArr.size() == valueArr.size()) {
+ for (i in 0..keyArr.size()-1) {
+ precreateKeyReleaseMetadataFile(keyArr[i], metadataDir, dirdepth)
+
+ cmdText = "python '${repoDir}/utils/app.py' --path '${metadataDir}' update --key '${keyArr[i]}' --value '${valueArr[i]}'"
+ python.runVirtualenvCommand(venvDir, cmdText)
+ }
+ }
+
commitMessage =
"""${comment}
|${ChangeId}
""".stripMargin()
//commit change
- if (gerritChange) {
- git.commitGitChanges(repoDir, commitMessage, changeAuthorEmail, changeAuthorName, false, true)
- } else {
- git.commitGitChanges(repoDir, commitMessage, changeAuthorEmail, changeAuthorName, false)
- }
+ git.commitGitChanges(repoDir, commitMessage, changeAuthorEmail, changeAuthorName, false)
//post change
- gerrit.postGerritReview(credentialsID, venvDir, repoDir, changeAuthorName, changeAuthorEmail, gitRemote, crTopic, metadataGerritBranch)
+ gerrit.postGerritReview(gitCredentialsId, venvDir, repoDir, changeAuthorName, changeAuthorEmail, gitRemote, crTopic, metadataGerritBranch)
}
}
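A hedged usage sketch of the ';'-joined multi-key form of updateReleaseMetadata(); keys, values, comment and topic are illustrative only.

    def releaseWorkflow = new com.mirantis.mk.ReleaseWorkflow()
    releaseWorkflow.updateReleaseMetadata('images:openstack:nova:version;images:openstack:cinder:version',
                                          '1.2.3;4.5.6',
                                          [comment: 'Bump nova and cinder image versions', crTopic: 'bump-nova-cinder'],
                                          2)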
diff --git a/src/com/mirantis/mk/Salt.groovy b/src/com/mirantis/mk/Salt.groovy
index daff9fc..f4147e0 100644
--- a/src/com/mirantis/mk/Salt.groovy
+++ b/src/com/mirantis/mk/Salt.groovy
@@ -1,6 +1,5 @@
package com.mirantis.mk
-import com.cloudbees.groovy.cps.NonCPS
import java.util.stream.Collectors
/**
* Salt functions
@@ -50,14 +49,16 @@
* data: ['expression': 'I@openssh:server', 'type': 'compound'])
* @param function Function to execute (eg. "state.sls")
* @param batch Batch param to salt (integer or string with percents)
+ *              - null - automatic decision (use the SALT_MASTER_OPT_WORKER_THREADS env var if set, otherwise no batching)
+ *              - int - fixed batch size
+ *              - 'str%' - percentage of the targeted minions in one batch
* @param args Additional arguments to function
* @param kwargs Additional key-value arguments to function
* @param timeout Additional argument salt api timeout
* @param read_timeout http session read timeout
*/
-@NonCPS
-def runSaltCommand(saltId, client, target, function, batch = null, args = null, kwargs = null, timeout = -1, read_timeout = -1) {
+def runSaltCommand(saltId, client, target, function, batch = null, args = null, kwargs = null, timeout = -1, read_timeout = -1) {
data = [
'tgt': target.expression,
'fun': function,
@@ -65,9 +66,14 @@
'expr_form': target.type,
]
- if(batch != null){
+ if (batch) {
batch = batch.toString()
- if( (batch.isInteger() && batch.toInteger() > 0) || (batch.contains("%"))){
+ } else if (env.getEnvironment().containsKey('SALT_MASTER_OPT_WORKER_THREADS')) {
+ batch = env['SALT_MASTER_OPT_WORKER_THREADS'].toString()
+ }
+
+ if (batch instanceof String) {
+ if ((batch.isInteger() && batch.toInteger() > 0) || (batch.matches(/(\d){1,2}%/))){
data['client']= "local_batch"
data['batch'] = batch
}
@@ -85,6 +91,7 @@
data['timeout'] = timeout
}
+ def result = [:]
// Command will be sent using HttpRequest
if (saltId instanceof HashMap && saltId.containsKey("authToken") ) {
@@ -93,13 +100,22 @@
]
def http = new com.mirantis.mk.Http()
- return http.sendHttpPostRequest("${saltId.url}/", data, headers, read_timeout)
+ result = http.sendHttpPostRequest("${saltId.url}/", data, headers, read_timeout)
} else if (saltId instanceof HashMap) {
throw new Exception("Invalid saltId")
+ } else {
+ // Command will be sent using Pepper
+ result = runPepperCommand(data, saltId)
}
- // Command will be sent using Pepper
- return runPepperCommand(data, saltId)
+ // Convert returned Object to the same structure as from 'local' client to keep compatibility
+ if (data['client'].equals('local_batch')) {
+ def resultMap = ['return': [[:]]]
+ result['return'].each { it -> resultMap['return'][0] = it + resultMap['return'][0] }
+ return resultMap
+ } else {
+ return result
+ }
}
/**
@@ -107,13 +123,14 @@
* @param saltId Salt Connection object or pepperEnv (the command will be sent using the selected method)
* @param target Get pillar target
* @param pillar pillar name (optional)
+ * @param batch Batch param to salt (integer or string with percents)
* @return output of salt command
*/
-def getPillar(saltId, target, pillar = null) {
+def getPillar(saltId, target, pillar = null, batch = null) {
if (pillar != null) {
- return runSaltCommand(saltId, 'local', ['expression': target, 'type': 'compound'], 'pillar.get', null, [pillar.replace('.', ':')])
+ return runSaltCommand(saltId, 'local', ['expression': target, 'type': 'compound'], 'pillar.get', batch, [pillar.replace('.', ':')])
} else {
- return runSaltCommand(saltId, 'local', ['expression': target, 'type': 'compound'], 'pillar.data')
+ return runSaltCommand(saltId, 'local', ['expression': target, 'type': 'compound'], 'pillar.data', batch)
}
}
@@ -122,13 +139,14 @@
* @param saltId Salt Connection object or pepperEnv (the command will be sent using the selected method)
* @param target Get grain target
* @param grain grain name (optional)
+ * @param batch Batch param to salt (integer or string with percents)
* @return output of salt command
*/
-def getGrain(saltId, target, grain = null) {
+def getGrain(saltId, target, grain = null, batch = null) {
if(grain != null) {
- return runSaltCommand(saltId, 'local', ['expression': target, 'type': 'compound'], 'grains.item', null, [grain])
+ return runSaltCommand(saltId, 'local', ['expression': target, 'type': 'compound'], 'grains.item', batch, [grain])
} else {
- return runSaltCommand(saltId, 'local', ['expression': target, 'type': 'compound'], 'grains.items')
+ return runSaltCommand(saltId, 'local', ['expression': target, 'type': 'compound'], 'grains.items', batch)
}
}
@@ -137,10 +155,11 @@
* @param saltId Salt Connection object or pepperEnv (the command will be sent using the selected method)
* @param target Get grain target
* @param config grain name (optional)
+ * @param batch Batch param to salt (integer or string with percents)
* @return output of salt command
*/
-def getConfig(saltId, target, config) {
- return runSaltCommand(saltId, 'local', ['expression': target, 'type': 'compound'], 'config.get', null, [config.replace('.', ':')], '--out=json')
+def getConfig(saltId, target, config, batch = null) {
+ return runSaltCommand(saltId, 'local', ['expression': target, 'type': 'compound'], 'config.get', batch, [config.replace('.', ':')], '--out=json')
}
/**
@@ -207,7 +226,7 @@
if (!params.testTargetMatcher) {
params.testTargetMatcher = params.target
}
- if (testTarget(params.saltId, params.testTargetMatcher)) {
+ if (testTarget(params.saltId, params.testTargetMatcher, params.batch)) {
return enforceState(params)
} else {
if (!params.optional) {
@@ -268,7 +287,7 @@
kwargs["queue"] = true
}
- if (params.optional == false || testTarget(params.saltId, params.target)){
+ if (params.optional == false || testTarget(params.saltId, params.target, params.batch)){
if (params.retries > 0){
def retriesCounter = 0
retry(params.retries){
@@ -312,12 +331,13 @@
* @param batch salt batch parameter integer or string with percents (optional, default null - disable batch)
* @param output do you want to print output
* @param saltArgs additional salt args eq. ["runas=aptly"]
+ * @param replacing list of entries to be removed from the info message (passwords, logins, etc.)
* @return output of salt command
*/
-def cmdRun(saltId, target, cmd, checkResponse = true, batch=null, output = true, saltArgs = []) {
+def cmdRun(saltId, target, cmd, checkResponse = true, batch=null, output = true, saltArgs = [], replacing = []) {
def common = new com.mirantis.mk.Common()
def originalCmd = cmd
- common.infoMsg("Running command ${cmd} on ${target}")
+ common.infoSensitivityMsg("Running command ${cmd} on ${target}", true, replacing)
if (checkResponse) {
cmd = cmd + " && echo Salt command execution success"
}
@@ -426,7 +446,7 @@
* @return output of salt command
*/
def minionsPresent(saltId, target = 'I@salt:master', target_minions = '', waitUntilPresent = true, batch=null, output = true, maxRetries = 200, answers = 1) {
- def target_hosts = getMinionsSorted(saltId, target_minions)
+ def target_hosts = getMinionsSorted(saltId, target_minions, batch)
for (t in target_hosts) {
def tgt = stripDomainName(t)
minionPresent(saltId, target, tgt, waitUntilPresent, batch, output, maxRetries, answers)
@@ -517,11 +537,12 @@
def retries = config.get('retries', 10)
def timeout = config.get('timeout', 5)
def checkAvailability = config.get('availability', true)
+ def batch = config.get('batch', null)
common.retry(retries, wait) {
if (checkAvailability) {
- minionsReachable(saltId, 'I@salt:master', target_reachable)
+ minionsReachable(saltId, 'I@salt:master', target_reachable, batch)
}
- def running = runSaltProcessStep(saltId, target, 'saltutil.running', [], null, true, timeout)
+ def running = runSaltProcessStep(saltId, target, 'saltutil.running', [], batch, true, timeout)
for (value in running.get("return")[0].values()) {
if (value != []) {
throw new Exception("Not all salt-minions are ready for execution")
@@ -534,15 +555,16 @@
* Restart and wait for salt-minions on target nodes.
* @param saltId Salt Connection object or pepperEnv (the command will be sent using the selected method)
* @param target unique identification of a minion or group of salt minions
- * @param wait timeout for the salt command if minions do not return (default 5)
- * @param maxRetries finite number of iterations to check status of a command (default 10)
+ * @param wait timeout for the salt command if minions do not return (default 10)
+ * @param maxRetries finite number of iterations to check status of a command (default 15)
+ * @param async Run salt minion restart and do not wait for response
* @return output of salt command
*/
-def restartSaltMinion(saltId, target, wait = 5, maxRetries = 10) {
+def restartSaltMinion(saltId, target, wait = 10, maxRetries = 15, async = true) {
def common = new com.mirantis.mk.Common()
common.infoMsg("Restarting salt-minion on ${target} and waiting for they are reachable.")
- runSaltProcessStep(saltId, target, 'cmd.shell', ['salt-call service.restart salt-minion'], null, true, 60)
- checkTargetMinionsReady(['saltId': saltId, 'target_reachable': target, timeout: wait, retries: maxRetries])
+ runSaltProcessStep(saltId, target, 'cmd.shell', ['salt-call service.restart salt-minion'], null, true, 60, null, async)
+ checkTargetMinionsReady(['saltId': saltId, 'target': target, timeout: wait, retries: maxRetries])
common.infoMsg("All ${target} minions are alive...")
}
@@ -652,10 +674,11 @@
* Perform complete salt sync between master and target
* @param saltId Salt Connection object or pepperEnv (the command will be sent using the selected method)
* @param target Get pillar target
+ * @param batch Batch param to salt (integer or string with percents)
* @return output of salt command
*/
-def syncAll(saltId, target) {
- return runSaltCommand(saltId, 'local', ['expression': target, 'type': 'compound'], 'saltutil.sync_all')
+def syncAll(saltId, target, batch = null) {
+ return runSaltCommand(saltId, 'local', ['expression': target, 'type': 'compound'], 'saltutil.sync_all', batch)
}
/**
@@ -663,12 +686,13 @@
* Method will call saltutil.refresh_pillar, saltutil.refresh_grains and saltutil.sync_all
* @param saltId Salt Connection object or pepperEnv (the command will be sent using the selected method)
* @param target Get pillar target
+ * @param batch Batch param to salt (integer or string with percents)
* @return output of salt command
*/
-def fullRefresh(saltId, target){
- runSaltProcessStep(saltId, target, 'saltutil.refresh_pillar', [], null, true)
- runSaltProcessStep(saltId, target, 'saltutil.refresh_grains', [], null, true)
- runSaltProcessStep(saltId, target, 'saltutil.sync_all', [], null, true)
+def fullRefresh(saltId, target, batch=20){
+ runSaltProcessStep(saltId, target, 'saltutil.refresh_pillar', [], batch, true)
+ runSaltProcessStep(saltId, target, 'saltutil.refresh_grains', [], batch, true)
+ runSaltProcessStep(saltId, target, 'saltutil.sync_all', [], batch, true)
}
/**
@@ -709,10 +733,11 @@
* Get running minions IDs according to the target
* @param saltId Salt Connection object or pepperEnv (the command will be sent using the selected method)
* @param target Get minions target
+ * @param batch Batch param to salt (integer or string with percents)
* @return list of active minions fitting the target
*/
-def getMinions(saltId, target) {
- def minionsRaw = runSaltCommand(saltId, 'local', ['expression': target, 'type': 'compound'], 'test.ping')
+def getMinions(saltId, target, batch = null) {
+ def minionsRaw = runSaltCommand(saltId, 'local', ['expression': target, 'type': 'compound'], 'test.ping', batch)
return new ArrayList<String>(minionsRaw['return'][0].keySet())
}
@@ -720,20 +745,22 @@
* Get sorted running minions IDs according to the target
* @param saltId Salt Connection object or pepperEnv
* @param target Get minions target
+ * @param batch Batch param to salt (integer or string with percents)
* @return sorted list of active minions fitting the target
*/
-def getMinionsSorted(saltId, target) {
- return getMinions(saltId, target).sort()
+def getMinionsSorted(saltId, target, batch = null) {
+ return getMinions(saltId, target, batch).sort()
}
/**
* Get first out of running minions IDs according to the target
* @param saltId Salt Connection object or pepperEnv
* @param target Get minions target
+ * @param batch Batch param to salt (integer or string with percents)
* @return first of the active minions fitting the target
*/
-def getFirstMinion(saltId, target) {
- def minionsSorted = getMinionsSorted(saltId, target)
+def getFirstMinion(saltId, target, batch = null) {
+ def minionsSorted = getMinionsSorted(saltId, target, batch)
return minionsSorted[0]
}
@@ -741,10 +768,11 @@
* Get running salt minion IDs without their domain name part and numbering identification
* @param saltId Salt Connection object or pepperEnv
* @param target Get minions target
+ * @param batch Batch param to salt (integer or string with percents)
* @return list of active minions fitting the target, without their domain name part and numbering
*/
-def getMinionsGeneralName(saltId, target) {
- def minionsSorted = getMinionsSorted(saltId, target)
+def getMinionsGeneralName(saltId, target, batch = null) {
+ def minionsSorted = getMinionsSorted(saltId, target, batch)
return stripDomainName(minionsSorted[0]).replaceAll('\\d+$', "")
}
@@ -772,7 +800,7 @@
* @return Return values of a salt command
*/
def getReturnValues(output) {
- if(output.containsKey("return") && !output.get("return").isEmpty()) {
+ if(output && output.containsKey("return") && !output.get("return").isEmpty()) {
return output['return'][0].values()[0]
}
def common = new com.mirantis.mk.Common()
@@ -815,11 +843,12 @@
* Test if there are any minions to target
* @param saltId Salt Connection object or pepperEnv (the command will be sent using the selected method)
* @param target Target to test
+ * @param batch Batch param to salt (integer or string with percents)
* @return bool indicating if the target was successful
*/
-def testTarget(saltId, target) {
- return getMinions(saltId, target).size() > 0
+def testTarget(saltId, target, batch = null) {
+ return getMinions(saltId, target, batch).size() > 0
}
/**
@@ -828,10 +857,11 @@
* @param target Key generating target
* @param host Key generating host
* @param keysize generated key size (optional, default 4096)
+ * @param batch Batch param to salt (integer or string with percents)
* @return output of salt command
*/
-def generateNodeKey(saltId, target, host, keysize = 4096) {
- return runSaltCommand(saltId, 'wheel', target, 'key.gen_accept', [host], ['keysize': keysize])
+def generateNodeKey(saltId, target, host, keysize = 4096, batch = null) {
+ return runSaltCommand(saltId, 'wheel', target, 'key.gen_accept', batch, [host], ['keysize': keysize])
}
/**
@@ -841,10 +871,11 @@
* @param host Metadata generating host
* @param classes Reclass classes
* @param parameters Reclass parameters
+ * @param batch Batch param to salt (integer or string with percents)
* @return output of salt command
*/
-def generateNodeMetadata(saltId, target, host, classes, parameters) {
- return runSaltCommand(saltId, 'local', target, 'reclass.node_create', [host, '_generated'], ['classes': classes, 'parameters': parameters])
+def generateNodeMetadata(saltId, target, host, classes, parameters, batch = null) {
+ return runSaltCommand(saltId, 'local', target, 'reclass.node_create', batch, [host, '_generated'], ['classes': classes, 'parameters': parameters])
}
/**
@@ -853,14 +884,15 @@
* @param target Orchestration target
* @param orchestrate Salt orchestrate params
* @param kwargs Salt orchestrate params
+ * @param batch Batch param to salt (integer or string with percents)
* @return output of salt command
*/
-def orchestrateSystem(saltId, target, orchestrate=[], kwargs = null) {
+def orchestrateSystem(saltId, target, orchestrate=[], kwargs = null, batch = null) {
//Since the runSaltCommand uses "arg" (singular) for "runner" client this won`t work correctly on old salt 2016
//cause this version of salt used "args" (plural) for "runner" client, see following link for reference:
//https://github.com/saltstack/salt/pull/32938
def common = new com.mirantis.mk.Common()
- def result = runSaltCommand(saltId, 'runner', target, 'state.orchestrate', true, orchestrate, kwargs, 7200, 7200)
+ def result = runSaltCommand(saltId, 'runner', target, 'state.orchestrate', batch, orchestrate, kwargs, 7200, 7200)
if(result != null){
if(result['return']){
def retcode = result['return'][0].get('retcode')
@@ -883,21 +915,21 @@
* @param saltId Salt Connection object or pepperEnv (the command will be sent using the selected method)
* @param pillar_tree Reclass pillar that has orchestrate pillar for desired stage
* @param extra_tgt Extra targets for compound
- *
+ * @param batch Batch param to salt (integer or string with percents)
* @return output of salt command
*/
-def orchestratePrePost(saltId, pillar_tree, extra_tgt = '') {
+def orchestratePrePost(saltId, pillar_tree, extra_tgt = '', batch = null) {
def common = new com.mirantis.mk.Common()
def salt = new com.mirantis.mk.Salt()
def compound = 'I@' + pillar_tree + " " + extra_tgt
common.infoMsg("Refreshing pillars")
- runSaltProcessStep(saltId, '*', 'saltutil.refresh_pillar', [], null, true)
+ runSaltProcessStep(saltId, '*', 'saltutil.refresh_pillar', [], batch, true)
common.infoMsg("Looking for orchestrate pillars")
- if (salt.testTarget(saltId, compound)) {
- for ( node in salt.getMinionsSorted(saltId, compound) ) {
+ if (salt.testTarget(saltId, compound, batch)) {
+ for ( node in salt.getMinionsSorted(saltId, compound, batch) ) {
def pillar = salt.getPillar(saltId, node, pillar_tree)
if ( !pillar['return'].isEmpty() ) {
for ( orch_id in pillar['return'][0].values() ) {
@@ -905,7 +937,7 @@
def orch_enabled = orch_id.values()['enabled']
if ( orch_enabled ) {
common.infoMsg("Orchestrating: ${orchestrator}")
- salt.printSaltCommandResult(salt.orchestrateSystem(saltId, ['expression': node], [orchestrator]))
+ salt.printSaltCommandResult(salt.orchestrateSystem(saltId, ['expression': node], [orchestrator], null, batch))
}
}
}
@@ -919,20 +951,20 @@
* @param tgt Salt process step target
* @param fun Salt process step function
* @param arg process step arguments (optional, default [])
- * @param batch salt batch parameter integer or string with percents (optional, default null - disable batch)
+ * @param batch salt batch parameter integer or string with percents (optional, default null - disable batch). Can't be used with async
* @param output print output (optional, default true)
* @param timeout Additional argument salt api timeout
+ * @param async Run the salt command but don't wait for a reply. Can't be used with batch
* @return output of salt command
*/
-def runSaltProcessStep(saltId, tgt, fun, arg = [], batch = null, output = true, timeout = -1, kwargs = null) {
+def runSaltProcessStep(saltId, tgt, fun, arg = [], batch = null, output = true, timeout = -1, kwargs = null, async = false) {
def common = new com.mirantis.mk.Common()
def salt = new com.mirantis.mk.Salt()
def out
common.infoMsg("Running step ${fun} ${arg} on ${tgt}")
-
- if (batch == true) {
- out = runSaltCommand(saltId, 'local_batch', ['expression': tgt, 'type': 'compound'], fun, String.valueOf(batch), arg, kwargs, timeout)
+ if (async == true) {
+ out = runSaltCommand(saltId, 'local_async', ['expression': tgt, 'type': 'compound'], fun, null, arg, kwargs, timeout)
} else {
out = runSaltCommand(saltId, 'local', ['expression': tgt, 'type': 'compound'], fun, batch, arg, kwargs, timeout)
}
@@ -1004,7 +1036,7 @@
outputResources.add(String.format("Resource: %s\n\u001B[33m%s\u001B[0m", resKey, common.prettify(resource)))
}
}else{
- if(!printOnlyChanges || resource.changes.size() > 0){
+ if(!printOnlyChanges || (resource.changes && resource.changes.size() > 0)) {
outputResources.add(String.format("Resource: %s\n\u001B[32m%s\u001B[0m", resKey, common.prettify(resource)))
}
}
@@ -1228,10 +1260,11 @@
* @param saltId Salt Connection object or pepperEnv (the command will be sent using the selected method)
* @param target Targeted nodes to be checked
* @param diff Maximum time difference (in seconds) to be accepted during time sync check
+* @param batch Batch param to salt (integer or string with percents)
* @return bool Return true if time difference is <= diff and returns false if time difference is > diff
*/
-def checkClusterTimeSync(saltId, target) {
+def checkClusterTimeSync(saltId, target, batch = null) {
def common = new com.mirantis.mk.Common()
def salt = new com.mirantis.mk.Salt()
@@ -1243,7 +1276,7 @@
} else {
diff = 5
}
- out = salt.runSaltProcessStep(saltId, target, 'status.time', '%s')
+ out = salt.runSaltProcessStep(saltId, target, 'status.time', '%s', batch)
outParsed = out['return'][0]
def outKeySet = outParsed.keySet()
for (key in outKeySet) {
@@ -1347,12 +1380,31 @@
def isPackageInstalled(Map params) {
def output = params.get('output', true)
- def res = runSaltProcessStep(params.saltId, params.target, "pkg.info_installed", params.packageName, null, output)['return'][0]
- for (int i = 0; i < res.size(); i++) {
- def key = res.keySet()[i]
- if (!(res[key] instanceof Map && res[key].containsKey(params.packageName))) {
- return false
+ def res = runSaltProcessStep(params.saltId, params.target, "pkg.list_pkgs", [], null, output)['return'][0]
+ if (res) {
+ for (int i = 0; i < res.size(); i++) {
+ def key = res.keySet()[i]
+ if (!(res[key] instanceof Map && res[key].get(params.packageName.toString(), false))) {
+ return false
+ }
}
+ return true
+ } else {
+ return false
}
- return true
}
+
+/**
+* Returns the number of worker_threads set for the Salt Master
+*
+* @param saltId Salt Connection object or pepperEnv
+*
+*/
+def getWorkerThreads(saltId) {
+ if (env.getEnvironment().containsKey('SALT_MASTER_OPT_WORKER_THREADS')) {
+ return env['SALT_MASTER_OPT_WORKER_THREADS'].toString()
+ }
+ def threads = cmdRun(saltId, "I@salt:master", "cat /etc/salt/master.d/master.conf | grep worker_threads | cut -f 2 -d ':'", true, null, true)
+ return threads['return'][0].values()[0].replaceAll('Salt command execution success','').trim()
+}
+
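A hedged sketch of reusing the master's worker_threads value as a batch size; pepperEnv and the state list are illustrative.

    def salt = new com.mirantis.mk.Salt()
    def batchSize = salt.getWorkerThreads(pepperEnv)
    salt.enforceState([saltId: pepperEnv, target: 'I@linux:system', state: ['linux'], batch: batchSize])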
diff --git a/src/com/mirantis/mk/SaltModelTesting.groovy b/src/com/mirantis/mk/SaltModelTesting.groovy
index 1b9b3e8..c7afbb0 100644
--- a/src/com/mirantis/mk/SaltModelTesting.groovy
+++ b/src/com/mirantis/mk/SaltModelTesting.groovy
@@ -103,10 +103,11 @@
def defaultRepos = readYaml text: defaultExtraReposYaml
// Don't check for magic, if set explicitly
if (updateSaltFormulas) {
+ def updateSaltFormulasRev = config.get('updateSaltFormulasRev', distribRevision)
if (!oldRelease && distribRevision != releaseVersionQ4) {
defaultRepos['repo']['mcp_saltformulas_update'] = [
- 'source' : "deb [arch=amd64] http://mirror.mirantis.com/update/${distribRevision}/salt-formulas/xenial xenial main",
- 'repo_key': "http://mirror.mirantis.com/update/${distribRevision}/salt-formulas/xenial/archive-salt-formulas.key"
+ 'source' : "deb [arch=amd64] http://mirror.mirantis.com/update/${updateSaltFormulasRev}/salt-formulas/xenial xenial main",
+ 'repo_key': "http://mirror.mirantis.com/update/${updateSaltFormulasRev}/salt-formulas/xenial/archive-salt-formulas.key"
]
}
}
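A hedged sketch of the relevant config keys consumed above (the values are illustrative); when updateSaltFormulasRev is omitted, it falls back to distribRevision.

    def config = [
        'distribRevision'      : 'proposed',
        'updateSaltFormulas'   : true,
        'updateSaltFormulasRev': '2019.2.0',
    ]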
@@ -170,16 +171,16 @@
}
try {
- common.warningMsg("IgnoreMe:Force cleanup slave.Ignore docker-daemon errors")
- timeout(time: 10, unit: 'SECONDS') {
- sh(script: "set -x; docker kill ${dockerContainerName} || true", returnStdout: true)
- }
- timeout(time: 10, unit: 'SECONDS') {
- sh(script: "set -x; docker rm --force ${dockerContainerName} || true", returnStdout: true)
+ timeout(time: 30, unit: 'SECONDS') {
+ if (sh(script: "docker inspect ${dockerContainerName}", returnStatus: true) == 0) {
+ common.warningMsg("Verify that container is not running. Ignore further docker-daemon errors")
+ sh(script: "set -x; test \$(docker inspect -f '{{.State.Running}}' ${dockerContainerName} 2>/dev/null) = 'true' && docker kill ${dockerContainerName}", returnStdout: true)
+ sh(script: "set -x; docker rm --force ${dockerContainerName} || true", returnStdout: true)
+ }
}
}
catch (Exception er) {
- common.warningMsg("IgnoreMe:Timeout to delete test docker container with force!Message:\n" + er.toString())
+ common.warningMsg("IgnoreMe:Timeout to delete test docker container with force! Message:\n" + er.toString())
}
if (TestMarkerResult) {
diff --git a/src/com/mirantis/mk/Workflow.groovy b/src/com/mirantis/mk/Workflow.groovy
new file mode 100644
index 0000000..a3502bd
--- /dev/null
+++ b/src/com/mirantis/mk/Workflow.groovy
@@ -0,0 +1,240 @@
+package com.mirantis.mk
+
+/**
+ *
+ * Run a simple workflow
+ *
+ * Function runScenario() executes a sequence of jobs, where:
+ * - Parameters for the jobs are taken from the 'env' object
+ * - URLs of artifacts from completed jobs may be passed
+ *   as parameters to the next jobs.
+ *
+ * No constants, environment specific logic or other conditional dependencies.
+ * All the logic should be placed in the workflow jobs, which perform the necessary
+ * actions depending on the job parameters.
+ * The runScenario() function only provides the execution order and the passing
+ * of artifact URLs between the jobs.
+ *
+ */
+
+
+/**
+ * Run a Jenkins job using the collected parameters
+ *
+ * @param job_name Name of the running job
+ * @param job_parameters Map that declares which values from global_variables should be used, in the following format:
+ * {'PARAM_NAME': {'type': <job parameter $class name>, 'use_variable': <a key from global_variables>}, ...}
+ * or
+ * {'PARAM_NAME': {'type': <job parameter $class name>, 'get_variable_from_url': <a key from global_variables which contains URL with required content>}, ...}
+ * or
+ * {'PARAM_NAME': {'type': <job parameter $class name>, 'use_template': <a GString multiline template with variables from global_variables>}, ...}
+ * @param global_variables Map that keeps the artifact URLs and used 'env' objects:
+ * {'PARAM1_NAME': <param1 value>, 'PARAM2_NAME': 'http://.../artifacts/param2_value', ...}
+ * @param propagate Boolean. If false: allows collecting artifacts after the job has finished, even with FAILURE status
+ *                           If true: immediately fails the pipeline. DO NOT USE 'true' if you want to collect artifacts
+ *                           for 'finally' steps
+ */
+def runJob(job_name, job_parameters, global_variables, Boolean propagate = false) {
+ def parameters = []
+ def http = new com.mirantis.mk.Http()
+ def engine = new groovy.text.GStringTemplateEngine()
+ def template
+ def base = [:]
+ base["url"] = ''
+ def variable_content
+
+ // Collect required parameters from 'global_variables' or 'env'
+ for (param in job_parameters) {
+ if (param.value.containsKey('use_variable')) {
+ if (!global_variables[param.value.use_variable]) {
+ global_variables[param.value.use_variable] = env[param.value.use_variable] ?: ''
+ }
+ parameters.add([$class: "${param.value.type}", name: "${param.key}", value: global_variables[param.value.use_variable]])
+ println "${param.key}: <${param.value.type}> ${global_variables[param.value.use_variable]}"
+ } else if (param.value.containsKey('get_variable_from_url')) {
+ if (!global_variables[param.value.get_variable_from_url]) {
+ global_variables[param.value.get_variable_from_url] = env[param.value.get_variable_from_url] ?: ''
+ }
+ if (global_variables[param.value.get_variable_from_url]) {
+ variable_content = http.restGet(base, global_variables[param.value.get_variable_from_url]).trim()
+ parameters.add([$class: "${param.value.type}", name: "${param.key}", value: variable_content])
+ println "${param.key}: <${param.value.type}> ${variable_content}"
+ } else {
+ println "${param.key} is empty, skipping get_variable_from_url"
+ }
+ } else if (param.value.containsKey('use_template')) {
+ template = engine.createTemplate(param.value.use_template).make(global_variables)
+ parameters.add([$class: "${param.value.type}", name: "${param.key}", value: template.toString()])
+ println "${param.key}: <${param.value.type}>\n${template.toString()}"
+ }
+ }
+
+ // Build the job
+ def job_info = build job: "${job_name}", parameters: parameters, propagate: propagate
+ return job_info
+}
+
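A hedged sketch of a job_parameters map covering the three supported sources; the job, parameter and variable names are illustrative and the referenced keys are expected to exist in global_variables or env.

    def workflow = new com.mirantis.mk.Workflow()
    def global_variables = [:]
    def job_parameters = [
        'KUBECONFIG_ARTIFACT_URL': [type: 'StringParameterValue', use_variable: 'KUBECONFIG_ARTIFACT'],
        'KAAS_VERSION'           : [type: 'StringParameterValue', get_variable_from_url: 'DEPLOYED_KAAS_VERSION'],
        'REPORTS_LIST'           : [type: 'TextParameterValue',   use_template: 'REPORT_SI_KAAS_UI: ${REPORT_SI_KAAS_UI}'],
    ]
    def job_info = workflow.runJob('test-kaas-ui', job_parameters, global_variables)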
+/**
+ * Store URLs of the specified artifacts to the global_variables
+ *
+ * @param build_url URL of the completed job
+ * @param step_artifacts Map that contains artifact names in the job, and variable names
+ * where the URLs to those artifacts should be stored, for example:
+ * {'ARTIFACT1': 'logs.tar.gz', 'ARTIFACT2': 'test_report.xml', ...}
+ * @param global_variables Map that will keep the artifact URLs. Variable 'ARTIFACT1', for example, can then
+ * be used in next job parameters: {'ARTIFACT1_URL':{ 'use_variable': 'ARTIFACT1', ...}}
+ *
+ * If an artifact with the specified name is not found, the parameter ARTIFACT1_URL
+ * will be empty.
+ *
+ */
+def storeArtifacts(build_url, step_artifacts, global_variables) {
+ def http = new com.mirantis.mk.Http()
+ def base = [:]
+ base["url"] = build_url
+ def job_config = http.restGet(base, "/api/json/")
+ def job_artifacts = job_config['artifacts']
+ for (artifact in step_artifacts) {
+ def job_artifact = job_artifacts.findAll { item -> artifact.value == item['fileName'] || artifact.value == item['relativePath'] }
+ if (job_artifact.size() == 1) {
+ // Store artifact URL
+ def artifact_url = "${build_url}artifact/${job_artifact[0]['relativePath']}"
+ global_variables[artifact.key] = artifact_url
+ println "Artifact URL ${artifact_url} stored to ${artifact.key}"
+ } else if (job_artifact.size() > 1) {
+ // Error: too many artifacts with the same name, fail the job
+ error "Multiple artifacts ${artifact.value} for ${artifact.key} found in the build results ${build_url}, expected one:\n${job_artifact}"
+ } else {
+ // Warning: no artifact with expected name
+ println "Artifact ${artifact.value} for ${artifact.key} not found in the build results ${build_url}, found the following artifacts:\n${job_artifacts}"
+ global_variables[artifact.key] = ''
+ }
+ }
+}
+
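A hedged usage sketch of storeArtifacts(), given a job_info object returned by runJob() and the shared global_variables map; the artifact path mirrors the scenario example below.

    def workflow = new com.mirantis.mk.Workflow()
    workflow.storeArtifacts(job_info.getAbsoluteUrl(),
                            ['KUBECONFIG_ARTIFACT': 'artifacts/management_kubeconfig'],
                            global_variables)
    // global_variables['KUBECONFIG_ARTIFACT'] now holds the artifact URL, or '' if it was not found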
+
+/**
+ * Run the workflow or final steps one by one
+ *
+ * @param steps List of steps (Jenkins jobs) to execute
+ * @param global_variables Map where the collected artifact URLs and 'env' objects are stored
+ * @param failed_jobs Map with failed job names and result statuses, to report it later
+ * @param propagate Boolean. If false: allows collecting artifacts after the job has finished, even with FAILURE status
+ *                  If true: immediately fails the pipeline. DO NOT USE 'true' with runScenario().
+ */
+def runSteps(steps, global_variables, failed_jobs, Boolean propagate = false) {
+ for (step in steps) {
+ stage("Running job ${step['job']}") {
+
+ def job_name = step['job']
+ def job_parameters = step['parameters']
+ // Collect job parameters and run the job
+ def job_info = runJob(job_name, job_parameters, global_variables, propagate)
+ def job_result = job_info.getResult()
+ def build_url = job_info.getAbsoluteUrl()
+ def build_description = job_info.getDescription()
+
+ currentBuild.description += "<a href=${build_url}>${job_name}</a>: ${job_result}<br>"
+ // Import the remote build description into the current build
+ if (build_description) { // TODO - add also the job status
+ currentBuild.description += build_description
+ }
+
+ // Store links to the resulting artifacts into 'global_variables'
+ storeArtifacts(build_url, step['artifacts'], global_variables)
+
+ // Job failed, fail the build or keep going depending on 'ignore_failed' flag
+ if (job_result != "SUCCESS") {
+ def job_ignore_failed = step['ignore_failed'] ?: false
+ failed_jobs[build_url] = job_result
+ if (job_ignore_failed) {
+ println "Job ${build_url} finished with result: ${job_result}"
+ } else {
+ currentBuild.result = job_result
+ error "Job ${build_url} finished with result: ${job_result}"
+ }
+ } // if (job_result != "SUCCESS")
+ } // stage ("Running job ${step['job']}")
+ } // for (step in scenario['workflow'])
+}
+
+/**
+ * Run the workflow scenario
+ *
+ * @param scenario: Map with scenario steps.
+
+ * There are two keys in the scenario:
+ * workflow: contains steps to run deploy and test jobs
+ * finally: contains steps to run report and cleanup jobs
+ *
+ * Scenario execution example:
+ *
+ * scenario_yaml = """\
+ * workflow:
+ * - job: deploy-kaas
+ * ignore_failed: false
+ * parameters:
+ * KAAS_VERSION:
+ * type: StringParameterValue
+ * use_variable: KAAS_VERSION
+ * artifacts:
+ * KUBECONFIG_ARTIFACT: artifacts/management_kubeconfig
+ * DEPLOYED_KAAS_VERSION: artifacts/management_version
+ *
+ * - job: test-kaas-ui
+ * ignore_failed: false
+ * parameters:
+ * KUBECONFIG_ARTIFACT_URL:
+ * type: StringParameterValue
+ * use_variable: KUBECONFIG_ARTIFACT
+ * KAAS_VERSION:
+ * type: StringParameterValue
+ * get_variable_from_url: DEPLOYED_KAAS_VERSION
+ * artifacts:
+ * REPORT_SI_KAAS_UI: artifacts/test_kaas_ui_result.xml
+ *
+ * finally:
+ * - job: testrail-report
+ * ignore_failed: true
+ * parameters:
+ * KAAS_VERSION:
+ * type: StringParameterValue
+ * get_variable_from_url: DEPLOYED_KAAS_VERSION
+ * REPORTS_LIST:
+ * type: TextParameterValue
+ * use_template: |
+ * REPORT_SI_KAAS_UI: \$REPORT_SI_KAAS_UI
+ * """
+ *
+ * runScenario(scenario)
+ *
+ */
+
+def runScenario(scenario) {
+
+ // Clear description before adding new messages
+ currentBuild.description = ''
+ // Collect the parameters for the jobs here
+ global_variables = [:]
+ // List of failed jobs to show at the end
+ failed_jobs = [:]
+
+ try {
+ // Run the 'workflow' jobs
+ runSteps(scenario['workflow'], global_variables, failed_jobs)
+
+ } catch (InterruptedException x) {
+ error "The job was aborted"
+
+ } catch (e) {
+ error("Build failed: " + e.toString())
+
+ } finally {
+ // Run the 'finally' jobs
+ runSteps(scenario['finally'], global_variables, failed_jobs)
+
+ if (failed_jobs) {
+ println "Failed jobs: ${failed_jobs}"
+ currentBuild.result = "FAILED"
+ }
+ } // try
+}
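A hedged end-to-end sketch: parse the scenario YAML from the runScenario() docstring above and run it.

    def workflow = new com.mirantis.mk.Workflow()
    def scenario = readYaml text: scenario_yaml
    workflow.runScenario(scenario)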