Add GlusterFS update pipelines
Depends-On: https://gerrit.mcp.mirantis.com/39552
Related-Prod: PROD-29243
Change-Id: I7b4b8dc6b9cfdf154b4fbf6d3d35a2f29a040c73
(cherry picked from commit 1a4e1427a204a854401c861269801fbfed23783e)
diff --git a/update-glusterfs-clients.groovy b/update-glusterfs-clients.groovy
new file mode 100644
index 0000000..02e889a
--- /dev/null
+++ b/update-glusterfs-clients.groovy
@@ -0,0 +1,134 @@
+/**
+ * Update glusterfs-client packages on given client nodes
+ *
+ * Expected parameters:
+ * DRIVE_TRAIN_PARAMS Yaml, DriveTrain related params:
+ * SALT_MASTER_CREDENTIALS Credentials to the Salt API
+ * SALT_MASTER_URL Full Salt API address [https://10.10.10.1:8000]
+ * IGNORE_SERVER_STATUS Does not validate server availability/status before update
+ * IGNORE_SERVER_VERSION Does not validate that all servers have been updated
+ * TARGET_SERVERS Salt compound target to match nodes to be updated [*, G@osfamily:debian]
+ */
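+
+// For illustration only: DRIVE_TRAIN_PARAMS is expected to carry the
+// parameters above as YAML, for example (all values are placeholders):
+//   SALT_MASTER_URL: https://10.10.10.1:8000
+//   SALT_MASTER_CREDENTIALS: salt-api-credentials
+//   IGNORE_SERVER_STATUS: false
+//   IGNORE_SERVER_VERSION: false
+//   TARGET_SERVERS: 'I@glusterfs:client'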
+
+// Convert parameters from yaml to env variables
+params = readYaml text: env.DRIVE_TRAIN_PARAMS
+for (key in params.keySet()) {
+ value = params[key]
+ env.setProperty(key, value)
+}
+
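+// Return the name of any Jenkins slave other than the one stored in
+// env.SLAVE_NAME, so the update can be finished from a different agent.
+// @NonCPS is required to iterate over non-serializable Hudson model objects.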
+@NonCPS
+def getNextNode() {
+ for (n in hudson.model.Hudson.instance.slaves) {
+ node_name = n.getNodeName()
+ if (node_name != env.SLAVE_NAME) {
+ return node_name
+ }
+ }
+}
+
+def update() {
+ def pEnv = "pepperEnv"
+ def salt = new com.mirantis.mk.Salt()
+ def common = new com.mirantis.mk.Common()
+ def python = new com.mirantis.mk.Python()
+ def pkg_name = 'glusterfs-client'
+
+ /**
+ * - select only those hosts where an update is available; exclude the minion the job is running on
+ * - validate that all glusterfs servers are in a normal working state. Can be skipped with an option
+ * - validate that glusterfs on all servers has been updated, otherwise stop the update. Can be skipped with an option
+ * - run the update state on one client at a time
+ */
+
+ try {
+
+ stage('Setup virtualenv for Pepper') {
+ python.setupPepperVirtualenv(pEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+ }
+
+ stage('List target servers') {
+ all_minions = salt.getMinions(pEnv, TARGET_SERVERS)
+
+ if (all_minions.isEmpty()) {
+ throw new Exception("No minion was targeted")
+ }
+
+ minions = []
+ for (minion in all_minions) {
+ latest_version = salt.getReturnValues(salt.runSaltProcessStep(pEnv, minion, 'pkg.latest_version', [pkg_name, 'show_installed=True'])).split('\n')[0]
+ current_version = salt.getReturnValues(salt.runSaltProcessStep(pEnv, minion, 'pkg.version', [pkg_name])).split('\n')[0]
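+ // Check whether the current Jenkins agent runs in a container hosted on this
+ // minion; if so, defer this minion's update to a second pass from another agent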
+ slave_container_id = salt.getReturnValues(salt.cmdRun(pEnv, minion, "which docker >/dev/null && docker ps --filter name=jenkins_${env.NODE_NAME} --filter status=running -q", false)).split('\n')[0]
+ if (latest_version != current_version) {
+ if (!slave_container_id.isEmpty() && !minion.startsWith('cfg')) {
+ env.SLAVE_NAME = env.NODE_NAME
+ env.SLAVE_MINION = minion
+ } else {
+ minions.add(minion)
+ }
+ } else {
+ common.infoMsg("${pkg_name} has been already upgraded or newer version is not available on ${minion}. Skip upgrade")
+ }
+ }
+ }
+ if (!minions.isEmpty()) {
+ if (!IGNORE_SERVER_STATUS.toBoolean()){
+ stage('Validate servers availability') {
+ salt.commandStatus(pEnv, 'I@glusterfs:server', "gluster pool list | fgrep localhost", 'Connected', true, true, null, true, 1)
+ common.successMsg("All glusterfs servers are available")
+ }
+ } else {
+ common.warningMsg("Check of glusterfs servers availability has been disabled")
+ }
+ if (!IGNORE_SERVER_VERSION.toBoolean()){
+ stage('Check that all glusterfs servers have been updated') {
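+ // Strip the Debian revision from pkg.latest_version so it can be matched against the version reported by glusterfsd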
+ latest_version = salt.getReturnValues(salt.runSaltProcessStep(pEnv, minions[0], 'pkg.latest_version', [pkg_name, 'show_installed=True'])).split('\n')[0].split('-')[0]
+ salt.commandStatus(pEnv, 'I@glusterfs:server', "glusterfsd --version | head -n1 | awk '{print \$2}' | egrep '^${latest_version}' || echo none", latest_version, true, true, null, true, 1)
+ common.successMsg('All glusterfs servers have been updated to desired version')
+ }
+ } else {
+ common.warningMsg("Check of glusterfs servers' version has been disabled")
+ }
+ // Actual update
+ for (tgt in minions) {
+ stage("Update glusterfs on ${tgt}") {
+ salt.runSaltProcessStep(pEnv, tgt, 'state.apply', ['glusterfs.update.client'])
+ }
+ }
+ } else if (env.SLAVE_MINION == null) {
+ common.warningMsg("No hosts to update glusterfs on")
+ }
+ } catch (Throwable e) {
+ // If there was an error or exception thrown, the build failed
+ currentBuild.result = "FAILURE"
+ currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
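+ // Best effort: re-apply the glusterfs state on all targeted nodes before failing the build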
+ salt.runSaltProcessStep(pEnv, TARGET_SERVERS, 'state.apply', ['glusterfs'])
+ throw e
+ }
+}
+timeout(time: 12, unit: 'HOURS') {
+ node() {
+ update()
+ }
+ // Run the update from another slave to finish the update on the previous slave's host
+ if (env.SLAVE_NAME != null && !env.SLAVE_NAME.isEmpty()) {
+ node(getNextNode()) {
+ update()
+ }
+ }
+}
diff --git a/update-glusterfs-cluster-op-version.groovy b/update-glusterfs-cluster-op-version.groovy
new file mode 100644
index 0000000..9623481
--- /dev/null
+++ b/update-glusterfs-cluster-op-version.groovy
@@ -0,0 +1,112 @@
+/**
+ * Update the GlusterFS cluster.op-version
+ *
+ * Expected parameters:
+ * DRIVE_TRAIN_PARAMS Yaml, DriveTrain related params:
+ * SALT_MASTER_CREDENTIALS Credentials to the Salt API
+ * SALT_MASTER_URL Full Salt API address [https://10.10.10.1:8000]
+ * IGNORE_CLIENT_VERSION Does not validate that all clients have been updated
+ * IGNORE_SERVER_VERSION Does not validate that all servers have been updated
+ * CLUSTER_OP_VERSION GlusterFS cluster.op-version option to set. Defaults to the current cluster.max-op-version if left empty.
+ */
+
+def pEnv = "pepperEnv"
+def salt = new com.mirantis.mk.Salt()
+def common = new com.mirantis.mk.Common()
+def python = new com.mirantis.mk.Python()
+
+// Convert parameters from yaml to env variables
+params = readYaml text: env.DRIVE_TRAIN_PARAMS
+for (key in params.keySet()) {
+ value = params[key]
+ env.setProperty(key, value)
+}
+
+/**
+ * - ensure that cluster.op-version can be updated
+ * - check that all servers have been updated to a version no less than CLUSTER_OP_VERSION or cluster.max-op-version
+ * - check that all clients have been updated to a version no less than CLUSTER_OP_VERSION or cluster.max-op-version
+ * - set cluster.op-version
+ */
+
+/**
+ * Convert glusterfs' cluster.op-version to regular version string
+ *
+ * @param version string representing cluster.op-version, e.g. 50400
+ * @return string version number, e.g. 5.4.0
+ */
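+// e.g. convertVersion('50400') -> '5.4.0', convertVersion('31302') -> '3.13.2'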
+def convertVersion(version) {
+ def new_version = version[0]
+ for (def i = 1; i < version.length(); i++) {
+ if (i%2 == 0) {
+ new_version += version[i]
+ } else if (version[i] == '0') {
+ new_version += '.'
+ } else {
+ new_version += '.' + version[i]
+ }
+ }
+ return new_version
+}
+
+timeout(time: 12, unit: 'HOURS') {
+ node() {
+ try {
+
+ stage('Setup virtualenv for Pepper') {
+ python.setupPepperVirtualenv(pEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+ }
+ stage('Get current cluster.op-version') {
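+ // cluster.op-version is reported per volume, so query it on the first volume from 'gluster volume list'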
+ volume = salt.getReturnValues(salt.cmdRun(pEnv, 'I@glusterfs:server:role:primary', "gluster volume list")).split('\n')[0]
+ currentOpVersion = salt.getReturnValues(salt.cmdRun(pEnv, 'I@glusterfs:server:role:primary', "gluster volume get ${volume} cluster.op-version | grep cluster.op-version | awk '{print \$2}'")).split('\n')[0]
+ }
+ if (CLUSTER_OP_VERSION.isEmpty()) {
+ stage('Get cluster.max-op-version') {
+ CLUSTER_OP_VERSION = salt.getReturnValues(salt.cmdRun(pEnv, 'I@glusterfs:server:role:primary', "gluster volume get all cluster.max-op-version 2>/dev/null | grep cluster.max-op-version | awk '{print \$2}'")).split('\n')[0]
+ }
+ }
+ if (CLUSTER_OP_VERSION.isEmpty() || CLUSTER_OP_VERSION.length() != 5) {
+ msg = 'No cluster.op-version specified to set'
+ common.errorMsg(msg)
+ currentBuild.result = "FAILURE"
+ currentBuild.description = msg
+ } else if (currentOpVersion == CLUSTER_OP_VERSION) {
+ common.warningMsg("cluster.op-version is already set to ${currentOpVersion}")
+ } else {
+ version = convertVersion(CLUSTER_OP_VERSION)
+ if (!IGNORE_SERVER_VERSION.toBoolean()){
+ stage('Check that all servers have been updated') {
+ salt.commandStatus(pEnv, 'I@glusterfs:server', "dpkg --compare-versions \$(glusterfsd --version | head -n1| awk '{print \$2}') gt ${version} && echo good", 'good', true, true, null, true, 1)
+ common.successMsg('All servers have been updated to desired version')
+ }
+ } else {
+ common.warningMsg("Check of servers' version has been disabled")
+ }
+ if (!IGNORE_CLIENT_VERSION.toBoolean()){
+ stage('Check that all clients have been updated') {
+ salt.commandStatus(pEnv, 'I@glusterfs:client', "dpkg --compare-versions \$(glusterfsd --version | head -n1 | awk '{print \$2}') ge ${version} && echo good", 'good', true, true, null, true, 1)
+ common.successMsg('All clients have been updated to desired version')
+ }
+ } else {
+ common.warningMsg("Check of clients' version has been disabled")
+ }
+ stage("Update cluster.op-version") {
+ salt.cmdRun(pEnv, 'I@glusterfs:server:role:primary', "gluster volume set all cluster.op-version ${CLUSTER_OP_VERSION}")
+ }
+ stage("Validate cluster.op-version") {
+ newOpVersion = salt.getReturnValues(salt.cmdRun(pEnv, 'I@glusterfs:server:role:primary', "gluster volume get ${volume} cluster.op-version | grep cluster.op-version | awk '{print \$2}'")).split('\n')[0]
+ if (newOpVersion != CLUSTER_OP_VERSION) {
+ throw new Exception("cluster.op-version was not set to ${CLUSTER_OP_VERSION}")
+ }
+ }
+ }
+ } catch (Throwable e) {
+ // If there was an error or exception thrown, the build failed
+ currentBuild.result = "FAILURE"
+ currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+ throw e
+ }
+ }
+}
diff --git a/update-glusterfs-servers.groovy b/update-glusterfs-servers.groovy
new file mode 100644
index 0000000..23b280d
--- /dev/null
+++ b/update-glusterfs-servers.groovy
@@ -0,0 +1,93 @@
+/**
+ * Update glusterfs-server packages on given server nodes
+ *
+ * Expected parameters:
+ * DRIVE_TRAIN_PARAMS Yaml, DriveTrain related params:
+ * SALT_MASTER_CREDENTIALS Credentials to the Salt API
+ * SALT_MASTER_URL Full Salt API address [https://10.10.10.1:8000]
+ * IGNORE_SERVER_STATUS Does not validate server availability/status before update
+ * IGNORE_NON_REPLICATED_VOLUMES Update GlusterFS even if there are non-replicated volumes
+ * TARGET_SERVERS Salt compound target to match nodes to be updated [*, G@osfamily:debian]
+ */
+
+def pEnv = "pepperEnv"
+def salt = new com.mirantis.mk.Salt()
+def common = new com.mirantis.mk.Common()
+def python = new com.mirantis.mk.Python()
+def pkg_name = 'glusterfs-server'
+
+// Convert parameters from yaml to env variables
+params = readYaml text: env.DRIVE_TRAIN_PARAMS
+for (key in params.keySet()) {
+ value = params[key]
+ env.setProperty(key, value)
+}
+
+/**
+ * - select only those hosts where an update is available
+ * - validate that all servers are in a normal working state. Can be skipped with an option
+ * - validate that all volumes are replicated. If there is a non-replicated volume, stop the update. Can be skipped with an option
+ * - run the update state on one server at a time
+ */
+
+timeout(time: 12, unit: 'HOURS') {
+ node() {
+ try {
+
+ stage('Setup virtualenv for Pepper') {
+ python.setupPepperVirtualenv(pEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+ }
+
+ stage('List target servers') {
+ all_minions = salt.getMinions(pEnv, TARGET_SERVERS)
+
+ if (all_minions.isEmpty()) {
+ throw new Exception("No minion was targeted")
+ }
+ minions = []
+ for (minion in all_minions) {
+ latest_version = salt.getReturnValues(salt.runSaltProcessStep(pEnv, minion, 'pkg.latest_version', [pkg_name, 'show_installed=True'])).split('\n')[0]
+ current_version = salt.getReturnValues(salt.runSaltProcessStep(pEnv, minion, 'pkg.version', [pkg_name])).split('\n')[0]
+ if (latest_version != current_version) {
+ minions.add(minion)
+ } else {
+ common.infoMsg("${pkg_name} has been already upgraded or newer version is not available on ${minion}. Skip upgrade")
+ }
+ }
+ }
+ if (!minions.isEmpty()) {
+ if (!IGNORE_SERVER_STATUS.toBoolean()){
+ stage('Validate servers availability') {
+ salt.commandStatus(pEnv, TARGET_SERVERS, "gluster pool list | fgrep localhost", 'Connected', true, true, null, true, 1)
+ common.successMsg("All servers are available")
+ }
+ } else {
+ common.warningMsg("Check of servers availability has been disabled")
+ }
+ if (!IGNORE_NON_REPLICATED_VOLUMES.toBoolean()){
+ stage('Check that all volumes are replicated') {
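+ // 'gluster volume info' must report no volume type other than Replicate for this check to pass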
+ salt.commandStatus(pEnv, TARGET_SERVERS, "gluster volume info | fgrep 'Type:' | fgrep -v Replicate", null, false, true, null, true, 1)
+ common.successMsg("All volumes are replicated")
+ }
+ } else {
+ common.warningMsg("Check of volumes' replication has been disabled. Be aware, you may lost data during update!")
+ }
+ // Actual update
+ for (tgt in minions) {
+ stage("Update glusterfs on ${tgt}") {
+ salt.runSaltProcessStep(pEnv, tgt, 'state.apply', ['glusterfs.update.server'])
+ }
+ }
+ } else {
+ common.warningMsg("No hosts to update glusterfs on")
+ }
+ } catch (Throwable e) {
+ // If there was an error or exception thrown, the build failed
+ currentBuild.result = "FAILURE"
+ currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+ salt.runSaltProcessStep(pEnv, TARGET_SERVERS, 'state.apply', ['glusterfs'])
+ throw e
+ }
+ }
+}
diff --git a/update-glusterfs.groovy b/update-glusterfs.groovy
new file mode 100644
index 0000000..3ff0649
--- /dev/null
+++ b/update-glusterfs.groovy
@@ -0,0 +1,105 @@
+/**
+ * Complete GlusterFS update pipeline
+ *
+ * Expected parameters:
+ * DRIVE_TRAIN_PARAMS Yaml, DriveTrain related params:
+ * SALT_MASTER_CREDENTIALS Credentials to the Salt API
+ * SALT_MASTER_URL Full Salt API address [https://10.10.10.1:8000]
+ */
+
+// Convert parameters from yaml to env variables
+params = readYaml text: env.DRIVE_TRAIN_PARAMS
+for (key in params.keySet()) {
+ value = params[key]
+ env.setProperty(key, value)
+}
+
+def waitGerrit(salt_target, wait_timeout) {
+ def salt = new com.mirantis.mk.Salt()
+ def common = new com.mirantis.mk.Common()
+ def python = new com.mirantis.mk.Python()
+ def pEnv = "pepperEnv"
+ python.setupPepperVirtualenv(pEnv, env.SALT_MASTER_URL, env.SALT_MASTER_CREDENTIALS)
+
+ salt.fullRefresh(pEnv, salt_target)
+
+ def gerrit_master_url = salt.getPillar(pEnv, salt_target, '_param:gerrit_master_url')
+
+ if(!gerrit_master_url['return'].isEmpty()) {
+ gerrit_master_url = gerrit_master_url['return'][0].values()[0]
+ } else {
+ gerrit_master_url = ''
+ }
+
+ if (gerrit_master_url != '') {
+ common.infoMsg('Gerrit master url "' + gerrit_master_url + '" retrieved at _param:gerrit_master_url')
+ } else {
+ common.infoMsg('Gerrit master url could not be retrieved at _param:gerrit_master_url. Falling back to gerrit pillar')
+
+ def gerrit_host
+ def gerrit_http_port
+ def gerrit_http_scheme
+ def gerrit_http_prefix
+
+ def host_pillar = salt.getPillar(pEnv, salt_target, 'gerrit:client:server:host')
+ gerrit_host = salt.getReturnValues(host_pillar)
+
+ def port_pillar = salt.getPillar(pEnv, salt_target, 'gerrit:client:server:http_port')
+ gerrit_http_port = salt.getReturnValues(port_pillar)
+
+ def scheme_pillar = salt.getPillar(pEnv, salt_target, 'gerrit:client:server:protocol')
+ gerrit_http_scheme = salt.getReturnValues(scheme_pillar)
+
+ def prefix_pillar = salt.getPillar(pEnv, salt_target, 'gerrit:client:server:url_prefix')
+ gerrit_http_prefix = salt.getReturnValues(prefix_pillar)
+
+ gerrit_master_url = gerrit_http_scheme + '://' + gerrit_host + ':' + gerrit_http_port + gerrit_http_prefix
+
+ }
+
+ timeout(wait_timeout) {
+ common.infoMsg('Waiting for Gerrit to come up...')
+ def check_gerrit_cmd = 'while true; do curl -sI -m 3 -o /dev/null -w' + " '" + '%{http_code}' + "' " + gerrit_master_url + '/ | grep 200 && break || sleep 1; done'
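+ // The surrounding Jenkins timeout is in minutes; mirror it for the in-shell polling loop in seconds, plus a small margin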
+ salt.cmdRun(pEnv, salt_target, 'timeout ' + (wait_timeout*60+3) + ' /bin/sh -c -- ' + '"' + check_gerrit_cmd + '"')
+ }
+}
+
+def waitJenkins(salt_target, wait_timeout) {
+ def salt = new com.mirantis.mk.Salt()
+ def common = new com.mirantis.mk.Common()
+ def python = new com.mirantis.mk.Python()
+ def pEnv = "pepperEnv"
+ python.setupPepperVirtualenv(pEnv, env.SALT_MASTER_URL, env.SALT_MASTER_CREDENTIALS)
+
+ salt.fullRefresh(pEnv, salt_target)
+
+ // Jenkins
+ def jenkins_master_host = salt.getReturnValues(salt.getPillar(pEnv, salt_target, '_param:jenkins_master_host'))
+ def jenkins_master_port = salt.getReturnValues(salt.getPillar(pEnv, salt_target, '_param:jenkins_master_port'))
+ def jenkins_master_protocol = salt.getReturnValues(salt.getPillar(pEnv, salt_target, '_param:jenkins_master_protocol'))
+ def jenkins_master_url_prefix = salt.getReturnValues(salt.getPillar(pEnv, salt_target, '_param:jenkins_master_url_prefix'))
+ jenkins_master_url = "${jenkins_master_protocol}://${jenkins_master_host}:${jenkins_master_port}${jenkins_master_url_prefix}"
+
+ timeout(wait_timeout) {
+ common.infoMsg('Waiting for Jenkins to come up...')
+ def check_jenkins_cmd = 'while true; do curl -sI -m 3 -o /dev/null -w' + " '" + '%{http_code}' + "' " + jenkins_master_url + '/whoAmI/ | grep 200 && break || sleep 1; done'
+ salt.cmdRun(pEnv, salt_target, 'timeout ' + (wait_timeout*60+3) + ' /bin/sh -c -- ' + '"' + check_jenkins_cmd + '"')
+ }
+}
+
+node() {
+ stage('Update glusterfs servers') {
+ build(job: 'update-glusterfs-servers')
+ }
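+ // Give the updated client mounts time to settle before checking DriveTrain services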
+ sleep 180
+ stage('Update glusterfs clients') {
+ build(job: 'update-glusterfs-clients')
+ }
+ waitJenkins('I@jenkins:client', 300)
+ waitGerrit('I@gerrit:client', 300)
+ stage('Update glusterfs cluster.op-version') {
+ build(job: 'update-glusterfs-cluster-op-version')
+ }
+}