Merge "Job for updating Ceph to the latest minor versions of packages"
diff --git a/cloud-deploy-pipeline.groovy b/cloud-deploy-pipeline.groovy
index f05735a..eb9cc8e 100644
--- a/cloud-deploy-pipeline.groovy
+++ b/cloud-deploy-pipeline.groovy
@@ -134,12 +134,19 @@
}
if (STACK_REUSE.toBoolean() == false) {
- // Don't allow to set custom heat stack name
+ // TODO(vsaienko): move stack creation out of this pipeline into a separate job
+ // Allow setting a custom stack name, but the user-id will be prepended anyway.
+ // This fixes the cleanup issue when a job is aborted by Jenkins, while
+ // still guaranteeing the stack count per user.
+ def stackNameSuffix = "${JOB_NAME}-${BUILD_NUMBER}"
+ if (STACK_NAME != '') {
+ stackNameSuffix = STACK_NAME
+ }
wrap([$class: 'BuildUser']) {
if (env.BUILD_USER_ID) {
- STACK_NAME = "${env.BUILD_USER_ID}-${JOB_NAME}-${BUILD_NUMBER}"
+ STACK_NAME = "${env.BUILD_USER_ID}-${stackNameSuffix}"
} else {
- STACK_NAME = "jenkins-${JOB_NAME}-${BUILD_NUMBER}"
+ STACK_NAME = "jenkins-${stackNameSuffix}"
}
currentBuild.description = STACK_NAME
}
diff --git a/galera-cluster-verify-restore.groovy b/galera-cluster-verify-restore.groovy
index b8826ec..2a575cc 100644
--- a/galera-cluster-verify-restore.groovy
+++ b/galera-cluster-verify-restore.groovy
@@ -29,11 +29,9 @@
if (resultCode == 129) {
common.errorMsg("Unable to obtain Galera slave minions list". "Without fixing this issue, pipeline cannot continue in verification and restoration.")
currentBuild.result = "FAILURE"
- return
} else if (resultCode == 130) {
common.errorMsg("Neither master or slaves are reachable. Without fixing this issue, pipeline cannot continue in verification and restoration.")
currentBuild.result = "FAILURE"
- return
}
}
if (resultCode == 1) {
@@ -50,5 +48,15 @@
common.errorMsg("Restoration process has failed.")
}
}
+ stage('Verify restoration result') {
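+ // A non-zero code from verifyGaleraStatus is treated as a failed verification.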
+ exitCode = openstack.verifyGaleraStatus(pepperEnv, false)
+ if (exitCode >= 1) {
+ common.errorMsg("Restoration procedure was probably not successful. See verification report for more information.")
+ currentBuild.result = "FAILURE"
+ } else {
+ common.infoMsg("Restoration procedure seems to be successful. See verification report to be sure.")
+ currentBuild.result = "SUCCESS"
+ }
+ }
}
}
diff --git a/k8s-upgrade-pipeline.groovy b/k8s-upgrade-pipeline.groovy
index 79de867..01fc0f8 100644
--- a/k8s-upgrade-pipeline.groovy
+++ b/k8s-upgrade-pipeline.groovy
@@ -28,6 +28,8 @@
* KUBERNETES_CALICO_CNI_IPAM_SOURCE_HASH Calico/ipam binary hash. Should be null if the update is rolled out via the reclass-system level.
* KUBERNETES_CALICO_KUBE_CONTROLLERS_IMAGE Target calico/kube-controllers image. May be null in case of reclass-system rollout.
* CALICO_UPGRADE_VERSION Version of "calico-upgrade" utility to be used ("v1.0.5" for Calico v3.1.3 target).
+ * KUBERNETES_ETCD_SOURCE Target etcd binary. May be null in case of reclass-system rollout.
+ * KUBERNETES_ETCD_SOURCE_HASH Target etcd binary checksum. May be null in case of reclass-system rollout.
*
**/
import groovy.json.JsonSlurper
@@ -87,6 +89,27 @@
}
}
+def overrideEtcdSource(pepperEnv) {
+ def salt = new com.mirantis.mk.Salt()
+
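+ // YAML pillar overrides pinning the etcd binary source and checksum;
+ // passed to setSaltOverrides below so later state runs install the target version.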
+ def k8sSaltOverrides = """
+ kubernetes_etcd_source: ${KUBERNETES_ETCD_SOURCE}
+ kubernetes_etcd_source_hash: ${KUBERNETES_ETCD_SOURCE_HASH}
+ """
+ stage("Override etcd binaries to target version") {
+ salt.setSaltOverrides(pepperEnv, k8sSaltOverrides)
+ }
+}
+
+def performEtcdUpdateAndServicesRestart(pepperEnv, target) {
+ def salt = new com.mirantis.mk.Salt()
+
+ stage("Performing etcd update and services restart on ${target}") {
+ salt.enforceState(pepperEnv, target, "etcd.server.service")
+ salt.cmdRun(pepperEnv, target, ". /var/lib/etcd/configenv && etcdctl cluster-health")
+ }
+}
+
def performKubernetesComputeUpdate(pepperEnv, target) {
def salt = new com.mirantis.mk.Salt()
@@ -100,7 +123,7 @@
def salt = new com.mirantis.mk.Salt()
stage("Execute Kubernetes control plane update on ${target}") {
- salt.enforceStateWithExclude(pepperEnv, target, "kubernetes", "kubernetes.master.setup")
+ salt.enforceStateWithExclude(pepperEnv, target, "kubernetes", "kubernetes.master.setup,kubernetes.master.kube-addons")
// Restart kubelet
salt.runSaltProcessStep(pepperEnv, target, 'service.restart', ['kubelet'])
}
@@ -207,6 +230,69 @@
}
}
+def buildDaemonsetMap(pepperEnv, target) {
+ def salt = new com.mirantis.mk.Salt()
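+ // List every daemonset as a "name namespace" pair: tail drops the kubectl
+ // header row, awk keeps column 2 (NAME) and column 1 (NAMESPACE).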
+ def daemonset_lists = salt.cmdRun(pepperEnv, target,
+ "kubectl get ds --all-namespaces | tail -n+2 | awk '{print \$2, \$1}'"
+ )['return'][0].values()[0].replaceAll('Salt command execution success', '').tokenize("\n")
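+ // Despite the name, daemonset_map is a list of [name, namespace] pairs.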
+ def daemonset_map = []
+ for (ds in daemonset_lists) {
+ daemonset_map << ds.tokenize(" ")
+ }
+ print("Built daemonset map")
+ print(daemonset_map)
+ return daemonset_map
+}
+
+def purgeDaemonsetPods(pepperEnv, target, daemonSetMap) {
+ def salt = new com.mirantis.mk.Salt()
+ def originalTarget = "I@kubernetes:master and not ${target}"
+ def nodeShortName = target.tokenize(".")[0]
+ def firstTarget = salt.getFirstMinion(pepperEnv, originalTarget)
+
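+ // Force-delete this node's daemonset pods from a healthy master so they are
+ // recreated with the updated spec once the node comes back.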
+ if (daemonSetMap) {
+ stage("Purging daemonset-managed pods on ${target}") {
+ for (ds in daemonSetMap) {
+ print("Purging "+ ds[0] +" inside "+ ds[1] +" namespace")
+ salt.cmdRun(pepperEnv, firstTarget, "kubectl get po -n ${ds[1]} -o wide | grep ${nodeShortName}" +
+ " | grep ${ds[0]} | awk '{print \$1}' | xargs --no-run-if-empty kubectl delete po -n ${ds[1]} --grace-period=0 --force")
+ }
+ }
+ }
+}
+
+def isNodeReady(pepperEnv, target) {
+ def salt = new com.mirantis.mk.Salt()
+ def originalTarget = "I@kubernetes:master and not ${target}"
+ def nodeShortName = target.tokenize(".")[0]
+ def firstTarget = salt.getFirstMinion(pepperEnv, originalTarget)
+
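+ // Read the node's STATUS column from another master; a cordoned node reports
+ // ",SchedulingDisabled", which is stripped before the comparison.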
+ def status = salt.cmdRun(pepperEnv, firstTarget,
+ "kubectl get no | grep ${nodeShortName} | awk '{print \$2}'"
+ )['return'][0].values()[0].replaceAll('Salt command execution success', ''
+ ).replaceAll(',SchedulingDisabled', '').trim()
+
+ return status == "Ready"
+}
+
+def rebootKubernetesNode(pepperEnv, target, times=15, delay=10) {
+ def common = new com.mirantis.mk.Common()
+ def debian = new com.mirantis.mk.Debian()
+
+ stage("Rebooting ${target}") {
+ debian.osReboot(pepperEnv, target)
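+ // Poll readiness after the reboot: the check is retried up to 'times' times,
+ // 'delay' seconds apart, and fails the stage if the node never reaches Ready.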
+ common.retry(times, delay) {
+ if (!isNodeReady(pepperEnv, target)) {
+ error("Node still not in Ready state...")
+ }
+ }
+ }
+}
+
def upgradeDocker(pepperEnv, target) {
def salt = new com.mirantis.mk.Salt()
@@ -579,6 +665,9 @@
python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
}
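+ // Build the daemonset inventory once up front; it is reused when purging
+ // daemonset pods on each node as it is updated.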
+ def ctl_node = salt.getMinionsSorted(pepperEnv, CTL_TARGET)[0]
+ def daemonsetMap = buildDaemonsetMap(pepperEnv, ctl_node)
+
if (CONFORMANCE_RUN_BEFORE.toBoolean()) {
def target = CTL_TARGET
def mcp_repo = ARTIFACTORY_URL
@@ -623,9 +712,6 @@
* as Calico etcd schema has different formats for Calico v2.x and Calico v3.x.
*/
if (UPGRADE_CALICO_V2_TO_V3.toBoolean()) {
- // one CTL node will be used for running upgrade of Calico etcd schema
- def ctl_node = salt.getMinionsSorted(pepperEnv, CTL_TARGET)[0]
-
// get ETCD_ENDPOINTS in use by Calico
def ep_str = salt.cmdRun(pepperEnv, ctl_node, "cat /etc/calico/calicoctl.cfg | grep etcdEndpoints")['return'][0].values()[0]
ETCD_ENDPOINTS = ep_str.split("\n")[0].tokenize(' ')[1]
@@ -648,6 +734,17 @@
}
/*
+ * Execute etcd update
+ */
+ if (common.validInputParam('KUBERNETES_ETCD_SOURCE') && common.validInputParam('KUBERNETES_ETCD_SOURCE_HASH')) {
+ overrideEtcdSource(pepperEnv)
+ }
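+
+ // The update loop below runs on every etcd server regardless; the overrides
+ // above only pin the target binary when both parameters are supplied.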
+ def targetHosts = salt.getMinionsSorted(pepperEnv, "I@etcd:server")
+ for (t in targetHosts) {
+ performEtcdUpdateAndServicesRestart(pepperEnv, t)
+ }
+
+ /*
* Execute k8s update
*/
if (updates.contains("ctl")) {
@@ -665,6 +762,10 @@
regenerateCerts(pepperEnv, t)
performKubernetesControlUpdate(pepperEnv, t)
updateAddonManager(pepperEnv, t)
+ if (daemonsetMap) {
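+ // Daemonset-managed pods are not evicted by drain; purge them and reboot the
+ // node so they come back with the updated spec.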
+ purgeDaemonsetPods(pepperEnv, t, daemonsetMap)
+ rebootKubernetesNode(pepperEnv, t)
+ }
uncordonNode(pepperEnv, t)
}
}
@@ -693,6 +794,10 @@
drainNode(pepperEnv, t)
regenerateCerts(pepperEnv, t)
performKubernetesComputeUpdate(pepperEnv, t)
+ if (daemonsetMap) {
+ purgeDaemonsetPods(pepperEnv, t, daemonsetMap)
+ rebootKubernetesNode(pepperEnv, t)
+ }
uncordonNode(pepperEnv, t)
}
}
@@ -701,7 +806,6 @@
}
}
- def ctl_node = salt.getMinionsSorted(pepperEnv, CTL_TARGET)[0]
if (calicoEnabled(pepperEnv, ctl_node)) {
checkCalicoClusterState(pepperEnv, POOL)
}
@@ -732,4 +836,4 @@
throw e
}
}
-}
+}
\ No newline at end of file