blob: 01fc0f802f4aed1835f256918aef3ebe1804413c [file] [log] [blame]
Victor Ryzhenkinef34a022018-06-22 19:36:13 +04001/**
 * Update kubernetes cluster
3 *
4 * Expected parameters:
Victor Ryzhenkin3401ee62019-01-18 06:34:26 +04005 * SALT_MASTER_CREDENTIALS Credentials to the Salt API.
6 * SALT_MASTER_URL Full Salt API address [https://10.10.10.1:8000].
7 * KUBERNETES_HYPERKUBE_SOURCE Versioned hyperkube binary to update control plane from. Should be null if update rolling via reclass-system level
 * KUBERNETES_HYPERKUBE_SOURCE_HASH      Hyperkube binary checksum. Should be null if update rolling via reclass-system level
9 * KUBERNETES_PAUSE_IMAGE Kubernetes pause image should have same version as hyperkube. May be null in case of reclass-system rollout
10 * TARGET_UPDATES Comma separated list of nodes to update (Valid values are ctl,cmp)
11 * CTL_TARGET Salt targeted kubernetes CTL nodes (ex. I@kubernetes:master). Kubernetes control plane
12 * CMP_TARGET Salt targeted compute nodes (ex. cmp* and 'I@kubernetes:pool') Kubernetes computes
13 * PER_NODE Target nodes will be managed one by one (bool)
 * SIMPLE_UPGRADE             Use previous version of upgrade without cordon/drain abilities
15 * CONFORMANCE_RUN_AFTER Run Kubernetes conformance tests after update
16 * CONFORMANCE_RUN_BEFORE Run Kubernetes conformance tests before update
17 * TEST_K8S_API_SERVER Kubernetes API server address for test execution
18 * ARTIFACTORY_URL Artifactory URL where docker images located. Needed to correctly fetch conformance images.
19 * UPGRADE_CALICO_V2_TO_V3 Perform Calico upgrade from v2 to v3.
20 * KUBERNETES_CALICO_IMAGE Target calico/node image. May be null in case of reclass-system rollout.
21 * KUBERNETES_CALICO_CALICOCTL_SOURCE Versioned calico/ctl binary. Should be null if update rolling via reclass-system level
22 * KUBERNETES_CALICO_CALICOCTL_SOURCE_HASH Calico/ctl binary md5 hash. Should be null if update rolling via reclass-system level
23 * KUBERNETES_CALICO_CNI_SOURCE Versioned calico/cni binary. Should be null if update rolling via reclass-system level
 * KUBERNETES_CALICO_CNI_SOURCE_HASH      Calico/cni binary hash. Should be null if update rolling via reclass-system level
25 * KUBERNETES_CALICO_BIRDCL_SOURCE Versioned calico/bird binary. Should be null if update rolling via reclass-system level
 * KUBERNETES_CALICO_BIRDCL_SOURCE_HASH   Calico/bird binary hash. Should be null if update rolling via reclass-system level
27 * KUBERNETES_CALICO_CNI_IPAM_SOURCE Versioned calico/ipam binary. Should be null if update rolling via reclass-system level
 * KUBERNETES_CALICO_CNI_IPAM_SOURCE_HASH Calico/ipam binary hash. Should be null if update rolling via reclass-system level
29 * KUBERNETES_CALICO_KUBE_CONTROLLERS_IMAGE Target calico/kube-controllers image. May be null in case of reclass-system rollout.
30 * CALICO_UPGRADE_VERSION Version of "calico-upgrade" utility to be used ("v1.0.5" for Calico v3.1.3 target).
ashestakov138b0a52019-01-11 11:34:54 +020031 * KUBERNETES_ETCD_SOURCE Target etcd binary. May be null in case of reclass-system rollout.
32 * KUBERNETES_ETCD_SOURCE_HASH Target etcd binary checksum. May be null in case of reclass-system rollout.
Victor Ryzhenkinef34a022018-06-22 19:36:13 +040033 *
34**/
Aleksei Kasatkin9ce11842018-11-23 14:27:33 +010035import groovy.json.JsonSlurper
36
// Shared pipeline-library helpers.
def common = new com.mirantis.mk.Common()
def salt = new com.mirantis.mk.Salt()
def python = new com.mirantis.mk.Python()
def test = new com.mirantis.mk.Test()

// Node groups to update, e.g. "ctl,cmp" -> ['ctl', 'cmp'].
def updates = TARGET_UPDATES.tokenize(",").collect{it -> it.trim()}
// Name of the Pepper virtualenv used for all Salt API calls.
def pepperEnv = "pepperEnv"

// Salt compound target matching every node in the Kubernetes pool.
def POOL = "I@kubernetes:pool"

// etcd endpoints used by Calico; discovered at runtime from calicoctl.cfg.
// Deliberately declared without 'def' so it lives in the script binding and
// is visible inside the helper functions below.
ETCD_ENDPOINTS = ""
48
/**
 * Push the hyperkube source/hash and pause-image job parameters into the
 * model as Salt overrides, so subsequent state runs roll out the target
 * Kubernetes version.
 *
 * @param pepperEnv Pepper/Salt API environment name
 */
def overrideKubernetesImage(pepperEnv) {
    def saltTool = new com.mirantis.mk.Salt()

    // YAML snippet consumed by setSaltOverrides; values come from job parameters.
    def overrides = """
        kubernetes_hyperkube_source: ${KUBERNETES_HYPERKUBE_SOURCE}
        kubernetes_hyperkube_source_hash: ${KUBERNETES_HYPERKUBE_SOURCE_HASH}
        kubernetes_pause_image: ${KUBERNETES_PAUSE_IMAGE}
    """
    stage("Override kubernetes images to target version") {
        saltTool.setSaltOverrides(pepperEnv, overrides)
    }
}
61
/**
 * Push the Calico image and binary source/hash job parameters into the
 * model as Salt overrides, so subsequent state runs roll out the target
 * Calico version.
 *
 * @param pepperEnv Pepper/Salt API environment name
 */
def overrideCalicoImages(pepperEnv) {
    def saltTool = new com.mirantis.mk.Salt()

    // YAML snippet consumed by setSaltOverrides; values come from job parameters.
    def overrides = """
        kubernetes_calico_image: ${KUBERNETES_CALICO_IMAGE}
        kubernetes_calico_calicoctl_source: ${KUBERNETES_CALICO_CALICOCTL_SOURCE}
        kubernetes_calico_calicoctl_source_hash: ${KUBERNETES_CALICO_CALICOCTL_SOURCE_HASH}
        kubernetes_calico_birdcl_source: ${KUBERNETES_CALICO_BIRDCL_SOURCE}
        kubernetes_calico_birdcl_source_hash: ${KUBERNETES_CALICO_BIRDCL_SOURCE_HASH}
        kubernetes_calico_cni_source: ${KUBERNETES_CALICO_CNI_SOURCE}
        kubernetes_calico_cni_source_hash: ${KUBERNETES_CALICO_CNI_SOURCE_HASH}
        kubernetes_calico_cni_ipam_source: ${KUBERNETES_CALICO_CNI_IPAM_SOURCE}
        kubernetes_calico_cni_ipam_source_hash: ${KUBERNETES_CALICO_CNI_IPAM_SOURCE_HASH}
        kubernetes_calico_kube_controllers_image: ${KUBERNETES_CALICO_KUBE_CONTROLLERS_IMAGE}
    """
    stage("Override calico images to target version") {
        saltTool.setSaltOverrides(pepperEnv, overrides)
    }
}
81
/**
 * Fetch the "calico-upgrade" helper binary (version CALICO_UPGRADE_VERSION)
 * onto the target node(s) and make it executable. Any previously downloaded
 * copy is removed first so wget does not create a "calico-upgrade.1" duplicate.
 *
 * @param pepperEnv Pepper/Salt API environment name
 * @param target    Salt target expression for the node(s) to download onto
 */
def downloadCalicoUpgrader(pepperEnv, target) {
    def saltTool = new com.mirantis.mk.Salt()

    stage("Downloading calico-upgrade utility") {
        ["rm -f ./calico-upgrade",
         "wget https://github.com/projectcalico/calico-upgrade/releases/download/${CALICO_UPGRADE_VERSION}/calico-upgrade",
         "chmod +x ./calico-upgrade"].each { shellCmd ->
            saltTool.cmdRun(pepperEnv, target, shellCmd)
        }
    }
}
91
/**
 * Push the target etcd binary source/hash job parameters into the model
 * as Salt overrides.
 *
 * @param pepperEnv Pepper/Salt API environment name
 */
def overrideEtcdSource(pepperEnv) {
    def saltTool = new com.mirantis.mk.Salt()

    // YAML snippet consumed by setSaltOverrides; values come from job parameters.
    def overrides = """
        kubernetes_etcd_source: ${KUBERNETES_ETCD_SOURCE}
        kubernetes_etcd_source_hash: ${KUBERNETES_ETCD_SOURCE_HASH}
    """
    stage("Override etcd binaries to target version") {
        saltTool.setSaltOverrides(pepperEnv, overrides)
    }
}
103
/**
 * Roll out the new etcd binary by re-applying the etcd service state on
 * the target, then verify the etcd cluster reports healthy.
 *
 * @param pepperEnv Pepper/Salt API environment name
 * @param target    Salt target expression for the etcd node(s)
 */
def performEtcdUpdateAndServicesRestart(pepperEnv, target) {
    def saltTool = new com.mirantis.mk.Salt()

    stage("Performing etcd update and services restart on ${target}") {
        saltTool.enforceState(pepperEnv, target, "etcd.server.service")
        // Sanity check: cluster must be healthy before the pipeline continues.
        saltTool.cmdRun(pepperEnv, target, ". /var/lib/etcd/configenv && etcdctl cluster-health")
    }
}
112
/**
 * Update a compute node: re-apply kubernetes.pool and restart kubelet so
 * the refreshed binaries/configuration take effect.
 *
 * @param pepperEnv Pepper/Salt API environment name
 * @param target    Salt target expression for the compute node(s)
 */
def performKubernetesComputeUpdate(pepperEnv, target) {
    def saltTool = new com.mirantis.mk.Salt()

    stage("Execute Kubernetes compute update on ${target}") {
        saltTool.enforceState(pepperEnv, target, 'kubernetes.pool')
        saltTool.runSaltProcessStep(pepperEnv, target, 'service.restart', ['kubelet'])
    }
}
121
/**
 * Update the control plane on the target master(s): apply the whole
 * "kubernetes" state excluding master.setup and master.kube-addons (those
 * are applied separately via updateAddonManager/updateAddons), then
 * restart kubelet.
 *
 * @param pepperEnv Pepper/Salt API environment name
 * @param target    Salt target expression for the master node(s)
 */
def performKubernetesControlUpdate(pepperEnv, target) {
    def saltTool = new com.mirantis.mk.Salt()

    stage("Execute Kubernetes control plane update on ${target}") {
        saltTool.enforceStateWithExclude(pepperEnv, target, "kubernetes", "kubernetes.master.setup,kubernetes.master.kube-addons")
        // Restart kubelet
        saltTool.runSaltProcessStep(pepperEnv, target, 'service.restart', ['kubelet'])
    }
}
131
/**
 * Kick off the Calico v2 -> v3 etcd data migration with the calico-upgrade
 * utility on the target node. The migration command chain is launched
 * asynchronously via the Salt master; it runs "calico-upgrade start", then
 * blocks on the /root/upg_complete sentinel file until completeCalicoUpgrade()
 * creates it, and finally runs "calico-upgrade complete".
 * This function returns as soon as the "start" phase has begun (i.e. when
 * upgrade-start.log appears on the target).
 *
 * @param pepperEnv Pepper/Salt API environment name
 * @param target    Salt target expression for the node running calico-upgrade
 */
def startCalicoUpgrade(pepperEnv, target) {
    def salt = new com.mirantis.mk.Salt()

    stage("Starting upgrade using calico-upgrade: migrate etcd schema and lock Calico") {
        // Point both the v1 (APIV1_*) and v3 (ETCD_*) client configs at the same
        // etcd endpoints/certs; ETCD_ENDPOINTS is discovered earlier in the pipeline.
        def cmd = "export APIV1_ETCD_ENDPOINTS=${ETCD_ENDPOINTS} && " +
                  "export APIV1_ETCD_CA_CERT_FILE=/var/lib/etcd/ca.pem && " +
                  "export APIV1_ETCD_CERT_FILE=/var/lib/etcd/etcd-client.crt && " +
                  "export APIV1_ETCD_KEY_FILE=/var/lib/etcd/etcd-client.key && " +
                  "export ETCD_ENDPOINTS=${ETCD_ENDPOINTS} && " +
                  "export ETCD_CA_CERT_FILE=/var/lib/etcd/ca.pem && " +
                  "export ETCD_CERT_FILE=/var/lib/etcd/etcd-client.crt && " +
                  "export ETCD_KEY_FILE=/var/lib/etcd/etcd-client.key && " +
                  "rm /root/upg_complete -f && " +
                  "./calico-upgrade start --no-prompts --ignore-v3-data > upgrade-start.log && " +
                  "until [ -f /root/upg_complete ]; do sleep 0.1; done && " +
                  "./calico-upgrade complete --no-prompts > upgrade-complete.log && " +
                  "rm /root/upg_complete -f"
        // "saltArgs = ['async']" doesn't work, so we have to run "cmd.run --async"
        salt.cmdRun(pepperEnv, "I@salt:master", "salt -C '${target}' cmd.run '${cmd}' --async")
        // Wait until the async job has actually started the "start" phase.
        salt.cmdRun(pepperEnv, target, "until [ -f /root/upgrade-start.log ]; do sleep 0.1; done")
    }
}
154
/**
 * Unlock Calico after the data migration: create the /root/upg_complete
 * sentinel file that the backgrounded command chain from startCalicoUpgrade()
 * is waiting for, wait until that chain consumes it, then print both
 * calico-upgrade logs for the build record.
 *
 * @param pepperEnv Pepper/Salt API environment name
 * @param target    Salt target expression for the node running calico-upgrade
 */
def completeCalicoUpgrade(pepperEnv, target) {
    def saltTool = new com.mirantis.mk.Salt()

    stage("Complete upgrade using calico-upgrade: unlock Calico") {
        ["echo 'true' > /root/upg_complete",
         "while [ -f /root/upg_complete ]; do sleep 0.1; done",
         "cat /root/upgrade-start.log",
         "cat /root/upgrade-complete.log"].each { shellCmd ->
            saltTool.cmdRun(pepperEnv, target, shellCmd)
        }
    }
}
165
/**
 * Re-apply Calico configuration on the pool and restart kubelet.
 * When containerd is enabled (checked on ctl_node) the whole
 * kubernetes.pool state is applied; otherwise only kubernetes.pool.calico.
 *
 * @param pepperEnv Pepper/Salt API environment name
 * @param target    Salt target expression for the pool node(s)
 * @param ctl_node  control-plane minion used to read the containerd pillar
 */
def performCalicoConfigurationUpdateAndServicesRestart(pepperEnv, target, ctl_node) {
    def saltTool = new com.mirantis.mk.Salt()

    stage("Performing Calico configuration update and services restart") {
        def stateToApply = containerDenabled(pepperEnv, ctl_node) ? "kubernetes.pool" : "kubernetes.pool.calico"
        saltTool.enforceState(pepperEnv, target, stateToApply)
        saltTool.runSaltProcessStep(pepperEnv, target, 'service.restart', ['kubelet'])
    }
}
178
/**
 * Mark the target node unschedulable. kubectl is executed from another
 * master node (not the target itself), using the node's short hostname.
 *
 * @param pepperEnv Pepper/Salt API environment name
 * @param target    FQDN of the node to cordon
 */
def cordonNode(pepperEnv, target) {
    def saltTool = new com.mirantis.mk.Salt()
    def otherMasters = "I@kubernetes:master and not ${target}"
    def nodeShortName = target.tokenize(".")[0]

    stage("Cordoning ${target} kubernetes node") {
        saltTool.cmdRun(pepperEnv, otherMasters, "kubectl cordon ${nodeShortName}", true, 1)
    }
}
188
/**
 * Mark the target node schedulable again. kubectl is executed from another
 * master node (not the target itself), using the node's short hostname.
 *
 * @param pepperEnv Pepper/Salt API environment name
 * @param target    FQDN of the node to uncordon
 */
def uncordonNode(pepperEnv, target) {
    def saltTool = new com.mirantis.mk.Salt()
    def otherMasters = "I@kubernetes:master and not ${target}"
    def nodeShortName = target.tokenize(".")[0]

    stage("Uncordoning ${target} kubernetes node") {
        saltTool.cmdRun(pepperEnv, otherMasters, "kubectl uncordon ${nodeShortName}", true, 1)
    }
}
198
/**
 * Evict workloads from the target node ahead of the update. kubectl drain
 * runs from another master (not the target itself) with force/daemonset
 * handling and a bounded timeout.
 *
 * @param pepperEnv Pepper/Salt API environment name
 * @param target    FQDN of the node to drain
 */
def drainNode(pepperEnv, target) {
    def saltTool = new com.mirantis.mk.Salt()
    def otherMasters = "I@kubernetes:master and not ${target}"
    def nodeShortName = target.tokenize(".")[0]

    stage("Draining ${target} kubernetes node") {
        saltTool.cmdRun(pepperEnv, otherMasters, "kubectl drain --force --ignore-daemonsets --grace-period 100 --timeout 300s --delete-local-data ${nodeShortName}", true, 1)
    }
}
208
/**
 * Re-issue minion certificates on the target by applying salt.minion.cert.
 *
 * @param pepperEnv Pepper/Salt API environment name
 * @param target    Salt target expression for the node(s)
 */
def regenerateCerts(pepperEnv, target) {
    def saltTool = new com.mirantis.mk.Salt()

    stage("Regenerate certs for ${target}") {
        saltTool.enforceState(pepperEnv, target, 'salt.minion.cert')
    }
}
216
/**
 * Roll out the Kubernetes addon manifests on the target master(s) by
 * applying the kubernetes.master.kube-addons state.
 *
 * @param pepperEnv Pepper/Salt API environment name
 * @param target    Salt target expression for the master node(s)
 */
def updateAddons(pepperEnv, target) {
    def saltTool = new com.mirantis.mk.Salt()

    stage("Upgrading Addons at ${target}") {
        saltTool.enforceState(pepperEnv, target, "kubernetes.master.kube-addons")
    }
}
224
/**
 * Update the addon manager on the target master(s) by applying the
 * kubernetes.master.setup state.
 *
 * @param pepperEnv Pepper/Salt API environment name
 * @param target    Salt target expression for the master node(s)
 */
def updateAddonManager(pepperEnv, target) {
    def saltTool = new com.mirantis.mk.Salt()

    stage("Upgrading AddonManager at ${target}") {
        saltTool.enforceState(pepperEnv, target, "kubernetes.master.setup")
    }
}
232
/**
 * Build a list of [name, namespace] pairs for every daemonset in the
 * cluster, as reported by kubectl on the given node.
 *
 * @param pepperEnv Pepper/Salt API environment name
 * @param target    minion where kubectl is executed (a CTL node)
 * @return list of two-element lists: [daemonset name, namespace]
 */
def buildDaemonsetMap(pepperEnv, target) {
    def salt = new com.mirantis.mk.Salt()
    // kubectl prints "NAMESPACE NAME ..."; awk swaps the columns to "NAME NAMESPACE".
    def daemonset_lists = salt.cmdRun(pepperEnv, target, "kubectl get ds --all-namespaces | tail -n+2 | awk '{print \$2, \$1}'"
    )['return'][0].values()[0].replaceAll('Salt command execution success','').tokenize("\n")
    def daemonset_map = []
    for (ds in daemonset_lists) {
        // fix: 'a' was assigned without 'def' and leaked into the script binding
        def a = ds.tokenize(" ")
        daemonset_map << a
    }
    print("Built daemonset map")
    print(daemonset_map)
    return daemonset_map
}
247
/**
 * Force-delete daemonset-managed pods scheduled on the target node so the
 * updated daemonsets respawn them. kubectl runs on the first master that
 * is not the target itself.
 *
 * @param pepperEnv    Pepper/Salt API environment name
 * @param target       FQDN of the node whose daemonset pods are purged
 * @param daemonSetMap list of [name, namespace] pairs (see buildDaemonsetMap)
 */
def purgeDaemonsetPods(pepperEnv, target, daemonSetMap) {
    def salt = new com.mirantis.mk.Salt()
    def originalTarget = "I@kubernetes:master and not ${target}"
    def nodeShortName = target.tokenize(".")[0]
    // fix: 'firstTarget' was assigned without 'def' and leaked into the script binding
    def firstTarget = salt.getFirstMinion(pepperEnv, originalTarget)

    if (daemonSetMap) {
        stage("Purging daemonset-managed pods on ${target}") {
            for (ds in daemonSetMap) {
                print("Purging "+ ds[0] +" inside "+ ds[1] +" namespace")
                // Match pods of this daemonset on this node and force-delete them.
                salt.cmdRun(pepperEnv, firstTarget, "kubectl get po -n ${ds[1]} -o wide | grep ${nodeShortName}" +
                " | grep ${ds[0]} | awk '{print \$1}' | xargs --no-run-if-empty kubectl delete po -n ${ds[1]} --grace-period=0 --force")
            }
        }
    }
}
264
/**
 * Check whether the target node reports "Ready" in kubectl output.
 * The status is read via the first master that is not the target itself;
 * a ",SchedulingDisabled" suffix (cordoned node) is ignored.
 * NOTE(review): grep on the short hostname may also match nodes whose
 * names share that prefix — confirm node naming is unambiguous.
 *
 * @param pepperEnv Pepper/Salt API environment name
 * @param target    FQDN of the node to check
 * @return true when the node status is exactly "Ready"
 */
def isNodeReady(pepperEnv, target) {
    def salt = new com.mirantis.mk.Salt()
    def originalTarget = "I@kubernetes:master and not ${target}"
    def nodeShortName = target.tokenize(".")[0]
    // fix: 'firstTarget' and 'status' were assigned without 'def' (script binding leak)
    def firstTarget = salt.getFirstMinion(pepperEnv, originalTarget)

    def status = salt.cmdRun(pepperEnv, firstTarget, "kubectl get no | grep ${nodeShortName} | awk '{print \$2}'"
    )['return'][0].values()[0].replaceAll('Salt command execution success',''
    ).replaceAll(',SchedulingDisabled','').trim()

    return status == "Ready"
}
281
/**
 * Reboot the target node and poll until it reports Ready again.
 *
 * @param pepperEnv Pepper/Salt API environment name
 * @param target    FQDN of the node to reboot
 * @param times     number of readiness-check attempts (default 15)
 * @param delay     seconds between attempts (default 10)
 */
def rebootKubernetesNode(pepperEnv, target, times=15, delay=10) {
    def commonLib = new com.mirantis.mk.Common()
    def debianLib = new com.mirantis.mk.Debian()

    stage("Rebooting ${target}") {
        debianLib.osReboot(pepperEnv, target)
        // retry() re-runs the closure up to 'times' times, 'delay' seconds apart;
        // it fails the build if the node never comes back Ready.
        commonLib.retry(times, delay) {
            if (!isNodeReady(pepperEnv, target)) {
                error("Node still not in Ready state...")
            }
        }
    }
}
295
/**
 * Upgrade Docker on the target by applying the docker.host state.
 *
 * @param pepperEnv Pepper/Salt API environment name
 * @param target    Salt target expression for the node(s)
 */
def upgradeDocker(pepperEnv, target) {
    def saltTool = new com.mirantis.mk.Salt()

    stage("Upgrading docker at ${target}") {
        saltTool.enforceState(pepperEnv, target, 'docker.host')
    }
}
Victor Ryzhenkinef34a022018-06-22 19:36:13 +0400303
/**
 * Run the conformance-test container on the target node, collect its log
 * into the Jenkins workspace as an artifact, and fail the build when the
 * log contains 'Test Suite Failed'.
 *
 * @param pepperEnv Pepper/Salt API environment name
 * @param target    minion where the docker container is executed
 * @param k8s_api   Kubernetes API server address passed to the container
 * @param image     conformance image URL
 * @throws Throwable re-thrown when the test suite reports failure
 */
def runConformance(pepperEnv, target, k8s_api, image) {
    def salt = new com.mirantis.mk.Salt()
    def containerName = 'conformance_tests'
    // fix: 'output_file' and 'file_content' were assigned without 'def' (binding leak)
    def output_file = image.replaceAll('/', '-') + '.output'
    def output_file_full_path = "/tmp/" + image.replaceAll('/', '-') + '.output'
    def artifacts_dir = '_artifacts/'
    // Remove any stale container from a previous run (failure ignored).
    salt.cmdRun(pepperEnv, target, "docker rm -f ${containerName}", false)
    salt.cmdRun(pepperEnv, target, "docker run -d --name ${containerName} --net=host -e API_SERVER=${k8s_api} ${image}")
    sleep(10)

    print("Waiting for tests to run...")
    // 'docker wait' blocks until the container exits.
    salt.runSaltProcessStep(pepperEnv, target, 'cmd.run', ["docker wait ${containerName}"], null, false)

    print("Writing test results to output file...")
    salt.runSaltProcessStep(pepperEnv, target, 'cmd.run', ["docker logs -t ${containerName} > ${output_file_full_path}"])
    print("Conformance test output saved in " + output_file_full_path)

    // collect output
    sh "mkdir -p ${artifacts_dir}"
    def file_content = salt.getFileContent(pepperEnv, target, '/tmp/' + output_file)
    writeFile file: "${artifacts_dir}${output_file}", text: file_content
    sh "cat ${artifacts_dir}${output_file}"
    try {
        // Non-zero exit (grep hit) raises; success path exits 0.
        sh "cat ${artifacts_dir}${output_file} | grep 'Test Suite Failed' && exit 1 || exit 0"
    } catch (Throwable e) {
        print("Conformance tests failed. Please check output")
        currentBuild.result = "FAILURE"
        currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
        throw e
    }
}
335
/**
 * Derive the conformance image URL matching the cluster's running
 * Kubernetes server version (as reported by "kubectl version").
 *
 * @param pepperEnv Pepper/Salt API environment name
 * @param target    minion where kubectl is executed
 * @param mcp_repo  artifactory/registry base URL
 * @return full conformance image URL, tagged with the short version
 */
def buildImageURL(pepperEnv, target, mcp_repo) {
    def saltTool = new com.mirantis.mk.Salt()
    def rawVersion = saltTool.cmdRun(pepperEnv, target, "kubectl version --short -o json")['return'][0].values()[0].replaceAll('Salt command execution success','')
    print("Kubernetes version: " + rawVersion)
    def parsedVersion = readJSON text: rawVersion
    // Pull "vX.Y.Z-N" out of the server's gitVersion string.
    def shortVersion = (parsedVersion.serverVersion.gitVersion =~ /([v])(\d+\.)(\d+\.)(\d+\-)(\d+)/)[0][0]
    print("Kubernetes short version: " + shortVersion)
    return mcp_repo + "/mirantis/kubernetes/k8s-conformance:" + shortVersion
}
346
/**
 * Resolve the conformance image for the current cluster version and run it.
 *
 * @param pepperEnv Pepper/Salt API environment name
 * @param target    minion where the tests are executed
 * @param k8s_api   Kubernetes API server address
 * @param mcp_repo  artifactory/registry base URL
 */
def executeConformance(pepperEnv, target, k8s_api, mcp_repo) {
    stage("Running conformance tests") {
        def conformanceImage = buildImageURL(pepperEnv, target, mcp_repo)
        print("Using image: " + conformanceImage)
        runConformance(pepperEnv, target, k8s_api, conformanceImage)
    }
}
354
/**
 * Check whether the containerd binary is installed on the target by
 * probing "containerd --version".
 *
 * @param pepperEnv Pepper/Salt API environment name
 * @param target    minion to probe
 * @return true when "containerd --version" exits successfully
 */
def containerDinstalled(pepperEnv, target) {
    def salt = new com.mirantis.mk.Salt()
    // fix: '2>1' redirected stderr to a file literally named "1" on the target;
    // '>/dev/null 2>&1' silences both stdout and stderr without side effects.
    return salt.cmdRun(pepperEnv, target, "containerd --version >/dev/null 2>&1 && echo 'true' || echo 'false'"
    )['return'][0].values()[0].replaceAll('Salt command execution success','').trim().toBoolean()
}
360
/**
 * Read the kubernetes:common:containerd:enabled pillar for the target.
 *
 * @param pepperEnv Pepper/Salt API environment name
 * @param target    minion whose pillar is queried
 * @return pillar value coerced to boolean
 */
def containerDenabled(pepperEnv, target) {
    def saltTool = new com.mirantis.mk.Salt()
    def pillar = saltTool.getPillar(pepperEnv, target, "kubernetes:common:containerd:enabled")
    return pillar["return"][0].values()[0].toBoolean()
}
366
/**
 * Check whether the conformance pod definition file exists on the target.
 *
 * @param pepperEnv Pepper/Salt API environment name
 * @param target    minion to probe
 * @return true when /srv/kubernetes/conformance.yml is present
 */
def conformancePodDefExists(pepperEnv, target) {
    def saltTool = new com.mirantis.mk.Salt()
    def probe = saltTool.cmdRun(pepperEnv, target, "test -e /srv/kubernetes/conformance.yml && echo 'true' || echo 'false'")
    return probe['return'][0].values()[0].replaceAll('Salt command execution success','').trim().toBoolean()
}
372
/**
 * Log cluster diagnostics from the target node: node versions, component
 * statuses, etcd cluster health and Calico BGP peer status.
 *
 * @param pepperEnv Pepper/Salt API environment name
 * @param target    minion where the diagnostic commands are executed
 */
def printVersionInfo(pepperEnv, target) {
    def saltTool = new com.mirantis.mk.Salt()
    def commonLib = new com.mirantis.mk.Common()

    stage("Gather version and runtime information") {
        // Ordered pairs of log heading -> diagnostic command.
        [["Version and runtime info:", "kubectl get no -o wide"],
         ["Cluster health info:", "kubectl get cs"],
         ["ETCD health info:", ". /var/lib/etcd/configenv && etcdctl cluster-health"],
         ["Calico peers info:", "calicoctl node status"]].each { heading, diagCmd ->
            commonLib.infoMsg(heading)
            saltTool.cmdRun(pepperEnv, target, diagCmd)
        }
    }
}
388
/**
 * Read the kubernetes:pool:network:calico:enabled pillar for the target.
 *
 * @param pepperEnv Pepper/Salt API environment name
 * @param target    minion whose pillar is queried
 * @return pillar value coerced to boolean
 */
def calicoEnabled(pepperEnv, target) {
    def saltTool = new com.mirantis.mk.Salt()
    def pillar = saltTool.getPillar(pepperEnv, target, "kubernetes:pool:network:calico:enabled")
    return pillar["return"][0].values()[0].toBoolean()
}
394
/**
 * Post-upgrade verification of the Calico cluster: checks that client and
 * cluster versions are consistent and v3.x on every node, that every Calico
 * node process is running with its BGP peers established, and that the
 * calico-kube-controllers pod is running. Problems are reported as build
 * warnings (appended to the build description); nothing here fails the build.
 *
 * @param pepperEnv Pepper/Salt API environment name
 * @param target    Salt target expression covering the Calico nodes
 */
def checkCalicoClusterState(pepperEnv, target) {
    def common = new com.mirantis.mk.Common()
    def salt = new com.mirantis.mk.Salt()

    stage("Checking Calico cluster state after upgrade") {
        // check Calico cluster and cli clients versions
        def checkVer = [
            "Client Version:": [verStr: "", dif: false, wrong: false],
            "Cluster Version:": [verStr: "", dif: false, wrong: false]
        ]
        def checkVerPassed = true
        def versionResults = salt.cmdRun(pepperEnv, target, "calicoctl version | grep -i version")['return'][0]
        versionResults.each { k, v ->
            // println("Node:\n${k}\nResult:\n${v}")
            for (verLine in v.split("\n")) {
                for (verType in checkVer.keySet()) {
                    if (verLine.contains(verType)) {
                        def verRec = checkVer[verType]
                        // fix: 'ver' and 'version' were assigned without 'def' (binding leak)
                        def ver = (verLine - verType).trim()
                        if (!verRec.verStr) {
                            verRec.verStr = ver
                        }
                        if (verRec.verStr != ver) {
                            verRec.dif = true
                            checkVerPassed = false
                        }
                        def version = ver.tokenize(".")
                        if ((version.size() < 3) || (version[0] != "v3")) {
                            verRec.wrong = true
                            checkVerPassed = false
                        }
                        checkVer[verType] = verRec
                    }
                }
            }
        }
        if (checkVerPassed) {
            common.infoMsg("Calico version verification passed")
        }
        else {
            def warningMsg = "Calico version verification failed.\n"
            checkVer.each { k, rec ->
                if (rec.dif) {
                    warningMsg += "${k} versions are different across nodes.\n"
                }
                if (rec.wrong) {
                    warningMsg += "${k} (some) versions are wrong - should be v3.x.\n"
                }
            }
            common.warningMsg(warningMsg)
            currentBuild.description += "<br><b>${warningMsg}</b><br><br>"
        }

        // check Calico nodes' statuses
        def nodeStatusResults = salt.cmdRun(pepperEnv, target, "calicoctl node status")['return'][0]
        def nodesRunning = true
        def peersNotFound = []
        def peersNotOnline = []
        nodeStatusResults.each { k, v ->
            // println("Node:\n${k}\nResult:\n${v}")
            if (!v.contains("Calico process is running")) {
                nodesRunning = false
                def warningMsg = "Node ${k}: Calico node is not running."
                common.warningMsg(warningMsg)
                currentBuild.description += "<br><b>${warningMsg}</b><br><br>"
            }
            def nodePeersFound = false
            def nodePeersOnline = true
            // Parse the BGP peer table rows (pipe-separated, skip the header).
            for (nodeLine in v.split("\n")) {
                if (nodeLine.contains("|") && (!nodeLine.contains("STATE"))) {
                    def col = nodeLine.tokenize("|").collect{it.trim()}
                    if (col.size() == 5) {
                        nodePeersFound = true
                        if ((col[2] != "up") || (col[4] != "Established")) {
                            def warningMsg = "Node ${k}: BGP peer '${col[0]}' is out of reach. Peer state: '${col[2]}', connection info: '${col[4]}'."
                            common.warningMsg(warningMsg)
                            currentBuild.description += "<br><b>${warningMsg}</b><br><br>"
                            nodePeersOnline = false
                        }
                    }
                }
            }
            if (!nodePeersFound) {
                peersNotFound += k
            }
            if (!nodePeersOnline) {
                peersNotOnline += k
            }
        }
        if (nodesRunning) {
            common.infoMsg("All the Calico nodes are running")
        }
        if (peersNotFound) {
            def warningMsg = "BGP peers not found for the node(s): " + peersNotFound.join(', ') + "."
            common.warningMsg(warningMsg)
            currentBuild.description += "<br><b>${warningMsg}</b><br><br>"
        } else {
            common.infoMsg("BGP peers were found for all the nodes")
        }
        if (!peersNotOnline) {
            common.infoMsg("All reported BGP peers are reachable")
        }

        // check that 'calico-kube-controllers' is running
        // one CTL node will be used to get pod's state using kubectl
        def ctl_node = salt.getMinionsSorted(pepperEnv, CTL_TARGET)[0]
        def kubeCtrlResult = salt.cmdRun(
            pepperEnv, ctl_node, "kubectl get pod -n kube-system --selector=k8s-app=calico-kube-controllers"
        )['return'][0].values()[0].toString()
        if (kubeCtrlResult.contains("calico-kube-controllers")) {
            for (line in kubeCtrlResult.split("\n")) {
                if (line.contains("calico-kube-controllers")) {
                    // fix: 'col' was assigned without 'def' (binding leak)
                    def col = line.tokenize(" ")
                    if ((col[1] != "1/1") || (col[2] != "Running")) {
                        def warningMsg = "Calico kube-controllers pod is not running properly."
                        common.warningMsg(warningMsg)
                        currentBuild.description += "<br><b>${warningMsg}</b><br><br>"
                    }
                    else {
                        common.infoMsg("Calico kube-controllers pod is running.")
                    }
                    break
                }
            }
        } else {
            def warningMsg = "Calico kube-controllers pod was not scheduled."
            common.warningMsg(warningMsg)
            currentBuild.description += "<br><b>${warningMsg}</b><br><br>"
        }
    }
}
526
/**
 * Pre-flight verification that the Calico v2 -> v3 upgrade can proceed:
 * the current cluster must run Calico v2.6.5+, the /calico/v1/Ready etcd
 * flag must be (or become) "true", and a "calico-upgrade dry-run" data
 * conversion must validate. Fails the build via error() on any violation.
 *
 * @param pepperEnv Pepper/Salt API environment name
 * @param target    node with calicoctl, etcd client env and ./calico-upgrade
 */
def checkCalicoUpgradePossibility(pepperEnv, target) {
    def salt = new com.mirantis.mk.Salt()

    stage("Verification of Calico upgrade possibility") {
        // check Calico version
        def versionResult = salt.cmdRun(
            pepperEnv, target, "calicoctl version | grep 'Cluster Version'"
        )['return'][0].values()[0].split("\n")[0].trim()
        // fix: 'versionStr' and 'version' were assigned without 'def' (binding leak)
        def versionStr = (versionResult - "Cluster Version:").trim()
        def version = versionStr.tokenize(".")
        if ((version.size() < 3) || (version[0] != "v2") || (version[1] != "6") || (version[2].toInteger() < 5)) {
            error(
                "Current Calico ${versionStr} cannot be upgraded to v3.x. " +
                "Calico v2.6.x starting from v2.6.5 can be upgraded. " +
                "For earlier versions, please update to v2.6.5 first."
            )
        }
        print("Calico version was determined: ${versionStr}")

        // check Calico is switched on
        def readinessResult = salt.cmdRun(
            pepperEnv, target, ". /var/lib/etcd/configenv && etcdctl get /calico/v1/Ready"
        )['return'][0].values()[0].split("\n")[0].trim()
        print("Calico readiness check result: ${readinessResult}")
        if (readinessResult != "true") {
            // try set it to true
            readinessResult = salt.cmdRun(
                pepperEnv, target, ". /var/lib/etcd/configenv && etcdctl set /calico/v1/Ready true"
            )['return'][0].values()[0].split("\n")[0].trim()
            print("Calico readiness result 2nd attempt: ${readinessResult}")
            if (readinessResult != "true") {
                error("Calico is not ready. '/calico/v1/Ready': '${readinessResult}'")
            }
        }

        // Calico data upgrade dry-run
        def cmd = "export APIV1_ETCD_ENDPOINTS=${ETCD_ENDPOINTS} && " +
                  "export APIV1_ETCD_CA_CERT_FILE=/var/lib/etcd/ca.pem && " +
                  "export APIV1_ETCD_CERT_FILE=/var/lib/etcd/etcd-client.crt && " +
                  "export APIV1_ETCD_KEY_FILE=/var/lib/etcd/etcd-client.key && " +
                  "export ETCD_ENDPOINTS=${ETCD_ENDPOINTS} && " +
                  "export ETCD_CA_CERT_FILE=/var/lib/etcd/ca.pem && " +
                  "export ETCD_CERT_FILE=/var/lib/etcd/etcd-client.crt && " +
                  "export ETCD_KEY_FILE=/var/lib/etcd/etcd-client.key && " +
                  "./calico-upgrade dry-run --ignore-v3-data"
        def dryRunResult = salt.cmdRun(pepperEnv, target, cmd)['return'][0].values()[0]
        // check dry-run result
        def validationSuccessStr = "Successfully validated v1 to v3 conversion"
        if (!dryRunResult.contains(validationSuccessStr)) {
            error("Calico data upgrade dry-run has failed")
        }
    }
}
Victor Ryzhenkinae22a5a2018-10-12 15:52:27 +0400580
/**
 * Determine whether Calico network policy is currently in use (CNI config,
 * k8s addons and running kube-controllers are cross-checked) and warn when
 * the v3.x pillar setting would silently switch policy off after upgrade.
 * Also removes the v2.6.x calico_policy addon when present so it does not
 * interfere with v3.x kube-controllers. Only warns — never fails the build.
 *
 * @param pepperEnv Pepper/Salt API environment name
 * @param target    CTL minion used for the checks
 */
def checkCalicoPolicySetting(pepperEnv, target) {
    def common = new com.mirantis.mk.Common()
    def salt = new com.mirantis.mk.Salt()

    stage("Checking of Calico network policy setting") {
        // check Calico policy enabled
        def cniPolicy = false
        def addonsPolicy = false
        def kubeCtrlRunning = false

        // check CNI config
        def cniCfgResult = salt.cmdRun(
            pepperEnv, target, "cat /etc/cni/net.d/10-calico.conf"
        )['return'][0].values()[0].toString()
        def cniCfg = new JsonSlurper().parseText(cniCfgResult)
        if (cniCfg.get("policy") != null) {
            if (cniCfg["policy"].get("type") == "k8s") {
                cniPolicy = true
            } else {
                common.warningMsg("Calico policy type is unknown or not set.")
            }
        }

        // check k8s addons
        def addonsResult = salt.cmdRun(
            pepperEnv, target, "ls /etc/kubernetes/addons"
        )['return'][0].values()[0].toString()
        if (addonsResult.contains("calico_policy")) {
            addonsPolicy = true
        }

        // check kube-controllers is running
        def kubeCtrlResult = salt.cmdRun(
            pepperEnv, target, "kubectl get pod -n kube-system --selector=k8s-app=calico-kube-controllers"
        )['return'][0].values()[0].toString()
        if (kubeCtrlResult.contains("Running")) {
            kubeCtrlRunning = true
        }

        // It's safe to enable Calico policy any time, but it may be unsafe to disable it.
        // So, no need to disable Calico policy for v3.x if it's not in use currently.
        // But if Calico policy is in use already, it should be enabled after upgrade as well.

        // check for consistency
        if ((cniPolicy != addonsPolicy) || (addonsPolicy != kubeCtrlRunning)) {
            // fix: 'caution' was assigned without 'def' (script binding leak)
            def caution = "ATTENTION. Calico policy setting cannot be determined reliably (enabled in CNI config: ${cniPolicy}, " +
                          "presence in k8s addons: ${addonsPolicy}, kube-controllers is running: ${kubeCtrlRunning})."
            currentBuild.description += "<br><b>${caution}</b><br><br>"
            common.warningMsg(caution)
        } else {
            common.infoMsg("Current Calico policy state is detected as: ${cniPolicy}")
            if (cniPolicy) {
                // Calico policy is in use. Check policy setting for v3.x.
                common.infoMsg("Calico policy is in use. It should be enabled for v3.x as well.")
                def saltPolicyResult = salt.getPillar(
                    pepperEnv, target, "kubernetes:pool:network:calico:policy"
                )["return"][0].values()[0].toString()

                common.infoMsg("kubernetes.pool.network.calico.policy: ${saltPolicyResult}")
                if (saltPolicyResult.toLowerCase().contains("true")) {
                    common.infoMsg("Calico policy setting for v3.x is detected as: true")
                } else {
                    // fix: 'caution' was assigned without 'def' (script binding leak)
                    def caution = "ATTENTION. Currently, Calico is running with policy switched on. " +
                                  "Calico policy setting for v3.x is not set to true. " +
                                  "After upgrade is completed, Calico policy will be switched off. " +
                                  "You will need to switch it on manually if required."
                    currentBuild.description += "<br><b>${caution}</b><br><br>"
                    common.warningMsg(caution)
                }
            }
        }

        if (addonsPolicy) {
            // Remove v2.6.x policy-related addons on masters to not interfere with v3.x kube-controllers
            salt.cmdRun(pepperEnv, CTL_TARGET, "rm -rf /etc/kubernetes/addons/calico_policy")
        }
    }
}
659
/*
 * Main flow of the Kubernetes cluster update:
 *   1. optional conformance run before the update
 *   2. image/binary source overrides (hyperkube, calico, etcd)
 *   3. optional Calico v2 -> v3 data migration (implies workloads downtime)
 *   4. etcd update on all etcd servers
 *   5. control plane and compute nodes update (optionally node-by-node with
 *      cordon/drain, certificate regeneration and daemonset pod purge)
 *   6. cluster state checks and optional conformance run after the update
 */
timeout(time: 12, unit: 'HOURS') {
    node() {
        try {

            stage("Setup virtualenv for Pepper") {
                python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
            }

            // The first sorted ctl minion is used for all cluster-wide
            // operations (calico checks/upgrade, version info).
            def ctl_node = salt.getMinionsSorted(pepperEnv, CTL_TARGET)[0]
            def daemonsetMap = buildDaemonsetMap(pepperEnv, ctl_node)

            // Run the conformance suite against the first CTL_TARGET minion.
            // Containerd-based setups that carry a conformance pod definition go
            // through the shared pipeline-library runner; everything else falls
            // back to the local runner defined in this file.
            def runConformanceTests = {
                def firstTarget = salt.getFirstMinion(pepperEnv, CTL_TARGET)
                def containerd_enabled = containerDenabled(pepperEnv, firstTarget)
                def containerd_installed = containerDinstalled(pepperEnv, firstTarget)
                def conformance_pod_ready = conformancePodDefExists(pepperEnv, firstTarget)
                if (containerd_enabled && containerd_installed && conformance_pod_ready) {
                    def config = ['master': pepperEnv,
                                  'target': firstTarget,
                                  'junitResults': false,
                                  'autodetect': true]
                    test.executeConformance(config)
                } else {
                    executeConformance(pepperEnv, firstTarget, TEST_K8S_API_SERVER, ARTIFACTORY_URL)
                }
            }

            // Full cordon/drain/update/uncordon cycle for a single node.
            // Control plane nodes additionally refresh the addon manager.
            // Nodes that carried pods of the collected daemonsets are rebooted
            // so those pods get recreated with the new configuration.
            def upgradeNodeWithDrain = { String minion, boolean isControlPlane ->
                cordonNode(pepperEnv, minion)
                drainNode(pepperEnv, minion)
                regenerateCerts(pepperEnv, minion)
                if (isControlPlane) {
                    performKubernetesControlUpdate(pepperEnv, minion)
                    updateAddonManager(pepperEnv, minion)
                } else {
                    performKubernetesComputeUpdate(pepperEnv, minion)
                }
                if (daemonsetMap) {
                    purgeDaemonsetPods(pepperEnv, minion, daemonsetMap)
                    rebootKubernetesNode(pepperEnv, minion)
                }
                uncordonNode(pepperEnv, minion)
            }

            if (CONFORMANCE_RUN_BEFORE.toBoolean()) {
                runConformanceTests()
            }

            // NOTE(review): this checks KUBERNETES_HYPERKUBE_IMAGE while the
            // pipeline header documents KUBERNETES_HYPERKUBE_SOURCE -- confirm
            // which parameter is actually wired into the job definition.
            if ((common.validInputParam('KUBERNETES_HYPERKUBE_IMAGE')) && (common.validInputParam('KUBERNETES_PAUSE_IMAGE'))) {
                overrideKubernetesImage(pepperEnv)
            }

            // Calico images/binaries are overridden only when the full set of
            // sources and hashes is provided; otherwise the versions are taken
            // from the reclass-system level.
            if ((common.validInputParam('KUBERNETES_CALICO_IMAGE'))
                && (common.validInputParam('KUBERNETES_CALICO_CALICOCTL_SOURCE'))
                && (common.validInputParam('KUBERNETES_CALICO_CALICOCTL_SOURCE_HASH'))
                && (common.validInputParam('KUBERNETES_CALICO_CNI_SOURCE'))
                && (common.validInputParam('KUBERNETES_CALICO_CNI_SOURCE_HASH'))
                && (common.validInputParam('KUBERNETES_CALICO_BIRDCL_SOURCE'))
                && (common.validInputParam('KUBERNETES_CALICO_BIRDCL_SOURCE_HASH'))
                && (common.validInputParam('KUBERNETES_CALICO_CNI_IPAM_SOURCE'))
                && (common.validInputParam('KUBERNETES_CALICO_CNI_IPAM_SOURCE_HASH'))
                && (common.validInputParam('KUBERNETES_CALICO_KUBE_CONTROLLERS_IMAGE'))
            ) {
                overrideCalicoImages(pepperEnv)
            }

            /*
             * Execute Calico upgrade if needed (only for v2 to v3 upgrade).
             * This part causes workloads operations downtime.
             * It is only required for Calico v2.x to v3.x upgrade when etcd is in use for Calico
             * as Calico etcd schema has different formats for Calico v2.x and Calico v3.x.
             */
            if (UPGRADE_CALICO_V2_TO_V3.toBoolean()) {
                // get ETCD_ENDPOINTS in use by Calico; assigned without 'def' on
                // purpose -- it is a script-global consumed by the calico helpers
                def ep_str = salt.cmdRun(pepperEnv, ctl_node, "cat /etc/calico/calicoctl.cfg | grep etcdEndpoints")['return'][0].values()[0]
                ETCD_ENDPOINTS = ep_str.split("\n")[0].tokenize(' ')[1]
                print("ETCD_ENDPOINTS in use by Calico: '${ETCD_ENDPOINTS}'")

                // download calico-upgrade utility
                downloadCalicoUpgrader(pepperEnv, ctl_node)

                // check the possibility of upgrading of Calico
                checkCalicoUpgradePossibility(pepperEnv, ctl_node)

                // check and adjust Calico policy setting
                checkCalicoPolicySetting(pepperEnv, ctl_node)

                // this sequence implies workloads operations downtime
                startCalicoUpgrade(pepperEnv, ctl_node)
                performCalicoConfigurationUpdateAndServicesRestart(pepperEnv, POOL, ctl_node)
                completeCalicoUpgrade(pepperEnv, ctl_node)
                // no downtime is expected after this point
            }

            /*
             * Execute etcd update
             */
            if ((common.validInputParam('KUBERNETES_ETCD_SOURCE')) && (common.validInputParam('KUBERNETES_ETCD_SOURCE_HASH'))) {
                overrideEtcdSource(pepperEnv)
            }
            for (etcdNode in salt.getMinionsSorted(pepperEnv, "I@etcd:server")) {
                performEtcdUpdateAndServicesRestart(pepperEnv, etcdNode)
            }

            /*
             * Execute k8s update
             */
            if (updates.contains("ctl")) {
                if (PER_NODE.toBoolean()) {
                    for (ctlHost in salt.getMinionsSorted(pepperEnv, CTL_TARGET)) {
                        if (SIMPLE_UPGRADE.toBoolean()) {
                            // legacy path: plain update without cordon/drain
                            performKubernetesControlUpdate(pepperEnv, ctlHost)
                        } else {
                            upgradeNodeWithDrain(ctlHost, true)
                        }
                    }
                } else {
                    performKubernetesControlUpdate(pepperEnv, CTL_TARGET)
                }
                if (!SIMPLE_UPGRADE.toBoolean()) {
                    // Addons upgrade should be performed after all nodes are upgraded
                    updateAddons(pepperEnv, CTL_TARGET)
                    // Wait for 90 sec for addons reconciling
                    sleep(90)
                }
            }

            if (updates.contains("cmp")) {
                if (PER_NODE.toBoolean()) {
                    for (cmpHost in salt.getMinionsSorted(pepperEnv, CMP_TARGET)) {
                        if (SIMPLE_UPGRADE.toBoolean()) {
                            // legacy path: plain update without cordon/drain
                            performKubernetesComputeUpdate(pepperEnv, cmpHost)
                        } else {
                            upgradeNodeWithDrain(cmpHost, false)
                        }
                    }
                } else {
                    performKubernetesComputeUpdate(pepperEnv, CMP_TARGET)
                }
            }

            if (calicoEnabled(pepperEnv, ctl_node)) {
                checkCalicoClusterState(pepperEnv, POOL)
            }
            printVersionInfo(pepperEnv, ctl_node)

            if (CONFORMANCE_RUN_AFTER.toBoolean()) {
                runConformanceTests()
            }
        } catch (Throwable e) {
            // If there was an error or exception thrown, the build failed
            currentBuild.result = "FAILURE"
            currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
            throw e
        }
    }
}