Merge "Add verification of restoration step"
diff --git a/k8s-upgrade-pipeline.groovy b/k8s-upgrade-pipeline.groovy
index 1c168f3..79de867 100644
--- a/k8s-upgrade-pipeline.groovy
+++ b/k8s-upgrade-pipeline.groovy
@@ -2,26 +2,32 @@
* Update kuberentes cluster
*
* Expected parameters:
- * SALT_MASTER_CREDENTIALS Credentials to the Salt API.
- * SALT_MASTER_URL Full Salt API address [https://10.10.10.1:8000].
- * KUBERNETES_HYPERKUBE_IMAGE Target kubernetes version. May be null in case of reclass-system rollout
- * KUBERNETES_PAUSE_IMAGE Kubernetes pause image should have same version as hyperkube. May be null in case of reclass-system rollout
- * TARGET_UPDATES Comma separated list of nodes to update (Valid values are ctl,cmp)
- * CTL_TARGET Salt targeted kubernetes CTL nodes (ex. I@kubernetes:master). Kubernetes control plane
- * CMP_TARGET Salt targeted compute nodes (ex. cmp* and 'I@kubernetes:pool') Kubernetes computes
- * PER_NODE Target nodes will be managed one by one (bool)
- * SIMPLE_UPGRADE Use previous version of upgrade without conron/drain abilities
- * UPGRADE_DOCKER Upgrade docker component
- * CONFORMANCE_RUN_AFTER Run Kubernetes conformance tests after update
- * CONFORMANCE_RUN_BEFORE Run Kubernetes conformance tests before update
- * TEST_K8S_API_SERVER Kubernetes API server address for test execution
- * ARTIFACTORY_URL Artifactory URL where docker images located. Needed to correctly fetch conformance images.
- * UPGRADE_CALICO_V2_TO_V3 Perform Calico upgrade from v2 to v3.
- * KUBERNETES_CALICO_IMAGE Target calico/node image. May be null in case of reclass-system rollout.
- * KUBERNETES_CALICO_CALICOCTL_IMAGE Target calico/ctl image. May be null in case of reclass-system rollout.
- * KUBERNETES_CALICO_CNI_IMAGE Target calico/cni image. May be null in case of reclass-system rollout.
- * KUBERNETES_CALICO_KUBE_CONTROLLERS_IMAGE Target calico/kube-controllers image. May be null in case of reclass-system rollout.
- * CALICO_UPGRADE_VERSION Version of "calico-upgrade" utility to be used ("v1.0.5" for Calico v3.1.3 target).
+ * SALT_MASTER_CREDENTIALS Credentials to the Salt API.
+ * SALT_MASTER_URL Full Salt API address [https://10.10.10.1:8000].
+ * KUBERNETES_HYPERKUBE_SOURCE Versioned hyperkube binary to update control plane from. Should be null if update rolling via reclass-system level
+ * KUBERNETES_HYPERKUBE_SOURCE_HASH        Md5 hash of the versioned hyperkube binary. Should be null if update rolling via reclass-system level
+ * KUBERNETES_PAUSE_IMAGE Kubernetes pause image should have same version as hyperkube. May be null in case of reclass-system rollout
+ * TARGET_UPDATES Comma separated list of nodes to update (Valid values are ctl,cmp)
+ * CTL_TARGET Salt targeted kubernetes CTL nodes (ex. I@kubernetes:master). Kubernetes control plane
+ * CMP_TARGET Salt targeted compute nodes (ex. cmp* and 'I@kubernetes:pool') Kubernetes computes
+ * PER_NODE Target nodes will be managed one by one (bool)
+ * SIMPLE_UPGRADE                          Use previous version of upgrade without cordon/drain abilities
+ * CONFORMANCE_RUN_AFTER Run Kubernetes conformance tests after update
+ * CONFORMANCE_RUN_BEFORE Run Kubernetes conformance tests before update
+ * TEST_K8S_API_SERVER Kubernetes API server address for test execution
+ * ARTIFACTORY_URL Artifactory URL where docker images located. Needed to correctly fetch conformance images.
+ * UPGRADE_CALICO_V2_TO_V3 Perform Calico upgrade from v2 to v3.
+ * KUBERNETES_CALICO_IMAGE Target calico/node image. May be null in case of reclass-system rollout.
+ * KUBERNETES_CALICO_CALICOCTL_SOURCE Versioned calico/ctl binary. Should be null if update rolling via reclass-system level
+ * KUBERNETES_CALICO_CALICOCTL_SOURCE_HASH Calico/ctl binary md5 hash. Should be null if update rolling via reclass-system level
+ * KUBERNETES_CALICO_CNI_SOURCE Versioned calico/cni binary. Should be null if update rolling via reclass-system level
+ * KUBERNETES_CALICO_CNI_SOURCE_HASH       Calico/cni binary hash. Should be null if update rolling via reclass-system level
+ * KUBERNETES_CALICO_BIRDCL_SOURCE Versioned calico/bird binary. Should be null if update rolling via reclass-system level
+ * KUBERNETES_CALICO_BIRDCL_SOURCE_HASH    Calico/bird binary hash. Should be null if update rolling via reclass-system level
+ * KUBERNETES_CALICO_CNI_IPAM_SOURCE Versioned calico/ipam binary. Should be null if update rolling via reclass-system level
+ * KUBERNETES_CALICO_CNI_IPAM_SOURCE_HASH  Calico/ipam binary hash. Should be null if update rolling via reclass-system level
+ * KUBERNETES_CALICO_KUBE_CONTROLLERS_IMAGE Target calico/kube-controllers image. May be null in case of reclass-system rollout.
+ * CALICO_UPGRADE_VERSION Version of "calico-upgrade" utility to be used ("v1.0.5" for Calico v3.1.3 target).
*
**/
import groovy.json.JsonSlurper
@@ -35,7 +41,6 @@
def pepperEnv = "pepperEnv"
def POOL = "I@kubernetes:pool"
-def calicoImagesValid = false
ETCD_ENDPOINTS = ""
@@ -43,7 +48,8 @@
def salt = new com.mirantis.mk.Salt()
def k8sSaltOverrides = """
- kubernetes_hyperkube_image: ${KUBERNETES_HYPERKUBE_IMAGE}
+ kubernetes_hyperkube_source: ${KUBERNETES_HYPERKUBE_SOURCE}
+ kubernetes_hyperkube_source_hash: ${KUBERNETES_HYPERKUBE_SOURCE_HASH}
kubernetes_pause_image: ${KUBERNETES_PAUSE_IMAGE}
"""
stage("Override kubernetes images to target version") {
@@ -56,8 +62,14 @@
def calicoSaltOverrides = """
kubernetes_calico_image: ${KUBERNETES_CALICO_IMAGE}
- kubernetes_calico_calicoctl_image: ${KUBERNETES_CALICO_CALICOCTL_IMAGE}
- kubernetes_calico_cni_image: ${KUBERNETES_CALICO_CNI_IMAGE}
+ kubernetes_calico_calicoctl_source: ${KUBERNETES_CALICO_CALICOCTL_SOURCE}
+ kubernetes_calico_calicoctl_source_hash: ${KUBERNETES_CALICO_CALICOCTL_SOURCE_HASH}
+ kubernetes_calico_birdcl_source: ${KUBERNETES_CALICO_BIRDCL_SOURCE}
+ kubernetes_calico_birdcl_source_hash: ${KUBERNETES_CALICO_BIRDCL_SOURCE_HASH}
+ kubernetes_calico_cni_source: ${KUBERNETES_CALICO_CNI_SOURCE}
+ kubernetes_calico_cni_source_hash: ${KUBERNETES_CALICO_CNI_SOURCE_HASH}
+ kubernetes_calico_cni_ipam_source: ${KUBERNETES_CALICO_CNI_IPAM_SOURCE}
+ kubernetes_calico_cni_ipam_source_hash: ${KUBERNETES_CALICO_CNI_IPAM_SOURCE_HASH}
kubernetes_calico_kube_controllers_image: ${KUBERNETES_CALICO_KUBE_CONTROLLERS_IMAGE}
"""
stage("Override calico images to target version") {
@@ -75,17 +87,6 @@
}
}
-def pullCalicoImages(pepperEnv, target) {
- def salt = new com.mirantis.mk.Salt()
-
- stage("Pulling updated Calico docker images") {
- salt.cmdRun(pepperEnv, target, "docker pull ${KUBERNETES_CALICO_IMAGE}")
- salt.cmdRun(pepperEnv, target, "docker pull ${KUBERNETES_CALICO_CALICOCTL_IMAGE}")
- salt.cmdRun(pepperEnv, target, "docker pull ${KUBERNETES_CALICO_CNI_IMAGE}")
- salt.cmdRun(pepperEnv, target, "docker pull ${KUBERNETES_CALICO_KUBE_CONTROLLERS_IMAGE}")
- }
-}
-
def performKubernetesComputeUpdate(pepperEnv, target) {
def salt = new com.mirantis.mk.Salt()
@@ -283,6 +284,22 @@
)['return'][0].values()[0].replaceAll('Salt command execution success','').trim().toBoolean()
}
+def printVersionInfo(pepperEnv, target) {
+ def salt = new com.mirantis.mk.Salt()
+ def common = new com.mirantis.mk.Common()
+
+ stage("Gather version and runtime information") {
+ common.infoMsg("Version and runtime info:")
+ salt.cmdRun(pepperEnv, target, "kubectl get no -o wide")
+ common.infoMsg("Cluster health info:")
+ salt.cmdRun(pepperEnv, target, "kubectl get cs")
+ common.infoMsg("ETCD health info:")
+ salt.cmdRun(pepperEnv, target, ". /var/lib/etcd/configenv && etcdctl cluster-health")
+ common.infoMsg("Calico peers info:")
+ salt.cmdRun(pepperEnv, target, "calicoctl node status")
+ }
+}
+
def calicoEnabled(pepperEnv, target) {
def salt = new com.mirantis.mk.Salt()
return salt.getPillar(pepperEnv, target, "kubernetes:pool:network:calico:enabled"
@@ -586,11 +603,16 @@
}
if ((common.validInputParam('KUBERNETES_CALICO_IMAGE'))
- && (common.validInputParam('KUBERNETES_CALICO_CALICOCTL_IMAGE'))
- && (common.validInputParam('KUBERNETES_CALICO_CNI_IMAGE'))
+ && (common.validInputParam('KUBERNETES_CALICO_CALICOCTL_SOURCE'))
+ && (common.validInputParam('KUBERNETES_CALICO_CALICOCTL_SOURCE_HASH'))
+ && (common.validInputParam('KUBERNETES_CALICO_CNI_SOURCE'))
+ && (common.validInputParam('KUBERNETES_CALICO_CNI_SOURCE_HASH'))
+ && (common.validInputParam('KUBERNETES_CALICO_BIRDCL_SOURCE'))
+ && (common.validInputParam('KUBERNETES_CALICO_BIRDCL_SOURCE_HASH'))
+ && (common.validInputParam('KUBERNETES_CALICO_CNI_IPAM_SOURCE'))
+ && (common.validInputParam('KUBERNETES_CALICO_CNI_IPAM_SOURCE_HASH'))
&& (common.validInputParam('KUBERNETES_CALICO_KUBE_CONTROLLERS_IMAGE'))
) {
- calicoImagesValid = true
overrideCalicoImages(pepperEnv)
}
@@ -615,11 +637,6 @@
// check the possibility of upgrading of Calico
checkCalicoUpgradePossibility(pepperEnv, ctl_node)
- // prepare for upgrade. when done in advance, this will decrease downtime during upgrade
- if (calicoImagesValid) {
- pullCalicoImages(pepperEnv, POOL)
- }
-
// check and adjust Calico policy setting
checkCalicoPolicySetting(pepperEnv, ctl_node)
@@ -646,9 +663,6 @@
cordonNode(pepperEnv, t)
drainNode(pepperEnv, t)
regenerateCerts(pepperEnv, t)
- if (UPGRADE_DOCKER.toBoolean()) {
- upgradeDocker(pepperEnv, t)
- }
performKubernetesControlUpdate(pepperEnv, t)
updateAddonManager(pepperEnv, t)
uncordonNode(pepperEnv, t)
@@ -678,9 +692,6 @@
cordonNode(pepperEnv, t)
drainNode(pepperEnv, t)
regenerateCerts(pepperEnv, t)
- if (UPGRADE_DOCKER.toBoolean()) {
- upgradeDocker(pepperEnv, t)
- }
performKubernetesComputeUpdate(pepperEnv, t)
uncordonNode(pepperEnv, t)
}
@@ -694,6 +705,7 @@
if (calicoEnabled(pepperEnv, ctl_node)) {
checkCalicoClusterState(pepperEnv, POOL)
}
+ printVersionInfo(pepperEnv, ctl_node)
if (CONFORMANCE_RUN_AFTER.toBoolean()) {
def target = CTL_TARGET
@@ -720,4 +732,4 @@
throw e
}
}
-}
\ No newline at end of file
+}
diff --git a/opencontrail4-update.groovy b/opencontrail4-update.groovy
new file mode 100644
index 0000000..01aae14
--- /dev/null
+++ b/opencontrail4-update.groovy
@@ -0,0 +1,309 @@
+/**
+ * Update pipeline for OpenContrail 4X versions
+ *
+ * Expected parameters:
+ * SALT_MASTER_CREDENTIALS Credentials to the Salt API.
+ * SALT_MASTER_URL Full Salt API address [http://10.10.10.1:8000].
+ * STAGE_CONTROLLERS_UPDATE Run update on OpenContrail controller and analytic nodes (bool)
+ * STAGE_COMPUTES_UPDATE Run update OpenContrail components on compute nodes (bool)
+ *
+ **/
+
+common = new com.mirantis.mk.Common()
+salt = new com.mirantis.mk.Salt()
+python = new com.mirantis.mk.Python()
+
+def pepperEnv = "pepperEnv"
+def supportedOcTargetVersions = ['4.0', '4.1']
+def neutronServerPkgs = 'neutron-plugin-contrail,contrail-heat,python-contrail'
+def config4Services = ['zookeeper', 'contrail-webui-middleware', 'contrail-webui', 'contrail-api', 'contrail-schema', 'contrail-svc-monitor', 'contrail-device-manager', 'contrail-config-nodemgr', 'contrail-database']
+def dashboardPanelPkg = 'openstack-dashboard-contrail-panels'
+def targetOcVersion
+
+def cmpMinions
+def cmpMinionsFirstSubset
+def cmpMinionsSecondSubset
+def cmpTargetAll
+def cmpTargetFirstSubset
+def cmpTargetSecondSubset
+
+def checkContrailServices(pepperEnv, oc_version, target) {
+
+ def checkCmd
+
+ if (oc_version.startsWith('4')) {
+
+ checkCmd = "doctrail all contrail-status | grep -v == | grep -v FOR | grep -v \\* | grep -v \'disabled on boot\' | grep -v nodemgr | grep -v active | grep -v backup | grep -v -F /var/crashes/"
+
+ if (oc_version == '4.1') {
+ def targetMinions = salt.getMinions(pepperEnv, target)
+ def collectorMinionsInTarget = targetMinions.intersect(salt.getMinions(pepperEnv, 'I@opencontrail:collector'))
+
+ if (collectorMinionsInTarget.size() != 0) {
+ def cassandraConfigYaml = readYaml text: salt.getFileContent(pepperEnv, 'I@opencontrail:control:role:primary', '/etc/cassandra/cassandra.yaml')
+
+ def currentCassandraNativeTransportPort = cassandraConfigYaml['native_transport_port'] ?: "9042"
+ def currentCassandraRpcPort = cassandraConfigYaml['rpc_port'] ?: "9160"
+
+ def cassandraNativeTransportPort = getValueForPillarKey(pepperEnv, "I@opencontrail:control:role:primary", "opencontrail:database:bind:port_configdb")
+ def cassandraCassandraRpcPort = getValueForPillarKey(pepperEnv, "I@opencontrail:control:role:primary", "opencontrail:database:bind:rpc_port_configdb")
+
+ if (currentCassandraNativeTransportPort != cassandraNativeTransportPort) {
+ checkCmd += ' | grep -v \'contrail-collector.*(Database:Cassandra connection down)\''
+ }
+
+ if (currentCassandraRpcPort != cassandraCassandraRpcPort) {
+ checkCmd += ' | grep -v \'contrail-alarm-gen.*(Database:Cassandra\\[\\] connection down)\''
+ }
+ }
+ }
+
+ } else {
+ checkCmd = "contrail-status | grep -v == | grep -v FOR | grep -v \'disabled on boot\' | grep -v nodemgr | grep -v active | grep -v backup | grep -v -F /var/crashes/"
+ }
+
+ salt.commandStatus(pepperEnv, target, checkCmd, null, false, true, null, true, 500)
+}
+
+def getValueForPillarKey(pepperEnv, target, pillarKey) {
+ def out = salt.getReturnValues(salt.getPillar(pepperEnv, target, pillarKey))
+ if (out == '') {
+ throw new Exception("Cannot get value for ${pillarKey} key on ${target} target")
+ }
+ return out.toString()
+}
+
+def cmpNodesUpdate(pepperEnv, target) {
+
+ def cmpPkgs = 'contrail-lib contrail-nodemgr contrail-utils contrail-vrouter-agent contrail-vrouter-utils python-contrail python-contrail-vrouter-api python-opencontrail-vrouter-netns contrail-vrouter-dkms'
+ def aptCmd = "export DEBIAN_FRONTEND=noninteractive; apt install -o Dpkg::Options::=\"--force-confold\" ${cmpPkgs} -y;"
+ def kernelModuleReloadCmd = 'service contrail-vrouter-agent stop; service contrail-vrouter-nodemgr stop; rmmod vrouter; sync && echo 3 > /proc/sys/vm/drop_caches && echo 1 > /proc/sys/vm/compact_memory; service contrail-vrouter-agent start; service contrail-vrouter-nodemgr start'
+ def out
+
+ try {
+ salt.runSaltProcessStep(pepperEnv, target, 'saltutil.refresh_pillar', [], null, true)
+ salt.runSaltProcessStep(pepperEnv, target, 'saltutil.sync_all', [], null, true)
+ salt.runSaltProcessStep(pepperEnv, target, 'file.remove', ["/etc/apt/sources.list.d/mcp_opencontrail.list"], null, true)
+ salt.enforceState(pepperEnv, target, 'linux.system.repo')
+ } catch (Exception er) {
+ common.errorMsg("Opencontrail component on ${target} probably failed to be replaced. Please check availability of contrail packages before continuing.")
+ throw er
+ }
+
+ out = salt.runSaltCommand(pepperEnv, 'local', ['expression': target, 'type': 'compound'], 'cmd.shell', null, aptCmd, null)
+ salt.printSaltCommandResult(out)
+
+ try {
+ salt.enforceState(pepperEnv, target, 'opencontrail')
+ } catch (Exception er) {
+ common.errorMsg("Opencontrail state was executed on ${target} and failed please fix it manually.")
+ }
+
+ salt.runSaltProcessStep(pepperEnv, target, 'cmd.shell', [kernelModuleReloadCmd], null, true)
+ salt.commandStatus(pepperEnv, target, 'contrail-status | grep -v == | grep -v active | grep -v -F /var/crashes/', null, false)
+ out = salt.runSaltCommand(pepperEnv, 'local', ['expression': target, 'type': 'compound'], 'cmd.shell', null, "contrail-status", null)
+ salt.printSaltCommandResult(out)
+}
+
+timeout(time: 12, unit: 'HOURS') {
+ node() {
+
+ stage('Setup virtualenv for Pepper') {
+ python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+ }
+
+ if (STAGE_CONTROLLERS_UPDATE.toBoolean() == true) {
+
+ stage('Sync Salt data') {
+
+ // Sync data on minions
+ salt.runSaltProcessStep(pepperEnv, 'I@keystone:server:role:primary or I@opencontrail:database or I@neutron:server or I@horizon:server', 'saltutil.refresh_pillar', [], null, true)
+ salt.runSaltProcessStep(pepperEnv, 'I@keystone:server:role:primary or I@opencontrail:database or I@neutron:server or I@horizon:server', 'saltutil.sync_all', [], null, true)
+ }
+
+ stage('Verify OpenContrail version compatibility') {
+
+ // Verify specified target OpenContrail version before update
+ targetOcVersion = getValueForPillarKey(pepperEnv, "I@opencontrail:control:role:primary", "_param:opencontrail_version")
+ if (!supportedOcTargetVersions.contains(targetOcVersion)) {
+ throw new Exception("Specified OpenContrail version ${targetOcVersion} is not supported by update pipeline. Supported versions: ${supportedOcTargetVersions}")
+ }
+ }
+
+ stage('Opencontrail controllers health check') {
+ try {
+ salt.enforceState(pepperEnv, 'I@opencontrail:control or I@opencontrail:collector', 'opencontrail.upgrade.verify', true, true)
+ } catch (Exception er) {
+ common.errorMsg("OpenContrail controllers health check stage found issues with services. Please take a look at the logs above.")
+ throw er
+ }
+ }
+
+ stage('Update system repositories') {
+ try {
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control or I@opencontrail:collector', 'file.remove', ["/etc/apt/sources.list.d/mcp_opencontrail.list"], null, true)
+ salt.enforceState(pepperEnv, 'I@opencontrail:control or I@opencontrail:collector or I@neutron:server or I@horizon:server', 'linux.system.repo')
+
+ } catch (Exception er) {
+ common.errorMsg("System repositories failed to be updated on I@opencontrail:control, I@opencontrail:collector, I@neutron:server or I@horizon:server nodes.")
+ throw er
+ }
+ }
+
+ stage('OpenContrail controllers update') {
+
+ // Make sure that dedicated opencontrail user is created
+ salt.enforceState(pepperEnv, 'I@keystone:server:role:primary', 'keystone.client.server')
+
+ // Stop neutron-server to prevent creation of new objects in contrail
+ salt.runSaltProcessStep(pepperEnv, 'I@neutron:server', 'service.stop', ['neutron-server'])
+
+ // Backup Zookeeper data
+ salt.enforceState(pepperEnv, 'I@zookeeper:backup:server', 'zookeeper.backup')
+ salt.enforceState(pepperEnv, 'I@zookeeper:backup:client', 'zookeeper.backup')
+
+ try {
+ salt.cmdRun(pepperEnv, 'I@opencontrail:control', "su root -c '/usr/local/bin/zookeeper-backup-runner.sh'")
+ } catch (Exception er) {
+ common.errorMsg('Zookeeper failed to backup. Please fix it before continuing.')
+ throw er
+ }
+
+ // Backup Cassandra DB
+ salt.enforceState(pepperEnv, 'I@cassandra:backup:server', 'cassandra.backup')
+ salt.enforceState(pepperEnv, 'I@cassandra:backup:client', 'cassandra.backup')
+
+ try {
+ salt.cmdRun(pepperEnv, 'I@cassandra:backup:client', "su root -c '/usr/local/bin/cassandra-backup-runner-call.sh'")
+ } catch (Exception er) {
+ common.errorMsg('Cassandra failed to backup. Please fix it before continuing.')
+ throw er
+ }
+
+ try {
+ // Get docker images info
+ controllerImage = getValueForPillarKey(pepperEnv, "I@opencontrail:control:role:primary", "docker:client:compose:opencontrail:service:controller:image")
+ analyticsImage = getValueForPillarKey(pepperEnv, "I@opencontrail:collector:role:primary", "docker:client:compose:opencontrail:service:analytics:image")
+ analyticsdbImage = getValueForPillarKey(pepperEnv, "I@opencontrail:collector:role:primary", "docker:client:compose:opencontrail:service:analyticsdb:image")
+
+ // Pull new docker images
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'dockerng.pull', [controllerImage])
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'dockerng.pull', [analyticsImage])
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'dockerng.pull', [analyticsdbImage])
+
+ } catch (Exception er) {
+            common.errorMsg("OpenContrail docker images failed to be upgraded.")
+ throw er
+ }
+
+ try {
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'cmd.shell', ['cd /etc/docker/compose/opencontrail/; docker-compose down'], null, true)
+
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'state.sls', ['opencontrail', 'exclude=opencontrail.client'])
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'state.sls', ['opencontrail.client'])
+
+ salt.enforceState(pepperEnv, 'I@opencontrail:collector', 'docker.client')
+ if (targetOcVersion == '4.1') {
+ sleep(15)
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'cmd.shell', ["doctrail analyticsdb systemctl restart confluent-kafka"], null, true)
+ }
+ checkContrailServices(pepperEnv, targetOcVersion, 'I@opencontrail:collector')
+ } catch (Exception er) {
+ common.errorMsg("OpenContrail Analytic nodes failed to be upgraded.")
+ throw er
+ }
+
+ try {
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control:role:secondary', 'cmd.shell', ['cd /etc/docker/compose/opencontrail/; docker-compose down'], null, true)
+ for (service in config4Services) {
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control:role:primary', 'cmd.shell', ["doctrail controller systemctl stop ${service}"], null, true)
+ }
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control:role:secondary', 'state.sls', ['opencontrail', 'exclude=opencontrail.client'])
+
+ salt.enforceState(pepperEnv, 'I@opencontrail:control:role:secondary', 'docker.client')
+ checkContrailServices(pepperEnv, targetOcVersion, 'I@opencontrail:control:role:secondary')
+
+ sleep(120)
+
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control:role:primary', 'cmd.shell', ['cd /etc/docker/compose/opencontrail/; docker-compose down'], null, true)
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control:role:primary', 'state.sls', ['opencontrail', 'exclude=opencontrail.client'])
+
+ salt.enforceState(pepperEnv, 'I@opencontrail:control:role:primary', 'docker.client')
+ checkContrailServices(pepperEnv, targetOcVersion, 'I@opencontrail:control:role:primary')
+ } catch (Exception er) {
+ common.errorMsg("OpenContrail Controller nodes failed to be upgraded.")
+ throw er
+ }
+
+ // Run opencontrail.client state once contrail-api is ready to service requests from clients
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control or I@opencontrail:collector', 'state.sls', ['opencontrail.client'])
+
+ try {
+ salt.runSaltProcessStep(pepperEnv, 'I@neutron:server', 'pkg.install', [neutronServerPkgs])
+ salt.runSaltProcessStep(pepperEnv, 'I@horizon:server', 'pkg.install', [dashboardPanelPkg])
+ salt.runSaltProcessStep(pepperEnv, 'I@neutron:server', 'service.start', ['neutron-server'])
+ salt.enforceState(pepperEnv, 'I@horizon:server', 'horizon')
+ } catch (Exception er) {
+            common.errorMsg("Update of packages on neutron and horizon nodes has failed")
+ throw er
+ }
+ }
+ }
+
+ if (STAGE_COMPUTES_UPDATE.toBoolean() == true) {
+
+ try {
+ stage('List targeted compute servers') {
+ cmpMinions = salt.getMinions(pepperEnv, COMPUTE_TARGET_SERVERS)
+ cmpMinionsFirstSubset = cmpMinions[0..<Integer.valueOf(COMPUTE_TARGET_SUBSET_LIVE)]
+ cmpMinionsSecondSubset = cmpMinions - cmpMinionsFirstSubset
+
+ if (cmpMinions.isEmpty()) {
+ throw new Exception("No minions were found by specified target")
+ }
+
+ common.infoMsg("Found nodes: ${cmpMinions}")
+ common.infoMsg("Selected sample nodes: ${cmpMinionsFirstSubset}")
+
+ cmpTargetAll = cmpMinions.join(' or ')
+ cmpTargetFirstSubset = cmpMinionsFirstSubset.join(' or ')
+ cmpTargetSecondSubset = cmpMinionsSecondSubset.join(' or ')
+ }
+
+ stage('Compute nodes health check') {
+ try {
+ salt.enforceState(pepperEnv, cmpTargetAll, 'opencontrail.upgrade.verify', true, true)
+ } catch (Exception er) {
+ common.errorMsg("Opencontrail compute nodes health check stage found issues with services. Please take a look at the logs above.")
+ throw er
+ }
+ }
+
+ stage('Confirm update on sample nodes') {
+ input message: "Do you want to continue with the Opencontrail components update on compute sample nodes? ${cmpTargetFirstSubset}"
+ }
+
+ stage("Opencontrail compute update on sample nodes") {
+
+ cmpNodesUpdate(pepperEnv, cmpTargetFirstSubset)
+ }
+
+ stage('Confirm update on all remaining target nodes') {
+
+ input message: "Do you want to continue with the Opencontrail components update on all targeted compute nodes? Node list: ${cmpTargetSecondSubset}"
+ }
+
+ stage("Opencontrail compute update on all targeted nodes") {
+
+ cmpNodesUpdate(pepperEnv, cmpTargetSecondSubset)
+ }
+
+ } catch (Throwable e) {
+ // If there was an error or exception thrown, the build failed
+ currentBuild.result = "FAILURE"
+ currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+ throw e
+ }
+ }
+ }
+}
diff --git a/upgrade-mcp-release.groovy b/upgrade-mcp-release.groovy
index e67ab90..14a746b 100644
--- a/upgrade-mcp-release.groovy
+++ b/upgrade-mcp-release.groovy
@@ -179,6 +179,15 @@
"grep -r --exclude-dir=aptly -l 'apt_mk_version: .*' * | xargs --no-run-if-empty sed -i 's|apt_mk_version: .*|apt_mk_version: \"$targetMcpVersion\"|g'")
salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && " +
"grep -r --exclude-dir=aptly -l 'jenkins_pipelines_branch: .*' * | xargs --no-run-if-empty sed -i 's|jenkins_pipelines_branch: .*|jenkins_pipelines_branch: \"$gitTargetMcpVersion\"|g'")
+ // Set new k8s param
+ salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && " +
+ "grep -r --exclude-dir=aptly -l 'kubernetes_containerd_enabled: .*' * | xargs --no-run-if-empty sed -i 's|kubernetes_containerd_enabled: .*|kubernetes_containerd_enabled: True|g'")
+ salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && " +
+ "grep -r --exclude-dir=aptly -l 'system.linux.system.repo.mcp.salt' * | xargs --no-run-if-empty sed -i 's/system.linux.system.repo.mcp.salt/system.linux.system.repo.mcp.apt_mirantis.salt-formulas/g'")
+ salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && " +
+        "grep -r --exclude-dir=aptly -l 'system.linux.system.repo.mcp.contrail' * | xargs --no-run-if-empty sed -i 's/system.linux.system.repo.mcp.contrail/system.linux.system.repo.mcp.apt_mirantis.contrail/g'")
+ salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && " +
+        "grep -r --exclude-dir=aptly -l 'system.linux.system.repo.mcp.updates' * | xargs --no-run-if-empty sed -i 's/system.linux.system.repo.mcp.updates/system.linux.system.repo.mcp.apt_mirantis.update/g'")
salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/system && git checkout ${reclassSystemBranch}")
// Add new defaults
common.infoMsg("Add new defaults")