Merge "Add update contrail-related packages on ctl nodes"
diff --git a/cloud-update.groovy b/cloud-update.groovy
index ab72f76..8dea65b 100644
--- a/cloud-update.groovy
+++ b/cloud-update.groovy
@@ -42,6 +42,7 @@
*
**/
def common = new com.mirantis.mk.Common()
+def orchestrate = new com.mirantis.mk.Orchestrate()
def salt = new com.mirantis.mk.Salt()
def python = new com.mirantis.mk.Python()
def virsh = new com.mirantis.mk.Virsh()
@@ -864,6 +865,10 @@
/*
* Update section
*/
+
+ // Go through applications that use orchestrated deployment.
+ orchestrate.OrchestrateApplications(pepperEnv, "I@salt:master", "orchestration.deploy.applications")
+
if (updates.contains("cfg")) {
def target = 'I@salt:master'
def type = 'cfg'
diff --git a/gating-pipeline.groovy b/gating-pipeline.groovy
index aeaee9a..15518d4 100644
--- a/gating-pipeline.groovy
+++ b/gating-pipeline.groovy
@@ -4,6 +4,7 @@
* JOBS_NAMESPACE - Gerrit gating jobs namespace (mk, contrail, ...)
*
**/
+import groovy.json.JsonOutput
def common = new com.mirantis.mk.Common()
def gerrit = new com.mirantis.mk.Gerrit()
@@ -18,12 +19,7 @@
}
def callJobWithExtraVars(String jobName) {
- def gerritVars = '\n---'
- for (envVar in env.getEnvironment()) {
- if (envVar.key.startsWith("GERRIT_")) {
- gerritVars += "\n${envVar.key}: '${envVar.value}'"
- }
- }
+ def gerritVars = JsonOutput.toJson(env.getEnvironment().findAll{ it.key.startsWith('GERRIT_') })
testJob = build job: jobName, parameters: [
[$class: 'TextParameterValue', name: 'EXTRA_VARIABLES_YAML', value: gerritVars]
]
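
The JsonOutput rewrite above (mirrored in test-system-reclass-pipeline.groovy below) relies on JSON flow style being valid YAML, so the job consuming EXTRA_VARIABLES_YAML keeps parsing it unchanged. A minimal standalone sketch of the filtering, with fakeEnv as a hypothetical stand-in for Jenkins' env.getEnvironment():

    import groovy.json.JsonOutput

    // keep only the Gerrit-injected variables and serialize them as one-line JSON
    def fakeEnv = [GERRIT_PROJECT: 'mk-pipelines', GERRIT_BRANCH: 'master', BUILD_ID: '42']
    def gerritVars = JsonOutput.toJson(fakeEnv.findAll { it.key.startsWith('GERRIT_') })
    assert gerritVars == '{"GERRIT_PROJECT":"mk-pipelines","GERRIT_BRANCH":"master"}'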
diff --git a/k8s-upgrade-pipeline.groovy b/k8s-upgrade-pipeline.groovy
index be065a1..0279728 100644
--- a/k8s-upgrade-pipeline.groovy
+++ b/k8s-upgrade-pipeline.groovy
@@ -16,6 +16,12 @@
* CONFORMANCE_RUN_BEFORE Run Kubernetes conformance tests before update
* TEST_K8S_API_SERVER Kubernetes API server address for test execution
* ARTIFACTORY_URL Artifactory URL where docker images are located. Needed to correctly fetch conformance images.
+ * UPGRADE_CALICO_V2_TO_V3 Perform Calico upgrade from v2 to v3.
+ * KUBERNETES_CALICO_IMAGE Target calico/node image. May be null in case of reclass-system rollout.
+ * KUBERNETES_CALICO_CALICOCTL_IMAGE Target calico/ctl image. May be null in case of reclass-system rollout.
+ * KUBERNETES_CALICO_CNI_IMAGE Target calico/cni image. May be null in case of reclass-system rollout.
+ * KUBERNETES_CALICO_KUBE_CONTROLLERS_IMAGE Target calico/kube-controllers image. May be null in case of reclass-system rollout.
+ * CALICO_UPGRADE_VERSION Version of "calico-upgrade" utility to be used ("v1.0.5" for Calico v3.1.3 target).
*
**/
def common = new com.mirantis.mk.Common()
@@ -25,6 +31,9 @@
def updates = TARGET_UPDATES.tokenize(",").collect{it -> it.trim()}
def pepperEnv = "pepperEnv"
+def POOL = "I@kubernetes:pool"
+def calicoImagesValid = false
+
def overrideKubernetesImage(pepperEnv) {
def salt = new com.mirantis.mk.Salt()
@@ -37,6 +46,41 @@
}
}
+def overrideCalicoImages(pepperEnv) {
+ def salt = new com.mirantis.mk.Salt()
+
+ def calicoSaltOverrides = """
+ kubernetes_calico_image: ${KUBERNETES_CALICO_IMAGE}
+ kubernetes_calico_calicoctl_image: ${KUBERNETES_CALICO_CALICOCTL_IMAGE}
+ kubernetes_calico_cni_image: ${KUBERNETES_CALICO_CNI_IMAGE}
+ kubernetes_calico_kube_controllers_image: ${KUBERNETES_CALICO_KUBE_CONTROLLERS_IMAGE}
+ """
+ stage("Override calico images to target version") {
+ salt.setSaltOverrides(pepperEnv, calicoSaltOverrides)
+ }
+}
+
+def downloadCalicoUpgrader(pepperEnv, target) {
+ def salt = new com.mirantis.mk.Salt()
+
+ stage("Downloading calico-upgrade utility") {
+ salt.cmdRun(pepperEnv, target, "rm -f ./calico-upgrade")
+ salt.cmdRun(pepperEnv, target, "wget https://github.com/projectcalico/calico-upgrade/releases/download/${CALICO_UPGRADE_VERSION}/calico-upgrade")
+ salt.cmdRun(pepperEnv, target, "chmod +x ./calico-upgrade")
+ }
+}
+
+def pullCalicoImages(pepperEnv, target) {
+ def salt = new com.mirantis.mk.Salt()
+
+ stage("Pulling updated Calico docker images") {
+ salt.cmdRun(pepperEnv, target, "docker pull ${KUBERNETES_CALICO_IMAGE}")
+ salt.cmdRun(pepperEnv, target, "docker pull ${KUBERNETES_CALICO_CALICOCTL_IMAGE}")
+ salt.cmdRun(pepperEnv, target, "docker pull ${KUBERNETES_CALICO_CNI_IMAGE}")
+ salt.cmdRun(pepperEnv, target, "docker pull ${KUBERNETES_CALICO_KUBE_CONTROLLERS_IMAGE}")
+ }
+}
+
def performKubernetesComputeUpdate(pepperEnv, target) {
def salt = new com.mirantis.mk.Salt()
@@ -56,6 +100,54 @@
}
}
+def startCalicoUpgrade(pepperEnv, target) {
+ def salt = new com.mirantis.mk.Salt()
+
+ stage("Starting upgrade using calico-upgrade: migrate etcd schema and lock Calico") {
+ // get ETCD_ENDPOINTS in use by Calico
+ def ep_str = salt.cmdRun(pepperEnv, target, "cat /etc/calico/calicoctl.cfg | grep etcdEndpoints")['return'][0].values()[0]
+ def ETCD_ENDPOINTS = ep_str.tokenize(' ')[1]
+ print("ETCD_ENDPOINTS in use by Calico: '${ETCD_ENDPOINTS}'")
+
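+ // Flag-file handshake with the async job below: it runs "calico-upgrade start",
+ // parks until /root/upg_complete appears (created later by completeCalicoUpgrade),
+ // and only then runs "calico-upgrade complete". The wait on upgrade-start.log
+ // at the end of this stage only confirms that the job has started.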
+ def cmd = "export APIV1_ETCD_ENDPOINTS=${ETCD_ENDPOINTS} && " +
+ "export APIV1_ETCD_CA_CERT_FILE=/var/lib/etcd/ca.pem && " +
+ "export APIV1_ETCD_CERT_FILE=/var/lib/etcd/etcd-client.crt && " +
+ "export APIV1_ETCD_KEY_FILE=/var/lib/etcd/etcd-client.key && " +
+ "export ETCD_ENDPOINTS=${ETCD_ENDPOINTS} && " +
+ "export ETCD_CA_CERT_FILE=/var/lib/etcd/ca.pem && " +
+ "export ETCD_CERT_FILE=/var/lib/etcd/etcd-client.crt && " +
+ "export ETCD_KEY_FILE=/var/lib/etcd/etcd-client.key && " +
+ "rm /root/upg_complete -f && " +
+ "./calico-upgrade start --no-prompts --ignore-v3-data > upgrade-start.log && " +
+ "until [ -f /root/upg_complete ]; do sleep 0.1; done && " +
+ "./calico-upgrade complete --no-prompts > upgrade-complete.log && " +
+ "rm /root/upg_complete -f"
+ // "saltArgs = ['async']" doesn't work, so we have to run "cmd.run --async"
+ salt.cmdRun(pepperEnv, "I@salt:master", "salt -C '${target}' cmd.run '${cmd}' --async")
+ salt.cmdRun(pepperEnv, target, "until [ -f /root/upgrade-start.log ]; do sleep 0.1; done")
+ }
+}
+
+def completeCalicoUpgrade(pepperEnv, target) {
+ def salt = new com.mirantis.mk.Salt()
+
+ stage("Complete upgrade using calico-upgrade: unlock Calico") {
+ salt.cmdRun(pepperEnv, target, "echo 'true' > /root/upg_complete")
+ salt.cmdRun(pepperEnv, target, "while [ -f /root/upg_complete ]; do sleep 0.1; done")
+ salt.cmdRun(pepperEnv, target, "cat /root/upgrade-start.log")
+ salt.cmdRun(pepperEnv, target, "cat /root/upgrade-complete.log")
+ }
+}
+
+def performCalicoConfigurationUpdateAndServicesRestart(pepperEnv, target) {
+ def salt = new com.mirantis.mk.Salt()
+
+ stage("Performing Calico configuration update and services restart") {
+ salt.enforceState(pepperEnv, target, "kubernetes.pool.calico")
+ salt.runSaltProcessStep(pepperEnv, target, 'service.restart', ['kubelet'])
+ }
+}
+
def cordonNode(pepperEnv, target) {
def salt = new com.mirantis.mk.Salt()
def originalTarget = "I@kubernetes:master and not ${target}"
@@ -169,6 +261,15 @@
}
}
+def checkCalicoUpgradeSuccessful(pepperEnv, target) {
+ def salt = new com.mirantis.mk.Salt()
+
+ stage("Checking cluster state after Calico upgrade") {
+ // TODO add auto-check of results
+ salt.cmdRun(pepperEnv, target, "calicoctl version | grep -i version")
+ salt.cmdRun(pepperEnv, target, "calicoctl node status")
+ }
+}
timeout(time: 12, unit: 'HOURS') {
node() {
@@ -190,8 +291,42 @@
overrideKubernetesImage(pepperEnv)
}
+ if ((common.validInputParam('KUBERNETES_CALICO_IMAGE'))
+ && (common.validInputParam('KUBERNETES_CALICO_CALICOCTL_IMAGE'))
+ && (common.validInputParam('KUBERNETES_CALICO_CNI_IMAGE'))
+ && (common.validInputParam('KUBERNETES_CALICO_KUBE_CONTROLLERS_IMAGE'))
+ ) {
+ calicoImagesValid = true
+ overrideCalicoImages(pepperEnv)
+ }
+
/*
- * Execute update
+ * Execute Calico upgrade if needed (only for v2 to v3 upgrade).
+ * This part causes downtime for workload operations.
+ * It is only required for the Calico v2.x to v3.x upgrade when Calico uses etcd,
+ * as the Calico etcd schema format differs between v2.x and v3.x.
+ */
+ if (UPGRADE_CALICO_V2_TO_V3.toBoolean()) {
+ // one CTL node will be used to run the Calico etcd schema upgrade
+ def ctl_node = salt.getMinionsSorted(pepperEnv, CTL_TARGET)[0]
+
+ // prepare for the upgrade; doing this in advance decreases downtime during the upgrade
+ downloadCalicoUpgrader(pepperEnv, ctl_node)
+ if (calicoImagesValid) {
+ pullCalicoImages(pepperEnv, POOL)
+ }
+
+ // this sequence implies downtime for workload operations
+ startCalicoUpgrade(pepperEnv, ctl_node)
+ performCalicoConfigurationUpdateAndServicesRestart(pepperEnv, POOL)
+ completeCalicoUpgrade(pepperEnv, ctl_node)
+ // after that no downtime is expected
+
+ checkCalicoUpgradeSuccessful(pepperEnv, POOL)
+ }
+
+ /*
+ * Execute k8s update
*/
if (updates.contains("ctl")) {
def target = CTL_TARGET
@@ -218,7 +353,7 @@
performKubernetesControlUpdate(pepperEnv, target)
}
if (!SIMPLE_UPGRADE.toBoolean()) {
- // Addons upgrade should be performed after all nodes will upgraded
+ // Addons upgrade should be performed after all nodes are upgraded
updateAddons(pepperEnv, target)
// Wait for 90 sec for addons reconciling
sleep(90)
diff --git a/restore-cassandra.groovy b/restore-cassandra.groovy
index c20c3a0..b585e7e 100644
--- a/restore-cassandra.groovy
+++ b/restore-cassandra.groovy
@@ -19,6 +19,19 @@
python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
}
+ stage('Opencontrail controllers health check') {
+ try {
+ salt.enforceState(pepperEnv, 'I@opencontrail:control or I@opencontrail:collector', 'opencontrail.upgrade.verify', true, true)
+ } catch (Exception er) {
+ common.errorMsg("Opencontrail controllers health check stage found issues with services. Please take a look at the logs above.")
+ throw er
+ }
+ }
+
+ stage('Backup') {
+ salt.cmdRun(pepperEnv, 'I@cassandra:backup:client', 'bash /usr/local/bin/cassandra-backup-runner-call.sh')
+ }
+
stage('Restore') {
// get opencontrail version
def _pillar = salt.getPillar(pepperEnv, "I@opencontrail:control", '_param:opencontrail_version')
@@ -55,11 +68,9 @@
// the lovely wait-60-seconds mantra before restarting supervisor-database service
sleep(60)
salt.cmdRun(pepperEnv, 'I@opencontrail:control', "doctrail controller systemctl restart contrail-database")
- // another mantra
+ // another mantra, wait until all services are up
sleep(60)
- salt.cmdRun(pepperEnv, 'I@opencontrail:control', "doctrail controller contrail-status")
- }
- else {
+ } else {
try {
salt.runSaltProcessStep(pepperEnv, 'I@neutron:server', 'service.stop', ['neutron-server'], null, true)
} catch (Exception er) {
@@ -125,5 +136,12 @@
salt.cmdRun(pepperEnv, 'I@opencontrail:control', "contrail-status")
}
}
+
+ stage('Opencontrail controllers health check') {
+ common.retry(3, 20){
+ salt.cmdRun(pepperEnv, 'I@opencontrail:control', "doctrail controller contrail-status")
+ salt.enforceState(pepperEnv, 'I@opencontrail:control or I@opencontrail:collector', 'opencontrail.upgrade.verify', true, true)
+ }
+ }
}
}
diff --git a/test-cookiecutter-reclass.groovy b/test-cookiecutter-reclass.groovy
index bd3373c..cee90d1 100644
--- a/test-cookiecutter-reclass.groovy
+++ b/test-cookiecutter-reclass.groovy
@@ -12,6 +12,8 @@
Always test GERRIT_REFSPEC against the GERRIT_BRANCH (master) version of the opposite project
*/
+import groovy.json.JsonOutput
+
common = new com.mirantis.mk.Common()
gerrit = new com.mirantis.mk.Gerrit()
git = new com.mirantis.mk.Git()
@@ -20,6 +22,9 @@
extraVarsYAML = env.EXTRA_VARIABLES_YAML.trim() ?: ''
if (extraVarsYAML) {
common.mergeEnv(env, extraVarsYAML)
+ extraVars = readYaml text: extraVarsYAML
+} else {
+ extraVars = [:]
}
slaveNode = env.SLAVE_NODE ?: 'docker'
@@ -74,16 +79,15 @@
// modelFile - `modelfilename` from model/modelfilename/modelfilename.yaml
//* Grab all models and send each to be checked in parallel - one per thread.
def _uuid = "${env.JOB_NAME.toLowerCase()}_${env.BUILD_TAG.toLowerCase()}_${modelFile.toLowerCase()}_" + UUID.randomUUID().toString().take(8)
- def _values_string = """
----
-MODELS_TARGZ: "${env.BUILD_URL}/artifact/${reclassArtifactName}"
-DockerCName: "${_uuid}"
-testReclassEnv: "model/${modelFile}/"
-modelFile: "contexts/${modelFile}.yml"
-DISTRIB_REVISION: "${testDistribRevision}"
-useExtraRepos: ${useExtraRepos}
-${extraVarsYAML.replaceAll('---', '')}
-"""
+ def _values = [
+ MODELS_TARGZ: "${env.BUILD_URL}/artifact/${reclassArtifactName}",
+ DockerCName: _uuid,
+ testReclassEnv: "model/${modelFile}/",
+ modelFile: "contexts/${modelFile}.yml",
+ DISTRIB_REVISION: testDistribRevision,
+ useExtraRepos: useExtraRepos,
+ ]
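+ // "<<" merges extraVars into _values in place; on duplicate keys the values
+ // coming from EXTRA_VARIABLES_YAML override the defaults above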
+ def _values_string = JsonOutput.toJson(_values << extraVars)
def chunkJob = build job: chunkJobName, parameters: [
[$class: 'TextParameterValue', name: 'EXTRA_VARIABLES_YAML',
value : _values_string.stripIndent()],
@@ -158,14 +162,14 @@
if (env.GERRIT_PROJECT) {
messages.add("<font color='red'>GerritTrigger detected! We are in auto-mode:</font>")
messages.add("Test env variables has been changed:")
- messages.add("COOKIECUTTER_TEMPLATE_BRANCH => ${gerritDataCC['gerritBranch']}")
- messages.add("RECLASS_MODEL_BRANCH => ${gerritDataRS['gerritBranch']}")
// TODO are we going to have such branches?
if (!['nightly', 'testing', 'stable', 'proposed', 'master'].contains(env.GERRIT_BRANCH)) {
gerritDataCC['gerritBranch'] = env.GERRIT_BRANCH
gerritDataRS['gerritBranch'] = env.GERRIT_BRANCH
testDistribRevision = env.GERRIT_BRANCH
}
+ messages.add("COOKIECUTTER_TEMPLATE_BRANCH => ${gerritDataCC['gerritBranch']}")
+ messages.add("RECLASS_SYSTEM_BRANCH => ${gerritDataRS['gerritBranch']}")
// Identify who triggered the job and to whom we should pass the refspec
if (env.GERRIT_PROJECT == 'salt-models/reclass-system') {
gerritDataRS['gerritRefSpec'] = env.GERRIT_REFSPEC
diff --git a/test-system-reclass-pipeline.groovy b/test-system-reclass-pipeline.groovy
index 04eafeb..a8e10b7 100644
--- a/test-system-reclass-pipeline.groovy
+++ b/test-system-reclass-pipeline.groovy
@@ -1,3 +1,5 @@
+import groovy.json.JsonOutput
+
def gerrit = new com.mirantis.mk.Gerrit()
def common = new com.mirantis.mk.Common()
@@ -7,12 +9,7 @@
if (extraVarsYaml != '') {
common.mergeEnv(env, extraVarsYaml)
} else {
- extraVarsYaml = '\n---'
- for (envVar in env.getEnvironment()) {
- if (envVar.key.startsWith("GERRIT_")) {
- extraVarsYaml += "\n${envVar.key}: '${envVar.value}'"
- }
- }
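+ // JSON flow style is valid YAML, so consumers of this value can parse it as-is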
+ extraVarsYaml = JsonOutput.toJson(env.getEnvironment().findAll{ it.key.startsWith('GERRIT_') })
}
def slaveNode = env.SLAVE_NODE ?: 'python&&docker'
diff --git a/upgrade-mcp-release.groovy b/upgrade-mcp-release.groovy
index 566caa9..ad207cb 100644
--- a/upgrade-mcp-release.groovy
+++ b/upgrade-mcp-release.groovy
@@ -5,7 +5,7 @@
* Expected parameters:
* SALT_MASTER_URL Salt API server location
* SALT_MASTER_CREDENTIALS Credentials to the Salt API
- * MCP_VERSION Version of MCP to upgrade to
+ * TARGET_MCP_VERSION Version of MCP to upgrade to
* UPGRADE_SALTSTACK Upgrade SaltStack packages to new version.
* UPDATE_CLUSTER_MODEL Update MCP version parameter in cluster model
* UPDATE_PIPELINES Update pipeline repositories on Gerrit
@@ -17,10 +17,11 @@
common = new com.mirantis.mk.Common()
python = new com.mirantis.mk.Python()
jenkinsUtils = new com.mirantis.mk.JenkinsUtils()
+def pipelineTimeout = 12
venvPepper = "venvPepper"
workspace = ""
-def triggerMirrorJob(jobName){
+def triggerMirrorJob(jobName) {
params = jenkinsUtils.getJobParameters(jobName)
build job: jobName, parameters: [
[$class: 'StringParameterValue', name: 'BRANCHES', value: params.get("BRANCHES")],
@@ -30,88 +31,102 @@
]
}
-def updateSaltStack(target, pkgs){
- try{
+def updateSaltStack(target, pkgs) {
+ try {
salt.runSaltProcessStep(venvPepper, target, 'pkg.install', ["force_yes=True", "pkgs='$pkgs'"], null, true, 5)
- }catch(Exception ex){}
+ } catch (Exception ex) {
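+ // pkg.install may restart salt-minion and drop the API connection mid-call;
+ // the failure is deliberately ignored and minion health is verified below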
+ }
- common.retry(20, 60){
+ common.retry(20, 60) {
salt.minionsReachable(venvPepper, 'I@salt:master', '*')
def running = salt.runSaltProcessStep(venvPepper, target, 'saltutil.running', [], null, true, 5)
- for(value in running.get("return")[0].values()){
- if(value != []){
+ for (value in running.get("return")[0].values()) {
+ if (value != []) {
throw new Exception("Not all salt-minions are ready for execution")
}
}
}
- def saltVersion = salt.getPillar(venvPepper, 'I@salt:master', "_param:salt_version").get("return")[0].values()[0]
+ def saltVersion = salt.getPillar(venvPepper, 'I@salt:master', '_param:salt_version').get('return')[0].values()[0]
def saltMinionVersions = salt.cmdRun(venvPepper, target, "apt-cache policy salt-common | awk '/Installed/ && /$saltVersion/'").get("return")
def saltMinionVersion = ""
- for(minion in saltMinionVersions[0].keySet()){
- saltMinionVersion = saltMinionVersions[0].get(minion).replace("Salt command execution success","").trim()
- if(saltMinionVersion == ""){
+ for (minion in saltMinionVersions[0].keySet()) {
+ saltMinionVersion = saltMinionVersions[0].get(minion).replace("Salt command execution success", "").trim()
+ if (saltMinionVersion == "") {
error("Installed version of Salt on $minion doesn't match specified version in the model.")
}
}
}
-def archiveReclassInventory(filename){
+def archiveReclassInventory(filename) {
def ret = salt.cmdRun(venvPepper, 'I@salt:master', "reclass -i", true, null, false)
def reclassInv = ret.values()[0]
writeFile file: filename, text: reclassInv.toString()
archiveArtifacts artifacts: "$filename"
}
-def pipelineTimeout = 12
-if (common.validInputParam('PIPELINE_TIMEOUT') && PIPELINE_TIMEOUT.isInteger()) {
- pipelineTimeout = "${PIPELINE_TIMEOUT}".toInteger()
+if (common.validInputParam('PIPELINE_TIMEOUT') && env.PIPELINE_TIMEOUT.isInteger()) {
+ pipelineTimeout = env.PIPELINE_TIMEOUT.toInteger()
}
timeout(time: pipelineTimeout, unit: 'HOURS') {
node("python") {
try {
- def gitMcpVersion = MCP_VERSION
workspace = common.getWorkspace()
+ targetMcpVersion = null
+ if (!common.validInputParam('TARGET_MCP_VERSION') && !common.validInputParam('MCP_VERSION')) {
+ error('You must specify the MCP version in the TARGET_MCP_VERSION or MCP_VERSION variable')
+ }
+ // backward compatibility for the 2018.X => 2018.11 release
+ if (common.validInputParam('MCP_VERSION')) {
+ targetMcpVersion = env.MCP_VERSION
+ common.warningMsg("targetMcpVersion has been changed to:${targetMcpVersion}, which was taken from deprecated pipeline viriable:MCP_VERSION")
+ } else {
+ targetMcpVersion = env.TARGET_MCP_VERSION
+ }
+ // end of backward compatibility for the 2018.X => 2018.11 release
+ def gitTargetMcpVersion = targetMcpVersion
+ if (targetMcpVersion == 'testing') {
+ gitTargetMcpVersion = 'master'
+ common.warningMsg("gitTargetMcpVersion has been changed to:${gitTargetMcpVersion}")
+ }
python.setupPepperVirtualenv(venvPepper, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
- if(MCP_VERSION == ""){
- error("You must specify MCP version")
- }
- if(MCP_VERSION == "testing"){
- gitMcpVersion = "master"
- }
-
- stage("Update Reclass"){
+ stage("Update Reclass") {
def cluster_name = salt.getPillar(venvPepper, 'I@salt:master', "_param:cluster_name").get("return")[0].values()[0]
- try{
+ try {
salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/ && git diff-index --quiet HEAD --")
}
- catch(Exception ex){
+ catch (Exception ex) {
error("You have uncommited changes in your Reclass cluster model repository. Please commit or reset them and rerun the pipeline.")
}
- if(UPDATE_CLUSTER_MODEL.toBoolean()){
+ if (UPDATE_CLUSTER_MODEL.toBoolean()) {
+ common.infoMsg('Perform: UPDATE_CLUSTER_MODEL')
def dateTime = common.getDatetime()
salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && " +
- "grep -r --exclude-dir=apty -l 'apt_mk_version: .*' * | xargs sed -i 's/apt_mk_version: .*/apt_mk_version: \"$MCP_VERSION\"/g'")
- salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/system && git checkout $gitMcpVersion")
+ "grep -r --exclude-dir=aptly -l 'mcp_version: .*' * | xargs --no-run-if-empty sed -i 's/mcp_version: .*/mcp_version: \"$targetMcpVersion\"/g'")
+ // Do the same for the deprecated duplicate variable
+ salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && " +
+ "grep -r --exclude-dir=aptly -l 'apt_mk_version: .*' * | xargs --no-run-if-empty sed -i 's/apt_mk_version: .*/apt_mk_version: \"$targetMcpVersion\"/g'")
+ salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/system && git checkout $gitTargetMcpVersion")
// Add new defaults
common.infoMsg("Add new defaults")
salt.cmdRun(venvPepper, 'I@salt:master', "grep '^- system.defaults\$' /srv/salt/reclass/classes/cluster/$cluster_name/infra/init.yml || " +
- "sed -i 's/^classes:/classes:\\n- system.defaults/' /srv/salt/reclass/classes/cluster/$cluster_name/infra/init.yml")
+ "sed -i 's/^classes:/classes:\\n- system.defaults/' /srv/salt/reclass/classes/cluster/$cluster_name/infra/init.yml")
common.infoMsg("The following changes were made to the cluster model and will be commited. " +
- "Please consider if you want to push them to the remote repository or not. You have to do this manually when the run is finished.")
+ "Please consider if you want to push them to the remote repository or not. You have to do this manually when the run is finished.")
salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && git diff")
salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && git status && " +
- "git add -u && git commit --allow-empty -m 'Cluster model update to the release $MCP_VERSION on $dateTime'")
+ "git add -u && git commit --allow-empty -m 'Cluster model update to the release $targetMcpVersion on $dateTime'")
}
salt.enforceState(venvPepper, 'I@salt:master', 'reclass.storage', true)
}
- if(UPDATE_LOCAL_REPOS.toBoolean()){
+ if (UPDATE_LOCAL_REPOS.toBoolean()) {
def cluster_name = salt.getPillar(venvPepper, 'I@salt:master', "_param:cluster_name").get("return")[0].values()[0]
- stage("Update local repos"){
+ stage("Update local repos") {
common.infoMsg("Updating local repositories")
def engine = salt.getPillar(venvPepper, 'I@aptly:publisher', "aptly:publisher:source:engine")
@@ -119,25 +134,22 @@
if (runningOnDocker) {
common.infoMsg("Aptly is running as Docker container")
- }
- else {
+ } else {
common.infoMsg("Aptly isn't running as Docker container. Going to use aptly user for executing aptly commands")
}
- if(runningOnDocker){
- salt.cmdRun(venvPepper, 'I@aptly:publisher', "aptly mirror list --raw | grep -E '*' | xargs -n 1 aptly mirror drop -force", true, null, true)
- }
- else{
- salt.cmdRun(venvPepper, 'I@aptly:publisher', "aptly mirror list --raw | grep -E '*' | xargs -n 1 aptly mirror drop -force", true, null, true, ['runas=aptly'])
+ if (runningOnDocker) {
+ salt.cmdRun(venvPepper, 'I@aptly:publisher', "aptly mirror list --raw | grep -E '*' | xargs --no-run-if-empty -n 1 aptly mirror drop -force", true, null, true)
+ } else {
+ salt.cmdRun(venvPepper, 'I@aptly:publisher', "aptly mirror list --raw | grep -E '*' | xargs --no-run-if-empty -n 1 aptly mirror drop -force", true, null, true, ['runas=aptly'])
}
salt.enforceState(venvPepper, 'I@aptly:publisher', 'aptly', true)
- if(runningOnDocker){
+ if (runningOnDocker) {
salt.runSaltProcessStep(venvPepper, 'I@aptly:publisher', 'cmd.script', ['salt://aptly/files/aptly_mirror_update.sh', "args=-sv"], null, true)
salt.runSaltProcessStep(venvPepper, 'I@aptly:publisher', 'cmd.script', ['salt://aptly/files/aptly_publish_update.sh', "args=-frv -u http://10.99.0.1:8080"], null, true)
- }
- else{
+ } else {
salt.runSaltProcessStep(venvPepper, 'I@aptly:publisher', 'cmd.script', ['salt://aptly/files/aptly_mirror_update.sh', "args=-sv", 'runas=aptly'], null, true)
salt.runSaltProcessStep(venvPepper, 'I@aptly:publisher', 'cmd.script', ['salt://aptly/files/aptly_publish_update.sh', "args=-afrv", 'runas=aptly'], null, true)
}
@@ -152,12 +164,12 @@
}
}
- stage("Update Drivetrain"){
- salt.cmdRun(venvPepper, 'I@salt:master', "sed -i -e 's/[^ ]*[^ ]/$MCP_VERSION/4' /etc/apt/sources.list.d/mcp_salt.list")
+ stage("Update Drivetrain") {
+ salt.cmdRun(venvPepper, 'I@salt:master', "sed -i -e 's/[^ ]*[^ ]/$targetMcpVersion/4' /etc/apt/sources.list.d/mcp_salt.list")
salt.cmdRun(venvPepper, 'I@salt:master', "apt-get -o Dir::Etc::sourcelist='/etc/apt/sources.list.d/mcp_salt.list' -o Dir::Etc::sourceparts='-' -o APT::Get::List-Cleanup='0' update")
// Workaround for PROD-22108
salt.cmdRun(venvPepper, 'I@salt:master', "apt-get purge -y salt-formula-octavia && " +
- "apt-get install -y salt-formula-octavia")
+ "apt-get install -y salt-formula-octavia")
// End workaround for PROD-22108
salt.cmdRun(venvPepper, 'I@salt:master', "apt-get install -y --allow-downgrades salt-formula-*")
@@ -166,25 +178,25 @@
archiveReclassInventory(inventoryBeforeFilename)
- salt.cmdRun(venvPepper, 'I@salt:master', "sed -i -e 's/[^ ]*[^ ]/$MCP_VERSION/4' /etc/apt/sources.list.d/mcp_extra.list")
+ salt.cmdRun(venvPepper, 'I@salt:master', "sed -i -e 's/[^ ]*[^ ]/$targetMcpVersion/4' /etc/apt/sources.list.d/mcp_extra.list")
salt.cmdRun(venvPepper, 'I@salt:master', "apt-get -o Dir::Etc::sourcelist='/etc/apt/sources.list.d/mcp_extra.list' -o Dir::Etc::sourceparts='-' -o APT::Get::List-Cleanup='0' update")
salt.cmdRun(venvPepper, 'I@salt:master', "apt-get install -y --allow-downgrades reclass")
salt.fullRefresh(venvPepper, 'I@salt:master')
- try{
+ try {
salt.enforceState(venvPepper, "I@salt:master", 'reclass', true)
}
- catch(Exception ex){
+ catch (Exception ex) {
error("Reclass fails rendering. Pay attention to your cluster model.")
}
salt.fullRefresh(venvPepper, '*')
- try{
+ try {
salt.cmdRun(venvPepper, 'I@salt:master', "reclass-salt --top")
}
- catch(Exception ex){
+ catch (Exception ex) {
error("Reclass fails rendering. Pay attention to your cluster model.")
}
@@ -193,7 +205,7 @@
sh "diff -u $inventoryBeforeFilename $inventoryAfterFilename > reclass-inventory-diff.out || true"
archiveArtifacts artifacts: "reclass-inventory-diff.out"
- if(UPGRADE_SALTSTACK.toBoolean()){
+ if (UPGRADE_SALTSTACK.toBoolean()) {
salt.enforceState(venvPepper, "I@linux:system", 'linux.system.repo', true)
updateSaltStack("I@salt:master", '["salt-master", "salt-common", "salt-api", "salt-minion"]')
@@ -201,7 +213,7 @@
updateSaltStack("I@salt:minion and not I@salt:master", '["salt-minion"]')
}
- if(UPDATE_PIPELINES.toBoolean()){
+ if (UPDATE_PIPELINES.toBoolean()) {
triggerMirrorJob("git-mirror-downstream-mk-pipelines")
triggerMirrorJob("git-mirror-downstream-pipeline-library")
}
@@ -214,12 +226,12 @@
common.infoMsg("Checking if Docker containers are up")
- try{
- common.retry(10, 30){
+ try {
+ common.retry(10, 30) {
salt.cmdRun(venvPepper, 'I@jenkins:client and I@docker:client', "! docker service ls | tail -n +2 | grep -v -E '\\s([0-9])/\\1\\s'")
}
}
- catch(Exception ex){
+ catch (Exception ex) {
error("Docker containers for CI/CD services are having troubles with starting.")
}
}