Merge "Add build pkg-promote in Promote MCP job"
diff --git a/cloud-update.groovy b/cloud-update.groovy
index 92701bd..3fd7723 100644
--- a/cloud-update.groovy
+++ b/cloud-update.groovy
@@ -168,9 +168,9 @@
}
def osRelease = salt.getGrain(pepperEnv, target, 'lsb_distrib_codename')
if (osRelease.toString().toLowerCase().contains('trusty')) {
- args = 'export DEBIAN_FRONTEND=noninteractive; apt-get -y -q --force-yes -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confold\" '
+ args = 'export DEBIAN_FRONTEND=noninteractive; apt-get -y -q --force-yes -o Dpkg::Options::=\"--force-confold\" '
} else {
- args = 'export DEBIAN_FRONTEND=noninteractive; apt-get -y -q -f --allow-downgrades -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confold\" '
+ args = 'export DEBIAN_FRONTEND=noninteractive; apt-get -y -q -f --allow-downgrades -o Dpkg::Options::=\"--force-confold\" '
}
if (out.toString().contains('errors:')) {
try {
@@ -278,7 +278,7 @@
}
stage('Apply package downgrades') {
- args = 'export DEBIAN_FRONTEND=noninteractive; apt-get -y -q --allow-downgrades -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confold\" '
+ args = 'export DEBIAN_FRONTEND=noninteractive; apt-get -y -q --allow-downgrades -o Dpkg::Options::=\"--force-confold\" '
common.infoMsg('Performing pkgs purge/remove ... ')
try {
if (PURGE_PKGS != "") {
diff --git a/cvp-runner.groovy b/cvp-runner.groovy
index 6b5c0e2..2858e81 100644
--- a/cvp-runner.groovy
+++ b/cvp-runner.groovy
@@ -12,21 +12,21 @@
* PROXY Proxy to use for cloning repo or for pip
* IMAGE Docker image to use for running container with test framework.
* DEBUG_MODE If you need to debug (keep container after test), please enabled this
- * To launch tests from cvp_spt docker images need to set IMAGE and left TESTS_REPO empty
+ *   To launch tests from docker images, set IMAGE and leave TESTS_REPO empty
*/
common = new com.mirantis.mk.Common()
validate = new com.mirantis.mcp.Validate()
salt = new com.mirantis.mk.Salt()
salt_testing = new com.mirantis.mk.SaltModelTesting()
-def artifacts_dir = "validation_artifacts/"
+def artifacts_dir = "validation_artifacts"
def remote_dir = '/root/qa_results'
def container_workdir = '/var/lib'
-def name = 'cvp-spt'
-def xml_file = "${name}_report.xml"
+def container_name = "${env.JOB_NAME}"
+def xml_file = "${container_name}_report.xml"
def TARGET_NODE = "I@gerrit:client"
def reinstall_env = false
-def container_name = "${env.JOB_NAME}"
+
def saltMaster
def settings
@@ -76,7 +76,7 @@
def creds = common.getCredentials(SALT_MASTER_CREDENTIALS)
def username = creds.username
def password = creds.password
- def script = "pytest --junitxml ${container_workdir}/${artifacts_dir}/${xml_file} --tb=short -sv ${container_workdir}/${TESTS_SET}"
+ def script = "pytest --junitxml ${container_workdir}/${artifacts_dir}/${xml_file} --tb=short -sv ${container_workdir}/${TESTS_SET} -vv"
sh "mkdir -p ${artifacts_dir}"
@@ -86,12 +86,12 @@
'dockerMaxCpus': 2,
'dockerExtraOpts' : [
"-v /root/qa_results/:/root/qa_results/",
- "-v ${env.WORKSPACE}/validation_artifacts/:${container_workdir}/validation_artifacts/",
+ "-v ${env.WORKSPACE}/${artifacts_dir}/:${container_workdir}/${artifacts_dir}/",
+ // TODO: remove once all docker images with tests (like cvp-spt) are moved to the new architecture (like cvp-sanity)
"--entrypoint=''", // to override ENTRYPOINT=/bin/bash in Dockerfile of image
],
'envOpts' : [
- "WORKSPACE=${container_workdir}/${name}",
"SALT_USERNAME=${username}",
"SALT_PASSWORD=${password}",
"SALT_URL=${SALT_MASTER_URL}"
diff --git a/docker-mirror-images.groovy b/docker-mirror-images.groovy
index 0de5590..163ec5a 100644
--- a/docker-mirror-images.groovy
+++ b/docker-mirror-images.groovy
@@ -20,12 +20,15 @@
import groovy.json.JsonSlurper
common = new com.mirantis.mk.Common()
+jenkinsUtils = new com.mirantis.mk.JenkinsUtils()
external = false
externalMarker = '/mirantis/external/'
slaveNode = env.SLAVE_NODE ?: 'docker'
setDefaultArtifactoryProperties = env.SET_DEFAULT_ARTIFACTORY_PROPERTIES ?: true
+
+
def getImageName(String image) {
def regex = Pattern.compile('(?:.+/)?([^:]+)(?::.+)?')
def matcher = regex.matcher(image)
@@ -63,12 +66,20 @@
timeout(time: 4, unit: 'HOURS') {
node(slaveNode) {
- def user = ''
- wrap([$class: 'BuildUser']) {
- user = env.BUILD_USER_ID
- }
+ def user = jenkinsUtils.currentUsername()
currentBuild.description = "${user}: [${env.SOURCE_IMAGE_TAG} => ${env.IMAGE_TAG}]\n${env.IMAGE_LIST}"
try {
+ allowedGroups = ['release-engineering']
+ releaseTags = ['proposed', 'release', 'testing', '2018', '2019', '2020']
+ tags = [env.SOURCE_IMAGE_TAG, env.IMAGE_TAG]
+ tagInRelease = tags.any { tag -> releaseTags.any { tag.contains(it) } }
+ if (tagInRelease) {
+ if (!jenkinsUtils.currentUserInGroups(allowedGroups)) {
+ error: "You - ${user} - don't have permissions to run this job with tags ${tags}!"
+ } else {
+ echo "User `${user}` belongs to one of groups `${allowedGroups}`. Proceeding..."
+ }
+ }
stage("Mirror Docker Images") {
def images = IMAGE_LIST.tokenize('\n')
@@ -128,13 +139,17 @@
if (external) {
artifactoryProperties << ['com.mirantis.externalImage': external]
}
- def sourceRegistry = sourceImage.split('/')[0]
- def sourceImgUrl = imageURL(sourceRegistry, sourceImage, source_image_sha256) - '/manifest.json'
- def existingProps = mcp_artifactory.getPropertiesForArtifact(sourceImgUrl)
def historyProperties = []
- // check does the source image have already history props
- if (existingProps) {
- historyProperties = existingProps.get('com.mirantis.versionHistory', [])
+ try {
+ def sourceRegistry = sourceImage.split('/')[0]
+ def sourceImgUrl = imageURL(sourceRegistry, sourceImage, source_image_sha256) - '/manifest.json'
+ def existingProps = mcp_artifactory.getPropertiesForArtifact(sourceImgUrl)
+ // check whether the source image already has history props
+ if (existingProps) {
+ historyProperties = existingProps.get('com.mirantis.versionHistory', [])
+ }
+ } catch (Exception e) {
+ common.warningMsg("Can't find history for ${sourceImage}.")
}
// %5C - backslash symbol is needed
historyProperties.add("${buildTime}%5C=${sourceImage}")
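
The new gate only matters when one of the tags looks like a release tag; everything else stays open to any user. A plain-Groovy sketch of the tag check (group membership and the Jenkins error() step used in the hunk above are handled by com.mirantis.mk.JenkinsUtils and the Jenkins runtime in the real job):

```groovy
// Sketch of the release-tag check from docker-mirror-images.groovy; only the tag
// matching is reproduced here.
def releaseTags = ['proposed', 'release', 'testing', '2018', '2019', '2020']
def isReleaseTag = { String tag -> releaseTags.any { tag.contains(it) } }

assert isReleaseTag('2019.2.0')      // would require release-engineering membership
assert isReleaseTag('proposed')
assert !isReleaseTag('nightly')      // non-release tags are not gated

def tags = ['2019.2.0', 'nightly']   // example SOURCE_IMAGE_TAG / IMAGE_TAG pair
assert tags.any { isReleaseTag(it) }
```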
diff --git a/gating-pipeline.groovy b/gating-pipeline.groovy
index e1c80f0..eeb9f71 100644
--- a/gating-pipeline.groovy
+++ b/gating-pipeline.groovy
@@ -39,6 +39,7 @@
ssh.ensureKnownHosts(GERRIT_HOST)
def gerritChange = gerrit.getGerritChange(GERRIT_NAME, GERRIT_HOST, GERRIT_CHANGE_NUMBER, CREDENTIALS_ID, true)
def doSubmit = false
+ def skipProjectsVerify = ['mk/docker-jnlp-slave']
stage("test") {
if (gerritChange.status != "MERGED" && !SKIP_TEST.equals("true")) {
// test max CodeReview
@@ -46,30 +47,35 @@
doSubmit = true
def gerritProjectArray = GERRIT_PROJECT.tokenize("/")
def gerritProject = gerritProjectArray[gerritProjectArray.size() - 1]
- def jobsNamespace = JOBS_NAMESPACE
- def plural_namespaces = ['salt-formulas', 'salt-models']
- // remove plural s on the end of job namespace
- if (JOBS_NAMESPACE in plural_namespaces) {
- jobsNamespace = JOBS_NAMESPACE.substring(0, JOBS_NAMESPACE.length() - 1)
- }
- // salt-formulas tests have -latest on end of the name
- if (JOBS_NAMESPACE.equals("salt-formulas")) {
- gerritProject = gerritProject + "-latest"
- }
- def testJob = String.format("test-%s-%s", jobsNamespace, gerritProject)
- if (env.GERRIT_PROJECT == 'mk/cookiecutter-templates' || env.GERRIT_PROJECT == 'salt-models/reclass-system') {
- callJobWithExtraVars('test-salt-model-ci-wrapper')
+ if (gerritProject in skipProjectsVerify) {
+ common.successMsg("Project ${gerritProject} doesn't require verify, skipping...")
+ giveVerify = true
} else {
- if (isJobExists(testJob)) {
- common.infoMsg("Test job ${testJob} found, running")
- def patchsetVerified = gerrit.patchsetHasApproval(gerritChange.currentPatchSet, "Verified", "+")
- build job: testJob, parameters: [
- [$class: 'StringParameterValue', name: 'DEFAULT_GIT_URL', value: "${GERRIT_SCHEME}://${GERRIT_NAME}@${GERRIT_HOST}:${GERRIT_PORT}/${GERRIT_PROJECT}"],
- [$class: 'StringParameterValue', name: 'DEFAULT_GIT_REF', value: GERRIT_REFSPEC]
- ]
- giveVerify = true
+ def jobsNamespace = JOBS_NAMESPACE
+ def plural_namespaces = ['salt-formulas', 'salt-models']
+ // remove plural s on the end of job namespace
+ if (JOBS_NAMESPACE in plural_namespaces) {
+ jobsNamespace = JOBS_NAMESPACE.substring(0, JOBS_NAMESPACE.length() - 1)
+ }
+ // salt-formulas tests have -latest on end of the name
+ if (JOBS_NAMESPACE.equals("salt-formulas")) {
+ gerritProject = gerritProject + "-latest"
+ }
+ def testJob = String.format("test-%s-%s", jobsNamespace, gerritProject)
+ if (env.GERRIT_PROJECT == 'mk/cookiecutter-templates' || env.GERRIT_PROJECT == 'salt-models/reclass-system') {
+ callJobWithExtraVars('test-salt-model-ci-wrapper')
} else {
- common.infoMsg("Test job ${testJob} not found")
+ if (isJobExists(testJob)) {
+ common.infoMsg("Test job ${testJob} found, running")
+ def patchsetVerified = gerrit.patchsetHasApproval(gerritChange.currentPatchSet, "Verified", "+")
+ build job: testJob, parameters: [
+ [$class: 'StringParameterValue', name: 'DEFAULT_GIT_URL', value: "${GERRIT_SCHEME}://${GERRIT_NAME}@${GERRIT_HOST}:${GERRIT_PORT}/${GERRIT_PROJECT}"],
+ [$class: 'StringParameterValue', name: 'DEFAULT_GIT_REF', value: GERRIT_REFSPEC]
+ ]
+ giveVerify = true
+ } else {
+ common.infoMsg("Test job ${testJob} not found")
+ }
}
}
} else {
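
Apart from the new skip-list, the moved block still derives the test job name from JOBS_NAMESPACE and the Gerrit project. A plain-Groovy sketch of that derivation with example inputs; the deriveTestJob closure is illustrative and not part of the pipeline.

```groovy
// Illustrative closure reproducing the test-job name derivation from gating-pipeline.groovy.
def deriveTestJob = { String jobsNamespace, String gerritProjectPath ->
    def gerritProject = gerritProjectPath.tokenize('/').last()
    def pluralNamespaces = ['salt-formulas', 'salt-models']
    // remove the plural "s" at the end of the job namespace
    def ns = (jobsNamespace in pluralNamespaces) ? jobsNamespace[0..-2] : jobsNamespace
    // salt-formulas test jobs have -latest at the end of the name
    if (jobsNamespace == 'salt-formulas') { gerritProject += '-latest' }
    return "test-${ns}-${gerritProject}".toString()
}

assert deriveTestJob('salt-formulas', 'salt-formulas/nginx') == 'test-salt-formula-nginx-latest'
assert deriveTestJob('mk', 'mk/mk-pipelines') == 'test-mk-mk-pipelines'
```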
diff --git a/generate-cookiecutter-products.groovy b/generate-cookiecutter-products.groovy
index 9df7611..124f96b 100644
--- a/generate-cookiecutter-products.groovy
+++ b/generate-cookiecutter-products.groovy
@@ -156,6 +156,9 @@
}
context['secrets_encryption_key_id'] = secretKeyID
}
+ if (context.get('cfg_failsafe_ssh_public_key')) {
+ writeFile file:'failsafe-ssh-key.pub', text:context['cfg_failsafe_ssh_public_key']
+ }
python.setupCookiecutterVirtualenv(cutterEnv)
// FIXME refactor generateModel
python.generateModel(common2.dumpYAML(['default_context': context]), 'default_context', context['salt_master_hostname'], cutterEnv, modelEnv, templateEnv, false)
@@ -211,6 +214,9 @@
if (context['secrets_encryption_enabled'] == 'True') {
args = "--gpg-key gpgkey.asc " + args
}
+ if (context.get('cfg_failsafe_ssh_public_key')) {
+ args = "--ssh-key failsafe-ssh-key.pub " + args
+ }
// load data from model
def smc = [:]
diff --git a/generate-repo-snapshot-context.groovy b/generate-repo-snapshot-context.groovy
new file mode 100644
index 0000000..e51dbc4
--- /dev/null
+++ b/generate-repo-snapshot-context.groovy
@@ -0,0 +1,75 @@
+/**
+ Repo-resolver job which generates current repo snapshots
+ in Reclass format for the testing, nightly, and proposed versions.
+
+ Output example:
+
+ parameters:
+ _param:
+ nightly:
+ linux_system_repo_mcp_salt_url: http://mirror.mirantis.com/.snapshots/nightly-salt-formulas-xenial-2018-12-21-181741
+ testing:
+ linux_system_repo_mcp_salt_url: http://mirror.mirantis.com/.snapshots/nightly-salt-formulas-xenial-2018-12-21-181741
+ proposed:
+ linux_system_repo_mcp_salt_url: http://mirror.mirantis.com/.snapshots/nightly-salt-formulas-xenial-2018-12-21-181741
+
+ * Expected parameters:
+ MIRROR_HOST - Mirror host to use to generate snapshots context
+*/
+common = new com.mirantis.mk.Common()
+mirror = new com.mirantis.mk.Mirror()
+
+mirrorHost = env.MIRROR_HOST ?: 'mirror.mirantis.com'
+slaveNode = env.SLAVE_NODE ?: 'virtual'
+
+node(slaveNode) {
+
+ def fileName = 'repo-context.yml'
+ // TODO: replace with dynamic subsetting from reclass
+ def ceph_codename = 'luminous'
+ def elasticsearch_version = '5'
+ def glusterfs_version_number = '3.8'
+ def saltstack_version_number = '2017.7'
+ def versions = ['testing', 'proposed', 'nightly']
+ def components = [
+ 'repo_mcp_aptly':'aptly',
+ 'repo_mcp_cassandra':'cassandra',
+ 'repo_mcp_ceph': "ceph-${ceph_codename}",
+ 'repo_mcp_docker_legacy': 'docker-1.x',
+ 'repo_mcp_docker':'docker',
+ 'repo_mcp_elasticsearch_curator': 'elasticsearch-curator-5',
+ 'repo_mcp_elasticsearch': "elasticsearch-${elasticsearch_version}.x",
+ 'repo_mcp_extra': 'extra',
+ 'repo_mcp_glusterfs': "glusterfs-${glusterfs_version_number}",
+ 'repo_mcp_influxdb': 'influxdb',
+ 'repo_mcp_jenkins': 'jenkins',
+ 'repo_mcp_maas': 'maas',
+ 'repo_mcp_percona': 'percona',
+ 'repo_mcp_saltstack': "saltstack-${saltstack_version_number}",
+ 'repo_mcp_fluentd_url': 'td-agent',
+ 'repo_mcp_salt_url': 'salt-formulas',
+ ]
+
+ stage('Generate context') {
+ def meta = ['_param': [:]]
+ versions.each {
+ // ubuntu has target.txt in version root
+ meta['_param'][it] = ['linux_system_repo_ubuntu_url': mirror.getLatestSnapshotMeta(mirrorHost, it, '', 'ubuntu')['repoUrl'] ]
+ }
+ components.each { componentKey, componentRepo ->
+ for(version in versions) {
+ def versionMeta = [:]
+ try {
+ versionMeta["linux_system_repo_${componentKey}_url"] = mirror.getLatestSnapshotMeta(mirrorHost, version, componentRepo)['repoUrl']
+ } catch(Exception e) {
+ common.errorMsg(e)
+ continue
+ }
+ meta['_param'][version] << versionMeta
+ }
+ }
+
+ writeYaml file: fileName, data: ['parameters': meta ]
+ archiveArtifacts artifacts: fileName
+ }
+}
\ No newline at end of file
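
A hedged sketch of what consuming the archived repo-context.yml could look like, using the structure from the Output example in the job header; in Jenkins the file would be read with readYaml, here the parsed result is written out literally.

```groovy
// Parsed form of the "Output example" from the job header; the snapshot URL is the
// example value from that header, not a real current snapshot.
def repoContext = [
    parameters: [
        _param: [
            nightly : [linux_system_repo_mcp_salt_url: 'http://mirror.mirantis.com/.snapshots/nightly-salt-formulas-xenial-2018-12-21-181741'],
            testing : [linux_system_repo_mcp_salt_url: 'http://mirror.mirantis.com/.snapshots/nightly-salt-formulas-xenial-2018-12-21-181741'],
            proposed: [linux_system_repo_mcp_salt_url: 'http://mirror.mirantis.com/.snapshots/nightly-salt-formulas-xenial-2018-12-21-181741'],
        ]
    ]
]

// e.g. pin the proposed salt-formulas repo to its snapshot URL in a Reclass override
def proposedSaltRepo = repoContext.parameters._param.proposed.linux_system_repo_mcp_salt_url
assert proposedSaltRepo.startsWith('http://mirror.mirantis.com/.snapshots/')
println proposedSaltRepo
```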
diff --git a/opencontrail-upgrade.groovy b/opencontrail-upgrade.groovy
index 1e09917..a358222 100644
--- a/opencontrail-upgrade.groovy
+++ b/opencontrail-upgrade.groovy
@@ -227,7 +227,7 @@
return
}
- args = "export DEBIAN_FRONTEND=noninteractive; apt install -o Dpkg::Options::=\"--force-confold\" -o Dpkg::Options::=\"--force-confdef\" ${CMP_PKGS} -y;"
+ args = "export DEBIAN_FRONTEND=noninteractive; apt install -o Dpkg::Options::=\"--force-confold\" ${CMP_PKGS} -y;"
check = 'contrail-status'
out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, args, null)
@@ -265,7 +265,7 @@
return
}
- args = "export DEBIAN_FRONTEND=noninteractive; apt install -o Dpkg::Options::=\"--force-confold\" -o Dpkg::Options::=\"--force-confdef\" ${CMP_PKGS} -y;"
+ args = "export DEBIAN_FRONTEND=noninteractive; apt install -o Dpkg::Options::=\"--force-confold\" ${CMP_PKGS} -y;"
check = 'contrail-status'
out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, args, null)
@@ -443,7 +443,7 @@
return
}
- args = "export DEBIAN_FRONTEND=noninteractive; apt install --allow-downgrades -o Dpkg::Options::=\"--force-confold\" -o Dpkg::Options::=\"--force-confdef\" ${CMP_PKGS} -y;"
+ args = "export DEBIAN_FRONTEND=noninteractive; apt install --allow-downgrades -o Dpkg::Options::=\"--force-confold\" ${CMP_PKGS} -y;"
check = 'contrail-status'
out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, args, null)
@@ -481,7 +481,7 @@
return
}
- args = "export DEBIAN_FRONTEND=noninteractive; apt install --allow-downgrades -o Dpkg::Options::=\"--force-confold\" -o Dpkg::Options::=\"--force-confdef\" ${CMP_PKGS} -y;"
+ args = "export DEBIAN_FRONTEND=noninteractive; apt install --allow-downgrades -o Dpkg::Options::=\"--force-confold\" ${CMP_PKGS} -y;"
check = 'contrail-status'
out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, args, null)
@@ -510,4 +510,4 @@
}
}
}
-}
\ No newline at end of file
+}
diff --git a/opencontrail40-upgrade.groovy b/opencontrail40-upgrade.groovy
index 63cb5b4..93db009 100644
--- a/opencontrail40-upgrade.groovy
+++ b/opencontrail40-upgrade.groovy
@@ -13,11 +13,12 @@
*
**/
-def common = new com.mirantis.mk.Common()
-def salt = new com.mirantis.mk.Salt()
-def python = new com.mirantis.mk.Python()
+common = new com.mirantis.mk.Common()
+salt = new com.mirantis.mk.Salt()
+python = new com.mirantis.mk.Python()
def pepperEnv = "pepperEnv"
+def supportedOcTargetVersions = ['4.0', '4.1']
def targetLiveSubset
def targetLiveAll
def minions
@@ -39,19 +40,53 @@
def thirdPartyServicesToDisable = ['kafka', 'zookeeper', 'cassandra']
def config4Services = ['zookeeper', 'contrail-webui-middleware', 'contrail-webui', 'contrail-api', 'contrail-schema', 'contrail-svc-monitor', 'contrail-device-manager', 'contrail-config-nodemgr', 'contrail-database']
-def runCommonCommands(target, command, args, check, salt, pepperEnv) {
- out = salt.runSaltCommand(pepperEnv, 'local', ['expression': target, 'type': 'compound'], command, null, args, null)
- salt.printSaltCommandResult(out)
- // wait until $check is in correct state
- if ( check == "nodetool status" ) {
- salt.commandStatus(pepperEnv, target, check, 'Status=Up')
- } else if ( check == "doctrail all contrail-status" ) {
- salt.commandStatus(pepperEnv, target, "${check} | grep -v == | grep -v FOR | grep -v \\* | grep -v \'disabled on boot\' | grep -v nodemgr | grep -v active | grep -v backup | grep -v -F /var/crashes/", null, false, true, null, true, 500)
- } else if ( check == "contrail-status" ) {
- salt.commandStatus(pepperEnv, target, "${check} | grep -v == | grep -v FOR | grep -v \'disabled on boot\' | grep -v nodemgr | grep -v active | grep -v backup | grep -v -F /var/crashes/", null, false, true, null, true, 500)
+def checkContrailServices(pepperEnv, oc_version, target) {
+
+ def checkCmd
+
+ if (oc_version.startsWith('4')) {
+
+ checkCmd = "doctrail all contrail-status | grep -v == | grep -v FOR | grep -v \\* | grep -v \'disabled on boot\' | grep -v nodemgr | grep -v active | grep -v backup | grep -v -F /var/crashes/"
+
+ if (oc_version == '4.1') {
+ def targetMinions = salt.getMinions(pepperEnv, target)
+ def collectorMinionsInTarget = targetMinions.intersect(salt.getMinions(pepperEnv, 'I@opencontrail:collector'))
+
+ if (collectorMinionsInTarget.size() != 0) {
+ def cassandraConfigYaml = readYaml text: salt.getFileContent(pepperEnv, 'I@opencontrail:control:role:primary', '/etc/cassandra/cassandra.yaml')
+
+ def currentCassandraNativeTransportPort = cassandraConfigYaml['native_transport_port'] ?: "9042"
+ def currentCassandraRpcPort = cassandraConfigYaml['rpc_port'] ?: "9160"
+
+ def cassandraNativeTransportPort = getValueForPillarKey(pepperEnv, "I@opencontrail:control:role:primary", "opencontrail:database:bind:port_configdb")
+ def cassandraCassandraRpcPort = getValueForPillarKey(pepperEnv, "I@opencontrail:control:role:primary", "opencontrail:database:bind:rpc_port_configdb")
+
+ if (currentCassandraNativeTransportPort != cassandraNativeTransportPort) {
+ checkCmd += ' | grep -v \'contrail-collector.*(Database:Cassandra connection down)\''
+ }
+
+ if (currentCassandraRpcPort != cassandraCassandraRpcPort) {
+ checkCmd += ' | grep -v \'contrail-alarm-gen.*(Database:Cassandra\\[\\] connection down)\''
+ }
+ }
+ }
+
+ } else {
+ checkCmd = "contrail-status | grep -v == | grep -v FOR | grep -v \'disabled on boot\' | grep -v nodemgr | grep -v active | grep -v backup | grep -v -F /var/crashes/"
}
+
+ salt.commandStatus(pepperEnv, target, checkCmd, null, false, true, null, true, 500)
}
+
+def getValueForPillarKey(pepperEnv, target, pillarKey) {
+ def out = salt.getReturnValues(salt.getPillar(pepperEnv, target, pillarKey))
+ if (out == '') {
+ throw new Exception("Cannot get value for ${pillarKey} key on ${target} target")
+ }
+ return out.toString()
+}
+
timeout(time: 12, unit: 'HOURS') {
node() {
@@ -71,9 +106,18 @@
}
stage('Opencontrail controllers upgrade') {
+
+ // Sync data on minions
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:database or I@neutron:server or I@horizon:server', 'saltutil.refresh_pillar', [], null, true)
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:database or I@neutron:server or I@horizon:server', 'saltutil.sync_all', [], null, true)
+
+ // Verify specified target OpenContrail version before upgrade
+ def targetOcVersion = getValueForPillarKey(pepperEnv, "I@opencontrail:control:role:primary", "_param:opencontrail_version")
+ if (!supportedOcTargetVersions.contains(targetOcVersion)) {
+ throw new Exception("Specified OpenContrail version ${targetOcVersion} is not supported by upgrade pipeline. Supported versions: ${supportedOcTargetVersions}")
+ }
+
try {
- salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:database or I@neutron:server or I@horizon:server', 'saltutil.refresh_pillar', [], null, true)
- salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:database or I@neutron:server or I@horizon:server', 'saltutil.sync_all', [], null, true)
salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:database', 'file.remove', ["/etc/apt/sources.list.d/mcp_opencontrail.list"], null, true)
salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:database', 'file.remove', ["/etc/apt/sources.list.d/cassandra.list"], null, true)
salt.enforceState(pepperEnv, 'I@opencontrail:database or I@neutron:server or I@horizon:server', 'linux.system.repo')
@@ -84,11 +128,12 @@
}
try {
- controllerImage = salt.getPillar(pepperEnv, "I@opencontrail:control:role:primary", "docker:client:compose:opencontrail_api:service:controller:image")
- analyticsImage = salt.getPillar(pepperEnv, "I@opencontrail:collector:role:primary", "docker:client:compose:opencontrail_api:service:analytics:image")
- analyticsdbImage = salt.getPillar(pepperEnv, "I@opencontrail:collector:role:primary", "docker:client:compose:opencontrail_api:service:analyticsdb:image")
+ controllerImage = getValueForPillarKey(pepperEnv, "I@opencontrail:control:role:primary", "docker:client:compose:opencontrail:service:controller:image")
+ analyticsImage = getValueForPillarKey(pepperEnv, "I@opencontrail:collector:role:primary", "docker:client:compose:opencontrail:service:analytics:image")
+ analyticsdbImage = getValueForPillarKey(pepperEnv, "I@opencontrail:collector:role:primary", "docker:client:compose:opencontrail:service:analyticsdb:image")
+
salt.enforceState(pepperEnv, 'I@opencontrail:database', 'docker.host')
- salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:database', 'state.sls', ['opencontrail', 'exclude=opencontrail.client'])
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'state.sls', ['opencontrail', 'exclude=opencontrail.client'])
salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'state.sls', ['opencontrail.client'])
salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'dockerng.pull', [controllerImage])
salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'dockerng.pull', [analyticsImage])
@@ -126,19 +171,24 @@
salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'service.stop', [service])
}
result = salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'file.directory_exists', ['/var/lib/analyticsdb/data'])['return'][0].values()[0]
- if (result == false) {
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'file.makedirs', ['/var/lib/analyticsdb'])
+ // Keep analyticsdb only for 4.0 version
+ if (result == false && targetOcVersion == '4.0') {
salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'file.move', ['/var/lib/cassandra', '/var/lib/analyticsdb'])
salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'file.copy', ['/var/lib/zookeeper', '/var/lib/analyticsdb_zookeeper_data','recurse=True'])
}
- check = 'doctrail all contrail-status'
salt.enforceState(pepperEnv, 'I@opencontrail:collector', 'docker.client')
- runCommonCommands('I@opencontrail:collector:role:primary', command, args, check, salt, pepperEnv)
+ if (targetOcVersion == '4.1') {
+ sleep(15)
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'cmd.shell', ["doctrail analyticsdb systemctl restart confluent-kafka"], null, true)
+ }
+ checkContrailServices(pepperEnv, targetOcVersion, 'I@opencontrail:collector')
} catch (Exception er) {
common.errorMsg("Opencontrail Analytics failed to be upgraded.")
throw er
}
try {
- check = 'doctrail all contrail-status'
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'state.sls', ['opencontrail', 'exclude=opencontrail.client'])
for (service in configServices) {
salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.stop', [service])
@@ -155,8 +205,7 @@
}
salt.enforceState(pepperEnv, 'I@opencontrail:control:role:secondary', 'docker.client')
-
- runCommonCommands('I@opencontrail:control:role:secondary', command, args, check, salt, pepperEnv)
+ checkContrailServices(pepperEnv, targetOcVersion, 'I@opencontrail:control:role:secondary')
sleep(120)
@@ -165,6 +214,7 @@
}
salt.enforceState(pepperEnv, 'I@opencontrail:control:role:primary', 'docker.client')
+ checkContrailServices(pepperEnv, targetOcVersion, 'I@opencontrail:control:role:primary')
salt.runSaltProcessStep(pepperEnv, 'I@neutron:server', 'pkg.install', [neutronServerPkgs])
salt.runSaltProcessStep(pepperEnv, 'I@horizon:server', 'pkg.install', [dashboardPanelPkg])
@@ -259,7 +309,7 @@
throw er
}
- args = "export DEBIAN_FRONTEND=noninteractive; apt install -o Dpkg::Options::=\"--force-confold\" -o Dpkg::Options::=\"--force-confdef\" ${cmpPkgs} -y;"
+ args = "export DEBIAN_FRONTEND=noninteractive; apt install -o Dpkg::Options::=\"--force-confold\" ${cmpPkgs} -y;"
check = 'contrail-status'
out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, args, null)
@@ -296,7 +346,7 @@
throw er
}
- args = "export DEBIAN_FRONTEND=noninteractive; apt install -o Dpkg::Options::=\"--force-confold\" -o Dpkg::Options::=\"--force-confdef\" ${cmpPkgs} -y;"
+ args = "export DEBIAN_FRONTEND=noninteractive; apt install -o Dpkg::Options::=\"--force-confold\" ${cmpPkgs} -y;"
check = 'contrail-status'
out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, args, null)
@@ -345,14 +395,16 @@
}
salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control:role:secondary', 'state.sls', ['opencontrail', 'exclude=opencontrail.client'])
- check = 'contrail-status'
- runCommonCommands('I@opencontrail:control:role:secondary', command, args, check, salt, pepperEnv)
+ def rollbackOcVersion = getValueForPillarKey(pepperEnv, 'I@opencontrail:control:role:primary', '_param:opencontrail_version')
+ checkContrailServices(pepperEnv, rollbackOcVersion, 'I@opencontrail:control:role:secondary or I@opencontrail:collector')
sleep(120)
salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control:role:primary', 'cmd.shell', ['cd /etc/docker/compose/opencontrail/; docker-compose down'], null, true)
salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control:role:primary', 'state.sls', ['opencontrail', 'exclude=opencontrail.client'])
+ checkContrailServices(pepperEnv, rollbackOcVersion, 'I@opencontrail:control:role:primary')
+
salt.runSaltProcessStep(pepperEnv, 'I@neutron:server', 'service.stop', ['neutron-server'])
salt.runSaltProcessStep(pepperEnv, 'I@neutron:server', 'pkg.remove', [neutronServerPkgs])
salt.runSaltProcessStep(pepperEnv, 'I@neutron:server', 'pkg.install', [neutronServerPkgs])
@@ -404,7 +456,7 @@
throw er
}
- args = "export DEBIAN_FRONTEND=noninteractive; apt install --allow-downgrades -o Dpkg::Options::=\"--force-confold\" -o Dpkg::Options::=\"--force-confdef\" ${cmpPkgs} -y;"
+ args = "export DEBIAN_FRONTEND=noninteractive; apt install --allow-downgrades -o Dpkg::Options::=\"--force-confold\" ${cmpPkgs} -y;"
check = 'contrail-status'
out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, args, null)
@@ -438,7 +490,7 @@
throw er
}
- args = "export DEBIAN_FRONTEND=noninteractive; apt install --allow-downgrades -o Dpkg::Options::=\"--force-confold\" -o Dpkg::Options::=\"--force-confdef\" ${cmpPkgs} -y;"
+ args = "export DEBIAN_FRONTEND=noninteractive; apt install --allow-downgrades -o Dpkg::Options::=\"--force-confold\" ${cmpPkgs} -y;"
check = 'contrail-status'
out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, args, null)
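
The refactor above replaces runCommonCommands with a per-version checkContrailServices plus a getValueForPillarKey helper that fails fast when a pillar value is empty. A plain-Groovy sketch of that fail-fast lookup and the version gate, with the Salt call stubbed by a map (in the pipeline it is salt.getReturnValues(salt.getPillar(...))):

```groovy
// Stub pillar data; values are examples only.
def stubPillar = [
    '_param:opencontrail_version'             : '4.1',
    'opencontrail:database:bind:port_configdb': '9041',
]

// Fail-fast lookup mirroring getValueForPillarKey() from opencontrail40-upgrade.groovy.
def getValueForPillarKey = { String target, String pillarKey ->
    def out = stubPillar.get(pillarKey, '')   // stand-in for the real Salt pillar call
    if (out == '') {
        throw new Exception("Cannot get value for ${pillarKey} key on ${target} target")
    }
    return out.toString()
}

def supportedOcTargetVersions = ['4.0', '4.1']
def targetOcVersion = getValueForPillarKey('I@opencontrail:control:role:primary', '_param:opencontrail_version')
assert supportedOcTargetVersions.contains(targetOcVersion)
```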
diff --git a/stacklight-upgrade.groovy b/stacklight-upgrade.groovy
index 443d56b..85b93e9 100644
--- a/stacklight-upgrade.groovy
+++ b/stacklight-upgrade.groovy
@@ -21,88 +21,136 @@
def upgrade(master, target, service, pckg, state) {
stage("Upgrade ${service}") {
- salt.runSaltProcessStep(master, "${target}", 'saltutil.refresh_pillar', [], null, true, 5)
- salt.enforceState(master, "${target}", 'linux.system.repo', true)
- common.infoMsg("Upgrade ${service} package")
+ salt.runSaltProcessStep(master, "${target}", 'saltutil.refresh_pillar', [], null, true)
+ salt.enforceState([saltId: master, target: "${target}", state: 'linux.system.repo', output: true, failOnError: true])
+ common.infoMsg("Upgrade ${service} package(s)")
try {
- salt.runSaltProcessStep(master, "${target}", command, ["apt-get install --only-upgrade ${pckg}"], null, true)
+ salt.runSaltProcessStep(master, "${target}", command, ["apt-get install -y -o Dpkg::Options::=\"--force-confold\" ${pckg}"], null, true)
} catch (Exception er) {
errorOccured = true
- common.errorMsg("[ERROR] ${pckg} package was not upgraded.")
- return
+ common.errorMsg("[ERROR] ${pckg} package(s) was not upgraded.")
+ throw er
}
common.infoMsg("Run ${state} state on ${target} nodes")
try {
- salt.enforceState(master, "${target}", ["${state}"], true)
+ salt.enforceState([saltId: master, target: "${target}", state: ["${state}"], output: true, failOnError: true])
} catch (Exception er) {
errorOccured = true
common.errorMsg("[ERROR] ${state} state was executed and failed. Please fix it manually.")
+ throw er
}
- common.infoMsg("Check ${service} service status on the target nodes")
- salt.runSaltProcessStep(master, "${target}", "service.status", ["${service}"], null, true)
- return
+ common.infoMsg("Check ${service} service(s) status on the target nodes")
+ for (s in service.split(" ")) {
+ salt.runSaltProcessStep(master, "${target}", "service.status", "${s}", null, true)
+ }
+ }
+}
+
+def verify_es_is_green(master) {
+ common.infoMsg('Verify that the Elasticsearch cluster status is green')
+ try {
+ def retries_wait = 20
+ def retries = 15
+ def elasticsearch_vip
+ def pillar = salt.getPillar(master, "I@elasticsearch:client", 'elasticsearch:client:server:host')
+ if(!pillar['return'].isEmpty()) {
+ elasticsearch_vip = pillar['return'][0].values()[0]
+ } else {
+ errorOccured = true
+ common.errorMsg('[ERROR] Elasticsearch VIP address could not be retrieved')
+ }
+ pillar = salt.getPillar(master, "I@elasticsearch:client", 'elasticsearch:client:server:port')
+ def elasticsearch_port
+ if(!pillar['return'].isEmpty()) {
+ elasticsearch_port = pillar['return'][0].values()[0]
+ } else {
+ errorOccured = true
+ common.errorMsg('[ERROR] Elasticsearch VIP port could not be retrieved')
+ }
+ common.retry(retries,retries_wait) {
+ common.infoMsg('Waiting for Elasticsearch to become green..')
+ salt.cmdRun(master, "I@elasticsearch:client", "curl -sf ${elasticsearch_vip}:${elasticsearch_port}/_cat/health | awk '{print \$4}' | grep green")
+ }
+ } catch (Exception er) {
+ errorOccured = true
+ common.errorMsg("[ERROR] Elasticsearch cluster status is not \'green\'. Please fix it manually.")
+ throw er
}
}
def upgrade_es_kibana(master) {
+ def elasticsearch_version
+ def es_pillar = salt.getPillar(master, "I@elasticsearch:client", '_param:elasticsearch_version')
+ if(!es_pillar['return'].isEmpty()) {
+ elasticsearch_version = es_pillar['return'][0].values()[0]
+ }
stage('Upgrade elasticsearch') {
- try {
- common.infoMsg('Upgrade the Elasticsearch package')
- salt.runSaltProcessStep(master, 'I@elasticsearch:server', command, ["systemctl stop elasticsearch"], null, true)
- salt.runSaltProcessStep(master, 'I@elasticsearch:server', command, ["apt-get --only-upgrade install elasticsearch"], null, true)
- salt.runSaltProcessStep(master, 'I@elasticsearch:server', command, ["systemctl daemon-reload"], null, true)
- salt.runSaltProcessStep(master, 'I@elasticsearch:server', command, ["systemctl start elasticsearch"], null, true)
- salt.runSaltProcessStep(master, '*', 'saltutil.sync_all', [], null, true)
- } catch (Exception er) {
- errorOccured = true
- common.errorMsg("[ERROR] Elasticsearch upgrade failed. Please fix it manually.")
- return
- }
- common.infoMsg('Verify that the Elasticsearch cluster status is green')
- try {
- def retries_wait = 20
- def retries = 15
- def elasticsearch_vip
- def pillar = salt.getPillar(master, "I@elasticsearch:client", 'elasticsearch:client:server:host')
- if(!pillar['return'].isEmpty()) {
- elasticsearch_vip = pillar['return'][0].values()[0]
- } else {
+ if (elasticsearch_version == '5') {
+ try {
+ common.infoMsg('Upgrade the Elasticsearch package')
+ salt.runSaltProcessStep(master, 'I@elasticsearch:server', command, ["systemctl stop elasticsearch"], null, true)
+ salt.runSaltProcessStep(master, 'I@elasticsearch:server', command, ["apt-get --only-upgrade install elasticsearch"], null, true)
+ salt.runSaltProcessStep(master, 'I@elasticsearch:server', command, ["systemctl daemon-reload"], null, true)
+ salt.runSaltProcessStep(master, 'I@elasticsearch:server', command, ["systemctl start elasticsearch"], null, true)
+ salt.runSaltProcessStep(master, '*', 'saltutil.sync_all', [], null, true)
+ verify_es_is_green(master)
+ } catch (Exception er) {
errorOccured = true
- common.errorMsg('[ERROR] Elasticsearch VIP address could not be retrieved')
+ common.errorMsg("[ERROR] Elasticsearch upgrade failed. Please fix it manually.")
+ throw er
}
- pillar = salt.getPillar(master, "I@elasticsearch:client", 'elasticsearch:client:server:port')
- def elasticsearch_port
- if(!pillar['return'].isEmpty()) {
- elasticsearch_port = pillar['return'][0].values()[0]
- } else {
+ } else {
+ try {
+ salt.runSaltProcessStep(master, "*", 'saltutil.refresh_pillar', [], null, true)
+ salt.enforceState([saltId: master, target: "I@elasticsearch:server", state: 'linux.system.repo', output: true, failOnError: true])
+ salt.runSaltProcessStep(master, 'I@elasticsearch:client', command, ["apt-get install -y -o Dpkg::Options::=\"--force-confold\" python-elasticsearch"], null, true)
+ salt.enforceState([saltId: master, target: "I@elasticsearch:server", state: 'salt.minion', output: true, failOnError: true])
+ salt.runSaltProcessStep(master, 'I@elasticsearch:server', command, ["systemctl stop elasticsearch"], null, true)
+ salt.runSaltProcessStep(master, 'I@elasticsearch:server', command, ["export ES_PATH_CONF=/etc/elasticsearch; apt-get install -y -o Dpkg::Options::=\"--force-confold\" elasticsearch"], null, true)
+ salt.enforceState([saltId: master, target: "I@elasticsearch:server", state: 'elasticsearch.server', output: true, failOnError: true])
+ verify_es_is_green(master)
+ salt.enforceState([saltId: master, target: "I@elasticsearch:client", state: 'elasticsearch.client.update_index_templates', output: true, failOnError: true])
+ salt.enforceState([saltId: master, target: "I@elasticsearch:client", state: 'elasticsearch.client', output: true, failOnError: true])
+ } catch (Exception er) {
errorOccured = true
- common.errorMsg('[ERROR] Elasticsearch VIP port could not be retrieved')
+ common.errorMsg("[ERROR] Elasticsearch upgrade failed. Please fix it manually.")
+ throw er
}
- common.retry(retries,retries_wait) {
- common.infoMsg('Waiting for Elasticsearch to become green..')
- salt.cmdRun(master, "I@elasticsearch:client", "curl -sf ${elasticsearch_vip}:${elasticsearch_port}/_cat/health | awk '{print \$4}' | grep green")
- }
- } catch (Exception er) {
- errorOccured = true
- common.errorMsg("[ERROR] Elasticsearch cluster status is not \'green\'. Please fix it manually.")
- return
}
}
stage('Upgrade kibana') {
- try {
- common.infoMsg('Upgrade the Kibana package')
- salt.runSaltProcessStep(master, 'I@kibana:server', command, ["systemctl stop kibana"], null, true)
- salt.runSaltProcessStep(master, 'I@kibana:server', command, ["apt-get --only-upgrade install kibana"], null, true)
- salt.runSaltProcessStep(master, 'I@kibana:server', command, ["systemctl start kibana"], null, true)
- } catch (Exception er) {
- errorOccured = true
- common.errorMsg("[ERROR] Kibana upgrade failed. Please fix it manually.")
- return
+ def kibana_version
+ def kibana_pillar = salt.getPillar(master, "I@kibana:client", '_param:kibana_version')
+ if(!kibana_pillar['return'].isEmpty()) {
+ kibana_version = kibana_pillar['return'][0].values()[0]
+ }
+ if (kibana_version == '5') {
+ try {
+ common.infoMsg('Upgrade the Kibana package')
+ salt.runSaltProcessStep(master, 'I@kibana:server', command, ["systemctl stop kibana"], null, true)
+ salt.runSaltProcessStep(master, 'I@kibana:server', command, ["apt-get --only-upgrade install kibana"], null, true)
+ salt.runSaltProcessStep(master, 'I@kibana:server', command, ["systemctl start kibana"], null, true)
+ } catch (Exception er) {
+ errorOccured = true
+ common.errorMsg("[ERROR] Kibana upgrade failed. Please fix it manually.")
+ throw er
+ }
+ } else {
+ try {
+ salt.runSaltProcessStep(master, 'I@kibana:server', command, ["systemctl stop kibana"], null, true)
+ salt.enforceStateWithExclude([saltId: master, target: "I@kibana:server", state: "kibana.server", excludedStates: "[{'id': 'kibana_service'}]"])
+ salt.runSaltProcessStep(master, 'I@kibana:server', command, ["apt-get install -y -o Dpkg::Options::=\"--force-confold\" kibana"], null, true)
+ salt.enforceState([saltId: master, target: "I@kibana:server", state: 'kibana.server', output: true, failOnError: true])
+ salt.enforceState([saltId: master, target: "I@kibana:client", state: 'kibana.client', output: true, failOnError: true])
+ } catch (Exception er) {
+ errorOccured = true
+ common.errorMsg("[ERROR] Kibana upgrade failed. Please fix it manually.")
+ throw er
+ }
}
common.infoMsg("Check kibana status on the target nodes")
salt.runSaltProcessStep(master, "I@kibana:server", "service.status", ["kibana"], null, true)
- return
}
}
timeout(time: 12, unit: 'HOURS') {
@@ -112,11 +160,25 @@
python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
}
+ stage('Update grains and mine') {
+ salt.enforceState([saltId: pepperEnv, target: '*', state: 'salt.minion.grains'])
+ salt.runSaltProcessStep(pepperEnv, '*', 'saltutil.refresh_modules')
+ salt.runSaltProcessStep(pepperEnv, '*', 'mine.update')
+ sleep(30)
+ }
+
+ if (salt.testTarget(pepperEnv, "I@ceph:mon")) {
+ stage('Enable Ceph prometheus plugin') {
+ salt.enforceState([saltId: pepperEnv, target: 'I@ceph:mon', state: "ceph.mgr", output: true, failOnError: true])
+ }
+ }
+
if (STAGE_UPGRADE_SYSTEM_PART.toBoolean() == true && !errorOccured) {
upgrade(pepperEnv, "I@telegraf:agent or I@telegraf:remote_agent", "telegraf", "telegraf", "telegraf")
upgrade(pepperEnv, "I@fluentd:agent", "td-agent", "td-agent td-agent-additional-plugins", "fluentd")
if (salt.testTarget(pepperEnv, "I@prometheus:relay")) {
- upgrade(pepperEnv, "I@prometheus:relay", "prometheus-relay", "prometheus-relay", "prometheus")
+ upgrade(pepperEnv, "I@prometheus:relay", "prometheus prometheus-relay", "prometheus-bin prometheus-relay", "prometheus")
+ salt.runSaltProcessStep(pepperEnv, "I@prometheus:relay", "service.restart", "prometheus", null, true)
}
if (salt.testTarget(pepperEnv, "I@prometheus:exporters:libvirt")) {
upgrade(pepperEnv, "I@prometheus:exporters:libvirt", "libvirt-exporter", "libvirt-exporter", "prometheus")
@@ -131,25 +193,23 @@
}
if (STAGE_UPGRADE_DOCKER_COMPONENTS.toBoolean() == true && !errorOccured) {
-
stage('Upgrade docker components') {
-
try {
common.infoMsg('Disable and remove the previous versions of monitoring services')
salt.runSaltProcessStep(pepperEnv, 'I@docker:swarm:role:master and I@prometheus:server', command, ["docker stack rm monitoring"], null, true)
common.infoMsg('Rebuild the Prometheus configuration')
- salt.enforceState(pepperEnv, 'I@docker:swarm and I@prometheus:server', 'prometheus')
+ salt.enforceState([saltId: pepperEnv, target: 'I@docker:swarm and I@prometheus:server', state: 'prometheus'])
common.infoMsg('Disable and remove the previous version of Grafana')
salt.runSaltProcessStep(pepperEnv, 'I@docker:swarm:role:master and I@prometheus:server', command, ["docker stack rm dashboard"], null, true)
common.infoMsg('Start the monitoring services')
- salt.enforceState(pepperEnv, 'I@docker:swarm:role:master and I@prometheus:server', 'docker')
+ salt.enforceState([saltId: pepperEnv, target: 'I@docker:swarm:role:master and I@prometheus:server', state: 'docker'])
salt.runSaltProcessStep(pepperEnv, '*', 'saltutil.sync_all', [], null, true)
common.infoMsg('Refresh the Grafana dashboards')
- salt.enforceState(pepperEnv, 'I@grafana:client', 'grafana.client')
+ salt.enforceState([saltId: pepperEnv, target: 'I@grafana:client', state: 'grafana.client'])
} catch (Exception er) {
errorOccured = true
common.errorMsg("[ERROR] Upgrade of docker components failed. Please fix it manually.")
- return
+ throw er
}
}
}
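
The stacklight hunks switch salt.enforceState from positional arguments to a single map with explicit saltId/target/state/output/failOnError keys. Below is a sketch of that call shape with the Salt class stubbed; the real signature lives in com.mirantis.mk.Salt and may accept more keys than shown here.

```groovy
// Stub standing in for com.mirantis.mk.Salt, just to show the map-style call shape.
class SaltStub {
    def enforceState(Map params) {
        assert params.saltId && params.target && params.state
        println "enforceState target=${params.target} state=${params.state} " +
                "output=${params.get('output', true)} failOnError=${params.get('failOnError', true)}"
    }
}

def salt = new SaltStub()
def pepperEnv = 'pepperEnv'

// before: salt.enforceState(pepperEnv, 'I@grafana:client', 'grafana.client')
// after:
salt.enforceState([saltId: pepperEnv, target: 'I@grafana:client', state: 'grafana.client', output: true, failOnError: true])
```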
diff --git a/test-salt-formulas-pipeline.groovy b/test-salt-formulas-pipeline.groovy
index 52c7d79..404c0d0 100644
--- a/test-salt-formulas-pipeline.groovy
+++ b/test-salt-formulas-pipeline.groovy
@@ -106,8 +106,8 @@
img.inside("-v ${env.WORKSPACE}/:/formula/ -u root:root --cpus=4 --ulimit nofile=4096:8192") {
sh('''#!/bin/bash -xe
cd /etc/apt/
- echo "deb [arch=amd64] http://cz.archive.ubuntu.com/ubuntu xenial main restricted universe" > sources.list
- echo "deb [arch=amd64] http://cz.archive.ubuntu.com/ubuntu xenial-updates main restricted universe" >> sources.list
+ echo "deb [arch=amd64] http://mirror.mirantis.com/nightly/ubuntu xenial main restricted universe" > sources.list
+ echo "deb [arch=amd64] http://mirror.mirantis.com/nightly/ubuntu xenial-updates main restricted universe" >> sources.list
echo 'Acquire::Languages "none";' > apt.conf.d/docker-no-languages
echo 'Acquire::GzipIndexes "true"; Acquire::CompressionTypes::Order:: "gz";' > apt.conf.d/docker-gzip-indexes
echo 'APT::Get::Install-Recommends "false"; APT::Get::Install-Suggests "false";' > apt.conf.d/docker-recommends
diff --git a/upgrade-mcp-release.groovy b/upgrade-mcp-release.groovy
index 445c097..5bc0ab2 100644
--- a/upgrade-mcp-release.groovy
+++ b/upgrade-mcp-release.groovy
@@ -3,16 +3,17 @@
* Update Salt environment pipeline
*
* Expected parameters:
- * SALT_MASTER_URL Salt API server location
- * SALT_MASTER_CREDENTIALS Credentials to the Salt API
* TARGET_MCP_VERSION Version of MCP to upgrade to
- * UPGRADE_SALTSTACK Upgrade SaltStack packages to new version.
- * UPDATE_CLUSTER_MODEL Update MCP version parameter in cluster model
- * UPDATE_PIPELINES Update pipeline repositories on Gerrit
- * UPDATE_LOCAL_REPOS Update local repositories
+ * GIT_REFSPEC Git repo ref to be used
+ * DRIVE_TRAIN_PARAMS Yaml, DriveTrain related params:
+ * SALT_MASTER_URL Salt API server location
+ * SALT_MASTER_CREDENTIALS Credentials to the Salt API
+ * UPGRADE_SALTSTACK Upgrade SaltStack packages to new version.
+ * UPDATE_CLUSTER_MODEL Update MCP version parameter in cluster model
+ * UPDATE_PIPELINES Update pipeline repositories on Gerrit
+ * UPDATE_LOCAL_REPOS Update local repositories
*/
-// Load shared libs
salt = new com.mirantis.mk.Salt()
common = new com.mirantis.mk.Common()
python = new com.mirantis.mk.Python()
@@ -32,9 +33,9 @@
}
def updateSaltStack(target, pkgs) {
- try {
+ // retry with a 2 minute wait: updating salt-* packages restarts the salt-* services
+ common.retry(2, 120) {
salt.runSaltProcessStep(venvPepper, target, 'pkg.install', ["force_yes=True", "pkgs='$pkgs'"], null, true, 5)
- } catch (Exception ex) {
}
common.retry(20, 60) {
@@ -66,6 +67,28 @@
archiveArtifacts artifacts: "$filename"
}
+def validateReclassModel(ArrayList saltMinions, String suffix) {
+ try {
+ for(String minion in saltMinions) {
+ common.infoMsg("Reclass model validation for minion ${minion}...")
+ def ret = salt.cmdRun(venvPepper, 'I@salt:master', "reclass -n ${minion}", true, null, false)
+ def reclassInv = ret.values()[0]
+ writeFile file: "inventory-${minion}-${suffix}.out", text: reclassInv.toString()
+ }
+ } catch (Exception e) {
+ common.errorMsg('Cannot validate the current Reclass model. Inspect the failed minion manually.')
+ error(e)
+ }
+}
+
+def archiveReclassModelChanges(ArrayList saltMinions, String oldSuffix='before', String newSuffix='after') {
+ for(String minion in saltMinions) {
+ def fileName = "reclass-model-${minion}-diff.out"
+ sh "diff -u inventory-${minion}-${oldSuffix}.out inventory-${minion}-${newSuffix}.out > ${fileName} || true"
+ archiveArtifacts artifacts: "${fileName}"
+ }
+}
+
if (common.validInputParam('PIPELINE_TIMEOUT') && env.PIPELINE_TIMEOUT.isInteger()) {
pipelineTimeout = env.PIPELINE_TIMEOUT.toInteger()
}
@@ -87,14 +110,52 @@
targetMcpVersion = env.TARGET_MCP_VERSION
}
// end bw comp. for 2018.X => 2018.11 release
- def gitTargetMcpVersion = targetMcpVersion
- if (targetMcpVersion == 'testing') {
+ def gitTargetMcpVersion = env.getProperty('GIT_REFSPEC')
+ if (targetMcpVersion in ['testing', 'proposed']) {
gitTargetMcpVersion = 'master'
common.warningMsg("gitTargetMcpVersion has been changed to:${gitTargetMcpVersion}")
+ } else if (!gitTargetMcpVersion) {
+ // backward compatibility for 2018.11.0
+ gitTargetMcpVersion = "release/${targetMcpVersion}"
}
- python.setupPepperVirtualenv(venvPepper, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+ def saltMastURL = ''
+ def saltMastCreds = ''
+ def upgradeSaltStack = ''
+ def updateClusterModel = ''
+ def updatePipelines = ''
+ def updateLocalRepos = ''
+ def reclassSystemBranch = ''
+ def driveTrainParamsYaml = env.getProperty('DRIVE_TRAIN_PARAMS')
+ if (driveTrainParamsYaml) {
+ def driveTrainParams = readYaml text: driveTrainParamsYaml
+ saltMastURL = driveTrainParams.get('SALT_MASTER_URL')
+ saltMastCreds = driveTrainParams.get('SALT_MASTER_CREDENTIALS')
+ upgradeSaltStack = driveTrainParams.get('UPGRADE_SALTSTACK', false).toBoolean()
+ updateClusterModel = driveTrainParams.get('UPDATE_CLUSTER_MODEL', false).toBoolean()
+ updatePipelines = driveTrainParams.get('UPDATE_PIPELINES', false).toBoolean()
+ updateLocalRepos = driveTrainParams.get('UPDATE_LOCAL_REPOS', false).toBoolean()
+ reclassSystemBranch = driveTrainParams.get('RECLASS_SYSTEM_BRANCH', gitTargetMcpVersion)
+ } else {
+ // backward compatibility for 2018.11.0
+ saltMastURL = env.getProperty('SALT_MASTER_URL')
+ saltMastCreds = env.getProperty('SALT_MASTER_CREDENTIALS')
+ upgradeSaltStack = env.getProperty('UPGRADE_SALTSTACK', false).toBoolean()
+ updateClusterModel = env.getProperty('UPDATE_CLUSTER_MODEL', false).toBoolean()
+ updatePipelines = env.getProperty('UPDATE_PIPELINES', false).toBoolean()
+ updateLocalRepos = env.getProperty('UPDATE_LOCAL_REPOS', false).toBoolean()
+ reclassSystemBranch = gitTargetMcpVersion
+ }
- stage("Update Reclass") {
+ python.setupPepperVirtualenv(venvPepper, saltMastURL, saltMastCreds)
+
+ def inventoryBeforeFilename = "reclass-inventory-before.out"
+ def inventoryAfterFilename = "reclass-inventory-after.out"
+
+ def minions = salt.getMinions(venvPepper, '*')
+
+ stage("Update Reclass and Salt-Formulas ") {
+ validateReclassModel(minions, 'before')
+
def cluster_name = salt.getPillar(venvPepper, 'I@salt:master', "_param:cluster_name").get("return")[0].values()[0]
try {
salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/ && git diff-index --quiet HEAD --")
@@ -102,7 +163,7 @@
catch (Exception ex) {
error("You have uncommited changes in your Reclass cluster model repository. Please commit or reset them and rerun the pipeline.")
}
- if (UPDATE_CLUSTER_MODEL.toBoolean()) {
+ if (updateClusterModel) {
common.infoMsg('Perform: UPDATE_CLUSTER_MODEL')
def dateTime = common.getDatetime()
salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && " +
@@ -110,9 +171,13 @@
// Do the same, for deprecated variable-duplicate
salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && " +
"grep -r --exclude-dir=aptly -l 'apt_mk_version: .*' * | xargs --no-run-if-empty sed -i 's/apt_mk_version: .*/apt_mk_version: \"$targetMcpVersion\"/g'")
- salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/system && git checkout ${gitTargetMcpVersion}")
+ salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && " +
+ "grep -r --exclude-dir=aptly -l 'jenkins_pipelines_branch: .*' * | xargs --no-run-if-empty sed -i 's/jenkins_pipelines_branch: .*/jenkins_pipelines_branch: \"$gitTargetMcpVersion\"/g'")
+ salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/system && git checkout ${reclassSystemBranch}")
// Add new defaults
common.infoMsg("Add new defaults")
+ salt.cmdRun(venvPepper, 'I@salt:master', "grep '^ mcp_version: ' /srv/salt/reclass/classes/cluster/$cluster_name/infra/init.yml || " +
+ "sed -i 's/^ _param:/ _param:\\n mcp_version: \"$targetMcpVersion\"/' /srv/salt/reclass/classes/cluster/$cluster_name/infra/init.yml")
salt.cmdRun(venvPepper, 'I@salt:master', "grep '^- system.defaults\$' /srv/salt/reclass/classes/cluster/$cluster_name/infra/init.yml || " +
"sed -i 's/^classes:/classes:\\n- system.defaults/' /srv/salt/reclass/classes/cluster/$cluster_name/infra/init.yml")
common.infoMsg("The following changes were made to the cluster model and will be commited. " +
@@ -121,10 +186,57 @@
salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && git status && " +
"git add -u && git commit --allow-empty -m 'Cluster model update to the release $targetMcpVersion on $dateTime'")
}
+
+ try {
+ common.infoMsg('Perform: UPDATE Salt Formulas')
+ salt.enforceState(venvPepper, 'I@salt:master', 'linux.system.repo')
+ def saltEnv = salt.getPillar(venvPepper, 'I@salt:master', "_param:salt_master_base_environment").get("return")[0].values()[0]
+ salt.runSaltProcessStep(venvPepper, 'I@salt:master', 'state.sls_id', ["salt_master_${saltEnv}_pkg_formulas",'salt.master.env'])
+ } catch (Exception updateErr) {
+ common.warningMsg(updateErr)
+ common.warningMsg('Failed to update Salt Formulas repos/packages. Check the currently available documentation at https://docs.mirantis.com/mcp/latest/ on how to update packages.')
+ input message: 'Continue anyway?'
+ }
+
+ archiveReclassInventory(inventoryBeforeFilename)
+
+ try {
+ common.infoMsg('Perform: UPDATE Reclass package')
+ salt.runSaltProcessStep(venvPepper, 'I@salt:master', 'pkg.install', ["reclass"])
+ } catch (Exception updateErr) {
+ common.warningMsg(updateErr)
+ common.warningMsg('Failed to update Reclass package. Check the currently available documentation at https://docs.mirantis.com/mcp/latest/ on how to update packages.')
+ input message: 'Continue anyway?'
+ }
+
+ salt.fullRefresh(venvPepper, 'I@salt:master')
salt.enforceState(venvPepper, 'I@salt:master', 'reclass.storage', true)
+ try {
+ salt.enforceState(venvPepper, "I@salt:master", 'reclass', true)
+ }
+ catch (Exception ex) {
+ error("Reclass fails rendering. Pay attention to your cluster model.")
+ }
+
+ salt.fullRefresh(venvPepper, '*')
+
+ try {
+ salt.cmdRun(venvPepper, 'I@salt:master', "reclass-salt --top")
+ }
+ catch (Exception ex) {
+ error("Reclass fails rendering. Pay attention to your cluster model.")
+ }
+
+ archiveReclassInventory(inventoryAfterFilename)
+
+ sh "diff -u $inventoryBeforeFilename $inventoryAfterFilename > reclass-inventory-diff.out || true"
+ archiveArtifacts artifacts: "reclass-inventory-diff.out"
+
+ validateReclassModel(minions, 'after')
+ archiveReclassModelChanges(minions)
}
- if (UPDATE_LOCAL_REPOS.toBoolean()) {
+ if (updateLocalRepos) {
def cluster_name = salt.getPillar(venvPepper, 'I@salt:master', "_param:cluster_name").get("return")[0].values()[0]
stage("Update local repos") {
common.infoMsg("Updating local repositories")
@@ -165,61 +277,14 @@
}
stage("Update Drivetrain") {
- salt.cmdRun(venvPepper, 'I@salt:master', "sed -i -e 's/[^ ]*[^ ]/$targetMcpVersion/4' /etc/apt/sources.list.d/mcp_salt.list")
- salt.cmdRun(venvPepper, 'I@salt:master', "apt-get -o Dir::Etc::sourcelist='/etc/apt/sources.list.d/mcp_salt.list' -o Dir::Etc::sourceparts='-' -o APT::Get::List-Cleanup='0' update")
- // Workaround for PROD-22108
- salt.cmdRun(venvPepper, 'I@salt:master', "apt-get purge -y salt-formula-octavia && " +
- "apt-get install -y salt-formula-octavia")
- // End workaround for PROD-22108
- salt.cmdRun(venvPepper, 'I@salt:master', "apt-get install -y --allow-downgrades salt-formula-*")
+ if (upgradeSaltStack) {
+ updateSaltStack("I@salt:master", '["salt-master", "salt-common", "salt-api", "salt-minion"]')
- def inventoryBeforeFilename = "reclass-inventory-before.out"
- def inventoryAfterFilename = "reclass-inventory-after.out"
-
- archiveReclassInventory(inventoryBeforeFilename)
-
- salt.cmdRun(venvPepper, 'I@salt:master', "sed -i -e 's/[^ ]*[^ ]/$targetMcpVersion/4' /etc/apt/sources.list.d/mcp_extra.list")
- salt.cmdRun(venvPepper, 'I@salt:master', "apt-get -o Dir::Etc::sourcelist='/etc/apt/sources.list.d/mcp_extra.list' -o Dir::Etc::sourceparts='-' -o APT::Get::List-Cleanup='0' update")
- salt.cmdRun(venvPepper, 'I@salt:master', "apt-get install -y --allow-downgrades reclass")
-
- salt.fullRefresh(venvPepper, 'I@salt:master')
-
- try {
- salt.enforceState(venvPepper, "I@salt:master", 'reclass', true)
- }
- catch (Exception ex) {
- error("Reclass fails rendering. Pay attention to your cluster model.")
- }
-
- salt.fullRefresh(venvPepper, '*')
-
- try {
- salt.cmdRun(venvPepper, 'I@salt:master', "reclass-salt --top")
- }
- catch (Exception ex) {
- error("Reclass fails rendering. Pay attention to your cluster model.")
- }
-
- archiveReclassInventory(inventoryAfterFilename)
-
- sh "diff -u $inventoryBeforeFilename $inventoryAfterFilename > reclass-inventory-diff.out || true"
- archiveArtifacts artifacts: "reclass-inventory-diff.out"
-
- if (UPGRADE_SALTSTACK.toBoolean()) {
salt.enforceState(venvPepper, "I@linux:system", 'linux.system.repo', true)
-
- // as salt package update leads to service restart and token changing - re-create pepperEnv and rerun package update
- try {
- updateSaltStack("I@salt:master", '["salt-master", "salt-common", "salt-api", "salt-minion"]')
- } catch (Exception e) {
- python.setupPepperVirtualenv(venvPepper, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
- updateSaltStack("I@salt:master", '["salt-master", "salt-common", "salt-api", "salt-minion"]')
- }
-
updateSaltStack("I@salt:minion and not I@salt:master", '["salt-minion"]')
}
- if (UPDATE_PIPELINES.toBoolean()) {
+ if (updatePipelines) {
triggerMirrorJob("git-mirror-downstream-mk-pipelines")
triggerMirrorJob("git-mirror-downstream-pipeline-library")
}
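
With the change above, upgrade-mcp-release.groovy takes most of its settings from a single DRIVE_TRAIN_PARAMS YAML parameter, falling back to the old environment variables for 2018.11.0. An illustrative example of such a parameter value and how it parses; all values are placeholders, and the Jenkins readYaml step is replaced here by groovy.yaml.YamlSlurper (Groovy 3+ with the groovy-yaml module).

```groovy
import groovy.yaml.YamlSlurper

// Placeholder DRIVE_TRAIN_PARAMS value; the keys match the job header and parsing block above.
def driveTrainParamsYaml = '''
SALT_MASTER_URL: http://10.0.0.15:6969
SALT_MASTER_CREDENTIALS: salt-credentials
UPGRADE_SALTSTACK: true
UPDATE_CLUSTER_MODEL: true
UPDATE_PIPELINES: false
UPDATE_LOCAL_REPOS: false
RECLASS_SYSTEM_BRANCH: release/2019.2.0
'''

def driveTrainParams = new YamlSlurper().parseText(driveTrainParamsYaml)
assert driveTrainParams['UPDATE_CLUSTER_MODEL'] == true
println driveTrainParams['SALT_MASTER_URL']
```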