Merge "Add upload-images-to-s3 pipeline"
diff --git a/.gitreview b/.gitreview
index 9075ea3..ce0aa41 100644
--- a/.gitreview
+++ b/.gitreview
@@ -1,4 +1,4 @@
[gerrit]
-host=gerrit.mcp.mirantis.net
+host=gerrit.mcp.mirantis.com
port=29418
project=mk/mk-pipelines.git
diff --git a/build-debian-packages-prometheus-relay.groovy b/build-debian-packages-prometheus-relay.groovy
index f101f57..ea19c9d 100644
--- a/build-debian-packages-prometheus-relay.groovy
+++ b/build-debian-packages-prometheus-relay.groovy
@@ -13,7 +13,7 @@
sh("rm -rf * || true")
}
- def workingDir = "src/gerrit.mcp.mirantis.net/debian"
+ def workingDir = "src/gerrit.mcp.mirantis.com/debian"
stage("checkout") {
git.checkoutGitRepository(
"${workingDir}/prometheus-relay",
@@ -53,7 +53,7 @@
export GOROOT=\$PWD/go &&
export GOPATH=\$PWD &&
export PATH=\$PATH:\$GOPATH/bin:\$GOROOT/bin &&
- cd src/gerrit.mcp.mirantis.net/debian/prometheus-relay &&
+ cd src/gerrit.mcp.mirantis.com/debian/prometheus-relay &&
make""")
}
archiveArtifacts artifacts: "${workingDir}/prometheus-relay/build/*.deb"
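The hostname rename also shows up in the checkout path because this is a pre-module Go build: sources must sit under $GOPATH/src/<import-path>, so the working directory embeds the Gerrit host. A minimal sketch of the assumed layout (illustrative, not part of the change):

    // Sketch: the GOPATH layout this job assumes. With GOPATH set to the workspace,
    // the import path gerrit.mcp.mirantis.com/debian/prometheus-relay must live under
    // $GOPATH/src/, which is why workingDir tracks the (renamed) Gerrit hostname.
    def importPath = 'gerrit.mcp.mirantis.com/debian/prometheus-relay'
    def workingDir = "src/${importPath}"
    echo "sources checked out to ${workingDir}; make runs there with GOPATH=\$PWD"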
diff --git a/cloud-deploy-pipeline.groovy b/cloud-deploy-pipeline.groovy
index bf7e238..aadc7c9 100644
--- a/cloud-deploy-pipeline.groovy
+++ b/cloud-deploy-pipeline.groovy
@@ -199,7 +199,7 @@
envParams.put('cfg_bootstrap_extra_repo_params', BOOTSTRAP_EXTRA_REPO_PARAMS)
}
- // put extra salt-formulas
+ // put extra salt-formulas. FIXME: looks like some outdated logic, see PROD-23127
if (common.validInputParam('EXTRA_FORMULAS')) {
common.infoMsg("Setting extra salt-formulas to ${EXTRA_FORMULAS}")
envParams.put('cfg_extra_formulas', EXTRA_FORMULAS)
@@ -374,8 +374,8 @@
}
// ensure certificates are generated properly
- salt.runSaltProcessStep(venvPepper, "* ${extra_tgt}", 'saltutil.refresh_pillar', [], null, true)
- salt.enforceState(venvPepper, "* ${extra_tgt}", ['salt.minion.cert'], true)
+ salt.runSaltProcessStep(venvPepper, "I@kubernetes:* ${extra_tgt}", 'saltutil.refresh_pillar', [], null, true)
+ salt.enforceState(venvPepper, "I@kubernetes:* ${extra_tgt}", ['salt.minion.cert'], true)
}
if (common.checkContains('STACK_INSTALL', 'contrail')) {
@@ -423,6 +423,23 @@
}
orchestrate.installKubernetesCompute(venvPepper, extra_tgt)
+ // Set up kubernetes addons for opencontrail. See the function definition for details.
+ orchestrate.setupKubeAddonForContrail(venvPepper, extra_tgt)
+ }
+ }
+
+ // install ceph
+ if (common.checkContains('STACK_INSTALL', 'ceph')) {
+ stage('Install Ceph MONs') {
+ orchestrate.installCephMon(venvPepper, "I@ceph:mon ${extra_tgt}", extra_tgt)
+ }
+
+ stage('Install Ceph OSDs') {
+ orchestrate.installCephOsd(venvPepper, "I@ceph:osd ${extra_tgt}", true, extra_tgt)
+ }
+
+ stage('Install Ceph clients') {
+ orchestrate.installCephClient(venvPepper, extra_tgt)
}
}
@@ -474,20 +491,8 @@
}
- // install ceph
+ // connect ceph
if (common.checkContains('STACK_INSTALL', 'ceph')) {
- stage('Install Ceph MONs') {
- orchestrate.installCephMon(venvPepper, "I@ceph:mon ${extra_tgt}", extra_tgt)
- }
-
- stage('Install Ceph OSDs') {
- orchestrate.installCephOsd(venvPepper, "I@ceph:osd ${extra_tgt}", true, extra_tgt)
- }
-
-
- stage('Install Ceph clients') {
- orchestrate.installCephClient(venvPepper, extra_tgt)
- }
stage('Connect Ceph') {
orchestrate.connectCeph(venvPepper, extra_tgt)
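The cert hunk above narrows the Salt target from every minion to the compound matcher `I@kubernetes:* ${extra_tgt}`, so the pillar refresh and `salt.minion.cert` state only touch kubernetes nodes. A minimal sketch of how such compound targets compose, assuming the `salt` helper and `venvPepper` session already in scope in this pipeline (the extra clause is invented for illustration):

    // Sketch: compound Salt targeting as used in the hunk above.
    // 'I@kubernetes:*' matches minions whose pillar contains a kubernetes key;
    // extra_tgt lets callers append further compound clauses.
    def extra_tgt = 'and not cfg01*'            // illustrative extra clause
    def target = "I@kubernetes:* ${extra_tgt}"
    salt.runSaltProcessStep(venvPepper, target, 'saltutil.refresh_pillar', [], null, true)
    salt.enforceState(venvPepper, target, ['salt.minion.cert'], true)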
diff --git a/cloud-update.groovy b/cloud-update.groovy
index 2729d98..8802c1b 100644
--- a/cloud-update.groovy
+++ b/cloud-update.groovy
@@ -388,7 +388,7 @@
} else {
def salt = new com.mirantis.mk.Salt()
for (s in services) {
- def outputServicesStr = salt.getReturnValues(salt.cmdRun(pepperEnv, "${probe}*", "service --status-all | grep ${s} | awk \'{print \$4}\'"))
+ def outputServicesStr = salt.getReturnValues(salt.cmdRun(pepperEnv, probe, "service --status-all | grep ${s} | awk \'{print \$4}\'"))
def servicesList = outputServicesStr.tokenize("\n").init() //init() returns the items from the Iterable excluding the last item
if (servicesList) {
for (name in servicesList) {
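For reference, the `tokenize`/`init` combination used above drops the last element of the split output; a tiny self-contained illustration (values invented):

    // Sketch: Groovy tokenize + init, as applied to the salt cmdRun output above.
    def outputServicesStr = "nova-api\nnova-compute\n(last line dropped)"
    def servicesList = outputServicesStr.tokenize("\n").init()
    assert servicesList == ['nova-api', 'nova-compute']   // init() excludes the last item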
diff --git a/deploy-aws-k8s-kqueen-pipeline.groovy b/deploy-aws-k8s-kqueen-pipeline.groovy
index 0a5903e..8fd92bf 100644
--- a/deploy-aws-k8s-kqueen-pipeline.groovy
+++ b/deploy-aws-k8s-kqueen-pipeline.groovy
@@ -124,8 +124,8 @@
}
// ensure certificates are generated properly
- salt.runSaltProcessStep(venvPepper, '*', 'saltutil.refresh_pillar', [], null, true)
- salt.enforceState(venvPepper, '*', ['salt.minion.cert'], true)
+ salt.runSaltProcessStep(venvPepper, 'I@kubernetes:*', 'saltutil.refresh_pillar', [], null, true)
+ salt.enforceState(venvPepper, 'I@kubernetes:*', ['salt.minion.cert'], true)
orchestrate.installKubernetesInfra(venvPepper)
}
diff --git a/deploy-heat-k8s-kqueen-pipeline.groovy b/deploy-heat-k8s-kqueen-pipeline.groovy
index 7071b96..6e5705e 100644
--- a/deploy-heat-k8s-kqueen-pipeline.groovy
+++ b/deploy-heat-k8s-kqueen-pipeline.groovy
@@ -122,8 +122,8 @@
}
// ensure certificates are generated properly
- salt.runSaltProcessStep(venvPepper, '*', 'saltutil.refresh_pillar', [], null, true)
- salt.enforceState(venvPepper, '*', ['salt.minion.cert'], true)
+ salt.runSaltProcessStep(venvPepper, 'I@kubernetes:*', 'saltutil.refresh_pillar', [], null, true)
+ salt.enforceState(venvPepper, 'I@kubernetes:*', ['salt.minion.cert'], true)
orchestrate.installKubernetesInfra(venvPepper)
}
diff --git a/docker-build-image-pipeline.groovy b/docker-build-image-pipeline.groovy
index b94928e..a39051f 100644
--- a/docker-build-image-pipeline.groovy
+++ b/docker-build-image-pipeline.groovy
@@ -9,93 +9,101 @@
* REGISTRY_URL - Docker registry URL (can be empty)
* ARTIFACTORY_URL - URL to artifactory
* ARTIFACTORY_NAMESPACE - Artifactory namespace (oss, cicd,...)
+ * UPLOAD_TO_DOCKER_HUB - true/false, whether to push built images to Docker Hub
* REGISTRY_CREDENTIALS_ID - Docker hub credentials id
*
-**/
+ **/
def common = new com.mirantis.mk.Common()
def gerrit = new com.mirantis.mk.Gerrit()
def git = new com.mirantis.mk.Git()
def dockerLib = new com.mirantis.mk.Docker()
def artifactory = new com.mirantis.mcp.MCPArtifactory()
+
+slaveNode = env.SLAVE_NODE ?: 'docker'
+// NB: Jenkins env vars are strings; coerce explicitly so the literal 'false' is not truthy
+uploadToDockerHub = (env.UPLOAD_TO_DOCKER_HUB ?: false).toString().toBoolean()
+
timeout(time: 12, unit: 'HOURS') {
- node("docker") {
- def workspace = common.getWorkspace()
- def imageTagsList = IMAGE_TAGS.tokenize(" ")
- try{
+ node(slaveNode) {
+ def workspace = common.getWorkspace()
+ def imageTagsList = env.IMAGE_TAGS.tokenize(" ")
+ try {
- def buildArgs = []
- try {
- buildArgs = IMAGE_BUILD_PARAMS.tokenize(' ')
- } catch (Throwable e) {
- buildArgs = []
- }
- def dockerApp
- stage("checkout") {
- git.checkoutGitRepository('.', IMAGE_GIT_URL, IMAGE_BRANCH, IMAGE_CREDENTIALS_ID)
- }
+ def buildArgs = []
+ try {
+ buildArgs = IMAGE_BUILD_PARAMS.tokenize(' ')
+ } catch (Throwable e) {
+ buildArgs = []
+ }
+ def dockerApp
+ stage("checkout") {
+ git.checkoutGitRepository('.', IMAGE_GIT_URL, IMAGE_BRANCH, IMAGE_CREDENTIALS_ID)
+ }
- if (IMAGE_BRANCH == "master") {
- try {
- def tag = sh(script: "git describe --tags --abbrev=0", returnStdout: true).trim()
- def revision = sh(script: "git describe --tags --abbrev=4 | grep -oP \"^${tag}-\\K.*\" | awk -F\\- '{print \$1}'", returnStdout: true).trim()
- imageTagsList << tag
- revision = revision ? revision : "0"
- if(Integer.valueOf(revision) > 0){
- imageTagsList << "${tag}-${revision}"
+ if (IMAGE_BRANCH == "master") {
+ try {
+ def tag = sh(script: "git describe --tags --abbrev=0", returnStdout: true).trim()
+ def revision = sh(script: "git describe --tags --abbrev=4 | grep -oP \"^${tag}-\\K.*\" | awk -F\\- '{print \$1}'", returnStdout: true).trim()
+ imageTagsList << tag
+ revision = revision ? revision : "0"
+ if (Integer.valueOf(revision) > 0) {
+ imageTagsList << "${tag}-${revision}"
+ }
+ if (!imageTagsList.contains("latest")) {
+ imageTagsList << "latest"
+ }
+ } catch (Exception e) {
+ common.infoMsg("Impossible to find any tag")
+ }
}
- if (!imageTagsList.contains("latest")) {
- imageTagsList << "latest"
- }
- } catch (Exception e) {
- common.infoMsg("Impossible to find any tag")
- }
- }
- stage("build") {
- common.infoMsg("Building docker image ${IMAGE_NAME}")
- dockerApp = dockerLib.buildDockerImage(IMAGE_NAME, "", "${workspace}/${DOCKERFILE_PATH}", imageTagsList[0], buildArgs)
- if(!dockerApp){
- throw new Exception("Docker build image failed")
- }
- }
- stage("upload to docker hub"){
- docker.withRegistry(REGISTRY_URL, REGISTRY_CREDENTIALS_ID) {
- for(int i=0;i<imageTagsList.size();i++){
- common.infoMsg("Uploading image ${IMAGE_NAME} with tag ${imageTagsList[i]} to dockerhub")
- dockerApp.push(imageTagsList[i])
+ stage("build") {
+ common.infoMsg("Building docker image ${IMAGE_NAME}")
+ dockerApp = dockerLib.buildDockerImage(IMAGE_NAME, "", "${workspace}/${DOCKERFILE_PATH}", imageTagsList[0], buildArgs)
+ if (!dockerApp) {
+ throw new Exception("Docker build image failed")
+ }
}
- }
+ stage("upload to docker hub") {
+ if (uploadToDockerHub) {
+ docker.withRegistry(REGISTRY_URL, REGISTRY_CREDENTIALS_ID) {
+ for (int i = 0; i < imageTagsList.size(); i++) {
+ common.infoMsg("Uploading image ${IMAGE_NAME} with tag ${imageTagsList[i]} to dockerhub")
+ dockerApp.push(imageTagsList[i])
+ }
+ }
+ } else {
+ common.infoMsg('upload to docker hub skipped')
+ }
+ }
+ stage("upload to artifactory") {
+ if (common.validInputParam("ARTIFACTORY_URL") && common.validInputParam("ARTIFACTORY_NAMESPACE")) {
+ def artifactoryName = "mcp-ci";
+ def artifactoryServer = Artifactory.server(artifactoryName)
+ def shortImageName = IMAGE_NAME
+ if (IMAGE_NAME.contains("/")) {
+ shortImageName = IMAGE_NAME.tokenize("/")[1]
+ }
+ for (imageTag in imageTagsList) {
+ sh "docker tag ${IMAGE_NAME}:${imageTagsList[0]} ${ARTIFACTORY_URL}/mirantis/${ARTIFACTORY_NAMESPACE}/${shortImageName}:${imageTag}"
+ for (artifactoryRepo in ["docker-dev-local", "docker-prod-local"]) {
+ common.infoMsg("Uploading image ${IMAGE_NAME} with tag ${imageTag} to artifactory ${artifactoryName} using repo ${artifactoryRepo}")
+ artifactory.uploadImageToArtifactory(artifactoryServer, ARTIFACTORY_URL,
+ "mirantis/${ARTIFACTORY_NAMESPACE}/${shortImageName}",
+ imageTag, artifactoryRepo)
+ }
+ }
+ } else {
+ common.warningMsg("ARTIFACTORY_URL not given, upload to artifactory skipped")
+ }
+ }
+ } catch (Throwable e) {
+ // If there was an error or exception thrown, the build failed
+ currentBuild.result = "FAILURE"
+ currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+ throw e
+ } finally {
+ common.sendNotification(currentBuild.result, "", ["slack"])
}
- stage("upload to artifactory"){
- if(common.validInputParam("ARTIFACTORY_URL") && common.validInputParam("ARTIFACTORY_NAMESPACE")) {
- def artifactoryName = "mcp-ci";
- def artifactoryServer = Artifactory.server(artifactoryName)
- def shortImageName = IMAGE_NAME
- if (IMAGE_NAME.contains("/")) {
- shortImageName = IMAGE_NAME.tokenize("/")[1]
- }
- for (imageTag in imageTagsList) {
- sh "docker tag ${IMAGE_NAME} ${ARTIFACTORY_URL}/mirantis/${ARTIFACTORY_NAMESPACE}/${shortImageName}:${imageTag}"
- for(artifactoryRepo in ["docker-dev-local", "docker-prod-local"]){
- common.infoMsg("Uploading image ${IMAGE_NAME} with tag ${imageTag} to artifactory ${artifactoryName} using repo ${artifactoryRepo}")
- artifactory.uploadImageToArtifactory(artifactoryServer, ARTIFACTORY_URL,
- "mirantis/${ARTIFACTORY_NAMESPACE}/${shortImageName}",
- imageTag, artifactoryRepo)
- }
- }
- }else{
- common.warningMsg("ARTIFACTORY_URL not given, upload to artifactory skipped")
- }
- }
- } catch (Throwable e) {
- // If there was an error or exception thrown, the build failed
- currentBuild.result = "FAILURE"
- currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
- throw e
- } finally {
- common.sendNotification(currentBuild.result,"",["slack"])
}
- }
}
-
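One subtlety behind the new UPLOAD_TO_DOCKER_HUB toggle: Jenkins exposes parameters as strings, and in Groovy truth any non-empty string, including "false", is truthy, which is why uploadToDockerHub is coerced with toBoolean() above. A minimal illustration of the pitfall:

    // Sketch: Groovy truth vs. string-typed env vars.
    def raw = 'false'                    // what env.UPLOAD_TO_DOCKER_HUB actually holds
    assert raw                           // non-empty string is truthy -- would upload!
    assert raw.toBoolean() == false      // explicit coercion gives the intended answer
    def uploadToDockerHub = (raw ?: false).toString().toBoolean()
    assert uploadToDockerHub == false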
diff --git a/docker-mirror-images.groovy b/docker-mirror-images.groovy
index 07a80e7..92fea8e 100644
--- a/docker-mirror-images.groovy
+++ b/docker-mirror-images.groovy
@@ -8,51 +8,103 @@
* REGISTRY_URL Target Docker Registry URL
* IMAGE_TAG Tag to use when pushing images
* SOURCE_IMAGE_TAG Tag to use when pulling images (optional; applied where the SUBS_SOURCE_IMAGE_TAG placeholder is found)
+ * SET_DEFAULT_ARTIFACTORY_PROPERTIES Add extra properties directly to Artifactory
* IMAGE_LIST List of images to mirror
+ * Example: docker.elastic.co/elasticsearch/elasticsearch:5.4.1 docker-prod-local.docker.mirantis.net/mirantis/external/docker.elastic.co/elasticsearch
+ * docker.elastic.co/elasticsearch/elasticsearch:SUBS_SOURCE_IMAGE_TAG docker-prod-local.docker.mirantis.net/mirantis/external/elasticsearch:${IMAGE_TAG}
+ * Will be processed like:
+ * docker tag docker.elastic.co/elasticsearch/elasticsearch:5.4.1 docker-prod-local.docker.mirantis.net/mirantis/external/docker.elastic.co/elasticsearch/elasticsearch:5.4.1
+ *
*
*/
-import java.util.regex.Pattern;
+import java.util.regex.Pattern
+import groovy.json.JsonSlurper
-def common = new com.mirantis.mk.Common()
+common = new com.mirantis.mk.Common()
+external = false
+externalMarker = '/mirantis/external/'
-@NonCPS
+slaveNode = env.SLAVE_NODE ?: 'docker'
+setDefaultArtifactoryProperties = (env.SET_DEFAULT_ARTIFACTORY_PROPERTIES ?: true).toString().toBoolean()
+
def getImageName(String image) {
def regex = Pattern.compile('(?:.+/)?([^:]+)(?::.+)?')
def matcher = regex.matcher(image)
- if(matcher.find()){
+ if (matcher.find()) {
def imageName = matcher.group(1)
return imageName
- }else{
- throw new IllegalArgumentException("Wrong format of image name.")
+ } else {
+ error("Wrong format of image name.")
}
}
-timeout(time: 12, unit: 'HOURS') {
- node("docker") {
+
+timeout(time: 4, unit: 'HOURS') {
+ node(slaveNode) {
try {
- stage("Mirror Docker Images"){
- def creds = common.getPasswordCredentials(TARGET_REGISTRY_CREDENTIALS_ID)
- sh "docker login --username=${creds.username} --password=${creds.password.toString()} ${REGISTRY_URL}"
+ stage("Mirror Docker Images") {
+
def images = IMAGE_LIST.tokenize('\n')
- def imageName, imagePath, targetRegistry, imageArray
- for (image in images){
- if(image.trim().indexOf(' ') == -1){
- throw new IllegalArgumentException("Wrong format of image and target repository input")
+ def imageName, sourceImage, targetRegistryPath, imageArray
+ for (image in images) {
+ if (image.trim().indexOf(' ') == -1) {
+ error("Wrong format of image and target repository input")
}
imageArray = image.trim().tokenize(' ')
- imagePath = imageArray[0]
- if (imagePath.contains('SUBS_SOURCE_IMAGE_TAG')) {
- common.warningMsg("Replacing SUBS_SOURCE_IMAGE_TAG => ${SOURCE_IMAGE_TAG}")
- imagePath = imagePath.replace('SUBS_SOURCE_IMAGE_TAG', SOURCE_IMAGE_TAG)
+ sourceImage = imageArray[0]
+ if (sourceImage.contains('SUBS_SOURCE_IMAGE_TAG')) {
+ common.warningMsg("Replacing SUBS_SOURCE_IMAGE_TAG => ${env.SOURCE_IMAGE_TAG}")
+ sourceImage = sourceImage.replace('SUBS_SOURCE_IMAGE_TAG', env.SOURCE_IMAGE_TAG)
}
- targetRegistry = imageArray[1]
- imageName = getImageName(imagePath)
- sh """docker pull ${imagePath}
- docker tag ${imagePath} ${targetRegistry}/${imageName}:${IMAGE_TAG}
- docker push ${targetRegistry}/${imageName}:${IMAGE_TAG}"""
+ targetRegistryPath = imageArray[1]
+ targetRegistry = imageArray[1].split('/')[0]
+ imageName = getImageName(sourceImage)
+ targetImageFull = "${targetRegistryPath}/${imageName}:${env.IMAGE_TAG}"
+ srcImage = docker.image(sourceImage)
+ srcImage.pull()
+ // Use a plain 'sh' docker call for tagging, due to magic code in the plugin:
+ // https://github.com/jenkinsci/docker-workflow-plugin/blob/docker-workflow-1.17/src/main/resources/org/jenkinsci/plugins/docker/workflow/Docker.groovy#L168-L170
+ sh("docker tag ${srcImage.id} ${targetImageFull}")
+ common.infoMsg("Attempt to push docker image into remote registry: ${env.REGISTRY_URL}")
+ docker.withRegistry(env.REGISTRY_URL, env.TARGET_REGISTRY_CREDENTIALS_ID) {
+ sh("docker push ${targetImageFull}")
+ }
+ if (targetImageFull.contains(externalMarker)) {
+ external = true
+ }
+
+ if (setDefaultArtifactoryProperties) {
+ common.infoMsg("Processing artifactory props for : ${targetImageFull}")
+ LinkedHashMap artifactoryProperties = [:]
+ // Get digest of pushed image
+ String unique_image_id = sh(
+ script: "docker inspect --format='{{index .RepoDigests 0}}' '${targetImageFull}'",
+ returnStdout: true,
+ ).trim()
+ def image_sha256 = unique_image_id.tokenize(':')[1]
+ def ret = new URL("https://${targetRegistry}/artifactory/api/search/checksum?sha256=${image_sha256}").getText()
+ // Most probably we will get many results, especially for external images. We need to pick
+ // exactly the one we are pushing now
+ guessImage = targetImageFull.replace(':', '/').replace(targetRegistry, '')
+ ArrayList img_data = new JsonSlurper().parseText(ret)['results']
+ img_data*.uri.each { imgUrl ->
+ if (imgUrl.contains(guessImage)) {
+ artifactoryProperties = [
+ 'com.mirantis.targetTag' : env.IMAGE_TAG,
+ 'com.mirantis.uniqueImageId': unique_image_id,
+ ]
+ if (external) {
+ artifactoryProperties << ['com.mirantis.externalImage': external]
+ }
+ common.infoMsg("artifactoryProperties=> ${artifactoryProperties}")
+ // Call pipeline-library routine to set properties
+ def mcp_artifactory = new com.mirantis.mcp.MCPArtifactory()
+ mcp_artifactory.setProperties(imgUrl - '/manifest.json', artifactoryProperties)
+ }
+ }
+ }
}
}
} catch (Throwable e) {
- // If there was an error or exception thrown, the build failed
+ // Stub for future processing
currentBuild.result = "FAILURE"
throw e
}
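The regex in getImageName strips both the registry/namespace prefix and the tag, which is what lets the pipeline build `${targetRegistryPath}/${imageName}:${IMAGE_TAG}`. A few self-contained examples of how it resolves (sketch only; the closure mirrors the function above):

    // Sketch: behaviour of getImageName's pattern '(?:.+/)?([^:]+)(?::.+)?'.
    import java.util.regex.Pattern

    def nameOf = { String image ->
        def m = Pattern.compile('(?:.+/)?([^:]+)(?::.+)?').matcher(image)
        m.find() ? m.group(1) : null
    }
    assert nameOf('docker.elastic.co/elasticsearch/elasticsearch:5.4.1') == 'elasticsearch'
    assert nameOf('alpine:3.8') == 'alpine'
    assert nameOf('alpine') == 'alpine'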
diff --git a/gating-pipeline.groovy b/gating-pipeline.groovy
index e42524b..aeaee9a 100644
--- a/gating-pipeline.groovy
+++ b/gating-pipeline.groovy
@@ -3,79 +3,105 @@
* CREDENTIALS_ID - Gerrit credentials ID
* JOBS_NAMESPACE - Gerrit gating jobs namespace (mk, contrail, ...)
*
-**/
+ **/
def common = new com.mirantis.mk.Common()
def gerrit = new com.mirantis.mk.Gerrit()
def ssh = new com.mirantis.mk.Ssh()
-timeout(time: 12, unit: 'HOURS') {
- node("python") {
- try{
- // test if change is not already merged
- ssh.prepareSshAgentKey(CREDENTIALS_ID)
- ssh.ensureKnownHosts(GERRIT_HOST)
- def gerritChange = gerrit.getGerritChange(GERRIT_NAME, GERRIT_HOST, GERRIT_CHANGE_NUMBER, CREDENTIALS_ID, true)
- def doSubmit = false
- def giveVerify = false
- stage("test") {
- if (gerritChange.status != "MERGED" && !SKIP_TEST.equals("true")){
- // test max CodeReview
- if(gerrit.patchsetHasApproval(gerritChange.currentPatchSet,"Code-Review", "+")){
- doSubmit = true
- def gerritProjectArray = GERRIT_PROJECT.tokenize("/")
- def gerritProject = gerritProjectArray[gerritProjectArray.size() - 1]
- def jobsNamespace = JOBS_NAMESPACE
- def plural_namespaces = ['salt-formulas', 'salt-models']
- // remove plural s on the end of job namespace
- if (JOBS_NAMESPACE in plural_namespaces){
- jobsNamespace = JOBS_NAMESPACE.substring(0, JOBS_NAMESPACE.length() - 1)
- }
- // salt-formulas tests have -latest on end of the name
- if(JOBS_NAMESPACE.equals("salt-formulas")){
- gerritProject=gerritProject+"-latest"
- }
- def testJob = String.format("test-%s-%s", jobsNamespace, gerritProject)
- if (_jobExists(testJob)) {
- common.infoMsg("Test job ${testJob} found, running")
- def patchsetVerified = gerrit.patchsetHasApproval(gerritChange.currentPatchSet,"Verified", "+")
- build job: testJob, parameters: [
- [$class: 'StringParameterValue', name: 'DEFAULT_GIT_URL', value: "${GERRIT_SCHEME}://${GERRIT_NAME}@${GERRIT_HOST}:${GERRIT_PORT}/${GERRIT_PROJECT}"],
- [$class: 'StringParameterValue', name: 'DEFAULT_GIT_REF', value: GERRIT_REFSPEC]
- ]
- giveVerify = true
- } else {
- common.infoMsg("Test job ${testJob} not found")
- }
- } else {
- common.errorMsg("Change don't have a CodeReview, skipping gate")
- }
- } else {
- common.infoMsg("Test job skipped")
- }
- }
- stage("submit review"){
- if(gerritChange.status == "MERGED"){
- common.successMsg("Change ${GERRIT_CHANGE_NUMBER} is already merged, no need to gate them")
- }else if(doSubmit){
- if(giveVerify){
- common.warningMsg("Change ${GERRIT_CHANGE_NUMBER} don't have a Verified, but tests were successful, so adding Verified and submitting")
- ssh.agentSh(String.format("ssh -p 29418 %s@%s gerrit review --verified +1 --submit %s,%s", GERRIT_NAME, GERRIT_HOST, GERRIT_CHANGE_NUMBER, GERRIT_PATCHSET_NUMBER))
- }else{
- ssh.agentSh(String.format("ssh -p 29418 %s@%s gerrit review --submit %s,%s", GERRIT_NAME, GERRIT_HOST, GERRIT_CHANGE_NUMBER, GERRIT_PATCHSET_NUMBER))
- }
- common.infoMsg(String.format("Gerrit review %s,%s submitted", GERRIT_CHANGE_NUMBER, GERRIT_PATCHSET_NUMBER))
- }
- }
- } catch (Throwable e) {
- // If there was an error or exception thrown, the build failed
- currentBuild.result = "FAILURE"
- currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
- throw e
- }
- }
-}
+
+slaveNode = env.SLAVE_NODE ?: 'docker'
+giveVerify = false
@NonCPS
-def _jobExists(jobName){
- return Jenkins.instance.items.find{it -> it.name.equals(jobName)}
+def isJobExists(jobName) {
+ return Jenkins.instance.items.find { it -> it.name.equals(jobName) }
+}
+
+def callJobWithExtraVars(String jobName) {
+ def gerritVars = '\n---'
+ for (envVar in env.getEnvironment()) {
+ if (envVar.key.startsWith("GERRIT_")) {
+ gerritVars += "\n${envVar.key}: '${envVar.value}'"
+ }
+ }
+ testJob = build job: jobName, parameters: [
+ [$class: 'TextParameterValue', name: 'EXTRA_VARIABLES_YAML', value: gerritVars]
+ ]
+ if (testJob.getResult() != 'SUCCESS') {
+ error("Gate job ${testJob.getBuildUrl().toString()} finished with ${testJob.getResult()} !")
+ }
+ giveVerify = true
+}
+
+
+timeout(time: 12, unit: 'HOURS') {
+ node(slaveNode) {
+ try {
+ // test if change is not already merged
+ ssh.prepareSshAgentKey(CREDENTIALS_ID)
+ ssh.ensureKnownHosts(GERRIT_HOST)
+ def gerritChange = gerrit.getGerritChange(GERRIT_NAME, GERRIT_HOST, GERRIT_CHANGE_NUMBER, CREDENTIALS_ID, true)
+ def doSubmit = false
+ stage("test") {
+ if (gerritChange.status != "MERGED" && !SKIP_TEST.equals("true")) {
+ // test max CodeReview
+ if (gerrit.patchsetHasApproval(gerritChange.currentPatchSet, "Code-Review", "+")) {
+ doSubmit = true
+ def gerritProjectArray = GERRIT_PROJECT.tokenize("/")
+ def gerritProject = gerritProjectArray[gerritProjectArray.size() - 1]
+ def jobsNamespace = JOBS_NAMESPACE
+ def plural_namespaces = ['salt-formulas', 'salt-models']
+ // remove the plural 's' at the end of the job namespace
+ if (JOBS_NAMESPACE in plural_namespaces) {
+ jobsNamespace = JOBS_NAMESPACE.substring(0, JOBS_NAMESPACE.length() - 1)
+ }
+ // salt-formulas tests have -latest at the end of the name
+ if (JOBS_NAMESPACE.equals("salt-formulas")) {
+ gerritProject = gerritProject + "-latest"
+ }
+ def testJob = String.format("test-%s-%s", jobsNamespace, gerritProject)
+ if (env.GERRIT_PROJECT == 'mk/cookiecutter-templates') {
+ callJobWithExtraVars('test-mk-cookiecutter-templates')
+ } else if (env.GERRIT_PROJECT == 'salt-models/reclass-system') {
+ callJobWithExtraVars('test-salt-model-reclass-system')
+ } else {
+ if (isJobExists(testJob)) {
+ common.infoMsg("Test job ${testJob} found, running")
+ def patchsetVerified = gerrit.patchsetHasApproval(gerritChange.currentPatchSet, "Verified", "+")
+ build job: testJob, parameters: [
+ [$class: 'StringParameterValue', name: 'DEFAULT_GIT_URL', value: "${GERRIT_SCHEME}://${GERRIT_NAME}@${GERRIT_HOST}:${GERRIT_PORT}/${GERRIT_PROJECT}"],
+ [$class: 'StringParameterValue', name: 'DEFAULT_GIT_REF', value: GERRIT_REFSPEC]
+ ]
+ giveVerify = true
+ } else {
+ common.infoMsg("Test job ${testJob} not found")
+ }
+ }
+ } else {
+ common.errorMsg("Change don't have a CodeReview, skipping gate")
+ }
+ } else {
+ common.infoMsg("Test job skipped")
+ }
+ }
+ stage("submit review") {
+ if (gerritChange.status == "MERGED") {
+ common.successMsg("Change ${GERRIT_CHANGE_NUMBER} is already merged, no need to gate them")
+ } else if (doSubmit) {
+ if (giveVerify) {
+ common.warningMsg("Change ${GERRIT_CHANGE_NUMBER} don't have a Verified, but tests were successful, so adding Verified and submitting")
+ ssh.agentSh(String.format("ssh -p 29418 %s@%s gerrit review --verified +1 --submit %s,%s", GERRIT_NAME, GERRIT_HOST, GERRIT_CHANGE_NUMBER, GERRIT_PATCHSET_NUMBER))
+ } else {
+ ssh.agentSh(String.format("ssh -p 29418 %s@%s gerrit review --submit %s,%s", GERRIT_NAME, GERRIT_HOST, GERRIT_CHANGE_NUMBER, GERRIT_PATCHSET_NUMBER))
+ }
+ common.infoMsg(String.format("Gerrit review %s,%s submitted", GERRIT_CHANGE_NUMBER, GERRIT_PATCHSET_NUMBER))
+ }
+ }
+ } catch (Throwable e) {
+ // If there was an error or exception thrown, the build failed
+ currentBuild.result = "FAILURE"
+ currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+ throw e
+ }
+ }
}
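callJobWithExtraVars serializes every GERRIT_* environment variable into a YAML document and hands it to the downstream job as EXTRA_VARIABLES_YAML. A sketch of the payload shape it builds (env var names real, values illustrative):

    // Sketch: shape of the EXTRA_VARIABLES_YAML parameter built by callJobWithExtraVars.
    // For GERRIT_PROJECT=mk/cookiecutter-templates and GERRIT_CHANGE_NUMBER=12345
    // the generated text is a small YAML document:
    //   ---
    //   GERRIT_PROJECT: 'mk/cookiecutter-templates'
    //   GERRIT_CHANGE_NUMBER: '12345'
    def gerritVars = '\n---'
    ['GERRIT_PROJECT': 'mk/cookiecutter-templates', 'GERRIT_CHANGE_NUMBER': '12345'].each { k, v ->
        gerritVars += "\n${k}: '${v}'"
    }
    assert gerritVars == "\n---\nGERRIT_PROJECT: 'mk/cookiecutter-templates'\nGERRIT_CHANGE_NUMBER: '12345'"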
diff --git a/generate-cookiecutter-products.groovy b/generate-cookiecutter-products.groovy
index 0924951..5e31d36 100644
--- a/generate-cookiecutter-products.groovy
+++ b/generate-cookiecutter-products.groovy
@@ -13,165 +13,137 @@
saltModelTesting = new com.mirantis.mk.SaltModelTesting()
ssh = new com.mirantis.mk.Ssh()
-def reclassVersion = 'v1.5.4'
-if (common.validInputParam('RECLASS_VERSION')) {
- reclassVersion = RECLASS_VERSION
-}
-slaveNode = (env.SLAVE_NODE ?: 'python&&docker')
-
-// install extra formulas required only for rendering cfg01. All others - should be fetched automatically via
-// salt.master.env state, during salt-master bootstrap.
-// TODO: In the best - those data should fetched somewhere from CC, per env\context. Like option, process _enabled
-// options from CC contexts
-// currently, just mix them together in one set
-def testCfg01ExtraFormulas = 'glusterfs jenkins logrotate maas ntp rsyslog fluentd telegraf prometheus ' +
- 'grafana backupninja'
-
+reclassVersion = env.RECLASS_VERSION ?: 'v1.5.4'
+slaveNode = env.SLAVE_NODE ?: 'python&&docker'
timeout(time: 2, unit: 'HOURS') {
- node(slaveNode) {
- def templateEnv = "${env.WORKSPACE}/template"
- def modelEnv = "${env.WORKSPACE}/model"
- def testEnv = "${env.WORKSPACE}/test"
- def pipelineEnv = "${env.WORKSPACE}/pipelines"
+ node(slaveNode) {
+ def templateEnv = "${env.WORKSPACE}/template"
+ def modelEnv = "${env.WORKSPACE}/model"
+ def testEnv = "${env.WORKSPACE}/test"
+ def pipelineEnv = "${env.WORKSPACE}/pipelines"
- try {
- def templateContext = readYaml text: COOKIECUTTER_TEMPLATE_CONTEXT
- def mcpVersion = templateContext.default_context.mcp_version
- def sharedReclassUrl = templateContext.default_context.shared_reclass_url
- def clusterDomain = templateContext.default_context.cluster_domain
- def clusterName = templateContext.default_context.cluster_name
- def saltMaster = templateContext.default_context.salt_master_hostname
- def localRepositories = templateContext.default_context.local_repositories.toBoolean()
- def offlineDeployment = templateContext.default_context.offline_deployment.toBoolean()
- def cutterEnv = "${env.WORKSPACE}/cutter"
- def jinjaEnv = "${env.WORKSPACE}/jinja"
- def outputDestination = "${modelEnv}/classes/cluster/${clusterName}"
- def systemEnv = "${modelEnv}/classes/system"
- def targetBranch = "feature/${clusterName}"
- def templateBaseDir = "${env.WORKSPACE}/template"
- def templateDir = "${templateEnv}/template/dir"
- def templateOutputDir = templateBaseDir
- def user
- def testResult = false
- wrap([$class: 'BuildUser']) {
- user = env.BUILD_USER_ID
- }
-
- if (mcpVersion != '2018.4.0') {
- testCfg01ExtraFormulas += ' auditd'
- }
-
- currentBuild.description = clusterName
- print("Using context:\n" + COOKIECUTTER_TEMPLATE_CONTEXT)
-
- stage('Download Cookiecutter template') {
- sh(script: 'find . -mindepth 1 -delete > /dev/null || true')
- def cookiecutterTemplateUrl = templateContext.default_context.cookiecutter_template_url
- def cookiecutterTemplateBranch = templateContext.default_context.cookiecutter_template_branch
- git.checkoutGitRepository(templateEnv, cookiecutterTemplateUrl, 'master')
- // Use refspec if exists first of all
- if (cookiecutterTemplateBranch.toString().startsWith('refs/')) {
- dir(templateEnv) {
- ssh.agentSh("git fetch ${cookiecutterTemplateUrl} ${cookiecutterTemplateBranch} && git checkout FETCH_HEAD")
- }
- } else {
- // Use mcpVersion git tag if not specified branch for cookiecutter-templates
- if (cookiecutterTemplateBranch == '') {
- cookiecutterTemplateBranch = mcpVersion
- // Don't have nightly/testing/stable for cookiecutter-templates repo, therefore use master
- if ([ "nightly" , "testing", "stable" ].contains(mcpVersion)) {
- cookiecutterTemplateBranch = 'master'
+ try {
+ def templateContext = readYaml text: COOKIECUTTER_TEMPLATE_CONTEXT
+ def mcpVersion = templateContext.default_context.mcp_version
+ def sharedReclassUrl = templateContext.default_context.shared_reclass_url
+ def clusterDomain = templateContext.default_context.cluster_domain
+ def clusterName = templateContext.default_context.cluster_name
+ def saltMaster = templateContext.default_context.salt_master_hostname
+ def localRepositories = templateContext.default_context.local_repositories.toBoolean()
+ def offlineDeployment = templateContext.default_context.offline_deployment.toBoolean()
+ def cutterEnv = "${env.WORKSPACE}/cutter"
+ def jinjaEnv = "${env.WORKSPACE}/jinja"
+ def outputDestination = "${modelEnv}/classes/cluster/${clusterName}"
+ def systemEnv = "${modelEnv}/classes/system"
+ def targetBranch = "feature/${clusterName}"
+ def templateBaseDir = "${env.WORKSPACE}/template"
+ def templateDir = "${templateEnv}/template/dir"
+ def templateOutputDir = templateBaseDir
+ def user
+ def testResult = false
+ wrap([$class: 'BuildUser']) {
+ user = env.BUILD_USER_ID
}
- }
- git.changeGitBranch(templateEnv, cookiecutterTemplateBranch)
- }
- }
- stage('Create empty reclass model') {
- dir(path: modelEnv) {
- sh "rm -rfv .git"
- sh "git init"
- ssh.agentSh("git submodule add ${sharedReclassUrl} 'classes/system'")
- }
+ currentBuild.description = clusterName
+ print("Using context:\n" + COOKIECUTTER_TEMPLATE_CONTEXT)
- def sharedReclassBranch = templateContext.default_context.shared_reclass_branch
- // Use refspec if exists first of all
- if (sharedReclassBranch.toString().startsWith('refs/')) {
- dir(systemEnv) {
- ssh.agentSh("git fetch ${sharedReclassUrl} ${sharedReclassBranch} && git checkout FETCH_HEAD")
- }
- } else {
- // Use mcpVersion git tag if not specified branch for reclass-system
- if (sharedReclassBranch == '') {
- sharedReclassBranch = mcpVersion
- // Don't have nightly/testing for reclass-system repo, therefore use master
- if ([ "nightly" , "testing", "stable" ].contains(mcpVersion)) {
- common.warningMsg("Fetching reclass-system from master!")
- sharedReclassBranch = 'master'
+ stage('Download Cookiecutter template') {
+ sh(script: 'find . -mindepth 1 -delete > /dev/null || true')
+ def cookiecutterTemplateUrl = templateContext.default_context.cookiecutter_template_url
+ def cookiecutterTemplateBranch = templateContext.default_context.cookiecutter_template_branch
+ git.checkoutGitRepository(templateEnv, cookiecutterTemplateUrl, 'master')
+ // Use refspec if exists first of all
+ if (cookiecutterTemplateBranch.toString().startsWith('refs/')) {
+ dir(templateEnv) {
+ ssh.agentSh("git fetch ${cookiecutterTemplateUrl} ${cookiecutterTemplateBranch} && git checkout FETCH_HEAD")
+ }
+ } else {
+ // Use mcpVersion git tag if not specified branch for cookiecutter-templates
+ if (cookiecutterTemplateBranch == '') {
+ cookiecutterTemplateBranch = mcpVersion
+ // Don't have nightly/testing/stable for cookiecutter-templates repo, therefore use master
+ if (["nightly", "testing", "stable"].contains(mcpVersion)) {
+ cookiecutterTemplateBranch = 'master'
+ }
+ }
+ git.changeGitBranch(templateEnv, cookiecutterTemplateBranch)
+ }
}
- }
- git.changeGitBranch(systemEnv, sharedReclassBranch)
- }
- git.commitGitChanges(modelEnv, "Added new shared reclass submodule", "${user}@localhost", "${user}")
- }
- def productList = ["infra", "cicd", "opencontrail", "kubernetes", "openstack", "oss", "stacklight", "ceph"]
- for (product in productList) {
+ stage('Create empty reclass model') {
+ dir(path: modelEnv) {
+ sh "rm -rfv .git"
+ sh "git init"
+ ssh.agentSh("git submodule add ${sharedReclassUrl} 'classes/system'")
+ }
- // get templateOutputDir and productDir
- if (product.startsWith("stacklight")) {
- templateOutputDir = "${env.WORKSPACE}/output/stacklight"
+ def sharedReclassBranch = templateContext.default_context.shared_reclass_branch
+ // Use refspec if exists first of all
+ if (sharedReclassBranch.toString().startsWith('refs/')) {
+ dir(systemEnv) {
+ ssh.agentSh("git fetch ${sharedReclassUrl} ${sharedReclassBranch} && git checkout FETCH_HEAD")
+ }
+ } else {
+ // Use mcpVersion git tag if not specified branch for reclass-system
+ if (sharedReclassBranch == '') {
+ sharedReclassBranch = mcpVersion
+ // Don't have nightly/testing for reclass-system repo, therefore use master
+ if (["nightly", "testing", "stable"].contains(mcpVersion)) {
+ common.warningMsg("Fetching reclass-system from master!")
+ sharedReclassBranch = 'master'
+ }
+ }
+ git.changeGitBranch(systemEnv, sharedReclassBranch)
+ }
+ git.commitGitChanges(modelEnv, "Added new shared reclass submodule", "${user}@localhost", "${user}")
+ }
- def stacklightVersion
- try {
- stacklightVersion = templateContext.default_context['stacklight_version']
- } catch (Throwable e) {
- common.warningMsg('Stacklight version loading failed')
- }
+ def productList = ["infra", "cicd", "opencontrail", "kubernetes", "openstack", "oss", "stacklight", "ceph"]
+ for (product in productList) {
- if (stacklightVersion) {
- productDir = "stacklight" + stacklightVersion
- } else {
- productDir = "stacklight1"
- }
+ // get templateOutputDir and productDir
+ templateOutputDir = "${env.WORKSPACE}/output/${product}"
+ productDir = product
+ templateDir = "${templateEnv}/cluster_product/${productDir}"
+ // Bw for 2018.8.1 and older releases
+ if (product.startsWith("stacklight") && (!fileExists(templateDir))) {
+ common.warningMsg("Old release detected! productDir => 'stacklight2' ")
+ productDir = "stacklight2"
+ templateDir = "${templateEnv}/cluster_product/${productDir}"
+ }
- } else {
- templateOutputDir = "${env.WORKSPACE}/output/${product}"
- productDir = product
- }
+ if (product == "infra" || (templateContext.default_context["${product}_enabled"]
+ && templateContext.default_context["${product}_enabled"].toBoolean())) {
- if (product == "infra" || (templateContext.default_context["${product}_enabled"]
- && templateContext.default_context["${product}_enabled"].toBoolean())) {
+ common.infoMsg("Generating product " + product + " from " + templateDir + " to " + templateOutputDir)
- templateDir = "${templateEnv}/cluster_product/${productDir}"
- common.infoMsg("Generating product " + product + " from " + templateDir + " to " + templateOutputDir)
+ sh "rm -rf ${templateOutputDir} || true"
+ sh "mkdir -p ${templateOutputDir}"
+ sh "mkdir -p ${outputDestination}"
- sh "rm -rf ${templateOutputDir} || true"
- sh "mkdir -p ${templateOutputDir}"
- sh "mkdir -p ${outputDestination}"
+ python.setupCookiecutterVirtualenv(cutterEnv)
+ python.buildCookiecutterTemplate(templateDir, COOKIECUTTER_TEMPLATE_CONTEXT, templateOutputDir, cutterEnv, templateBaseDir)
+ sh "mv -v ${templateOutputDir}/${clusterName}/* ${outputDestination}"
+ } else {
+ common.warningMsg("Product " + product + " is disabled")
+ }
+ }
- python.setupCookiecutterVirtualenv(cutterEnv)
- python.buildCookiecutterTemplate(templateDir, COOKIECUTTER_TEMPLATE_CONTEXT, templateOutputDir, cutterEnv, templateBaseDir)
- sh "mv -v ${templateOutputDir}/${clusterName}/* ${outputDestination}"
- } else {
- common.warningMsg("Product " + product + " is disabled")
- }
- }
+ if (localRepositories && !offlineDeployment) {
+ def aptlyModelUrl = templateContext.default_context.local_model_url
+ dir(path: modelEnv) {
+ ssh.agentSh "git submodule add \"${aptlyModelUrl}\" \"classes/cluster/${clusterName}/cicd/aptly\""
+ if (!(mcpVersion in ["nightly", "testing", "stable"])) {
+ ssh.agentSh "cd \"classes/cluster/${clusterName}/cicd/aptly\";git fetch --tags;git checkout ${mcpVersion}"
+ }
+ }
+ }
- if (localRepositories && !offlineDeployment) {
- def aptlyModelUrl = templateContext.default_context.local_model_url
- dir(path: modelEnv) {
- ssh.agentSh "git submodule add \"${aptlyModelUrl}\" \"classes/cluster/${clusterName}/cicd/aptly\""
- if (!(mcpVersion in ["nightly", "testing", "stable"])) {
- ssh.agentSh "cd \"classes/cluster/${clusterName}/cicd/aptly\";git fetch --tags;git checkout ${mcpVersion}"
- }
- }
- }
-
- stage('Generate new SaltMaster node') {
- def nodeFile = "${modelEnv}/nodes/${saltMaster}.${clusterDomain}.yml"
- def nodeString = """classes:
+ stage('Generate new SaltMaster node') {
+ def nodeFile = "${modelEnv}/nodes/${saltMaster}.${clusterDomain}.yml"
+ def nodeString = """classes:
- cluster.${clusterName}.infra.config
parameters:
_param:
@@ -182,157 +154,159 @@
name: ${saltMaster}
domain: ${clusterDomain}
"""
- sh "mkdir -p ${modelEnv}/nodes/"
- writeFile(file: nodeFile, text: nodeString)
+ sh "mkdir -p ${modelEnv}/nodes/"
+ writeFile(file: nodeFile, text: nodeString)
- git.commitGitChanges(modelEnv, "Create model ${clusterName}", "${user}@localhost", "${user}")
- }
+ git.commitGitChanges(modelEnv, "Create model ${clusterName}", "${user}@localhost", "${user}")
+ }
- stage("Test") {
- if (TEST_MODEL.toBoolean() && sharedReclassUrl != '') {
- sh("cp -r ${modelEnv} ${testEnv}")
- def DockerCName = "${env.JOB_NAME.toLowerCase()}_${env.BUILD_TAG.toLowerCase()}"
- common.infoMsg("Attempt to run test against formula-version: ${mcpVersion}")
- testResult = saltModelTesting.setupAndTestNode(
- "${saltMaster}.${clusterDomain}",
- "",
- testCfg01ExtraFormulas,
- testEnv,
- 'pkg',
- mcpVersion,
- reclassVersion,
- 0,
- false,
- false,
- '',
- '',
- DockerCName)
- if (testResult) {
- common.infoMsg("Test finished: SUCCESS")
- } else {
- common.warningMsg('Test finished: FAILURE')
- }
- } else {
- common.warningMsg("Test stage has been skipped!")
+ stage("Test") {
+ if (TEST_MODEL.toBoolean() && sharedReclassUrl != '') {
+ sh("cp -r ${modelEnv} ${testEnv}")
+ def DockerCName = "${env.JOB_NAME.toLowerCase()}_${env.BUILD_TAG.toLowerCase()}"
+ common.infoMsg("Attempt to run test against formula-version: ${mcpVersion}")
+ try {
+ def config = [
+ 'dockerHostname' : "${saltMaster}.${clusterDomain}",
+ 'reclassEnv' : testEnv,
+ 'formulasRevision' : mcpVersion,
+ 'reclassVersion' : reclassVersion,
+ 'dockerContainerName': DockerCName,
+ 'testContext' : 'salt-model-node'
+ ]
+ testResult = saltModelTesting.testNode(config)
+ common.infoMsg("Test finished: SUCCESS")
+ } catch (Exception ex) {
+ common.warningMsg("Test finished: FAILED")
+ testResult = false
+ }
+ } else {
+ common.warningMsg("Test stage has been skipped!")
+ }
+ }
+ stage("Generate config drives") {
+ // apt package genisoimage is required for this stage
+
+ // download create-config-drive
+ // FIXME: this should be refactored to use git clone, to be able to download it from a custom repo.
+ def mcpCommonScriptsBranch = templateContext['default_context']['mcp_common_scripts_branch']
+ if (mcpCommonScriptsBranch == '') {
+ mcpCommonScriptsBranch = mcpVersion
+ // Don't have n/t/s for mcp-common-scripts repo, therefore use master
+ if (["nightly", "testing", "stable"].contains(mcpVersion)) {
+ common.warningMsg("Fetching mcp-common-scripts from master!")
+ mcpCommonScriptsBranch = 'master'
+ }
+ }
+
+ def commonScriptsRepoUrl = 'https://gerrit.mcp.mirantis.com/mcp/mcp-common-scripts'
+ checkout([
+ $class : 'GitSCM',
+ branches : [[name: 'FETCH_HEAD'],],
+ extensions : [[$class: 'RelativeTargetDirectory', relativeTargetDir: 'mcp-common-scripts']],
+ userRemoteConfigs: [[url: commonScriptsRepoUrl, refspec: mcpCommonScriptsBranch],],
+ ])
+
+ sh "cp mcp-common-scripts/config-drive/create_config_drive.sh create-config-drive && chmod +x create-config-drive"
+ sh "[ -f mcp-common-scripts/config-drive/master_config.sh ] && cp mcp-common-scripts/config-drive/master_config.sh user_data || cp mcp-common-scripts/config-drive/master_config.yaml user_data"
+
+ sh "git clone --mirror https://github.com/Mirantis/mk-pipelines.git ${pipelineEnv}/mk-pipelines"
+ sh "git clone --mirror https://github.com/Mirantis/pipeline-library.git ${pipelineEnv}/pipeline-library"
+ args = "--user-data user_data --hostname ${saltMaster} --model ${modelEnv} --mk-pipelines ${pipelineEnv}/mk-pipelines/ --pipeline-library ${pipelineEnv}/pipeline-library/ ${saltMaster}.${clusterDomain}-config.iso"
+
+ // load data from model
+ def smc = [:]
+ smc['SALT_MASTER_MINION_ID'] = "${saltMaster}.${clusterDomain}"
+ smc['SALT_MASTER_DEPLOY_IP'] = templateContext['default_context']['salt_master_management_address']
+ smc['DEPLOY_NETWORK_GW'] = templateContext['default_context']['deploy_network_gateway']
+ smc['DEPLOY_NETWORK_NETMASK'] = templateContext['default_context']['deploy_network_netmask']
+ if (templateContext['default_context'].get('deploy_network_mtu')) {
+ smc['DEPLOY_NETWORK_MTU'] = templateContext['default_context']['deploy_network_mtu']
+ }
+ smc['DNS_SERVERS'] = templateContext['default_context']['dns_server01']
+ smc['MCP_VERSION'] = "${mcpVersion}"
+ if (templateContext['default_context']['local_repositories'] == 'True') {
+ def localRepoIP = templateContext['default_context']['local_repo_url']
+ smc['MCP_SALT_REPO_KEY'] = "http://${localRepoIP}/public.gpg"
+ smc['MCP_SALT_REPO_URL'] = "http://${localRepoIP}/ubuntu-xenial"
+ smc['PIPELINES_FROM_ISO'] = 'false'
+ smc['PIPELINE_REPO_URL'] = "http://${localRepoIP}:8088"
+ smc['LOCAL_REPOS'] = 'true'
+ }
+ if (templateContext['default_context']['upstream_proxy_enabled'] == 'True') {
+ if (templateContext['default_context']['upstream_proxy_auth_enabled'] == 'True') {
+ smc['http_proxy'] = 'http://' + templateContext['default_context']['upstream_proxy_user'] + ':' + templateContext['default_context']['upstream_proxy_password'] + '@' + templateContext['default_context']['upstream_proxy_address'] + ':' + templateContext['default_context']['upstream_proxy_port']
+ smc['https_proxy'] = 'http://' + templateContext['default_context']['upstream_proxy_user'] + ':' + templateContext['default_context']['upstream_proxy_password'] + '@' + templateContext['default_context']['upstream_proxy_address'] + ':' + templateContext['default_context']['upstream_proxy_port']
+ } else {
+ smc['http_proxy'] = 'http://' + templateContext['default_context']['upstream_proxy_address'] + ':' + templateContext['default_context']['upstream_proxy_port']
+ smc['https_proxy'] = 'http://' + templateContext['default_context']['upstream_proxy_address'] + ':' + templateContext['default_context']['upstream_proxy_port']
+ }
+ }
+
+ for (i in common.entries(smc)) {
+ sh "sed -i 's,${i[0]}=.*,${i[0]}=${i[1]},' user_data"
+ }
+
+ // create cfg config-drive
+ sh "./create-config-drive ${args}"
+ sh("mkdir output-${clusterName} && mv ${saltMaster}.${clusterDomain}-config.iso output-${clusterName}/")
+
+ // save cfg iso to artifacts
+ archiveArtifacts artifacts: "output-${clusterName}/${saltMaster}.${clusterDomain}-config.iso"
+
+ if (templateContext['default_context']['local_repositories'] == 'True') {
+ def aptlyServerHostname = templateContext.default_context.aptly_server_hostname
+ sh "cp mcp-common-scripts/config-drive/mirror_config.sh mirror_config.sh"
+
+ def smc_apt = [:]
+ smc_apt['SALT_MASTER_DEPLOY_IP'] = templateContext['default_context']['salt_master_management_address']
+ smc_apt['APTLY_DEPLOY_IP'] = templateContext['default_context']['aptly_server_deploy_address']
+ smc_apt['APTLY_DEPLOY_NETMASK'] = templateContext['default_context']['deploy_network_netmask']
+ smc_apt['APTLY_MINION_ID'] = "${aptlyServerHostname}.${clusterDomain}"
+
+ for (i in common.entries(smc_apt)) {
+ sh "sed -i \"s,export ${i[0]}=.*,export ${i[0]}=${i[1]},\" mirror_config.sh"
+ }
+
+ // create apt config-drive
+ sh "./create-config-drive --user-data mirror_config.sh --hostname ${aptlyServerHostname} ${aptlyServerHostname}.${clusterDomain}-config.iso"
+ sh("mv ${aptlyServerHostname}.${clusterDomain}-config.iso output-${clusterName}/")
+
+ // save apt iso to artifacts
+ archiveArtifacts artifacts: "output-${clusterName}/${aptlyServerHostname}.${clusterDomain}-config.iso"
+ }
+ }
+
+ stage('Save changes reclass model') {
+ sh(returnStatus: true, script: "tar -czf output-${clusterName}/${clusterName}.tar.gz --exclude='*@tmp' -C ${modelEnv} .")
+ archiveArtifacts artifacts: "output-${clusterName}/${clusterName}.tar.gz"
+
+ if (EMAIL_ADDRESS != null && EMAIL_ADDRESS != "") {
+ emailext(to: EMAIL_ADDRESS,
+ attachmentsPattern: "output-${clusterName}/*",
+ body: "Mirantis Jenkins\n\nRequested reclass model ${clusterName} has been created and attached to this email.\nEnjoy!\n\nMirantis",
+ subject: "Your Salt model ${clusterName}")
+ }
+ dir("output-${clusterName}") {
+ deleteDir()
+ }
+ }
+
+ // Fail, but leave the possibility to collect artifacts from the failed run
+ if (!testResult && TEST_MODEL.toBoolean()) {
+ common.warningMsg('Test finished: FAILURE. Please check logs and/or debug the failed model manually!')
+ error('Test stage finished: FAILURE')
+ }
+
+ } catch (Throwable e) {
+ currentBuild.result = "FAILURE"
+ currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+ throw e
+ } finally {
+ stage('Clean workspace directories') {
+ sh(script: 'find . -mindepth 1 -delete > /dev/null || true')
+ }
+ // common.sendNotification(currentBuild.result,"",["slack"])
}
- }
- stage("Generate config drives") {
- // apt package genisoimage is required for this stage
-
- // download create-config-drive
- // FIXME: that should be refactored, to use git clone - to be able download it from custom repo.
- def mcpCommonScriptsBranch = templateContext.default_context.mcp_common_scripts_branch
- if (mcpCommonScriptsBranch == '') {
- mcpCommonScriptsBranch = mcpVersion
- // Don't have n/t/s for mcp-common-scripts repo, therefore use master
- if ([ "nightly" , "testing", "stable" ].contains(mcpVersion)) {
- common.warningMsg("Fetching mcp-common-scripts from master!")
- mcpCommonScriptsBranch = 'master'
- }
- }
- def config_drive_script_url = "https://raw.githubusercontent.com/Mirantis/mcp-common-scripts/${mcpCommonScriptsBranch}/config-drive/create_config_drive.sh"
- def user_data_script_url = "https://raw.githubusercontent.com/Mirantis/mcp-common-scripts/${mcpCommonScriptsBranch}/config-drive/master_config.sh"
- common.retry(3, 5) {
- sh "wget -O create-config-drive ${config_drive_script_url} && chmod +x create-config-drive"
- sh "wget -O user_data.sh ${user_data_script_url}"
- }
-
- sh "git clone --mirror https://github.com/Mirantis/mk-pipelines.git ${pipelineEnv}/mk-pipelines"
- sh "git clone --mirror https://github.com/Mirantis/pipeline-library.git ${pipelineEnv}/pipeline-library"
- args = "--user-data user_data.sh --hostname ${saltMaster} --model ${modelEnv} --mk-pipelines ${pipelineEnv}/mk-pipelines/ --pipeline-library ${pipelineEnv}/pipeline-library/ ${saltMaster}.${clusterDomain}-config.iso"
-
- // load data from model
- def smc = [:]
- smc['SALT_MASTER_MINION_ID'] = "${saltMaster}.${clusterDomain}"
- smc['SALT_MASTER_DEPLOY_IP'] = templateContext['default_context']['salt_master_management_address']
- smc['DEPLOY_NETWORK_GW'] = templateContext['default_context']['deploy_network_gateway']
- smc['DEPLOY_NETWORK_NETMASK'] = templateContext['default_context']['deploy_network_netmask']
- smc['DNS_SERVERS'] = templateContext['default_context']['dns_server01']
- smc['MCP_VERSION'] = "${mcpVersion}"
- if (templateContext['default_context']['local_repositories'] == 'True') {
- def localRepoIP = templateContext['default_context']['local_repo_url']
- smc['MCP_SALT_REPO_KEY'] = "http://${localRepoIP}/public.gpg"
- smc['MCP_SALT_REPO_URL'] = "http://${localRepoIP}/ubuntu-xenial"
- smc['PIPELINES_FROM_ISO'] = 'false'
- smc['PIPELINE_REPO_URL'] = "http://${localRepoIP}:8088"
- smc['LOCAL_REPOS'] = 'true'
- }
- if (templateContext['default_context']['upstream_proxy_enabled'] == 'True') {
- if (templateContext['default_context']['upstream_proxy_auth_enabled'] == 'True') {
- smc['http_proxy'] = 'http://' + templateContext['default_context']['upstream_proxy_user'] + ':' + templateContext['default_context']['upstream_proxy_password'] + '@' + templateContext['default_context']['upstream_proxy_address'] + ':' + templateContext['default_context']['upstream_proxy_port']
- smc['https_proxy'] = 'http://' + templateContext['default_context']['upstream_proxy_user'] + ':' + templateContext['default_context']['upstream_proxy_password'] + '@' + templateContext['default_context']['upstream_proxy_address'] + ':' + templateContext['default_context']['upstream_proxy_port']
- } else {
- smc['http_proxy'] = 'http://' + templateContext['default_context']['upstream_proxy_address'] + ':' + templateContext['default_context']['upstream_proxy_port']
- smc['https_proxy'] = 'http://' + templateContext['default_context']['upstream_proxy_address'] + ':' + templateContext['default_context']['upstream_proxy_port']
- }
- }
-
- for (i in common.entries(smc)) {
- sh "sed -i 's,export ${i[0]}=.*,export ${i[0]}=${i[1]},' user_data.sh"
- }
-
- // create cfg config-drive
- sh "./create-config-drive ${args}"
- sh("mkdir output-${clusterName} && mv ${saltMaster}.${clusterDomain}-config.iso output-${clusterName}/")
-
- // save cfg iso to artifacts
- archiveArtifacts artifacts: "output-${clusterName}/${saltMaster}.${clusterDomain}-config.iso"
-
- if (templateContext['default_context']['local_repositories'] == 'True') {
- def aptlyServerHostname = templateContext.default_context.aptly_server_hostname
- def user_data_script_apt_url = "https://raw.githubusercontent.com/Mirantis/mcp-common-scripts/master/config-drive/mirror_config.sh"
- sh "wget -O mirror_config.sh ${user_data_script_apt_url}"
-
- def smc_apt = [:]
- smc_apt['SALT_MASTER_DEPLOY_IP'] = templateContext['default_context']['salt_master_management_address']
- smc_apt['APTLY_DEPLOY_IP'] = templateContext['default_context']['aptly_server_deploy_address']
- smc_apt['APTLY_DEPLOY_NETMASK'] = templateContext['default_context']['deploy_network_netmask']
- smc_apt['APTLY_MINION_ID'] = "${aptlyServerHostname}.${clusterDomain}"
-
- for (i in common.entries(smc_apt)) {
- sh "sed -i \"s,export ${i[0]}=.*,export ${i[0]}=${i[1]},\" mirror_config.sh"
- }
-
- // create apt config-drive
- sh "./create-config-drive --user-data mirror_config.sh --hostname ${aptlyServerHostname} ${aptlyServerHostname}.${clusterDomain}-config.iso"
- sh("mv ${aptlyServerHostname}.${clusterDomain}-config.iso output-${clusterName}/")
-
- // save apt iso to artifacts
- archiveArtifacts artifacts: "output-${clusterName}/${aptlyServerHostname}.${clusterDomain}-config.iso"
- }
- }
-
- stage('Save changes reclass model') {
- sh(returnStatus: true, script: "tar -czf output-${clusterName}/${clusterName}.tar.gz --exclude='*@tmp' -C ${modelEnv} .")
- archiveArtifacts artifacts: "output-${clusterName}/${clusterName}.tar.gz"
-
-
- if (EMAIL_ADDRESS != null && EMAIL_ADDRESS != "") {
- emailext(to: EMAIL_ADDRESS,
- attachmentsPattern: "output-${clusterName}/*",
- body: "Mirantis Jenkins\n\nRequested reclass model ${clusterName} has been created and attached to this email.\nEnjoy!\n\nMirantis",
- subject: "Your Salt model ${clusterName}")
- }
- dir("output-${clusterName}") {
- deleteDir()
- }
- }
-
- // Fail, but leave possibility to get failed artifacts
- if (!testResult && TEST_MODEL.toBoolean()) {
- common.warningMsg('Test finished: FAILURE. Please check logs and\\or debug failed model manually!')
- error('Test stage finished: FAILURE')
- }
-
- } catch (Throwable e) {
- currentBuild.result = "FAILURE"
- currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
- throw e
- } finally {
- stage('Clean workspace directories') {
- sh(script: 'find . -mindepth 1 -delete > /dev/null || true')
- }
- // common.sendNotification(currentBuild.result,"",["slack"])
}
- }
}
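Since the whole pipeline is driven by COOKIECUTTER_TEMPLATE_CONTEXT, a minimal context covering only the default_context keys this file reads may help; every value below is illustrative, not a supported default:

    // Sketch: minimal COOKIECUTTER_TEMPLATE_CONTEXT for this pipeline (values invented).
    def contextYaml = '''
    default_context:
      mcp_version: testing
      cluster_name: mcp-test
      cluster_domain: mcp-test.local
      salt_master_hostname: cfg01
      salt_master_management_address: 10.0.0.15
      deploy_network_gateway: 10.0.0.1
      deploy_network_netmask: 255.255.255.0
      dns_server01: 8.8.8.8
      shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
      shared_reclass_branch: ''
      cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
      cookiecutter_template_branch: ''
      mcp_common_scripts_branch: ''
      local_repositories: 'False'
      offline_deployment: 'False'
      upstream_proxy_enabled: 'False'
    '''
    def templateContext = readYaml text: contextYaml
    assert templateContext.default_context.cluster_name == 'mcp-test'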
diff --git a/generate-salt-model-docs-pipeline.groovy b/generate-salt-model-docs-pipeline.groovy
index 4a36f0e..59dd3eb 100644
--- a/generate-salt-model-docs-pipeline.groovy
+++ b/generate-salt-model-docs-pipeline.groovy
@@ -14,83 +14,83 @@
salt = new com.mirantis.mk.Salt()
timeout(time: 12, unit: 'HOURS') {
- node("python") {
- try {
- def workspace = common.getWorkspace()
- def masterName = "cfg01." + CLUSTER_NAME.replace("-","_") + ".lab"
- def jenkinsUserIds = common.getJenkinsUserIds()
- def img = docker.image("tcpcloud/salt-models-testing:nightly")
- img.pull()
- img.inside("-u root:root --hostname ${masterName} --ulimit nofile=4096:8192 --cpus=2") {
- stage("Prepare salt env") {
- if(MODEL_GIT_REF != "" && MODEL_GIT_URL != "") {
- checkouted = gerrit.gerritPatchsetCheckout(MODEL_GIT_URL, MODEL_GIT_REF, "HEAD", CREDENTIALS_ID)
- } else {
- throw new Exception("Cannot checkout gerrit patchset, MODEL_GIT_URL or MODEL_GIT_REF is null")
- }
- if(checkouted) {
- if (fileExists('classes/system')) {
- ssh.prepareSshAgentKey(CREDENTIALS_ID)
- dir('classes/system') {
- // XXX: JENKINS-33510 dir step not work properly inside containers, so let's taky reclass system model directly
- //remoteUrl = git.getGitRemote()
- ssh.ensureKnownHosts("https://github.com/Mirantis/reclass-system-salt-model")
+ node("python") {
+ try {
+ def workspace = common.getWorkspace()
+ def masterName = "cfg01." + CLUSTER_NAME.replace("-", "_") + ".lab"
+ def jenkinsUserIds = common.getJenkinsUserIds()
+ def img = docker.image("tcpcloud/salt-models-testing:nightly")
+ img.pull()
+ img.inside("-u root:root --hostname ${masterName} --ulimit nofile=4096:8192 --cpus=2") {
+ stage("Prepare salt env") {
+ if (MODEL_GIT_REF != "" && MODEL_GIT_URL != "") {
+ checkouted = gerrit.gerritPatchsetCheckout(MODEL_GIT_URL, MODEL_GIT_REF, "HEAD", CREDENTIALS_ID)
+ } else {
+ throw new Exception("Cannot checkout gerrit patchset, MODEL_GIT_URL or MODEL_GIT_REF is null")
}
- ssh.agentSh("git submodule init; git submodule sync; git submodule update --recursive")
- }
- }
- withEnv(["MASTER_HOSTNAME=${masterName}", "CLUSTER_NAME=${CLUSTER_NAME}", "MINION_ID=${masterName}"]){
- sh("cp -r ${workspace}/* /srv/salt/reclass && echo '127.0.1.2 salt' >> /etc/hosts")
- sh("""bash -c 'source /srv/salt/scripts/bootstrap.sh; cd /srv/salt/scripts \
+ if (checkouted) {
+ if (fileExists('classes/system')) {
+ ssh.prepareSshAgentKey(CREDENTIALS_ID)
+ dir('classes/system') {
+ // XXX: JENKINS-33510: the dir step does not work properly inside containers, so take the reclass system model directly
+ //remoteUrl = git.getGitRemote()
+ ssh.ensureKnownHosts("https://github.com/Mirantis/reclass-system-salt-model")
+ }
+ ssh.agentSh("git submodule init; git submodule sync; git submodule update --recursive")
+ }
+ }
+ withEnv(["MASTER_HOSTNAME=${masterName}", "CLUSTER_NAME=${CLUSTER_NAME}", "MINION_ID=${masterName}"]) {
+ sh("cp -r ${workspace}/* /srv/salt/reclass && echo '127.0.1.2 salt' >> /etc/hosts")
+ sh("""bash -c 'source /srv/salt/scripts/bootstrap.sh; cd /srv/salt/scripts \
&& source_local_envs \
&& configure_salt_master \
&& configure_salt_minion \
&& install_salt_formula_pkg; \
saltservice_restart; \
saltmaster_init'""")
- }
- }
- stage("Generate documentation"){
- def saltResult = sh(script:"salt-call state.sls salt.minion,sphinx.server,nginx", returnStatus:true)
- if(saltResult > 0){
- common.warnMsg("Salt call salt.minion,sphinx.server,nginx failed but continuing")
+ }
}
- }
- stage("Publish outputs"){
- try {
- // /srv/static/sites/reclass_doc will be used for publishHTML step
- // /srv/static/extern will be used as tar artifact
- def outputPresent = sh(script:"ls /srv/static/sites/reclass_doc > /dev/null 2>&1 && ls /srv/static/extern > /dev/null 2>&1", returnStatus: true) == 0
- if(outputPresent){
- sh("""mkdir ${workspace}/output && \
+ stage("Generate documentation") {
+ def saltResult = sh(script: "salt-call state.sls salt.minion,sphinx.server,nginx", returnStatus: true)
+ if (saltResult > 0) {
+ common.warnMsg("Salt call salt.minion,sphinx.server,nginx failed but continuing")
+ }
+ }
+ stage("Publish outputs") {
+ try {
+ // /srv/static/sites/reclass_doc will be used for publishHTML step
+ // /srv/static/extern will be used as tar artifact
+ def outputPresent = sh(script: "ls /srv/static/sites/reclass_doc > /dev/null 2>&1 && ls /srv/static/extern > /dev/null 2>&1", returnStatus: true) == 0
+ if (outputPresent) {
+ sh("""mkdir ${workspace}/output && \
tar -zcf ${workspace}/output/docs-html.tar.gz /srv/static/sites/reclass_doc && \
tar -zcf ${workspace}/output/docs-src.tar.gz /srv/static/extern && \
cp -R /srv/static/sites/reclass_doc ${workspace}/output && \
chown -R ${jenkinsUserIds[0]}:${jenkinsUserIds[1]} ${workspace}/output""")
- publishHTML (target: [
- alwaysLinkToLastBuild: true,
- keepAll: true,
- reportDir: 'output/reclass_doc',
- reportFiles: 'index.html',
- reportName: "Reclass-documentation"
- ])
- archiveArtifacts artifacts: "output/*"
- } else {
- common.errorMsg("Documentation publish failed, one of output directories /srv/static/sites/reclass_doc or /srv/static/extern not exists!")
- }
- } catch(Exception e) {
- common.errorMsg("Documentation publish stage failed!")
+ publishHTML(target: [
+ alwaysLinkToLastBuild: true,
+ keepAll : true,
+ reportDir : 'output/reclass_doc',
+ reportFiles : 'index.html',
+ reportName : "Reclass-documentation"
+ ])
+ archiveArtifacts artifacts: "output/*"
+ } else {
+                            common.errorMsg("Documentation publish failed, one of the output directories /srv/static/sites/reclass_doc or /srv/static/extern does not exist!")
+ }
+ } catch (Exception e) {
+ common.errorMsg("Documentation publish stage failed!")
+ }
}
- }
- }
- } catch (Throwable e) {
- // If there was an error or exception thrown, the build failed
- currentBuild.result = "FAILURE"
- currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
- throw e
- } finally {
- common.sendNotification(currentBuild.result, "", ["slack"])
+ }
+ } catch (Throwable e) {
+ // If there was an error or exception thrown, the build failed
+ currentBuild.result = "FAILURE"
+ currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+ throw e
+ } finally {
+ common.sendNotification(currentBuild.result, "", ["slack"])
+ }
}
- }
}
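
For context, the container wiring above relies on the Jenkins Docker Pipeline plugin: docker.image().inside() runs the enclosed steps in a container that shares the job workspace, which is why the pipeline can chown its outputs back to the Jenkins user before archiving. A minimal sketch of the pattern (the image tag is the one used above; the hostname is illustrative):

    def img = docker.image("tcpcloud/salt-models-testing:nightly")
    img.pull()                                        // always refresh the nightly tag
    img.inside("-u root:root --hostname cfg01.example.lab") {
        sh("salt-call --version")                     // steps here execute inside the container
    }
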
diff --git a/k8s-upgrade-pipeline.groovy b/k8s-upgrade-pipeline.groovy
index 3e7828b..1dfc13a 100644
--- a/k8s-upgrade-pipeline.groovy
+++ b/k8s-upgrade-pipeline.groovy
@@ -10,6 +10,8 @@
* CTL_TARGET Salt targeted kubernetes CTL nodes (ex. I@kubernetes:master). Kubernetes control plane
* CMP_TARGET Salt targeted compute nodes (ex. cmp* and 'I@kubernetes:pool') Kubernetes computes
* PER_NODE Target nodes will be managed one by one (bool)
+ *   SIMPLE_UPGRADE            Use the previous upgrade flow, without cordon/drain abilities
+ *   UPGRADE_DOCKER            Upgrade the Docker component
*
**/
def common = new com.mirantis.mk.Common()
@@ -50,6 +52,51 @@
}
}
+def cordonNode(pepperEnv, target) {
+ def salt = new com.mirantis.mk.Salt()
+ def originalTarget = "I@kubernetes:master and not ${target}"
+
+ stage("Cordoning ${target} kubernetes node") {
+ def nodeShortName = target.tokenize(".")[0]
+ salt.cmdRun(pepperEnv, originalTarget, "kubectl cordon ${nodeShortName}", true, 1)
+ }
+}
+
+def uncordonNode(pepperEnv, target) {
+ def salt = new com.mirantis.mk.Salt()
+ def originalTarget = "I@kubernetes:master and not ${target}"
+
+ stage("Uncordoning ${target} kubernetes node") {
+ def nodeShortName = target.tokenize(".")[0]
+ salt.cmdRun(pepperEnv, originalTarget, "kubectl uncordon ${nodeShortName}", true, 1)
+ }
+}
+
+def drainNode(pepperEnv, target) {
+ def salt = new com.mirantis.mk.Salt()
+ def originalTarget = "I@kubernetes:master and not ${target}"
+
+ stage("Draining ${target} kubernetes node") {
+ def nodeShortName = target.tokenize(".")[0]
+ salt.cmdRun(pepperEnv, originalTarget, "kubectl drain --force --ignore-daemonsets --grace-period 100 --timeout 300s --delete-local-data ${nodeShortName}", true, 1)
+ }
+}
+
+def regenerateCerts(pepperEnv, target) {
+ def salt = new com.mirantis.mk.Salt()
+
+ stage("Regenerate certs for ${target}") {
+ salt.enforceState(pepperEnv, target, 'salt.minion.cert')
+ }
+}
+
+def upgradeDocker(pepperEnv, target) {
+ def salt = new com.mirantis.mk.Salt()
+
+ stage("Upgrading docker at ${target}") {
+ salt.enforceState(pepperEnv, target, 'docker.host')
+ }
+}
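
A small sketch of why the helpers above tokenize the target: the code assumes Kubernetes registers nodes under their short hostname while Salt minion ids are FQDNs, so the kubectl node name is derived from the first label of the minion id (the minion id below is hypothetical):

    def target = "ctl01.cluster.local"        // hypothetical minion id
    def nodeShortName = target.tokenize(".")[0]
    assert nodeShortName == "ctl01"           // the name passed to kubectl cordon/drain/uncordon
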
timeout(time: 12, unit: 'HOURS') {
node() {
@@ -73,7 +120,18 @@
def targetHosts = salt.getMinionsSorted(pepperEnv, target)
for (t in targetHosts) {
- performKubernetesControlUpdate(pepperEnv, t)
+ if (SIMPLE_UPGRADE.toBoolean()) {
+ performKubernetesControlUpdate(pepperEnv, t)
+ } else {
+ cordonNode(pepperEnv, t)
+ drainNode(pepperEnv, t)
+ regenerateCerts(pepperEnv, t)
+ if (UPGRADE_DOCKER.toBoolean()) {
+ upgradeDocker(pepperEnv, t)
+ }
+ performKubernetesControlUpdate(pepperEnv, t)
+ uncordonNode(pepperEnv, t)
+ }
}
} else {
performKubernetesControlUpdate(pepperEnv, target)
@@ -87,7 +145,18 @@
def targetHosts = salt.getMinionsSorted(pepperEnv, target)
for (t in targetHosts) {
- performKubernetesComputeUpdate(pepperEnv, t)
+ if (SIMPLE_UPGRADE.toBoolean()) {
+ performKubernetesComputeUpdate(pepperEnv, t)
+ } else {
+ cordonNode(pepperEnv, t)
+ drainNode(pepperEnv, t)
+ regenerateCerts(pepperEnv, t)
+ if (UPGRADE_DOCKER.toBoolean()) {
+ upgradeDocker(pepperEnv, t)
+ }
+ performKubernetesComputeUpdate(pepperEnv, t)
+ uncordonNode(pepperEnv, t)
+ }
}
} else {
performKubernetesComputeUpdate(pepperEnv, target)
diff --git a/opencontrail-upgrade.groovy b/opencontrail-upgrade.groovy
index af96600..4d9d498 100644
--- a/opencontrail-upgrade.groovy
+++ b/opencontrail-upgrade.groovy
@@ -33,7 +33,7 @@
def CONTROL_PKGS = 'contrail-config contrail-config-openstack contrail-control contrail-dns contrail-lib contrail-nodemgr contrail-utils contrail-web-controller contrail-web-core neutron-plugin-contrail python-contrail'
def ANALYTIC_PKGS = 'contrail-analytics contrail-lib contrail-nodemgr contrail-utils python-contrail'
def CMP_PKGS = 'contrail-lib contrail-nodemgr contrail-utils contrail-vrouter-agent contrail-vrouter-utils python-contrail python-contrail-vrouter-api python-opencontrail-vrouter-netns contrail-vrouter-dkms contrail-nova-driver'
-def KERNEL_MODULE_RELOAD = 'service supervisor-vrouter stop;ifdown vhost0;rmmod vrouter;modprobe vrouter;ifup vhost0;service supervisor-vrouter start;'
+def KERNEL_MODULE_RELOAD = 'service supervisor-vrouter stop; rmmod vrouter; sync && echo 3 > /proc/sys/vm/drop_caches && echo 1 > /proc/sys/vm/compact_memory; service supervisor-vrouter start'
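
The reworked KERNEL_MODULE_RELOAD no longer bounces vhost0 via ifdown/ifup; it stops supervisor-vrouter, unloads the module, then drops the page cache (echo 3 > /proc/sys/vm/drop_caches) and compacts memory (echo 1 > /proc/sys/vm/compact_memory), presumably so the reloaded vrouter module can allocate large contiguous memory regions, before starting the service again. A sketch of how such a command string is typically fired in these pipelines (the target below is illustrative):

    def salt = new com.mirantis.mk.Salt()
    // run the reload sequence as a single shell command on the vrouter computes
    salt.cmdRun(pepperEnv, 'I@opencontrail:compute', KERNEL_MODULE_RELOAD)
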
def void runCommonCommands(target, command, args, check, salt, pepperEnv, common) {
diff --git a/opencontrail40-upgrade.groovy b/opencontrail40-upgrade.groovy
index 76243e5..52a0d23 100644
--- a/opencontrail40-upgrade.groovy
+++ b/opencontrail40-upgrade.groovy
@@ -26,13 +26,16 @@
def command = 'cmd.shell'
def controlPkgs = 'contrail-config,contrail-config-openstack,contrail-control,contrail-dns,contrail-lib,contrail-nodemgr,contrail-utils,contrail-web-controller,contrail-web-core,neutron-plugin-contrail,python-contrail,contrail-database'
+def thirdPartyControlPkgsToRemove = 'redis-server,ifmap-server,supervisor'
def analyticsPkgs = 'contrail-analytics,contrail-lib,contrail-nodemgr,contrail-utils,python-contrail,contrail-database'
+def thirdPartyAnalyticsPkgsToRemove = 'redis-server,supervisor'
//def cmpPkgs = ['contrail-lib', 'contrail-nodemgr', 'contrail-utils', 'contrail-vrouter-agent', 'contrail-vrouter-utils', 'python-contrail', 'python-contrail-vrouter-api', 'python-opencontrail-vrouter-netns', 'contrail-vrouter-dkms']
def CMP_PKGS = 'contrail-lib contrail-nodemgr contrail-utils contrail-vrouter-agent contrail-vrouter-utils python-contrail python-contrail-vrouter-api python-opencontrail-vrouter-netns contrail-vrouter-dkms'
-def KERNEL_MODULE_RELOAD = 'service supervisor-vrouter stop;ifdown vhost0;rmmod vrouter;modprobe vrouter;ifup vhost0;service supervisor-vrouter start;'
-def analyticsServices = ['supervisor-analytics', 'supervisor-database', 'zookeeper']
+def KERNEL_MODULE_RELOAD = 'service supervisor-vrouter stop; rmmod vrouter; sync && echo 3 > /proc/sys/vm/drop_caches && echo 1 > /proc/sys/vm/compact_memory; service contrail-vrouter-agent start; service contrail-vrouter-nodemgr start'
+def analyticsServices = ['supervisor-analytics', 'supervisor-database', 'zookeeper', 'redis-server']
def configServices = ['contrail-webui-jobserver', 'contrail-webui-webserver', 'supervisor-config', 'supervisor-database', 'zookeeper']
-def controlServices = ['ifmap-server', 'supervisor-control']
+def controlServices = ['ifmap-server', 'supervisor-control', 'redis-server']
+def thirdPartyServicesToDisable = ['kafka', 'zookeeper', 'cassandra']
def config4Services = ['zookeeper', 'contrail-webui-middleware', 'contrail-webui', 'contrail-api', 'contrail-schema', 'contrail-svc-monitor', 'contrail-device-manager', 'contrail-config-nodemgr', 'contrail-database']
def void runCommonCommands(target, command, args, check, salt, pepperEnv, common) {
@@ -107,7 +110,7 @@
common.errorMsg('Cassandra failed to backup. Please fix it before continuing.')
throw er
}
-
+
salt.runSaltProcessStep(pepperEnv, 'I@neutron:server', 'service.stop', ['neutron-server'])
try {
@@ -167,14 +170,14 @@
salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'archive.tar', ['zcvf', '/root/contrail-zookeeper.tgz', '/var/lib/zoopeeker'])
salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'archive.tar', ['zcvf', '/root/contrail-analytics-database.tgz', '/var/lib/cassandra'])
salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'archive.tar', ['zcvf', '/root/contrail-analytics-zookeeper.tgz', '/var/lib/zookeeper'])
- //salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'pkg.remove', [controlPkgs])
- //salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'pkg.remove', [analyticsPkgs])
- for (service in controlServices) {
+ for (service in (controlServices + thirdPartyServicesToDisable)) {
salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.disable', [service])
}
- for (service in analyticsServices) {
+ for (service in (analyticsServices + thirdPartyServicesToDisable)) {
salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'service.disable', [service])
- }
+ }
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'pkg.remove', [controlPkgs + ',' + thirdPartyControlPkgsToRemove])
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'pkg.remove', [analyticsPkgs + ',' + thirdPartyAnalyticsPkgsToRemove])
}
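
Note that pkg.remove above receives a single comma-separated string; joining the Contrail list with the third-party list simply extends that string. A tiny illustration with shortened lists:

    def pkgs = 'contrail-config,contrail-control'              // shortened for the example
    def thirdParty = 'redis-server,ifmap-server,supervisor'
    assert pkgs + ',' + thirdParty ==
        'contrail-config,contrail-control,redis-server,ifmap-server,supervisor'
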
@@ -305,6 +308,12 @@
salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control and *01*', 'cmd.shell', ['cd /etc/docker/compose/opencontrail/; docker-compose down'], null, true)
salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control and *01*', 'state.sls', ['opencontrail', 'exclude=opencontrail.client'])
+ for (service in (controlServices + thirdPartyServicesToDisable)) {
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.enable', [service])
+ }
+ for (service in (analyticsServices + thirdPartyServicesToDisable)) {
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'service.enable', [service])
+ }
}
}
diff --git a/openstack-compute-upgrade.groovy b/openstack-compute-upgrade.groovy
deleted file mode 100644
index 2984b55..0000000
--- a/openstack-compute-upgrade.groovy
+++ /dev/null
@@ -1,261 +0,0 @@
-/**
- * Update packages on given nodes
- *
- * Expected parameters:
- * SALT_MASTER_CREDENTIALS Credentials to the Salt API.
- * SALT_MASTER_URL Full Salt API address [https://10.10.10.1:8000].
- * TARGET_SERVERS Salt compound target to match nodes to be updated [*, G@osfamily:debian].
- * TARGET_SUBSET_TEST Number of nodes to list package updates, empty string means all targetted nodes.
- * TARGET_SUBSET_LIVE Number of selected nodes to live apply selected package update.
- * INTERACTIVE Ask interactive questions during pipeline run (bool).
- *
-**/
-
-def common = new com.mirantis.mk.Common()
-def salt = new com.mirantis.mk.Salt()
-def python = new com.mirantis.mk.Python()
-
-def pepperEnv = "pepperEnv"
-def targetTestSubset
-def targetLiveSubset
-def targetLiveAll
-def minions
-def result
-def args
-def command
-def commandKwargs
-def probe = 1
-
-timeout(time: 12, unit: 'HOURS') {
- node() {
- try {
-
- stage('Setup virtualenv for Pepper') {
- python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
- }
-
- stage('List target servers') {
- minions = salt.getMinions(pepperEnv, TARGET_SERVERS)
-
- if (minions.isEmpty()) {
- throw new Exception("No minion was targeted")
- }
-
- if (TARGET_SUBSET_TEST != "") {
- targetTestSubset = minions.subList(0, Integer.valueOf(TARGET_SUBSET_TEST)).join(' or ')
- } else {
- targetTestSubset = minions.join(' or ')
- }
- targetLiveSubset = minions.subList(0, Integer.valueOf(TARGET_SUBSET_LIVE)).join(' or ')
- targetTestSubsetProbe = minions.subList(0, probe).join(' or ')
- targetLiveSubsetProbe = minions.subList(0, probe).join(' or ')
-
- targetLiveAll = minions.join(' or ')
- common.infoMsg("Found nodes: ${targetLiveAll}")
- common.infoMsg("Selected test nodes: ${targetTestSubset}")
- common.infoMsg("Selected sample nodes: ${targetLiveSubset}")
- }
-
-
- stage("Add new repos on test nodes") {
- salt.enforceState(pepperEnv, targetTestSubset, 'linux.system.repo')
- }
-
-
- opencontrail = null
-
- try {
- opencontrail = salt.cmdRun(pepperEnv, targetTestSubsetProbe, "salt-call grains.item roles | grep opencontrail.compute")
- print(opencontrail)
- } catch (Exception er) {
- common.infoMsg("opencontrail is not used")
- }
-
- if(opencontrail != null) {
- stage('Remove OC component from repos on test nodes') {
- def contrail_repo_file1 = ''
- def contrail_repo_file2 = ''
- try {
- contrail_repo_file1 = salt.cmdRun(pepperEnv, targetTestSubset, "grep -Eorl \\ oc\\([0-9]*\$\\) /etc/apt/sources.list*")['return'][0].values()[0].split("\n")[0]
- contrail_repo_file2 = salt.cmdRun(pepperEnv, targetTestSubset, "grep -Eorl \\ oc\\([0-9]*\\ \\) /etc/apt/sources.list*")['return'][0].values()[0].split("\n")[0]
- } catch (Exception er) {
- common.warningMsg(er)
- }
- salt.cmdRun(pepperEnv, targetTestSubset, "find /etc/apt/sources.list* -type f -print0 | xargs -0 sed -i -r -e 's/ oc([0-9]*) / /g;s/ oc([0-9]*\$)//g'")
- try {
- salt.cmdRun(pepperEnv, targetTestSubset, "salt-call pkg.refresh_db")
- } catch (Exception er) {
- common.warningMsg(er)
- // remove the malformed repo entry
- salt.cmdRun(pepperEnv, targetTestSubset, "rm ${contrail_repo_file1} ${contrail_repo_file2}")
- salt.runSaltProcessStep(pepperEnv, targetTestSubset, 'pkg.refresh_db', [], null, true)
- }
- }
- }
-
- stage("List package upgrades") {
- salt.runSaltProcessStep(pepperEnv, targetTestSubset, 'pkg.list_upgrades', [], null, true)
- }
-
- if (INTERACTIVE.toBoolean()){
- stage('Confirm upgrade on sample nodes') {
- input message: "Please verify the list of packages that you want to be upgraded. Do you want to continue with upgrade?"
- }
- }
-
- stage("Add new repos on sample nodes") {
- salt.enforceState(pepperEnv, targetLiveSubset, 'linux.system.repo')
- }
-
- if(opencontrail != null) {
- stage('Remove OC component from repos on sample nodes') {
- def contrail_repo_file1 = ''
- def contrail_repo_file2 = ''
- try {
- contrail_repo_file1 = salt.cmdRun(pepperEnv, targetLiveSubset, "grep -Eorl \\ oc\\([0-9]*\$\\) /etc/apt/sources.list*")['return'][0].values()[0].split("\n")[0]
- contrail_repo_file2 = salt.cmdRun(pepperEnv, targetLiveSubset, "grep -Eorl \\ oc\\([0-9]*\\ \\) /etc/apt/sources.list*")['return'][0].values()[0].split("\n")[0]
- } catch (Exception er) {
- common.warningMsg(er)
- }
- salt.cmdRun(pepperEnv, targetLiveSubset, "find /etc/apt/sources.list* -type f -print0 | xargs -0 sed -i -r -e 's/ oc([0-9]*) / /g;s/ oc([0-9]*\$)//g'")
- try {
- salt.cmdRun(pepperEnv, targetLiveSubset, "salt-call pkg.refresh_db")
- } catch (Exception er) {
- common.warningMsg(er)
- // remove the malformed repo entry
- salt.cmdRun(pepperEnv, targetLiveSubset, "rm ${contrail_repo_file1} ${contrail_repo_file2}")
- salt.runSaltProcessStep(pepperEnv, targetLiveSubset, 'pkg.refresh_db', [], null, true)
- }
- }
- }
-
- args = "apt-get -y -s -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confold\" dist-upgrade"
-
- stage('Test upgrade on sample') {
- try {
- salt.cmdRun(pepperEnv, targetLiveSubset, args)
- } catch (Exception er) {
- print(er)
- }
- }
-
- if (INTERACTIVE.toBoolean()){
- stage('Confirm upgrade on sample') {
- input message: "Please verify if there are packages that it wants to downgrade. If so, execute apt-cache policy on them and verify if everything is fine. Do you want to continue with upgrade?"
- }
- }
-
- command = "cmd.run"
- args = 'export DEBIAN_FRONTEND=noninteractive; apt-get -y -q --allow-downgrades -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confold\" dist-upgrade;'
-
- stage('Apply package upgrades on sample') {
- out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, args, commandKwargs)
- salt.printSaltCommandResult(out)
- }
-
- openvswitch = null
-
- try {
- openvswitch = salt.cmdRun(pepperEnv, targetLiveSubsetProbe, "salt-call grains.item roles | grep neutron.compute")
- } catch (Exception er) {
- common.infoMsg("openvswitch is not used")
- }
-
- if(openvswitch != null) {
- args = "sudo /usr/share/openvswitch/scripts/ovs-ctl start"
-
- stage('Start ovs on sample nodes') {
- out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, args, commandKwargs)
- salt.printSaltCommandResult(out)
- }
- stage("Run salt states on sample nodes") {
- salt.enforceState(pepperEnv, targetLiveSubset, ['nova', 'neutron'])
- }
- } else {
- stage("Run salt states on sample nodes") {
- salt.enforceState(pepperEnv, targetLiveSubset, ['nova', 'linux.system.repo'])
- }
- }
-
- stage("Run Highstate on sample nodes") {
- try {
- salt.enforceHighstate(pepperEnv, targetLiveSubset)
- } catch (Exception er) {
- common.errorMsg("Highstate was executed on ${targetLiveSubset} but something failed. Please check it and fix it accordingly.")
- }
- }
-
- if (INTERACTIVE.toBoolean()){
- stage('Confirm upgrade on all targeted nodes') {
- timeout(time: 2, unit: 'HOURS') {
- input message: "Verify that the upgraded sample nodes are working correctly. If so, do you want to approve live upgrade on ${targetLiveAll} nodes?"
- }
- }
- }
-
- stage("Add new repos on all targeted nodes") {
- salt.enforceState(pepperEnv, targetLiveAll, 'linux.system.repo')
- }
-
- if(opencontrail != null) {
- stage('Remove OC component from repos on all targeted nodes') {
- def contrail_repo_file1 = ''
- def contrail_repo_file2 = ''
- try {
- contrail_repo_file1 = salt.cmdRun(pepperEnv, targetLiveAll, "grep -Eorl \\ oc\\([0-9]*\$\\) /etc/apt/sources.list*")['return'][0].values()[0].split("\n")[0]
- contrail_repo_file2 = salt.cmdRun(pepperEnv, targetLiveAll, "grep -Eorl \\ oc\\([0-9]*\\ \\) /etc/apt/sources.list*")['return'][0].values()[0].split("\n")[0]
- } catch (Exception er) {
- common.warningMsg(er)
- }
- salt.cmdRun(pepperEnv, targetLiveAll, "find /etc/apt/sources.list* -type f -print0 | xargs -0 sed -i -r -e 's/ oc([0-9]*) / /g;s/ oc([0-9]*\$)//g'")
- try {
- salt.cmdRun(pepperEnv, targetLiveAll, "salt-call pkg.refresh_db")
- } catch (Exception er) {
- common.warningMsg(er)
- // remove the malformed repo entry
- salt.cmdRun(pepperEnv, targetLiveAll, "rm ${contrail_repo_file1} ${contrail_repo_file2}")
- salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'pkg.refresh_db', [], null, true)
- }
- }
- }
-
- args = 'export DEBIAN_FRONTEND=noninteractive; apt-get -y -q --allow-downgrades -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confold\" dist-upgrade;'
-
- stage('Apply package upgrades on all targeted nodes') {
- out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, args, commandKwargs)
- salt.printSaltCommandResult(out)
- }
-
- if(openvswitch != null) {
- args = "sudo /usr/share/openvswitch/scripts/ovs-ctl start"
-
- stage('Start ovs on all targeted nodes') {
- out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, args, commandKwargs)
- salt.printSaltCommandResult(out)
- }
- stage("Run salt states on all targeted nodes") {
- salt.enforceState(pepperEnv, targetLiveAll, ['nova', 'neutron'])
- }
- } else {
- stage("Run salt states on all targeted nodes") {
- salt.enforceState(pepperEnv, targetLiveAll, ['nova', 'linux.system.repo'])
- }
- }
-
- stage("Run Highstate on all targeted nodes") {
- try {
- salt.enforceHighstate(pepperEnv, targetLiveAll)
- } catch (Exception er) {
- common.errorMsg("Highstate was executed ${targetLiveAll} but something failed. Please check it and fix it accordingly.")
- }
- }
-
- } catch (Throwable e) {
- // If there was an error or exception thrown, the build failed
- currentBuild.result = "FAILURE"
- currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
- throw e
- }
- }
-}
-
diff --git a/openstack-control-upgrade.groovy b/openstack-control-upgrade.groovy
index 89b5e77..6a6eea2 100644
--- a/openstack-control-upgrade.groovy
+++ b/openstack-control-upgrade.groovy
@@ -1,582 +1,192 @@
/**
+ * Upgrade OpenStack packages on control plane nodes.
+ * There is no silver bullet in upgrading a cloud.
* Update packages on given nodes
*
* Expected parameters:
* SALT_MASTER_CREDENTIALS Credentials to the Salt API.
* SALT_MASTER_URL Full Salt API address [http://10.10.10.1:8000].
- * STAGE_TEST_UPGRADE Run test upgrade stage (bool)
- * STAGE_REAL_UPGRADE Run real upgrade stage (bool)
- * STAGE_ROLLBACK_UPGRADE Run rollback upgrade stage (bool)
- * SKIP_VM_RELAUNCH Set to true if vms should not be recreated (bool)
- * OPERATING_SYSTEM_RELEASE_UPGRADE Set to true if operating system of vms should be upgraded to newer release (bool)
+ * OS_DIST_UPGRADE Upgrade system packages including kernel (apt-get dist-upgrade)
+ * OS_UPGRADE Upgrade all installed applications (apt-get upgrade)
+ *   TARGET_SERVERS             Comma-separated list of Salt compound definitions to upgrade.
* INTERACTIVE Ask interactive questions during pipeline run (bool).
*
+ * TODO:
+ * * Add OS_RELEASE_UPGRADE
**/
def common = new com.mirantis.mk.Common()
def salt = new com.mirantis.mk.Salt()
def python = new com.mirantis.mk.Python()
+def debian = new com.mirantis.mk.Debian()
+def openstack = new com.mirantis.mk.Openstack()
-def getNodeProvider(pepperEnv, name) {
+def interactive = INTERACTIVE.toBoolean()
+def LinkedHashMap upgradeStageMap = [:]
+
+upgradeStageMap.put('Pre upgrade',
+ [
+    'Description': 'Only non-destructive actions will be applied during this phase. Basic API and service verification will be performed.',
+ 'Status': 'NOT_LAUNCHED',
+ 'Expected behaviors': '''
+ * No service downtime
+ * No workload downtime''',
+ 'Launched actions': '''
+ * Verify API, perform basic CRUD operations for services.
+ * Verify that compute/neutron agents on hosts are up.
+ * Run built-in service checkers like keystone-manage doctor or nova-status upgrade.''',
+ 'State result': 'Basic checks around services API are passed.'
+ ])
+upgradeStageMap.put('Stop OpenStack services',
+ [
+    'Description': 'All OpenStack python services will be stopped on all control nodes. This does not affect data plane services such as openvswitch or qemu.',
+ 'Status': 'NOT_LAUNCHED',
+ 'Expected behaviors': '''
+ * OpenStack python services are stopped.
+ * OpenStack API are not accessible from this point.
+ * No workload downtime''',
+ 'Launched actions': '''
+ * Stop OpenStack python services''',
+ 'State result': 'OpenStack python services are stopped',
+ ])
+upgradeStageMap.put('Upgrade OpenStack',
+ [
+ 'Description': 'OpenStack python code will be upgraded during this stage. No workload downtime is expected.',
+ 'Status': 'NOT_LAUNCHED',
+ 'Expected behaviors': '''
+ * OpenStack services might flap
+ * No workload downtime''',
+ 'Launched actions': '''
+ * Install new version of OpenStack packages
+ * Render new versions of configs
+ * Apply offline dbsync
+ * Start OpenStack services
+ * Verify agents are alive/connected
+ * Run basic API validation''',
+ 'State result': '''
+ * OpenStack packages are upgraded
+ * Services are running
+ * Basic checks around services API are passed
+ * Verified that agents/services on data plane nodes are connected to new control plane
+'''
+ ])
+upgradeStageMap.put('Upgrade OS',
+ [
+    'Description': 'Optional step. OS packages will be upgraded during this phase; depending on the job parameters, dist-upgrade might be called and a reboot of the node executed.',
+ 'Status': 'NOT_LAUNCHED',
+ 'Expected behaviors': '''
+ * OpenStack services might flap
+ * No workload downtime
+ * The nodes might be rebooted''',
+ 'Launched actions': '''
+ * Install new version of system packages
+ *  If doing dist-upgrade, a new kernel might be installed and the node rebooted
+ * Verify agents are alive/connected
+ * Run basic API validation''',
+ 'State result': '''
+ * System packages are updated
+ * Services are running
+ * Basic checks around services API are passed
+ * Verified that agents/services on data plane nodes are connected
+ * Node might be rebooted
+'''
+ ])
+
+def stopOpenStackServices(env, target) {
def salt = new com.mirantis.mk.Salt()
- def kvm = salt.getKvmMinionId(pepperEnv)
- return salt.getReturnValues(salt.getPillar(pepperEnv, "${kvm}", "salt:control:cluster:internal:node:${name}:provider"))
-}
-
-def stopServices(pepperEnv, probe, target, type) {
def openstack = new com.mirantis.mk.Openstack()
- def services = []
- if (type == 'prx') {
- services.add('keepalived')
- services.add('nginx')
- } else if (type == 'ctl') {
- services.add('keepalived')
- services.add('haproxy')
- services.add('nova')
- services.add('cinder')
- services.add('glance')
- services.add('heat')
- services.add('neutron')
- services.add('apache2')
- }
- openstack.stopServices(pepperEnv, probe, target, services)
-}
-
-def retryStateRun(pepperEnv, target, state) {
def common = new com.mirantis.mk.Common()
- def salt = new com.mirantis.mk.Salt()
- try {
- salt.enforceState(pepperEnv, target, state)
- } catch (Exception e) {
- common.warningMsg("running ${state} state again")
- salt.enforceState(pepperEnv, target, state)
+
+ def services = openstack.getOpenStackUpgradeServices(env, target)
+ def st
+ for (service in services){
+ st = "${service}.upgrade.service_stopped".trim()
+        common.infoMsg("Stopping services via ${st} on ${target}")
+ salt.enforceState(env, target, st)
}
}
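
A hypothetical illustration of what the loop above produces: assuming getOpenStackUpgradeServices returns pillar-driven service names such as ['keystone', 'glance', 'nova'] (an assumption made only for this sketch), the states enforced on each node would be:

    def services = ['keystone', 'glance', 'nova']   // assumed return value, for illustration
    services.each { service ->
        def st = "${service}.upgrade.service_stopped".trim()
        println st   // keystone.upgrade.service_stopped, glance.upgrade.service_stopped, ...
    }
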
-def stateRun(pepperEnv, target, state) {
- def common = new com.mirantis.mk.Common()
- def salt = new com.mirantis.mk.Salt()
- try {
- salt.enforceState(pepperEnv, target, state)
- } catch (Exception e) {
- common.warningMsg("Some parts of ${state} state failed. We should continue to run.")
- }
+def snapshotVM(env, domain, snapshotName) {
+ def common = new com.mirantis.mk.Common()
+ def salt = new com.mirantis.mk.Salt()
+
+ def target = salt.getNodeProvider(env, domain)
+
+ // TODO: gracefully migrate all workloads from VM, and stop it
+ salt.runSaltProcessStep(env, target, 'virt.shutdown', [domain], null, true, 3600)
+
+ //TODO: wait while VM is powered off
+
+ common.infoMsg("Creating snapshot ${snapshotName} for VM ${domain} on node ${target}")
+ salt.runSaltProcessStep(env, target, 'virt.snapshot', [domain, snapshotName], null, true, 3600)
}
+def revertSnapshotVM(env, domain, snapshotName, ensureUp=true) {
+ def common = new com.mirantis.mk.Common()
+ def salt = new com.mirantis.mk.Salt()
-def vcpTestUpgrade(pepperEnv) {
- def common = new com.mirantis.mk.Common()
- def salt = new com.mirantis.mk.Salt()
- def test_upgrade_node = "upg01"
- salt.runSaltProcessStep(pepperEnv, 'I@salt:master', 'saltutil.refresh_pillar', [], null, true, 2)
+ def target = salt.getNodeProvider(env, domain)
- stateRun(pepperEnv, 'I@salt:master', 'linux.system.repo')
- stateRun(pepperEnv, 'I@salt:master', 'salt.master')
- stateRun(pepperEnv, 'I@salt:master', 'reclass')
- stateRun(pepperEnv, 'I@salt:master', 'linux.system.repo')
+ common.infoMsg("Reverting snapshot ${snapshotName} for VM ${domain} on node ${target}")
+ salt.runSaltProcessStep(env, target, 'virt.revert_snapshot', [snapshotName, domain], null, true, 3600)
- try {
- salt.runSaltProcessStep(pepperEnv, '*', 'saltutil.refresh_pillar', [], null, true, 2)
- } catch (Exception e) {
- common.warningMsg("No response from some minions. We should continue to run")
+ if (ensureUp){
+ salt.runSaltProcessStep(env, target, 'virt.start', [domain], null, true, 300)
+ }
+}
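
The two snapshot helpers are defined here but not wired into the flow shown below; a usage sketch (the domain and snapshot name are hypothetical) could look like:

    snapshotVM(env, 'ctl01.cluster.local', 'upgradeSnapshot1')
    // ... run the upgrade; on failure, roll the VM back and power it on:
    revertSnapshotVM(env, 'ctl01.cluster.local', 'upgradeSnapshot1')   // ensureUp defaults to true
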
+
+def env = "env"
+timeout(time: 12, unit: 'HOURS') {
+ node() {
+
+ stage('Setup virtualenv for Pepper') {
+ python.setupPepperVirtualenv(env, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
}
- try {
- salt.runSaltProcessStep(pepperEnv, '*', 'saltutil.sync_all', [], null, true, 2)
- } catch (Exception e) {
- common.warningMsg("No response from some minions. We should continue to run")
+ def upgradeTargets = salt.getMinionsSorted(env, TARGET_SERVERS)
+
+ if (upgradeTargets.isEmpty()) {
+ error("No servers for upgrade matched by ${TARGET_SERVERS}")
}
- def domain = salt.getDomainName(pepperEnv)
-
- def backupninja_backup_host = salt.getReturnValues(salt.getPillar(pepperEnv, '( I@galera:master or I@galera:slave ) and I@backupninja:client', '_param:backupninja_backup_host'))
-
- if (SKIP_VM_RELAUNCH.toBoolean() == false) {
-
- def upgNodeProvider = getNodeProvider(pepperEnv, test_upgrade_node)
-
- salt.runSaltProcessStep(pepperEnv, "${upgNodeProvider}", 'virt.destroy', ["${test_upgrade_node}.${domain}"])
- salt.runSaltProcessStep(pepperEnv, "${upgNodeProvider}", 'virt.undefine', ["${test_upgrade_node}.${domain}"])
-
- try {
- salt.cmdRun(pepperEnv, 'I@salt:master', "salt-key -d ${test_upgrade_node}.${domain} -y")
- } catch (Exception e) {
- common.warningMsg("${test_upgrade_node}.${domain} does not match any accepted, unaccepted or rejected keys. The key did not exist yet or was already removed. We should continue to run")
- }
-
- // salt 'kvm02*' state.sls salt.control
- stateRun(pepperEnv, "${upgNodeProvider}", 'salt.control')
- // wait until upg node is registered in salt-key
- salt.minionPresent(pepperEnv, 'I@salt:master', test_upgrade_node)
- // salt '*' saltutil.refresh_pillar
- salt.runSaltProcessStep(pepperEnv, "${test_upgrade_node}*", 'saltutil.refresh_pillar', [])
- // salt '*' saltutil.sync_all
- salt.runSaltProcessStep(pepperEnv, "${test_upgrade_node}*", 'saltutil.sync_all', [])
+ common.printStageMap(upgradeStageMap)
+ if (interactive){
+ input message: common.getColorizedString(
+            "Above you can find detailed info on what this pipeline will execute.\nThe info provides a brief description of each stage, the actions that will be performed, and the service/workload impact during each stage.\nPlease read it carefully.", "yellow")
}
- stateRun(pepperEnv, "${test_upgrade_node}*", ['linux.network.proxy'])
- try {
- salt.runSaltProcessStep(pepperEnv, "${test_upgrade_node}*", 'state.sls', ["salt.minion.base"], null, true, 60)
- } catch (Exception e) {
- common.warningMsg(e)
- }
- stateRun(pepperEnv, "${test_upgrade_node}*", ['linux', 'openssh'])
-
- try {
- salt.runSaltProcessStep(pepperEnv, "${test_upgrade_node}*", 'state.sls', ["salt.minion"], null, true, 60)
- } catch (Exception e) {
- common.warningMsg(e)
- }
- stateRun(pepperEnv, "${test_upgrade_node}*", ['ntp', 'rsyslog'])
- salt.enforceState(pepperEnv, "${test_upgrade_node}*", ['linux', 'openssh', 'salt.minion', 'ntp', 'rsyslog'])
- salt.enforceState(pepperEnv, "${test_upgrade_node}*", ['rabbitmq', 'memcached'])
- try {
- salt.enforceState(pepperEnv, '( I@galera:master or I@galera:slave ) and I@backupninja:client', ['openssh.client', 'salt.minion'])
- } catch (Exception e) {
- common.warningMsg('salt-minion was restarted. We should continue to run')
- }
- salt.runSaltProcessStep(master, '( I@galera:master or I@galera:slave ) and I@backupninja:client', 'saltutil.sync_grains')
- salt.runSaltProcessStep(master, '( I@galera:master or I@galera:slave ) and I@backupninja:client', 'mine.flush')
- salt.runSaltProcessStep(master, '( I@galera:master or I@galera:slave ) and I@backupninja:client', 'mine.update')
- salt.enforceState(pepperEnv, '( I@galera:master or I@galera:slave ) and I@backupninja:client', 'backupninja')
- try {
- salt.enforceState(pepperEnv, 'I@backupninja:server', ['salt.minion'])
- } catch (Exception e) {
- common.warningMsg('salt-minion was restarted. We should continue to run')
- }
-
- salt.enforceState(pepperEnv, 'I@backupninja:server', 'backupninja')
- salt.runSaltProcessStep(pepperEnv, '( I@galera:master or I@galera:slave ) and I@backupninja:client', 'ssh.rm_known_host', ["root", "${backupninja_backup_host}"])
- try {
- salt.cmdRun(pepperEnv, '( I@galera:master or I@galera:slave ) and I@backupninja:client', "arp -d ${backupninja_backup_host}")
- } catch (Exception e) {
- common.warningMsg('The ARP entry does not exist. We should continue to run.')
- }
- salt.runSaltProcessStep(pepperEnv, '( I@galera:master or I@galera:slave ) and I@backupninja:client', 'ssh.set_known_host', ["root", "${backupninja_backup_host}"])
- salt.cmdRun(pepperEnv, '( I@galera:master or I@galera:slave ) and I@backupninja:client', 'backupninja -n --run /etc/backup.d/101.mysql')
- salt.cmdRun(pepperEnv, '( I@galera:master or I@galera:slave ) and I@backupninja:client', 'backupninja -n --run /etc/backup.d/200.backup.rsync > /tmp/backupninjalog')
-
- salt.enforceState(pepperEnv, 'I@xtrabackup:server', 'xtrabackup')
- salt.enforceState(pepperEnv, 'I@xtrabackup:client', 'openssh.client')
- salt.cmdRun(pepperEnv, 'I@xtrabackup:client', "su root -c 'salt-call state.sls xtrabackup'")
- salt.cmdRun(pepperEnv, 'I@xtrabackup:client', "su root -c '/usr/local/bin/innobackupex-runner.sh -f -s'")
-
- def databases = salt.cmdRun(pepperEnv, 'I@mysql:client','salt-call mysql.db_list | grep upgrade | awk \'/-/ {print \$2}\'')
- if(databases && databases != ""){
- def databasesList = salt.getReturnValues(databases).trim().tokenize("\n")
- for( i = 0; i < databasesList.size(); i++){
- if(databasesList[i].toLowerCase().contains('upgrade')){
- salt.runSaltProcessStep(pepperEnv, 'I@mysql:client', 'mysql.db_remove', ["${databasesList[i]}"])
- common.warningMsg("removing database ${databasesList[i]}")
- salt.runSaltProcessStep(pepperEnv, 'I@mysql:client', 'file.remove', ["/root/mysql/flags/${databasesList[i]}-installed"])
- }
- }
- salt.enforceState(pepperEnv, 'I@mysql:client', 'mysql.client')
- }else{
- common.errorMsg("No _upgrade databases were returned")
- }
-
- try {
- salt.enforceState(pepperEnv, "${test_upgrade_node}*", 'keystone.server')
- salt.runSaltProcessStep(pepperEnv, "${test_upgrade_node}*", 'service.restart', ['apache2'])
- } catch (Exception e) {
- common.warningMsg('Restarting Apache2')
- salt.runSaltProcessStep(pepperEnv, "${test_upgrade_node}*", 'service.restart', ['apache2'])
- }
- retryStateRun(pepperEnv, "${test_upgrade_node}*", 'keystone.client')
- retryStateRun(pepperEnv, "${test_upgrade_node}*", 'glance')
- salt.enforceState(pepperEnv, "${test_upgrade_node}*", 'keystone.server')
-
- retryStateRun(pepperEnv, "${test_upgrade_node}*", 'nova')
- retryStateRun(pepperEnv, "${test_upgrade_node}*", 'nova') // run nova state again as sometimes nova does not enforce itself for some reason
- retryStateRun(pepperEnv, "${test_upgrade_node}*", 'cinder')
- retryStateRun(pepperEnv, "${test_upgrade_node}*", 'neutron')
- retryStateRun(pepperEnv, "${test_upgrade_node}*", 'heat')
-
- salt.cmdRun(pepperEnv, "${test_upgrade_node}*", '. /root/keystonercv3; openstack service list; openstack image list; openstack flavor list; openstack compute service list; openstack server list; openstack network list; openstack volume list; openstack orchestration service list')
-
- if (INTERACTIVE.toBoolean() && STAGE_TEST_UPGRADE.toBoolean() == true && STAGE_REAL_UPGRADE.toBoolean() == true) {
- stage('Ask for manual confirmation') {
- input message: "Do you want to continue with upgrade?"
+ for (target in upgradeTargets){
+ common.stageWrapper(upgradeStageMap, "Pre upgrade", target, interactive) {
+ openstack.runOpenStackUpgradePhase(env, target, 'pre')
}
}
-}
-
-def vcpRealUpgrade(pepperEnv) {
- def common = new com.mirantis.mk.Common()
- def salt = new com.mirantis.mk.Salt()
- def openstack = new com.mirantis.mk.Openstack()
- def virsh = new com.mirantis.mk.Virsh()
-
- def upgrade_target = []
- upgrade_target.add('I@horizon:server')
- upgrade_target.add('I@keystone:server and not upg*')
-
- def proxy_general_target = "I@horizon:server"
- def control_general_target = "I@keystone:server and not upg*"
- def upgrade_general_target = "( I@keystone:server and not upg* ) or I@horizon:server"
-
- def snapshotName = "upgradeSnapshot1"
-
- def domain = salt.getDomainName(pepperEnv)
- def errorOccured = false
-
- for (tgt in upgrade_target) {
- def target_hosts = salt.getMinionsSorted(pepperEnv, "${tgt}")
- def node = salt.getFirstMinion(pepperEnv, "${tgt}")
- def general_target = ""
-
- if (tgt.toString().contains('horizon:server')) {
- general_target = 'prx'
- } else if (tgt.toString().contains('keystone:server')) {
- general_target = 'ctl'
- }
-
- if (OPERATING_SYSTEM_RELEASE_UPGRADE.toBoolean() == false) {
- stopServices(pepperEnv, node, tgt, general_target)
- }
-
- for (t in target_hosts) {
- def target = salt.stripDomainName(t)
- def nodeProvider = salt.getNodeProvider(pepperEnv, t)
- if ((OPERATING_SYSTEM_RELEASE_UPGRADE.toBoolean() == true) && (SKIP_VM_RELAUNCH.toBoolean() == false)) {
- salt.runSaltProcessStep(pepperEnv, "${nodeProvider}", 'virt.destroy', ["${target}.${domain}"])
- sleep(2)
- try {
- salt.cmdRun(pepperEnv, "${nodeProvider}", "[ ! -f /root/${target}.${domain}.qcow2.bak ] && cp /var/lib/libvirt/images/${target}.${domain}/system.qcow2 ./${target}.${domain}.qcow2.bak")
- } catch (Exception e) {
- common.warningMsg('File already exists')
- }
- salt.runSaltProcessStep(pepperEnv, "${nodeProvider}", 'virt.undefine', ["${target}.${domain}"])
- try {
- salt.cmdRun(pepperEnv, 'I@salt:master', "salt-key -d ${target}.${domain} -y")
- } catch (Exception e) {
- common.warningMsg('does not match any accepted, unaccepted or rejected keys. They were probably already removed. We should continue to run')
- }
- } else if (OPERATING_SYSTEM_RELEASE_UPGRADE.toBoolean() == false) {
- virsh.liveSnapshotPresent(pepperEnv, nodeProvider, target, snapshotName)
- }
- }
+ for (target in upgradeTargets) {
+ common.stageWrapper(upgradeStageMap, "Stop OpenStack services", target, interactive) {
+ stopOpenStackServices(env, target)
+ }
}
- if ((OPERATING_SYSTEM_RELEASE_UPGRADE.toBoolean() == true) && (SKIP_VM_RELAUNCH.toBoolean() == false)) {
- salt.cmdRun(pepperEnv, 'I@xtrabackup:client', "su root -c '/usr/local/bin/innobackupex-runner.sh -f -s'")
+ for (target in upgradeTargets) {
+ common.stageWrapper(upgradeStageMap, "Upgrade OpenStack", target, interactive) {
+ openstack.runOpenStackUpgradePhase(env, target, 'upgrade')
+ openstack.applyOpenstackAppsStates(env, target)
+ openstack.runOpenStackUpgradePhase(env, target, 'verify')
+ }
- salt.enforceState(pepperEnv, 'I@salt:control', 'salt.control')
-
- for (tgt in upgrade_target) {
- salt.minionsPresent(pepperEnv, 'I@salt:master', tgt)
+ common.stageWrapper(upgradeStageMap, "Upgrade OS", target, interactive) {
+ if (OS_DIST_UPGRADE.toBoolean() == true){
+ upgrade_mode = 'dist-upgrade'
+ } else if (OS_UPGRADE.toBoolean() == true){
+ upgrade_mode = 'upgrade'
}
+ if (OS_DIST_UPGRADE.toBoolean() == true || OS_UPGRADE.toBoolean() == true) {
+ debian.osUpgradeNode(env, target, upgrade_mode, false)
+ }
+ openstack.applyOpenstackAppsStates(env, target)
+ openstack.runOpenStackUpgradePhase(env, target, 'verify')
+ }
}
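
Note the flag precedence in the 'Upgrade OS' stage above: OS_DIST_UPGRADE wins over OS_UPGRADE, and when both are false no packages are touched and only the app states and the verification phase run. The same logic written as a single guard:

    if (OS_DIST_UPGRADE.toBoolean() || OS_UPGRADE.toBoolean()) {
        def upgrade_mode = OS_DIST_UPGRADE.toBoolean() ? 'dist-upgrade' : 'upgrade'
        debian.osUpgradeNode(env, target, upgrade_mode, false)   // same arguments as the call above
    }
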
-
- // salt '*' saltutil.refresh_pillar
- salt.runSaltProcessStep(pepperEnv, upgrade_general_target, 'saltutil.refresh_pillar', [])
- // salt '*' saltutil.sync_all
- salt.runSaltProcessStep(pepperEnv, upgrade_general_target, 'saltutil.sync_all', [])
-
- stateRun(pepperEnv, upgrade_general_target, ['linux.network.proxy'])
- try {
- salt.runSaltProcessStep(pepperEnv, upgrade_general_target, 'state.sls', ["salt.minion.base"], null, true, 60)
- } catch (Exception e) {
- common.warningMsg(e)
- }
-
- if (OPERATING_SYSTEM_RELEASE_UPGRADE.toBoolean() == false) {
-
- try {
- salt.enforceState(pepperEnv, upgrade_general_target, ['linux.system.repo'])
- } catch (Exception e) {
- common.warningMsg(e)
- }
-
- salt.runSaltProcessStep(pepperEnv, upgrade_general_target, 'pkg.install', ['salt-minion'], null, true, 5)
- salt.minionsReachable(pepperEnv, 'I@salt:master', upgrade_general_target)
-
- // Apply package upgrades
- args = 'export DEBIAN_FRONTEND=noninteractive; apt-get -y -q --allow-downgrades --allow-unauthenticated -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confold\" dist-upgrade;'
- common.warningMsg("Running apt dist-upgrade on ${proxy_general_target} and ${control_general_target}, this might take a while...")
- out = salt.runSaltProcessStep(pepperEnv, upgrade_general_target, 'cmd.run', [args])
- // stop services again
- def proxy_node = salt.getFirstMinion(pepperEnv, proxy_general_target)
- def control_node = salt.getFirstMinion(pepperEnv, control_general_target)
- stopServices(pepperEnv, proxy_node, proxy_general_target, 'prx')
- stopServices(pepperEnv, control_node, control_general_target, 'ctl')
- salt.printSaltCommandResult(out)
- if (out.toString().contains("dpkg returned an error code")){
- if (INTERACTIVE.toBoolean()) {
- input message: "Apt dist-upgrade failed, please fix it manually and then click on proceed. If unable to fix it, click on abort and run the rollback stage."
- } else {
- error("Apt dist-upgrade failed. And interactive mode was disabled, failing...")
- }
- }
- // run base states
- try {
- salt.enforceState(pepperEnv, upgrade_general_target, ['linux', 'openssh', 'salt.minion', 'ntp', 'rsyslog'])
- } catch (Exception e) {
- common.warningMsg(e)
- }
- salt.enforceState(pepperEnv, control_general_target, ['keepalived', 'haproxy'])
- } else {
- // initial VM setup
- try {
- salt.enforceState(pepperEnv, upgrade_general_target, ['linux', 'openssh'])
- } catch (Exception e) {
- common.warningMsg(e)
- }
- try {
- salt.runSaltProcessStep(pepperEnv, upgrade_general_target, 'state.sls', ["salt.minion"], null, true, 60)
- } catch (Exception e) {
- common.warningMsg(e)
- }
- try {
- salt.enforceState(pepperEnv, upgrade_general_target, ['ntp', 'rsyslog'])
- } catch (Exception e) {
- common.warningMsg(e)
- }
- salt.enforceState(pepperEnv, upgrade_general_target, ['linux', 'openssh', 'salt.minion', 'ntp', 'rsyslog'])
- salt.enforceState(pepperEnv, control_general_target, ['keepalived', 'haproxy'])
- salt.runSaltProcessStep(pepperEnv, control_general_target, 'service.restart', ['rsyslog'])
- }
-
- try {
- try {
- salt.enforceState(pepperEnv, control_general_target, ['memcached', 'keystone.server'])
- salt.runSaltProcessStep(pepperEnv, control_general_target, 'service.restart', ['apache2'])
- } catch (Exception e) {
- common.warningMsg('Restarting Apache2 and enforcing keystone.server state again')
- salt.runSaltProcessStep(pepperEnv, control_general_target, 'service.restart', ['apache2'])
- salt.enforceState(pepperEnv, control_general_target, 'keystone.server')
- }
- // salt 'ctl01*' state.sls keystone.client
- retryStateRun(pepperEnv, "I@keystone:client and ${control_general_target}", 'keystone.client')
- retryStateRun(pepperEnv, control_general_target, 'glance')
- salt.enforceState(pepperEnv, control_general_target, 'glusterfs.client')
- salt.enforceState(pepperEnv, control_general_target, 'keystone.server')
- retryStateRun(pepperEnv, control_general_target, 'nova')
- retryStateRun(pepperEnv, control_general_target, 'cinder')
- retryStateRun(pepperEnv, control_general_target, 'neutron')
- retryStateRun(pepperEnv, control_general_target, 'heat')
- } catch (Exception e) {
- errorOccured = true
- if (INTERACTIVE.toBoolean()){
- if (OPERATING_SYSTEM_RELEASE_UPGRADE.toBoolean() == false) {
- input message: "Some states that require syncdb failed. Please check the reason. Click proceed only if you want to restore database into it's pre-upgrade state. If you want restore production database and also the VMs into its pre-upgrade state please click on abort and run the rollback stage."
- } else {
- input message: "Some states that require syncdb failed. Please check the reason and click proceed only if you want to restore database into it's pre-upgrade state. Otherwise, click abort."
- }
- } else {
- error("Stage Real control upgrade failed. And interactive mode was disabled, failing...")
- }
- openstack.restoreGaleraDb(pepperEnv)
- common.errorMsg("Stage Real control upgrade failed")
- }
- if(!errorOccured){
-
- if (OPERATING_SYSTEM_RELEASE_UPGRADE.toBoolean() == true) {
-
- try {
- if (salt.testTarget(pepperEnv, "I@ceph:client and ${control_general_target}*")) {
- salt.enforceState(pepperEnv, "I@ceph:client and ${control_general_target}*", 'ceph.client')
- }
- } catch (Exception er) {
- common.warningMsg("Ceph client state on controllers failed. Please fix it manually")
- }
- try {
- if (salt.testTarget(pepperEnv, "I@ceph:common and ${control_general_target}*")) {
- salt.enforceState(pepperEnv, "I@ceph:common and ${control_general_target}*", ['ceph.common', 'ceph.setup.keyring'])
- }
- } catch (Exception er) {
- common.warningMsg("Ceph common state on controllers failed. Please fix it manually")
- }
- try {
- if (salt.testTarget(pepperEnv, "I@ceph:common and ${control_general_target}*")) {
- salt.runSaltProcessStep(master, "I@ceph:common and ${control_general_target}*", 'service.restart', ['glance-api', 'glance-glare', 'glance-registry'])
- }
- } catch (Exception er) {
- common.warningMsg("Restarting Glance services on controllers failed. Please fix it manually")
- }
- }
-
- // salt 'cmp*' cmd.run 'service nova-compute restart'
- salt.runSaltProcessStep(pepperEnv, 'I@nova:compute', 'service.restart', ['nova-compute'])
- salt.runSaltProcessStep(pepperEnv, control_general_target, 'service.restart', ['nova-conductor'])
- salt.runSaltProcessStep(pepperEnv, control_general_target, 'service.restart', ['nova-scheduler'])
-
- retryStateRun(pepperEnv, proxy_general_target, 'keepalived')
- retryStateRun(pepperEnv, proxy_general_target, 'horizon')
- retryStateRun(pepperEnv, proxy_general_target, 'nginx')
- retryStateRun(pepperEnv, proxy_general_target, 'memcached')
-
- try {
- salt.enforceHighstate(pepperEnv, control_general_target)
- } catch (Exception er) {
- common.errorMsg("Highstate was executed on controller nodes but something failed. Please check it and fix it accordingly.")
- }
-
- try {
- salt.enforceHighstate(pepperEnv, proxy_general_target)
- } catch (Exception er) {
- common.errorMsg("Highstate was executed on proxy nodes but something failed. Please check it and fix it accordingly.")
- }
-
- try {
- salt.cmdRun(pepperEnv, "${control_general_target}01*", '. /root/keystonercv3; openstack service list; openstack image list; openstack flavor list; openstack compute service list; openstack server list; openstack network list; openstack volume list; openstack orchestration service list')
- } catch (Exception er) {
- common.errorMsg(er)
- }
-
- /*
- if (OPERATING_SYSTEM_RELEASE_UPGRADE.toBoolean() == false) {
- if (INTERACTIVE.toBoolean()){
- input message: "Please verify if the control upgrade was successful! If so, by clicking proceed the original VMs disk images will be backed up and snapshot will be merged to the upgraded VMs which will finalize the upgrade procedure"
- }
- node_count = 1
- for (t in proxy_target_hosts) {
- def target = salt.stripDomainName(t)
- def nodeProvider = salt.getNodeProvider(pepperEnv, "${general_target}0${node_count}")
- try {
- salt.cmdRun(pepperEnv, "${nodeProvider}", "[ ! -f /root/${target}.${domain}.qcow2.bak ] && cp /var/lib/libvirt/images/${target}.${domain}/system.qcow2 ./${target}.${domain}.qcow2.bak")
- } catch (Exception e) {
- common.warningMsg('File already exists')
- }
- virsh.liveSnapshotMerge(pepperEnv, nodeProvider, target, snapshotName)
- node_count++
- }
- node_count = 1
- for (t in control_target_hosts) {
- def target = salt.stripDomainName(t)
- def nodeProvider = salt.getNodeProvider(pepperEnv, "${general_target}0${node_count}")
- try {
- salt.cmdRun(pepperEnv, "${nodeProvider}", "[ ! -f /root/${target}.${domain}.qcow2.bak ] && cp /var/lib/libvirt/images/${target}.${domain}/system.qcow2 ./${target}.${domain}.qcow2.bak")
- } catch (Exception e) {
- common.warningMsg('File already exists')
- }
- virsh.liveSnapshotMerge(pepperEnv, nodeProvider, target, snapshotName)
- node_count++
- }
- if (INTERACTIVE.toBoolean()){
- input message: "Please scroll up and look for red highlighted messages containing 'virsh blockcommit' string.
- If there are any fix it manually. Otherwise click on proceed."
- }
- }
- */
- }
-}
-
-
-def vcpRollback(pepperEnv) {
- def common = new com.mirantis.mk.Common()
- def salt = new com.mirantis.mk.Salt()
- def openstack = new com.mirantis.mk.Openstack()
- def virsh = new com.mirantis.mk.Virsh()
- def snapshotName = "upgradeSnapshot1"
- try {
- salt.runSaltProcessStep(pepperEnv, '*', 'saltutil.refresh_pillar', [], null, true, 2)
- } catch (Exception e) {
- common.warningMsg("No response from some minions. We should continue to run")
- }
-
- def domain = salt.getDomainName(pepperEnv)
-
- def rollback_target = []
- rollback_target.add('I@horizon:server')
- rollback_target.add('I@keystone:server and not upg*')
-
- def control_general_target = "I@keystone:server and not upg*"
- def upgrade_general_target = "( I@keystone:server and not upg* ) or I@horizon:server"
-
- openstack.restoreGaleraDb(pepperEnv)
-
- for (tgt in rollback_target) {
- def target_hosts = salt.getMinionsSorted(pepperEnv, "${tgt}")
- def node = salt.getFirstMinion(pepperEnv, "${tgt}")
- def general_target = salt.getMinionsGeneralName(pepperEnv, "${tgt}")
-
- if (tgt.toString().contains('horizon:server')) {
- general_target = 'prx'
- } else if (tgt.toString().contains('keystone:server')) {
- general_target = 'ctl'
- }
-
- for (t in target_hosts) {
- def target = salt.stripDomainName(t)
- def nodeProvider = salt.getNodeProvider(pepperEnv, t)
- salt.runSaltProcessStep(pepperEnv, "${nodeProvider}", 'virt.destroy', ["${target}.${domain}"])
- sleep(2)
- if (OPERATING_SYSTEM_RELEASE_UPGRADE.toBoolean() == true) {
- salt.runSaltProcessStep(pepperEnv, "${nodeProvider}", 'file.copy', ["/root/${target}.${domain}.qcow2.bak", "/var/lib/libvirt/images/${target}.${domain}/system.qcow2"])
- try {
- salt.cmdRun(pepperEnv, 'I@salt:master', "salt-key -d ${target}.${domain} -y")
- } catch (Exception e) {
- common.warningMsg('does not match any accepted, unaccepted or rejected keys. They were probably already removed. We should continue to run')
- }
- salt.runSaltProcessStep(pepperEnv, "${nodeProvider}", 'virt.start', ["${target}.${domain}"])
- } else {
- salt.cmdRun(pepperEnv, "${nodeProvider}", "virsh define /var/lib/libvirt/images/${target}.${domain}.xml")
- salt.runSaltProcessStep(pepperEnv, "${nodeProvider}", 'virt.start', ["${target}.${domain}"])
- virsh.liveSnapshotAbsent(pepperEnv, nodeProvider, target, snapshotName)
- }
- }
- }
-
- // salt 'cmp*' cmd.run 'service nova-compute restart'
- salt.runSaltProcessStep(pepperEnv, 'I@nova:compute', 'service.restart', ['nova-compute'])
-
- if (OPERATING_SYSTEM_RELEASE_UPGRADE.toBoolean() == true) {
- for (tgt in rollback_target) {
- salt.minionsPresent(pepperEnv, 'I@salt:master', tgt)
- }
- }
-
- salt.minionsReachable(pepperEnv, 'I@salt:master', upgrade_general_target)
-
- salt.runSaltProcessStep(pepperEnv, control_general_target, 'service.restart', ['nova-conductor'])
- salt.runSaltProcessStep(pepperEnv, control_general_target, 'service.restart', ['nova-scheduler'])
-
- def control_node = salt.getFirstMinion(pepperEnv, control_general_target)
-
- salt.cmdRun(pepperEnv, "${control_node}*", '. /root/keystonerc; nova service-list; glance image-list; nova flavor-list; nova hypervisor-list; nova list; neutron net-list; cinder list; heat service-list')
-}
-
-
-def pepperEnv = "pepperEnv"
-timeout(time: 12, unit: 'HOURS') {
- node() {
-
- stage('Setup virtualenv for Pepper') {
- python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
- }
-
- if (STAGE_TEST_UPGRADE.toBoolean() == true) {
- stage('Test upgrade') {
- vcpTestUpgrade(pepperEnv)
- }
- }
-
- if (STAGE_REAL_UPGRADE.toBoolean() == true) {
- stage('Real upgrade') {
- // # actual upgrade
- vcpRealUpgrade(pepperEnv)
- }
-
- if (INTERACTIVE.toBoolean() && STAGE_REAL_UPGRADE.toBoolean() == true && STAGE_ROLLBACK_UPGRADE.toBoolean() == true) {
- stage('Ask for manual confirmation') {
- input message: "Please verify if the control upgrade was successful. If it did not succeed, in the worst scenario, you can click on proceed to continue with control-upgrade-rollback. Do you want to continue with the rollback?"
- }
- }
- }
-
- if (STAGE_ROLLBACK_UPGRADE.toBoolean() == true) {
- stage('Rollback upgrade') {
- if (INTERACTIVE.toBoolean()){
- stage('Ask for manual confirmation') {
- input message: "Before rollback please check the documentation for reclass model changes. Do you really want to continue with the rollback?"
- }
- }
- vcpRollback(pepperEnv)
- }
- }
- }
+ }
}
diff --git a/openstack-data-upgrade.groovy b/openstack-data-upgrade.groovy
new file mode 100644
index 0000000..88bbf57
--- /dev/null
+++ b/openstack-data-upgrade.groovy
@@ -0,0 +1,185 @@
+/**
+ * Upgrade OpenStack packages on data plane (gateway/compute) nodes.
+ * Update packages on given nodes
+ *
+ * Expected parameters:
+ * SALT_MASTER_CREDENTIALS Credentials to the Salt API.
+ * SALT_MASTER_URL Full Salt API address [http://10.10.10.1:8000].
+ * OS_DIST_UPGRADE Upgrade system packages including kernel (apt-get dist-upgrade)
+ * OS_UPGRADE Upgrade all installed applications (apt-get upgrade)
+ *   TARGET_SERVERS             Comma-separated list of Salt compound definitions to upgrade.
+ * INTERACTIVE Ask interactive questions during pipeline run (bool).
+ *
+ * TODO:
+ * * Add OS_RELEASE_UPGRADE
+**/
+
+def common = new com.mirantis.mk.Common()
+def salt = new com.mirantis.mk.Salt()
+def python = new com.mirantis.mk.Python()
+def openstack = new com.mirantis.mk.Openstack()
+def debian = new com.mirantis.mk.Debian()
+
+def interactive = INTERACTIVE.toBoolean()
+def LinkedHashMap upgradeStageMap = [:]
+
+upgradeStageMap.put('Pre upgrade',
+ [
+    'Description': 'Only non-destructive actions will be applied during this phase. Basic API and service verification will be performed.',
+ 'Status': 'NOT_LAUNCHED',
+ 'Expected behaviors': '''
+ * No service downtime
+ * No workload downtime''',
+ 'Launched actions': '''
+ * Verify API, perform basic CRUD operations for services.
+ * Verify that compute/neutron agents on hosts are up.
+ * Run built-in service checkers like keystone-manage doctor or nova-status upgrade.''',
+ 'State result': 'Basic checks around services API are passed.'
+ ])
+upgradeStageMap.put('Upgrade pre: migrate resources',
+ [
+    'Description': 'In order to minimize workload downtime, smooth resource migration happens during this phase. Neutron agents on the node are set to the admin_disabled state to make sure they are quickly migrated to a new node (1-2 ping loss). Instances might be live-migrated from the host (this stage is optional and configured from pillar).',
+ 'Status': 'NOT_LAUNCHED',
+ 'Expected behaviors': '''
+ * No service downtime
+ * Small workload downtime''',
+ 'Launched actions': '''
+ * Set neutron agents to the admin_disabled state
+ * Migrate instances if allowed (optional).''',
+ 'State result': '''
+ * Hosts are removed from scheduling and will not host new resources.
+ * If instance migration was performed, no instances should be present.'''
+ ])
+upgradeStageMap.put('Upgrade OpenStack',
+ [
+ 'Description': 'OpenStack python code will be upgraded during this stage. No workload downtime is expected.',
+ 'Status': 'NOT_LAUNCHED',
+ 'Expected behaviors': '''
+ * OpenStack services might flap
+ * No workload downtime''',
+ 'Launched actions': '''
+ * Install new version of OpenStack packages
+ * Render new versions of configs
+ * Apply offline dbsync
+ * Start OpenStack services
+ * Verify agents are alive/connected
+ * Run basic API validation''',
+ 'State result': '''
+ * OpenStack packages are upgraded
+ * Services are running
+ * Basic checks around services API are passed
+ * Verified that agents/services on data plane nodes are connected to new control plane
+'''
+ ])
+upgradeStageMap.put('Upgrade OS',
+ [
+    'Description': 'Optional step. OS packages will be upgraded during this phase; depending on the job parameters, dist-upgrade might be called and a reboot of the node executed.',
+ 'Status': 'NOT_LAUNCHED',
+ 'Expected behaviors': '''
+ * OpenStack services might flap
+ * No workload downtime
+ * The nodes might be rebooted''',
+ 'Launched actions': '''
+ * Install new version of system packages
+ *  If doing dist-upgrade, a new kernel might be installed and the node rebooted
+ * Verify agents are alive/connected
+ * Run basic API validation''',
+ 'State result': '''
+ * System packages are updated
+ * Services are running
+ * Basic checks around services API are passed
+ * Verified that agents/services on data plane nodes are connected
+ * Node might be rebooted
+'''
+ ])
+upgradeStageMap.put('Upgrade post: enable resources',
+ [
+    'Description': 'Verify that agents/services on the node are up, and add them back to scheduling.',
+ 'Status': 'NOT_LAUNCHED',
+ 'Expected behaviors': '''
+ * No service downtime
+ * No workload downtime''',
+ 'Launched actions': '''
+ * Set neutron agents to admin state enabled
+ * Enable nova-compute services''',
+ 'State result': 'Hosts are being added to scheduling to host new resources',
+ ])
+upgradeStageMap.put('Post upgrade',
+ [
+ 'Description': 'Only non-destructive actions will be applied during this phase, such as cleaning up old configs and temporary files, and running online dbsyncs.',
+ 'Status': 'NOT_LAUNCHED',
+ 'Expected behaviors': '''
+ * No service downtime
+ * No workload downtime''',
+ 'Launched actions': '''
+ * Cleanup os client configs''',
+ 'State result': 'Temporary resources are being cleaned.'
+ ])
+
+
+def env = "env"
+timeout(time: 24, unit: 'HOURS') {
+ node() {
+
+ stage('Setup virtualenv for Pepper') {
+ python.setupPepperVirtualenv(env, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+ }
+
+ def targetNodes = salt.getMinionsSorted(env, TARGET_SERVERS)
+ def migrateResources = true
+
+ if (targetNodes.isEmpty()) {
+ error("No servers for upgrade matched by ${TARGET_SERVERS}")
+ }
+ if (targetNodes.size() == 1 ){
+ migrateResources = false
+ }
+
+ common.printStageMap(upgradeStageMap)
+ if (interactive){
+ input message: common.getColorizedString(
+ "Above you can find detailed info this pipeline will execute.\nThe info provides brief description of each stage, actions that will be performed and service/workload impact during each stage.\nPlease read it carefully.", "yellow")
+ }
+
+ for (target in targetNodes){
+ common.stageWrapper(upgradeStageMap, "Pre upgrade", target, interactive) {
+ openstack.runOpenStackUpgradePhase(env, target, 'pre')
+ openstack.runOpenStackUpgradePhase(env, target, 'verify')
+ }
+
+ common.stageWrapper(upgradeStageMap, "Upgrade pre: migrate resources", target, interactive) {
+ if (migrateResources) {
+ common.infoMsg("Migrating neutron resources from ${target}")
+ openstack.runOpenStackUpgradePhase(env, target, 'upgrade.pre')
+ // Start upgrade only when resources were successfully migrated
+ }
+ }
+
+ common.stageWrapper(upgradeStageMap, "Upgrade OpenStack", target, interactive) {
+ // Stop services on node. //Do actual step by step orch here.
+ openstack.runOpenStackUpgradePhase(env, target, 'service_stopped')
+ openstack.runOpenStackUpgradePhase(env, target, 'pkgs_latest')
+ openstack.runOpenStackUpgradePhase(env, target, 'render_config')
+ openstack.runOpenStackUpgradePhase(env, target, 'service_running')
+ openstack.applyOpenstackAppsStates(env, target)
+ openstack.runOpenStackUpgradePhase(env, target, 'verify')
+ }
+ common.stageWrapper(upgradeStageMap, "Upgrade OS", target, interactive) {
+ if (OS_DIST_UPGRADE.toBoolean() == true){
+ upgrade_mode = 'dist-upgrade'
+ } else if (OS_UPGRADE.toBoolean() == true){
+ upgrade_mode = 'upgrade'
+ }
+ if (OS_DIST_UPGRADE.toBoolean() == true || OS_UPGRADE.toBoolean() == true) {
+ debian.osUpgradeNode(env, target, upgrade_mode, false)
+ }
+ openstack.applyOpenstackAppsStates(env, target)
+ openstack.runOpenStackUpgradePhase(env, target, 'verify')
+ }
+
+ common.stageWrapper(upgradeStageMap, "Upgrade post: enable resources", target, interactive) {
+ openstack.runOpenStackUpgradePhase(env, target, 'upgrade.post')
+ }
+ }
+ }
+}
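
The stage map above is consumed by common.printStageMap and common.stageWrapper from the shared pipeline library. A minimal sketch of what such a wrapper could look like - the body below is an assumption for illustration, not the library's actual implementation:

// Hypothetical wrapper: ask for confirmation, run the phase, record the outcome in the shared map.
def stageWrapperSketch(Map stageMap, String stageName, String target, Boolean interactive, Closure body) {
    stage("${stageName} - ${target}") {
        if (interactive) {
            input message: "Run stage '${stageName}' on ${target}? Expected behaviors: ${stageMap[stageName]['Expected behaviors']}"
        }
        try {
            body.call()
            stageMap[stageName]['Status'] = 'SUCCESS'
        } catch (Exception e) {
            stageMap[stageName]['Status'] = 'FAILURE'
            throw e
        }
    }
}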
diff --git a/ovs-gateway-upgrade.groovy b/ovs-gateway-upgrade.groovy
deleted file mode 100644
index 87cf828..0000000
--- a/ovs-gateway-upgrade.groovy
+++ /dev/null
@@ -1,163 +0,0 @@
-/**
- * Update packages on given nodes
- *
- * Expected parameters:
- * SALT_MASTER_CREDENTIALS Credentials to the Salt API.
- * SALT_MASTER_URL Full Salt API address [https://10.10.10.1:8000].
- * TARGET_SERVERS Salt compound target to match nodes to be updated [*, G@osfamily:debian].
- * TARGET_SUBSET_TEST Number of nodes to list package updates, empty string means all targetted nodes.
- * TARGET_SUBSET_LIVE Number of selected nodes to live apply selected package update.
- * INTERACTIVE Ask interactive questions during pipeline run (bool).
- *
-**/
-
-def common = new com.mirantis.mk.Common()
-def salt = new com.mirantis.mk.Salt()
-def python = new com.mirantis.mk.Python()
-
-def pepperEnv = "pepperEnv"
-def targetTestSubset
-def targetLiveSubset
-def targetLiveAll
-def minions
-def result
-def args
-def command
-def commandKwargs
-def probe = 1
-timeout(time: 12, unit: 'HOURS') {
- node() {
- try {
-
- stage('Setup virtualenv for Pepper') {
- python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
- }
-
- stage('List target servers') {
- minions = salt.getMinions(pepperEnv, TARGET_SERVERS)
-
- if (minions.isEmpty()) {
- throw new Exception("No minion was targeted")
- }
-
- if (TARGET_SUBSET_TEST != "") {
- targetTestSubset = minions.subList(0, Integer.valueOf(TARGET_SUBSET_TEST)).join(' or ')
- } else {
- targetTestSubset = minions.join(' or ')
- }
- targetLiveSubset = minions.subList(0, Integer.valueOf(TARGET_SUBSET_LIVE)).join(' or ')
- targetTestSubsetProbe = minions.subList(0, probe).join(' or ')
- targetLiveSubsetProbe = minions.subList(0, probe).join(' or ')
-
- targetLiveAll = minions.join(' or ')
- common.infoMsg("Found nodes: ${targetLiveAll}")
- common.infoMsg("Selected test nodes: ${targetTestSubset}")
- common.infoMsg("Selected sample nodes: ${targetLiveSubset}")
- }
-
-
- stage("Add new repos on test nodes") {
- salt.enforceState(pepperEnv, targetTestSubset, 'linux.system.repo')
- }
-
- stage("List package upgrades") {
- salt.runSaltProcessStep(pepperEnv, targetTestSubset, 'pkg.list_upgrades', [], null, true)
- }
-
- if (INTERACTIVE.toBoolean()){
- stage('Confirm upgrade on sample nodes') {
- input message: "Please verify the list of packages that you want to be upgraded. Do you want to continue with upgrade?"
- }
- }
-
- stage("Add new repos on sample nodes") {
- salt.enforceState(pepperEnv, targetLiveSubset, 'linux.system.repo')
- }
-
- args = "apt-get -y -s -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confold\" dist-upgrade"
-
- stage('Test upgrade on sample') {
- try {
- salt.cmdRun(pepperEnv, targetLiveSubset, args)
- } catch (Exception er) {
- print(er)
- }
- }
-
- if (INTERACTIVE.toBoolean()){
- stage('Confirm upgrade on sample') {
- input message: "Please verify if there are packages that it wants to downgrade. If so, execute apt-cache policy on them and verify if everything is fine. Do you want to continue with upgrade?"
- }
- }
-
- command = "cmd.run"
- args = 'export DEBIAN_FRONTEND=noninteractive; apt-get -y -q --allow-downgrades -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confold\" dist-upgrade;'
-
- stage('Apply package upgrades on sample') {
- out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, args, commandKwargs)
- salt.printSaltCommandResult(out)
- }
-
- args = "sudo /usr/share/openvswitch/scripts/ovs-ctl start"
-
- stage('Start ovs on sample nodes') {
- out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, args, commandKwargs)
- salt.printSaltCommandResult(out)
- }
- stage("Run Neutron state on sample nodes") {
- salt.enforceState(pepperEnv, targetLiveSubset, ['neutron'])
- }
-
- stage("Run Highstate on sample nodes") {
- try {
- salt.enforceHighstate(pepperEnv, targetLiveSubset)
- } catch (Exception er) {
- common.errorMsg("Highstate was executed on ${targetLiveSubset} but something failed. Please check it and fix it accordingly.")
- }
- }
-
- if (INTERACTIVE.toBoolean()){
- stage('Confirm upgrade on all targeted nodes') {
- timeout(time: 2, unit: 'HOURS') {
- input message: "Verify that the upgraded sample nodes are working correctly. If so, do you want to approve live upgrade on ${targetLiveAll} nodes?"
- }
- }
- }
-
- stage("Add new repos on all targeted nodes") {
- salt.enforceState(pepperEnv, targetLiveAll, 'linux.system.repo')
- }
-
- args = 'export DEBIAN_FRONTEND=noninteractive; apt-get -y -q --allow-downgrades -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confold\" dist-upgrade;'
-
- stage('Apply package upgrades on all targeted nodes') {
- out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, args, commandKwargs)
- salt.printSaltCommandResult(out)
- }
-
- args = "sudo /usr/share/openvswitch/scripts/ovs-ctl start"
-
- stage('Start ovs on all targeted nodes') {
- out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, args, commandKwargs)
- salt.printSaltCommandResult(out)
- }
- stage("Run Neutron state on all targeted nodes") {
- salt.enforceState(pepperEnv, targetLiveAll, ['neutron'])
- }
-
- stage("Run Highstate on all targeted nodes") {
- try {
- salt.enforceHighstate(pepperEnv, targetLiveAll)
- } catch (Exception er) {
- common.errorMsg("Highstate was executed ${targetLiveAll} but something failed. Please check it and fix it accordingly.")
- }
- }
-
- } catch (Throwable e) {
- // If there was an error or exception thrown, the build failed
- currentBuild.result = "FAILURE"
- currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
- throw e
- }
- }
-}
diff --git a/promote-mirror-ubuntu-related.groovy b/promote-mirror-ubuntu-related.groovy
index cd663cf..f5e97be 100644
--- a/promote-mirror-ubuntu-related.groovy
+++ b/promote-mirror-ubuntu-related.groovy
@@ -15,7 +15,7 @@
node() {
stage("Promote") {
catchError {
- for (String jobname : ['mirror-snapshot-name-maas-xenial', 'mirror-snapshot-name-ubuntu', 'mirror-snapshot-name-maas-ephemeral-v3']) {
+ for (String jobname : ['mirror-snapshot-name-maas-xenial', 'mirror-snapshot-name-ubuntu', 'ebf-hotfix-ubuntu', 'ebf-update-ubuntu', 'mirror-snapshot-name-maas-ephemeral-v3']) {
build job: jobname, parameters: [
[$class: 'StringParameterValue', name: 'SNAPSHOT_NAME', value: SNAPSHOT_NAME],
[$class: 'StringParameterValue', name: 'SNAPSHOT_ID', value: SNAPSHOT_ID],
diff --git a/promote-vcp-images.groovy b/promote-vcp-images.groovy
index 181eafa..7b4f80e 100644
--- a/promote-vcp-images.groovy
+++ b/promote-vcp-images.groovy
@@ -17,6 +17,8 @@
slaveNode = env.SLAVE_NODE ?: 'jsl23.mcp.mirantis.net'
def job_env = env.getEnvironment().findAll { k, v -> v }
def verify = job_env.VERIFY_DOWNLOAD ?: true
+def overwrite = (job_env.FORCE_OVERWRITE ?: 'false').toBoolean()
+
timeout(time: 6, unit: 'HOURS') {
@@ -91,7 +93,7 @@
remoteImageStatus = ''
remoteImageStatus = sh(script: "wget --auth-no-challenge --spider ${targetImageUrl} 2>/dev/null", returnStatus: true)
// wget return code 8 ,if file not exist
- if (remoteImageStatus != '8') {
+ if (remoteImageStatus != 8 && !overwrite) {
error("Attempt to overwrite existing release! Target: ${targetImage} already exist!")
}
}
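
The corrected guard above fixes a type bug: sh(..., returnStatus: true) returns an Integer, so the old comparison against the string '8' was always true and the error fired even when the remote file did not exist. A short illustration of the fixed logic:

// returnStatus yields an Integer exit code: 0 when the remote file exists, 8 when wget can't find it.
def remoteImageStatus = sh(script: "wget --auth-no-challenge --spider ${targetImageUrl} 2>/dev/null", returnStatus: true)
assert (remoteImageStatus != '8')   // an Integer never equals a String in Groovy, so the old guard always fired
if (remoteImageStatus != 8 && !overwrite) {
    error("Attempt to overwrite existing release! Target: ${targetImage} already exist!")
}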
diff --git a/release-mcp-version.groovy b/release-mcp-version.groovy
index 1972465..470f338 100644
--- a/release-mcp-version.groovy
+++ b/release-mcp-version.groovy
@@ -46,7 +46,7 @@
[$class: 'StringParameterValue', name: 'TARGET_REGISTRY_CREDENTIALS_ID', value: dockerCredentials],
[$class: 'StringParameterValue', name: 'REGISTRY_URL', value: dockerRegistryUrl],
[$class: 'StringParameterValue', name: 'IMAGE_TAG', value: targetTag],
- [$class: 'StringParameterValue', name: 'IMAGE_LIST', value: imageList],
+ [$class: 'TextParameterValue', name: 'IMAGE_LIST', value: imageList],
[$class: 'StringParameterValue', name: 'SOURCE_IMAGE_TAG', value: sourceImageTag],
]
}
@@ -58,9 +58,16 @@
]
}
+def triggerEbfRepoJob(snapshotId, snapshotName) {
+ build job: "ebf-snapshot-name-all", parameters: [
+ [$class: 'StringParameterValue', name: 'SNAPSHOT_NAME', value: snapshotName],
+ [$class: 'StringParameterValue', name: 'SNAPSHOT_ID', value: snapshotId],
+ ]
+}
+
def triggerGitTagJob(gitRepoList, gitCredentials, tag, sourceTag) {
build job: "tag-git-repos-all", parameters: [
- [$class: 'StringParameterValue', name: 'GIT_REPO_LIST', value: gitRepoList],
+ [$class: 'TextParameterValue', name: 'GIT_REPO_LIST', value: gitRepoList],
[$class: 'StringParameterValue', name: 'GIT_CREDENTIALS', value: gitCredentials],
[$class: 'StringParameterValue', name: 'TAG', value: tag],
[$class: 'StringParameterValue', name: 'SOURCE_TAG', value: sourceTag],
@@ -69,9 +76,10 @@
def triggerPromoteVCPJob(VcpImageList, tag, sourceTag) {
build job: "promote-vcp-images-all", parameters: [
- [$class: 'StringParameterValue', name: 'VCP_IMAGE_LIST', value: VcpImageList],
+ [$class: 'TextParameterValue', name: 'VCP_IMAGE_LIST', value: VcpImageList],
[$class: 'StringParameterValue', name: 'TAG', value: tag],
- [$class: 'StringParameterValue', name: 'SOURCE_TAG', value: sourceTag]
+ [$class: 'StringParameterValue', name: 'SOURCE_TAG', value: sourceTag],
+ [$class: 'BooleanParameterValue', name: 'FORCE_OVERWRITE', value: true],
]
}
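
Several parameters above switch from StringParameterValue to TextParameterValue because they carry multi-line lists; a hedged usage sketch (the job name and image names are illustrative):

def imageList = '''mirantis/image-a:2019.1.0
mirantis/image-b:2019.1.0'''
build job: 'docker-mirror-images', parameters: [
    // TextParameterValue preserves the newlines that a multi-line list needs
    [$class: 'TextParameterValue', name: 'IMAGE_LIST', value: imageList],
]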
@@ -89,6 +97,11 @@
triggerMirrorRepoJob(SOURCE_REVISION, TARGET_REVISION)
}
+ if (RELEASE_EBF_MIRRORS.toBoolean()) {
+ common.infoMsg("Promoting Emergency Bug Fix Debmirrors")
+ triggerEbfRepoJob(SOURCE_REVISION, TARGET_REVISION)
+ }
+
if (RELEASE_DOCKER.toBoolean()) {
common.infoMsg("Promoting Docker images")
triggerDockerMirrorJob(DOCKER_CREDENTIALS, DOCKER_URL, TARGET_REVISION, DOCKER_IMAGES, SOURCE_REVISION)
diff --git a/test-cookiecutter-reclass-chunk.groovy b/test-cookiecutter-reclass-chunk.groovy
index 9e34cea..ebc4f9a 100644
--- a/test-cookiecutter-reclass-chunk.groovy
+++ b/test-cookiecutter-reclass-chunk.groovy
@@ -12,16 +12,36 @@
slaveNode = env.SLAVE_NODE ?: 'python&&docker'
timeout(time: 1, unit: 'HOURS') {
- node(slaveNode) {
- try {
- extraVars = readYaml text: EXTRA_VARIABLES_YAML
- currentBuild.description = extraVars.modelFile
- saltModelTesting.testCCModel(extraVars)
- } catch (Throwable e) {
- // If there was an error or exception thrown, the build failed
- currentBuild.result = "FAILURE"
- currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
- throw e
+ node(slaveNode) {
+ stage("RunTest") {
+ try {
+ extraVars = readYaml text: EXTRA_VARIABLES_YAML
+ currentBuild.description = extraVars.modelFile
+ sh(script: 'find . -mindepth 1 -delete || true', returnStatus: true)
+ sh(script: """
+ wget --progress=dot:mega --auth-no-challenge -O models.tar.gz ${extraVars.MODELS_TARGZ}
+ tar -xzf models.tar.gz
+ """)
+ common.infoMsg("Going to test exactly one context: ${extraVars.modelFile}\n, with params: ${extraVars}")
+
+ def content = readFile(file: extraVars.modelFile)
+ def templateContext = readYaml text: content
+ def config = [
+ 'dockerHostname': "cfg01.${templateContext.default_context.cluster_domain}",
+ 'clusterName': templateContext.default_context.cluster_name,
+ 'reclassEnv': extraVars.testReclassEnv,
+ 'formulasRevision': extraVars.DISTRIB_REVISION,
+ 'reclassVersion': extraVars.reclassVersion,
+ 'dockerContainerName': extraVars.DockerCName,
+ 'testContext': extraVars.modelFile
+ ]
+ saltModelTesting.testNode(config)
+ } catch (Throwable e) {
+ // If there was an error or exception thrown, the build failed
+ currentBuild.result = "FAILURE"
+ currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+ throw e
+ }
+ }
}
- }
}
diff --git a/test-cookiecutter-reclass.groovy b/test-cookiecutter-reclass.groovy
index e6d3070..6f73570 100644
--- a/test-cookiecutter-reclass.groovy
+++ b/test-cookiecutter-reclass.groovy
@@ -1,17 +1,68 @@
+/*
+Can be triggered from Gerrit, or run manually.
+Modes:
+1) Manual run via job-build; it is possible to pass a refspec.
+   TODO: currently impossible to use a custom COOKIECUTTER_TEMPLATE_URL | RECLASS_SYSTEM_URL; the Gerrit one is always used.
+   - for CC
+   - for Reclass
+
+2) Gerrit trigger.
+   Automatically switches if the GERRIT_PROJECT variable is detected.
+   Always tests GERRIT_REFSPEC vs the GERRIT_BRANCH-master version of the opposite project.
+ */
+
common = new com.mirantis.mk.Common()
gerrit = new com.mirantis.mk.Gerrit()
git = new com.mirantis.mk.Git()
python = new com.mirantis.mk.Python()
-gerritRef = env.GERRIT_REFSPEC ?: null
-slaveNode = (env.SLAVE_NODE ?: 'python&&docker')
-def alreadyMerged = false
-
-def reclassVersion = 'v1.5.4'
-if (common.validInputParam('RECLASS_VERSION')) {
- reclassVersion = RECLASS_VERSION
+def extraVarsYAML = env.EXTRA_VARIABLES_YAML ?: false
+if (extraVarsYAML) {
+ common.mergeEnv(env, extraVarsYAML)
}
+slaveNode = env.SLAVE_NODE ?: 'docker'
+checkIncludeOrder = env.CHECK_INCLUDE_ORDER ?: false
+
+// Global var's
+alreadyMerged = false
+gerritConData = [credentialsId : env.CREDENTIALS_ID,
+ gerritName : env.GERRIT_NAME ?: 'mcp-jenkins',
+ gerritHost : env.GERRIT_HOST ?: 'gerrit.mcp.mirantis.com',
+ gerritScheme : env.GERRIT_SCHEME ?: 'ssh',
+ gerritPort : env.GERRIT_PORT ?: '29418',
+ gerritRefSpec : null,
+ gerritProject : null,
+ withWipeOut : true,
+ GERRIT_CHANGE_NUMBER: null]
+//
+//ccTemplatesRepo = env.COOKIECUTTER_TEMPLATE_URL ?: 'ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/mk/cookiecutter-templates'
+gerritDataCCHEAD = [:]
+gerritDataCC = [:]
+gerritDataCC << gerritConData
+gerritDataCC['gerritBranch'] = env.COOKIECUTTER_TEMPLATE_BRANCH ?: 'master'
+gerritDataCC['gerritRefSpec'] = env.COOKIECUTTER_TEMPLATE_REF ?: null
+gerritDataCC['gerritProject'] = 'mk/cookiecutter-templates'
+//
+//reclassSystemRepo = env.RECLASS_SYSTEM_URL ?: 'ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/salt-models/reclass-system'
+gerritDataRSHEAD = [:]
+gerritDataRS = [:]
+gerritDataRS << gerritConData
+gerritDataRS['gerritBranch'] = env.RECLASS_MODEL_BRANCH ?: 'master'
+gerritDataRS['gerritRefSpec'] = env.RECLASS_SYSTEM_GIT_REF ?: null
+gerritDataRS['gerritProject'] = 'salt-models/reclass-system'
+
+// version of debRepos, aka formulas/reclass
+testDistribRevision = env.DISTRIB_REVISION ?: 'nightly'
+reclassVersion = 'v1.5.4'
+if (env.RECLASS_VERSION) {
+ reclassVersion = env.RECLASS_VERSION
+}
+// Name of sub-test chunk job
+chunkJobName = "test-mk-cookiecutter-templates-chunk"
+testModelBuildsData = [:]
+
def generateSaltMaster(modEnv, clusterDomain, clusterName) {
def nodeFile = "${modEnv}/nodes/cfg01.${clusterDomain}.yml"
def nodeString = """classes:
@@ -33,7 +84,7 @@
/**
*
* @param contextFile - path to `contexts/XXX.yaml file`
- * @param virtualenv - pyvenv with CC and dep's
+ * @param virtualenv - pyvenv with CC and dep's
* @param templateEnvDir - root of CookieCutter
* @return
*/
@@ -57,22 +108,18 @@
for (product in productList) {
// get templateOutputDir and productDir
- if (product.startsWith("stacklight")) {
- templateOutputDir = "${templateEnvDir}/output/stacklight"
- try {
- productDir = "stacklight" + templateContext.default_context['stacklight_version']
- } catch (Throwable e) {
- productDir = "stacklight1"
- }
- } else {
- templateOutputDir = "${templateEnvDir}/output/${product}"
- productDir = product
+ templateOutputDir = "${templateEnvDir}/output/${product}"
+ productDir = product
+ templateDir = "${templateEnvDir}/cluster_product/${productDir}"
+ // Backward compatibility for 2018.8.1 and older releases
+ if (product.startsWith("stacklight") && (!fileExists(templateDir))) {
+ common.warningMsg("Old release detected! productDir => 'stacklight2' ")
+ productDir = "stacklight2"
+ templateDir = "${templateEnvDir}/cluster_product/${productDir}"
}
-
if (product == "infra" || (templateContext.default_context["${product}_enabled"]
&& templateContext.default_context["${product}_enabled"].toBoolean())) {
- templateDir = "${templateEnvDir}/cluster_product/${productDir}"
common.infoMsg("Generating product " + product + " from " + templateDir + " to " + templateOutputDir)
sh "rm -rf ${templateOutputDir} || true"
@@ -89,28 +136,40 @@
}
}
-
-def testModel(modelFile, reclassVersion = 'v1.5.4') {
- // modelFile - `modelfiname` from model/modelfiname/modelfiname.yaml
- //* Grub all models and send it to check in paralell - by one in thread.
-
- _values_string = """
- ---
- MODELS_TARGZ: "${env.BUILD_URL}/artifact/patched_reclass.tar.gz"
- DockerCName: "${env.JOB_NAME.toLowerCase()}_${env.BUILD_TAG.toLowerCase()}_${modelFile.toLowerCase()}"
- testReclassEnv: "model/${modelFile}/"
- modelFile: "contexts/${modelFile}.yml"
- DISTRIB_REVISION: "${DISTRIB_REVISION}"
- EXTRA_FORMULAS: "${env.EXTRA_FORMULAS}"
- reclassVersion: "${reclassVersion}"
- """
- build job: "test-mk-cookiecutter-templates-chunk", parameters: [
- [$class: 'StringParameterValue', name: 'EXTRA_VARIABLES_YAML',
- value : _values_string.stripIndent()],
- ]
+def getAndUnpackNodesInfoArtifact(jobName, copyTo, build) {
+ return {
+ dir(copyTo) {
+ copyArtifacts(projectName: jobName, selector: specific(build), filter: "nodesinfo.tar.gz")
+ sh "tar -xf nodesinfo.tar.gz"
+ sh "rm -v nodesinfo.tar.gz"
+ }
+ }
}
-def StepTestModel(basename) {
+def testModel(modelFile, reclassArtifactName, artifactCopyPath) {
+ // modelFile - `modelfilename` from model/modelfilename/modelfilename.yaml
+ // Grab all models and send them to check in parallel - one per thread.
+ def _uuid = "${env.JOB_NAME.toLowerCase()}_${env.BUILD_TAG.toLowerCase()}_${modelFile.toLowerCase()}_" + UUID.randomUUID().toString().take(8)
+ def _values_string = """
+ ---
+ MODELS_TARGZ: "${env.BUILD_URL}/artifact/${reclassArtifactName}"
+ DockerCName: "${_uuid}"
+ testReclassEnv: "model/${modelFile}/"
+ modelFile: "contexts/${modelFile}.yml"
+ DISTRIB_REVISION: "${testDistribRevision}"
+ reclassVersion: "${reclassVersion}"
+ """
+ def chunkJob = build job: chunkJobName, parameters: [
+ [$class: 'TextParameterValue', name: 'EXTRA_VARIABLES_YAML',
+ value : _values_string.stripIndent()],
+ ]
+ // Put sub-job info into global map.
+ testModelBuildsData.put(_uuid, ['jobname' : chunkJob.fullProjectName,
+ 'copyToDir': "${artifactCopyPath}/${modelFile}",
+ 'buildId' : "${chunkJob.number}"])
+}
+
+def StepTestModel(basename, reclassArtifactName, artifactCopyPath) {
// We need to wrap what we return in a Groovy closure, or else it's invoked
// when this method is called, not when we pass it to parallel.
// To do this, you need to wrap the code below in { }, and either return
@@ -118,32 +177,38 @@
// return node object
return {
node(slaveNode) {
- testModel(basename)
+ testModel(basename, reclassArtifactName, artifactCopyPath)
}
}
}
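
As the comment in StepTestModel notes, returning a closure defers execution until parallel runs it; a self-contained sketch of the pattern (the context names are illustrative):

def stepsForParallel = [:]
for (String ctx : ['ceph', 'infra']) {
    def name = ctx  // capture the loop variable, otherwise every closure sees the last value
    stepsForParallel["ContextTest:${name}"] = {
        node(slaveNode) {
            echo "testing ${name}"
        }
    }
}
parallel stepsForParallel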
-def StepPrepareCCenv(refchange, templateEnvFolder) {
+def StepPrepareGit(templateEnvFolder, gerrit_data) {
// return git clone object
return {
+ def checkouted = false
+ common.infoMsg("StepPrepareGit: ${gerrit_data}")
// fetch needed sources
dir(templateEnvFolder) {
- if (refchange) {
- def gerritChange = gerrit.getGerritChange(GERRIT_NAME, GERRIT_HOST, GERRIT_CHANGE_NUMBER, CREDENTIALS_ID)
+ if (gerrit_data['gerritRefSpec']) {
+ // This part might not work in case vars are passed manually
+ def gerritChange = gerrit.getGerritChange(gerrit_data['gerritName'], gerrit_data['gerritHost'],
+ gerrit_data['GERRIT_CHANGE_NUMBER'], gerrit_data['credentialsId'])
merged = gerritChange.status == "MERGED"
if (!merged) {
- checkouted = gerrit.gerritPatchsetCheckout([
- credentialsId: CREDENTIALS_ID
- ])
+ checkouted = gerrit.gerritPatchsetCheckout(gerrit_data)
} else {
- // update global variable for success return from pipeline
- //alreadyMerged = true
- common.successMsg("Change ${GERRIT_CHANGE_NUMBER} is already merged, no need to gate them")
- currentBuild.result = 'ABORTED'
- throw new hudson.AbortException('change already merged')
+ // update global variable for pretty return from pipeline
+ alreadyMerged = true
+ common.successMsg("Change ${gerrit_data['GERRIT_CHANGE_NUMBER']} is already merged, no need to gate it")
+ error('change already merged')
}
} else {
- git.checkoutGitRepository(templateEnvFolder, COOKIECUTTER_TEMPLATE_URL, COOKIECUTTER_TEMPLATE_BRANCH, CREDENTIALS_ID)
+ // Get clean HEAD
+ gerrit_data['useGerritTriggerBuildChooser'] = false
+ checkouted = gerrit.gerritPatchsetCheckout(gerrit_data)
+ if (!checkouted) {
+ error("Failed to get repo:${gerrit_data}")
+ }
}
}
}
@@ -157,28 +222,142 @@
}
}
+def globalVariatorsUpdate() {
+ // Simple function to check and define branch-related variables.
+ // In general, it simply makes transition updates for non-master branches,
+ // based on magic logic.
+ def message = '<br/>'
+ if (env.GERRIT_PROJECT) {
+ // TODO are we going to have such branches?
+ if (!['nightly', 'testing', 'stable', 'proposed', 'master'].contains(env.GERRIT_BRANCH)) {
+ gerritDataCC['gerritBranch'] = env.GERRIT_BRANCH
+ gerritDataRS['gerritBranch'] = env.GERRIT_BRANCH
+ testDistribRevision = env.GERRIT_BRANCH
+ }
+ // Identify, who triggered. To whom we should pass refspec
+ if (env.GERRIT_PROJECT == 'salt-models/reclass-system') {
+ gerritDataRS['gerritRefSpec'] = env.GERRIT_REFSPEC
+ gerritDataRS['GERRIT_CHANGE_NUMBER'] = env.GERRIT_CHANGE_NUMBER
+ message = message + "<br/>RECLASS_SYSTEM_GIT_REF =>${gerritDataRS['gerritRefSpec']}"
+ } else if (env.GERRIT_PROJECT == 'mk/cookiecutter-templates') {
+ gerritDataCC['gerritRefSpec'] = env.GERRIT_REFSPEC
+ gerritDataCC['GERRIT_CHANGE_NUMBER'] = env.GERRIT_CHANGE_NUMBER
+ message = message + "<br/>COOKIECUTTER_TEMPLATE_REF =>${gerritDataCC['gerritRefSpec']}"
+ } else {
+ error("Unsuported gerrit-project triggered:${env.GERRIT_PROJECT}")
+ }
+ message = "<font color='red'>GerritTrigger detected! We are in auto-mode:</font>" +
+ "<br/>Test env variables has been changed:" +
+ "<br/>COOKIECUTTER_TEMPLATE_BRANCH => ${gerritDataCC['gerritBranch']}" +
+ "<br/>RECLASS_MODEL_BRANCH=> ${gerritDataRS['gerritBranch']}" + message
+ } else {
+ message = "<font color='red'>Non-gerrit trigger run detected!</font>" + message
+ }
+ gerritDataCCHEAD << gerritDataCC
+ gerritDataCCHEAD['gerritRefSpec'] = null
+ gerritDataCCHEAD['GERRIT_CHANGE_NUMBER'] = null
+ gerritDataRSHEAD << gerritDataRS
+ gerritDataRSHEAD['gerritRefSpec'] = null
+ gerritDataRSHEAD['GERRIT_CHANGE_NUMBER'] = null
+ // 'binary' branch logic w/o the 'release/' prefix
+ if (testDistribRevision.contains('/')) {
+ testDistribRevision = testDistribRevision.split('/')[-1]
+ }
+ // Check if we are going to test bleeding-edge release, which doesn't have binary release yet
+ if (!common.checkRemoteBinary([apt_mk_version: testDistribRevision]).linux_system_repo_url) {
+ common.errorMsg("Binary release: ${testDistribRevision} does not exist. Falling back to 'proposed'!")
+ testDistribRevision = 'proposed'
+ message = "<br/>DISTRIB_REVISION =>${testDistribRevision}" + message
+ }
+ currentBuild.description = currentBuild.description ? message + currentBuild.description : message
+}
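
The 'binary branch' normalization in globalVariatorsUpdate keeps only the part after the last slash, e.g.:

assert 'release/2019.2.0'.split('/')[-1] == '2019.2.0'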
+
+def replaceGeneratedValues(path) {
+ def files = sh(script: "find ${path} -name 'secrets.yml'", returnStdout: true)
+ def stepsForParallel = [:]
+ stepsForParallel.failFast = true
+ files.tokenize().each {
+ stepsForParallel.put("Removing generated passwords/secrets from ${it}",
+ {
+ def secrets = readYaml file: it
+ for (String key in secrets['parameters']['_param'].keySet()) {
+ secrets['parameters']['_param'][key] = 'generated'
+ }
+ // writeYaml can't write to an already existing file
+ writeYaml file: "${it}.tmp", data: secrets
+ sh "mv ${it}.tmp ${it}"
+ })
+ }
+ parallel stepsForParallel
+}
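
replaceGeneratedValues works around writeYaml's refusal to overwrite an existing file by writing to a temporary path and moving it back; the same pattern in isolation, for a single file:

def scrubSecrets(String path) {
    def secrets = readYaml file: path
    secrets['parameters']['_param'].keySet().each { key ->
        secrets['parameters']['_param'][key] = 'generated'
    }
    writeYaml file: "${path}.tmp", data: secrets  // writeYaml fails on an existing file
    sh "mv ${path}.tmp ${path}"
}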
+
+def linkReclassModels(contextList, envPath, archiveName) {
+ // to be able to share reclass across all subenvs
+ // Also makes the artifact test more solid - use one reclass for all sub-models.
+ // Archive Structure will be:
+ // tar.gz
+ // ├── contexts
+ // │ └── ceph.yml
+ // ├── ${reclassDirName} <<< reclass system
+ // ├── model
+ // │ └── ceph <<< from `context basename`
+ // │ ├── classes
+ // │ │ ├── cluster
+ // │ │ └── system -> ../../../${reclassDirName}
+ // │ └── nodes
+ // │ └── cfg01.ceph-cluster-domain.local.yml
+ dir(envPath) {
+ for (String context : contextList) {
+ def basename = common.GetBaseName(context, '.yml')
+ dir("${envPath}/model/${basename}") {
+ sh(script: "mkdir -p classes/; ln -sfv ../../../../${common.GetBaseName(archiveName, '.tar.gz')} classes/system ")
+ }
+ }
+ // replace all generated passwords/secrets/keys with hardcoded values for infra/secrets.yaml
+ replaceGeneratedValues("${envPath}/model")
+ // Save all models and all contexts. Warning! `h` flag must be used!
+ sh(script: "set -ex; tar -czhf ${env.WORKSPACE}/${archiveName} --exclude='*@tmp' model contexts", returnStatus: true)
+ }
+ archiveArtifacts artifacts: archiveName
+}
+
timeout(time: 1, unit: 'HOURS') {
node(slaveNode) {
+ globalVariatorsUpdate()
def templateEnvHead = "${env.WORKSPACE}/EnvHead/"
def templateEnvPatched = "${env.WORKSPACE}/EnvPatched/"
def contextFileListHead = []
def contextFileListPatched = []
def vEnv = "${env.WORKSPACE}/venv"
-
+ def headReclassArtifactName = "head_reclass.tar.gz"
+ def patchedReclassArtifactName = "patched_reclass.tar.gz"
+ def reclassNodeInfoDir = "${env.WORKSPACE}/reclassNodeInfo_compare/"
+ def reclassInfoHeadPath = "${reclassNodeInfoDir}/old"
+ def reclassInfoPatchedPath = "${reclassNodeInfoDir}/new"
try {
sh(script: 'find . -mindepth 1 -delete > /dev/null || true')
stage('Download and prepare CC env') {
// Prepare 2 env - for patchset, and for HEAD
- paralellEnvs = [:]
+ def paralellEnvs = [:]
paralellEnvs.failFast = true
- paralellEnvs['downloadEnvHead'] = StepPrepareCCenv('', templateEnvHead)
- paralellEnvs['downloadEnvPatched'] = StepPrepareCCenv(gerritRef, templateEnvPatched)
- parallel paralellEnvs
+ paralellEnvs['downloadEnvHead'] = StepPrepareGit(templateEnvHead, gerritDataCCHEAD)
+ if (gerritDataCC.get('gerritRefSpec', null)) {
+ paralellEnvs['downloadEnvPatched'] = StepPrepareGit(templateEnvPatched, gerritDataCC)
+ parallel paralellEnvs
+ } else {
+ paralellEnvs['downloadEnvPatched'] = { common.warningMsg('No need to process: downloadEnvPatched') }
+ parallel paralellEnvs
+ sh("rsync -a --exclude '*@tmp' ${templateEnvHead} ${templateEnvPatched}")
+ }
}
stage("Check workflow_definition") {
// Check only for patchset
python.setupVirtualenv(vEnv, 'python2', [], "${templateEnvPatched}/requirements.txt")
- common.infoMsg(python.runVirtualenvCommand(vEnv, "python ${templateEnvPatched}/workflow_definition_test.py"))
+ if (gerritDataCC.get('gerritRefSpec', null)) {
+ common.infoMsg(python.runVirtualenvCommand(vEnv, "python ${templateEnvPatched}/workflow_definition_test.py"))
+ } else {
+ common.infoMsg('No need to process: workflow_definition')
+ }
}
stage("generate models") {
@@ -193,108 +372,138 @@
}
}
// Generate over 2env's - for patchset, and for HEAD
- paralellEnvs = [:]
+ def paralellEnvs = [:]
paralellEnvs.failFast = true
- paralellEnvs['GenerateEnvPatched'] = StepGenerateModels(contextFileListPatched, vEnv, templateEnvPatched)
paralellEnvs['GenerateEnvHead'] = StepGenerateModels(contextFileListHead, vEnv, templateEnvHead)
- parallel paralellEnvs
-
- // Collect artifacts
- dir(templateEnvPatched) {
- // Collect only models. For backward comparability - who know, probably someone use it..
- sh(script: "tar -czf model.tar.gz -C model ../contexts .", returnStatus: true)
- archiveArtifacts artifacts: "model.tar.gz"
+ if (gerritDataCC.get('gerritRefSpec', null)) {
+ paralellEnvs['GenerateEnvPatched'] = StepGenerateModels(contextFileListPatched, vEnv, templateEnvPatched)
+ parallel paralellEnvs
+ } else {
+ paralellEnvs['GenerateEnvPatched'] = { common.warningMsg('No need to process: GenerateEnvPatched') }
+ parallel paralellEnvs
+ sh("rsync -a --exclude '*@tmp' ${templateEnvHead} ${templateEnvPatched}")
}
- // to be able share reclass for all subenvs
- // Also, makes artifact test more solid - use one reclass for all of sub-models.
- // Archive Structure will be:
- // tar.gz
- // ├── contexts
- // │ └── ceph.yml
- // ├── global_reclass <<< reclass system
- // ├── model
- // │ └── ceph <<< from `context basename`
- // │ ├── classes
- // │ │ ├── cluster
- // │ │ └── system -> ../../../global_reclass
- // │ └── nodes
- // │ └── cfg01.ceph-cluster-domain.local.yml
-
- if (SYSTEM_GIT_URL == "") {
- git.checkoutGitRepository("${env.WORKSPACE}/global_reclass/", RECLASS_MODEL_URL, RECLASS_MODEL_BRANCH, CREDENTIALS_ID)
+ // We need two git checkouts: one for HEAD, one for the patched version.
+ // If there is no patch, use HEAD for both.
+ RSHeadDir = common.GetBaseName(headReclassArtifactName, '.tar.gz')
+ RSPatchedDir = common.GetBaseName(patchedReclassArtifactName, '.tar.gz')
+ common.infoMsg("gerritDataRS= ${gerritDataRS}")
+ common.infoMsg("gerritDataRSHEAD= ${gerritDataRSHEAD}")
+ if (gerritDataRS.get('gerritRefSpec', null)) {
+ StepPrepareGit("${env.WORKSPACE}/${RSPatchedDir}/", gerritDataRS).call()
+ StepPrepareGit("${env.WORKSPACE}/${RSHeadDir}/", gerritDataRSHEAD).call()
} else {
- dir("${env.WORKSPACE}/global_reclass/") {
- if (!gerrit.gerritPatchsetCheckout(SYSTEM_GIT_URL, SYSTEM_GIT_REF, "HEAD", CREDENTIALS_ID)) {
- common.errorMsg("Failed to obtain system reclass with url: ${SYSTEM_GIT_URL} and ${SYSTEM_GIT_REF}")
- throw new RuntimeException("Failed to obtain system reclass")
- }
- }
+ StepPrepareGit("${env.WORKSPACE}/${RSHeadDir}/", gerritDataRS).call()
+ sh("cd ${env.WORKSPACE} ; ln -svf ${RSHeadDir} ${RSPatchedDir}")
}
// link all models, to use one global reclass
// For HEAD
- dir(templateEnvHead) {
- for (String context : contextFileListHead) {
- def basename = common.GetBaseName(context, '.yml')
- dir("${templateEnvHead}/model/${basename}") {
- sh(script: 'mkdir -p classes/; ln -sfv ../../../../global_reclass classes/system ')
- }
- }
- // Save all models and all contexts. Warning! `h` flag must be used.
- sh(script: "tar -chzf head_reclass.tar.gz --exclude='*@tmp' model contexts global_reclass", returnStatus: true)
- archiveArtifacts artifacts: "head_reclass.tar.gz"
- // move for "Compare Pillars" stage
- sh(script: "mv -v head_reclass.tar.gz ${env.WORKSPACE}")
- }
+ linkReclassModels(contextFileListHead, templateEnvHead, headReclassArtifactName)
// For patched
- dir(templateEnvPatched) {
- for (String context : contextFileListPatched) {
- def basename = common.GetBaseName(context, '.yml')
- dir("${templateEnvPatched}/model/${basename}") {
- sh(script: 'mkdir -p classes/; ln -sfv ../../../../global_reclass classes/system ')
- }
- }
- // Save all models and all contexts. Warning! `h` flag must be used.
- sh(script: "tar -chzf patched_reclass.tar.gz --exclude='*@tmp' model contexts global_reclass", returnStatus: true)
- archiveArtifacts artifacts: "patched_reclass.tar.gz"
- // move for "Compare Pillars" stage
- sh(script: "mv -v patched_reclass.tar.gz ${env.WORKSPACE}")
- }
+ linkReclassModels(contextFileListPatched, templateEnvPatched, patchedReclassArtifactName)
}
- stage("Compare Pillars") {
+ stage("Compare cluster lvl Head/Patched") {
// Compare patched and HEAD reclass pillars
- compareRoot = "${env.WORKSPACE}/test_compare/"
+ compareRoot = "${env.WORKSPACE}/cluster_compare/"
sh(script: """
mkdir -pv ${compareRoot}/new ${compareRoot}/old
- tar -xzf patched_reclass.tar.gz --directory ${compareRoot}/new
- tar -xzf head_reclass.tar.gz --directory ${compareRoot}/old
+ tar -xzf ${patchedReclassArtifactName} --directory ${compareRoot}/new
+ tar -xzf ${headReclassArtifactName} --directory ${compareRoot}/old
""")
common.warningMsg('infra/secrets.yml has been skipped from compare!')
- rezult = common.comparePillars(compareRoot, env.BUILD_URL, "-Ev \'infra/secrets.yml\'")
- currentBuild.description = rezult
+ result = '\n' + common.comparePillars(compareRoot, env.BUILD_URL, "-Ev \'infra/secrets.yml|\\.git\'")
+ currentBuild.description = currentBuild.description ? currentBuild.description + result : result
}
- stage("test-contexts") {
- // Test contexts for patched only
- stepsForParallel = [:]
+ stage("TestContexts Head/Patched") {
+ def stepsForParallel = [:]
+ stepsForParallel.failFast = true
+ common.infoMsg("Found: ${contextFileListHead.size()} HEAD contexts to test.")
+ for (String context : contextFileListHead) {
+ def basename = common.GetBaseName(context, '.yml')
+ stepsForParallel.put("ContextHeadTest:${basename}", StepTestModel(basename, headReclassArtifactName, reclassInfoHeadPath))
+ }
common.infoMsg("Found: ${contextFileListPatched.size()} patched contexts to test.")
for (String context : contextFileListPatched) {
def basename = common.GetBaseName(context, '.yml')
- stepsForParallel.put("ContextPatchTest:${basename}", StepTestModel(basename))
+ stepsForParallel.put("ContextPatchedTest:${basename}", StepTestModel(basename, patchedReclassArtifactName, reclassInfoPatchedPath))
}
parallel stepsForParallel
- common.infoMsg('All tests done')
+ common.infoMsg('All TestContexts tests done')
}
+ stage("Compare NodesInfo Head/Patched") {
+ // Download all artifacts
+ def stepsForParallel = [:]
+ stepsForParallel.failFast = true
+ common.infoMsg("Found: ${testModelBuildsData.size()} nodeinfo artifacts to download.")
+ testModelBuildsData.each { bname, bdata ->
+ stepsForParallel.put("FetchData:${bname}",
+ getAndUnpackNodesInfoArtifact(bdata.jobname, bdata.copyToDir, bdata.buildId))
+ }
+ parallel stepsForParallel
+ // remove timestamp field from rendered files
+ sh("find ${reclassNodeInfoDir} -type f -exec sed -i '/ timestamp: .*/d' {} \\;")
+ // Compare patched and HEAD reclass pillars
+ result = '\n' + common.comparePillars(reclassNodeInfoDir, env.BUILD_URL, '')
+ currentBuild.description = currentBuild.description ? currentBuild.description + result : result
+ }
+ stage('Check include order') {
+ if (!checkIncludeOrder) {
+ common.infoMsg('The include order check requires too much time and is currently disabled!')
+ } else {
+ def correctIncludeOrder = ["service", "system", "cluster"]
+ dir(reclassInfoPatchedPath) {
+ def nodeInfoFiles = findFiles(glob: "**/*.reclass.nodeinfo")
+ def messages = ["<b>Wrong include ordering found</b><ul>"]
+ def stepsForParallel = [:]
+ nodeInfoFiles.each { nodeInfo ->
+ stepsForParallel.put("Checking ${nodeInfo.path}:", {
+ def node = readYaml file: nodeInfo.path
+ def classes = node['classes']
+ def curClassID = 0
+ def prevClassID = 0
+ def wrongOrder = false
+ for (String className in classes) {
+ def currentClass = className.tokenize('.')[0]
+ curClassID = correctIncludeOrder.indexOf(currentClass)
+ if (currentClass != correctIncludeOrder[prevClassID]) {
+ if (prevClassID > curClassID) {
+ wrongOrder = true
+ common.warningMsg("File ${nodeInfo.path} contains wrong order of classes including: Includes for ${className} should be declared before ${correctIncludeOrder[prevClassID]} includes")
+ } else {
+ prevClassID = curClassID
+ }
+ }
+ }
+ if (wrongOrder) {
+ messages.add("<li>${nodeInfo.path} contains a wrong class include order</li>")
+ }
+ })
+ }
+ parallel stepsForParallel
+ def includeOrder = '<b>No wrong include order</b>'
+ if (messages.size() != 1) {
+ includeOrder = messages.join('')
+ }
+ currentBuild.description = currentBuild.description ? currentBuild.description + includeOrder : includeOrder
+ }
+ }
+ }
sh(script: 'find . -mindepth 1 -delete > /dev/null || true')
} catch (Throwable e) {
+ if (alreadyMerged) {
+ currentBuild.result = 'ABORTED'
+ currentBuild.description = "Change ${GERRIT_CHANGE_NUMBER} is already merged, no need to gate it"
+ return
+ }
currentBuild.result = "FAILURE"
currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
throw e
} finally {
def dummy = "dummy"
- //FAILING common.sendNotification(currentBuild.result,"",["slack"])
}
}
}
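
The include-order stage above enforces service -> system -> cluster ordering of a node's classes; a simplified sketch of the same check against illustrative data:

def correctIncludeOrder = ['service', 'system', 'cluster']
def classesLists = [
    ['service.git.client', 'system.linux.system', 'cluster.ceph.infra'],  // correct order
    ['system.linux.system', 'service.git.client'],                        // service after system -> wrong
]
classesLists.each { classes ->
    def prevClassID = 0
    def wrongOrder = false
    classes.each { className ->
        def curClassID = correctIncludeOrder.indexOf(className.tokenize('.')[0])
        if (prevClassID > curClassID) {
            wrongOrder = true
        } else {
            prevClassID = curClassID
        }
    }
    println "classes=${classes} wrongOrder=${wrongOrder}"
}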
diff --git a/test-customers-salt-models.groovy b/test-customers-salt-models.groovy
index 3c0ccaf..4e84e22 100644
--- a/test-customers-salt-models.groovy
+++ b/test-customers-salt-models.groovy
@@ -22,7 +22,6 @@
// [$class: 'StringParameterValue', name: 'CLUSTER_NAME', value: modelName],
// [$class: 'StringParameterValue', name: 'NODE_TARGET', value: testTarget],
// [$class: 'StringParameterValue', name: 'FORMULAS_SOURCE', value: formulasSource]
- // [$class: 'StringParameterValue', name: 'EXTRA_FORMULAS', value: EXTRA_FORMULAS],
// [$class: 'StringParameterValue', name: 'FORMULAS_REVISION', value: FORMULAS_REVISION],
// [$class: 'StringParameterValue', name: 'CREDENTIALS_ID', value: CREDENTIALS_ID],
// [$class: 'StringParameterValue', name: 'SYSTEM_GIT_URL', value: SYSTEM_GIT_URL],
diff --git a/test-openscap-pipeline.groovy b/test-openscap-pipeline.groovy
new file mode 100644
index 0000000..244126b
--- /dev/null
+++ b/test-openscap-pipeline.groovy
@@ -0,0 +1,258 @@
+/**
+ *
+ * Run openscap xccdf evaluation on given nodes
+ *
+ * Expected parameters:
+ * SALT_MASTER_URL Full Salt API address.
+ * SALT_MASTER_CREDENTIALS Credentials to the Salt API.
+ *
+ * XCCDF_BENCHMARKS_DIR The XCCDF benchmarks base directory (default /usr/share/xccdf-benchmarks/mirantis/)
+ * XCCDF_BENCHMARKS List of comma-separated pairs of an XCCDF benchmark filename and its corresponding profile;
+ * the pairs themselves are separated with semicolons
+ * (e.g. manila/openstack_manila-xccdf.xml,profilename;horizon/openstack_horizon-xccdf.xml,profile)
+ * XCCDF_VERSION The XCCDF version (default 1.2)
+ * XCCDF_TAILORING_ID The tailoring id (default None)
+ *
+ * TARGET_SERVERS The target Salt nodes (default *)
+ *
+ * ARTIFACTORY_URL The artifactory URL
+ * ARTIFACTORY_NAMESPACE The artifactory namespace (default 'mirantis/openscap')
+ * ARTIFACTORY_REPO The artifactory repo (default 'binary-dev-local')
+ *
+ * UPLOAD_TO_DASHBOARD Boolean. Upload results to the WORP or not
+ * DASHBOARD_API_URL The WORP api base url. Mandatory if UPLOAD_TO_DASHBOARD is true
+ */
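
The XCCDF_BENCHMARKS format above is parsed later with two levels of tokenize; a minimal round-trip of the documented example value:

def XCCDF_BENCHMARKS = 'manila/openstack_manila-xccdf.xml,profilename;horizon/openstack_horizon-xccdf.xml,profile'
XCCDF_BENCHMARKS.tokenize(';').each { benchmark ->
    def (benchmarkFilePath, profile) = benchmark.tokenize(',').collect { it.trim() }
    println "benchmark=${benchmarkFilePath} profile=${profile}"
}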
+
+
+
+/**
+ * Upload results to the `WORP` dashboard
+ *
+ * @param apiUrl The base dashboard api url
+ * @param cloudName The cloud name (mostly, the given node's domain name)
+ * @param nodeName The node name
+ * @param results The scanning results
+ */
+def uploadResultToDashboard(apiUrl, cloudName, nodeName, results) {
+ def common = new com.mirantis.mk.Common()
+ def http = new com.mirantis.mk.Http()
+
+ // Yes, we do not care about performance and will create at least 4 requests per result
+ def requestData = [:]
+
+ def cloudId
+ def nodeId
+
+ // Let's take a look; maybe our minion is already present on the dashboard
+ // Get available environments
+ environments = common.parseJSON(http.sendHttpGetRequest("${apiUrl}/environment/"))
+ for (environment in environments) {
+ if (environment['name'] == cloudName) {
+ cloudId = environment['uuid']
+ break
+ }
+ }
+ // Cloud isn't present yet, let's create it
+ if (! cloudId ) {
+ // Create cloud
+ requestData['name'] = cloudName
+ cloudId = common.parseJSON(http.sendHttpPostRequest("${apiUrl}/environment/", requestData))['env']['uuid']
+
+ // And the node
+ // This is done here to reduce the number of requests to the API:
+ // if the cloud was not present on the dashboard, then the node was not present either.
+ requestData['nodes'] = [nodeName]
+ nodeId = common.parseJSON(http.sendHttpPutRequest("${apiUrl}/environment/${cloudId}/nodes/", requestData))['uuid']
+ }
+
+ if (! nodeId ) {
+ // Get available nodes in our environment
+ nodes = common.parseJSON(http.sendHttpGetRequest("${apiUrl}/environment/${cloudId}/nodes/"))
+ for (node in nodes) {
+ if (node['name'] == nodeName) {
+ nodeId = node['id']
+ break
+ }
+ }
+ }
+
+ // Node isn't present yet, let's create it
+ if (! nodeId ) {
+ // Create node
+ requestData['nodes'] = [nodeName]
+ nodeId = common.parseJSON(http.sendHttpPutRequest("${apiUrl}/environment/${cloudId}/nodes/", requestData))['uuid']
+ }
+
+ // Get report_id
+ requestData['env_uuid'] = cloudId
+ def reportId = common.parseJSON(http.sendHttpPostRequest("${apiUrl}/reports/openscap/", requestData))['report']['uuid']
+
+ // Upload results
+ requestData['results'] = results
+ requestData['node_name'] = nodeName
+ http.sendHttpPutRequest("${apiUrl}/reports/openscap/${reportId}/", requestData)
+}
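
A hedged usage example of the helper above, with placeholder values (the real call site below derives cloudName from the minion's domain grain and reads results from results.json):

def results = '{"passed": 10, "failed": 2}'  // illustrative payload
uploadResultToDashboard('https://worp.example.com/api', 'cloud.example.local', 'cmp001.cloud.example.local', results)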
+
+
+node('python') {
+ def pepperEnv = 'pepperEnv'
+
+ // XCCDF related variables
+ def benchmarksAndProfilesArray = XCCDF_BENCHMARKS.tokenize(';')
+ def benchmarksDir = XCCDF_BENCHMARKS_DIR ?: '/usr/share/xccdf-benchmarks/mirantis/'
+ def xccdfVersion = XCCDF_VERSION ?: '1.2'
+ def xccdfTailoringId = XCCDF_TAILORING_ID ?: 'None'
+ def targetServers = TARGET_SERVERS ?: '*'
+
+ def salt = new com.mirantis.mk.Salt()
+ def python = new com.mirantis.mk.Python()
+ def common = new com.mirantis.mk.Common()
+ def http = new com.mirantis.mk.Http()
+
+ // To be able to work under heavy concurrency
+ def scanUUID = UUID.randomUUID().toString()
+
+ def artifactsArchiveName = "openscap-${scanUUID}.zip"
+ def resultsBaseDir = "/var/log/openscap/${scanUUID}"
+ def artifactsDir = "openscap"
+
+ def liveMinions
+
+
+ stage ('Setup virtualenv for Pepper') {
+ python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+ }
+
+ stage ('Run openscap xccdf evaluation and attempt to upload the results to a dashboard') {
+ liveMinions = salt.getMinions(pepperEnv, targetServers)
+
+ if (liveMinions.isEmpty()) {
+ throw new Exception('There are no alive minions')
+ }
+
+ common.infoMsg("Scan UUID: ${scanUUID}")
+
+ // Clean all results before proceeding with results from every minion
+ dir(artifactsDir) {
+ deleteDir()
+ }
+
+ for (minion in liveMinions) {
+
+ // Iterate oscap evaluation over the benchmarks
+ for (benchmark in benchmarksAndProfilesArray) {
+ def (benchmarkFilePath, profile) = benchmark.tokenize(',').collect({it.trim()})
+
+ // Remove extension from the benchmark name
+ def benchmarkPathWithoutExtension = benchmarkFilePath.replaceFirst('[.][^.]+$', '')
+
+ // Get benchmark name
+ def benchmarkName = benchmarkPathWithoutExtension.tokenize('/')[-1]
+
+ // And build resultsDir based on this path
+ def resultsDir = "${resultsBaseDir}/${benchmarkPathWithoutExtension}"
+
+ def benchmarkFile = "${benchmarksDir}${benchmarkFilePath}"
+
+ def nodeShortName = minion.tokenize('.')[0]
+
+ def archiveName = "${scanUUID}_${nodeShortName}_${benchmarkName}.tar"
+
+ // Evaluate the benchmark
+ salt.runSaltProcessStep(pepperEnv, minion, 'oscap.eval', [
+ 'xccdf', benchmarkFile, "results_dir=${resultsDir}",
+ "profile=${profile}", "xccdf_version=${xccdfVersion}",
+ "tailoring_id=${xccdfTailoringId}"
+ ])
+
+ salt.cmdRun(pepperEnv, minion, "tar -cf /tmp/${archiveName} -C ${resultsBaseDir} .")
+ fileContents = salt.cmdRun(pepperEnv, minion, "cat /tmp/${archiveName}", true, null, false)['return'][0].values()[0].replaceAll('Salt command execution success', '')
+
+ sh "mkdir -p ${artifactsDir}/${scanUUID}/${nodeShortName}"
+ writeFile file: "${archiveName}", text: fileContents
+ sh "tar --strip-components 1 -xf ${archiveName} --directory ${artifactsDir}/${scanUUID}/${nodeShortName}; rm -f ${archiveName}"
+
+ // Remove archive which is not needed anymore
+ salt.runSaltProcessStep(pepperEnv, minion, 'file.remove', "/tmp/${archiveName}")
+
+ // Attempt to upload the scanning results to the dashboard
+ if (UPLOAD_TO_DASHBOARD.toBoolean()) {
+ if (common.validInputParam('DASHBOARD_API_URL')) {
+ def cloudName = salt.getGrain(pepperEnv, minion, 'domain')['return'][0].values()[0].values()[0]
+ uploadResultToDashboard(DASHBOARD_API_URL, cloudName, minion, salt.getFileContent(pepperEnv, minion, "${resultsDir}/results.json"))
+ } else {
+ throw new Exception('Uploading to the dashboard is enabled but the DASHBOARD_API_URL was not set')
+ }
+ }
+ }
+ }
+
+ // Prepare archive
+ sh "tar -cJf ${artifactsDir}.tar.xz ${artifactsDir}"
+
+ // Archive the build output artifacts
+ archiveArtifacts artifacts: "*.xz"
+ }
+
+/* // Will be implemented later
+ stage ('Attempt to upload results to an artifactory') {
+ if (common.validInputParam('ARTIFACTORY_URL')) {
+ for (minion in liveMinions) {
+ def destDir = "${artifactsDir}/${minion}"
+ def archiveName = "openscap-${scanUUID}.tar.gz"
+ def tempArchive = "/tmp/${archiveName}"
+ def destination = "${destDir}/${archiveName}"
+
+ dir(destDir) {
+ // Archive scanning results on the remote target
+ salt.runSaltProcessStep(pepperEnv, minion, 'archive.tar', ['czf', tempArchive, resultsBaseDir])
+
+ // Get it content and save it
+ writeFile file: destination, text: salt.getFileContent(pepperEnv, minion, tempArchive)
+
+ // Remove scanning results and the temp archive on the remote target
+ salt.runSaltProcessStep(pepperEnv, minion, 'file.remove', resultsBaseDir)
+ salt.runSaltProcessStep(pepperEnv, minion, 'file.remove', tempArchive)
+ }
+ }
+
+ def artifactory = new com.mirantis.mcp.MCPArtifactory()
+ def artifactoryName = 'mcp-ci'
+ def artifactoryRepo = ARTIFACTORY_REPO ?: 'binary-dev-local'
+ def artifactoryNamespace = ARTIFACTORY_NAMESPACE ?: 'mirantis/openscap'
+ def artifactoryServer = Artifactory.server(artifactoryName)
+ def publishInfo = true
+ def buildInfo = Artifactory.newBuildInfo()
+ def zipName = "${env.WORKSPACE}/openscap/${scanUUID}/results.zip"
+
+ // Zip scan results
+ zip zipFile: zipName, archive: false, dir: artifactsDir
+
+ // Mandatory and additional properties
+ def properties = artifactory.getBinaryBuildProperties([
+ "scanUuid=${scanUUID}",
+ "project=openscap"
+ ])
+
+ // Build Artifactory spec object
+ def uploadSpec = """{
+ "files":
+ [
+ {
+ "pattern": "${zipName}",
+ "target": "${artifactoryRepo}/${artifactoryNamespace}/openscap",
+ "props": "${properties}"
+ }
+ ]
+ }"""
+
+ // Upload artifacts to the given Artifactory
+ artifactory.uploadBinariesToArtifactory(artifactoryServer, buildInfo, uploadSpec, publishInfo)
+
+ } else {
+ common.warningMsg('ARTIFACTORY_URL was not given, skip uploading to artifactory')
+ }
+ }
+*/
+
+}
diff --git a/test-openstack-component-pipeline.groovy b/test-openstack-component-pipeline.groovy
index c660c28..010dbc0 100644
--- a/test-openstack-component-pipeline.groovy
+++ b/test-openstack-component-pipeline.groovy
@@ -4,7 +4,6 @@
* Flow parameters:
* CREDENTIALS_ID
- * EXTRA_FORMULAS
* FORMULAS_REVISION
* FORMULAS_SOURCE
* SALT_OPTS
diff --git a/test-salt-model-node.groovy b/test-salt-model-node.groovy
index ed525bd..9bbd782 100644
--- a/test-salt-model-node.groovy
+++ b/test-salt-model-node.groovy
@@ -4,12 +4,10 @@
* DEFAULT_GIT_REF
* DEFAULT_GIT_URL
* CREDENTIALS_ID
- * EXTRA_FORMULAS
* CLUSTER_NAME
* NODE_TARGET
* SYSTEM_GIT_URL
* SYSTEM_GIT_REF
- * FORMULAS_SOURCE
* RECLASS_VERSION
* MAX_CPU_PER_JOB
* LEGACY_TEST_MODE
@@ -67,38 +65,23 @@
stage("test node") {
if (checkouted) {
def workspace = common.getWorkspace()
- def testResult = false
common.infoMsg("Running salt model test for node ${NODE_TARGET} in cluster ${CLUSTER_NAME}")
- try {
- def DockerCName = "${env.JOB_NAME.toLowerCase()}_${env.BUILD_TAG.toLowerCase()}"
- testResult = saltModelTesting.setupAndTestNode(
- NODE_TARGET,
- CLUSTER_NAME,
- EXTRA_FORMULAS,
- workspace,
- FORMULAS_SOURCE,
- FORMULAS_REVISION,
- reclassVersion,
- MAX_CPU_PER_JOB.toInteger(),
- RECLASS_IGNORE_CLASS_NOTFOUND,
- LEGACY_TEST_MODE,
- APT_REPOSITORY,
- APT_REPOSITORY_GPG,
- DockerCName)
- } catch (Exception e) {
- if (e.getMessage() == "script returned exit code 124") {
- common.errorMsg("Impossible to test node due to timeout of salt-master, ABORTING BUILD")
- currentBuild.result = "ABORTED"
- } else {
- throw e
- }
- }
- if (testResult) {
- common.infoMsg("Test finished: SUCCESS")
- } else {
- error('Test node finished: FAILURE')
- throw new RuntimeException('Test node stage finished: FAILURE')
- }
+
+ def DockerCName = "${env.JOB_NAME.toLowerCase()}_${env.BUILD_TAG.toLowerCase()}"
+ def config = [
+ 'dockerHostname': NODE_TARGET,
+ 'clusterName': CLUSTER_NAME,
+ 'reclassEnv': workspace,
+ 'formulasRevision': FORMULAS_REVISION,
+ 'reclassVersion': reclassVersion,
+ 'dockerMaxCpus': MAX_CPU_PER_JOB.toInteger(),
+ 'ignoreClassNotfound': RECLASS_IGNORE_CLASS_NOTFOUND,
+ 'aptRepoUrl': APT_REPOSITORY,
+ 'aptRepoGPG': APT_REPOSITORY_GPG,
+ 'dockerContainerName': DockerCName,
+ 'testContext': 'salt-model-node'
+ ]
+ saltModelTesting.testNode(config)
}
}
} catch (Throwable e) {
diff --git a/test-salt-models-pipeline.groovy b/test-salt-models-pipeline.groovy
index 6a37ac7..729fdb4 100644
--- a/test-salt-models-pipeline.groovy
+++ b/test-salt-models-pipeline.groovy
@@ -3,7 +3,6 @@
* DEFAULT_GIT_URL default git url (will be used if pipeline run is not triggered by gerrit)
* DEFAULT_GIT_RED default git ref (branch,tag,...) (will be used if pipeline run is not triggered by gerrit)
* CREDENTIALS_ID Jenkins credetials id for git checkout
- * EXTRA_FORMULAS extra formulas list for passing to salt bootstrap script
* MAX_CPU_PER_JOB max cpu count for one docket test instance
* SYSTEM_GIT_URL reclass system git URL (optional)
* SYSTEM_GIT_REF reclass system git URL (optional)
@@ -62,36 +61,26 @@
common = new com.mirantis.mk.Common()
def setupRunner() {
-
- def branches = [:]
- for (int i = 0; i < PARALLEL_NODE_GROUP_SIZE.toInteger() && i < futureNodes.size(); i++) {
- branches["Runner ${i}"] = {
- while (futureNodes && !failedNodes) {
- def currentNode = futureNodes[0] ? futureNodes[0] : null
+ def branches = [:]
+ branches.failFast = true
+ for(int i = 0; i < futureNodes.size(); i++) {
+ def currentNode = futureNodes[i] ? futureNodes[i] : null
if (!currentNode) {
- continue
+ continue
}
-
- def clusterName = currentNode[2]
- futureNodes.remove(currentNode)
- try {
- triggerTestNodeJob(currentNode[0], currentNode[1], currentNode[2], currentNode[3], currentNode[4])
- } catch (Exception e) {
- if (e.getMessage().contains("completed with status ABORTED")) {
- common.warningMsg("Test of ${clusterName} failed because the test was aborted : ${e}")
- futureNodes << currentNode
- } else {
- common.warningMsg("Test of ${clusterName} failed : ${e}")
- failedNodes = true
- }
+ branches["Runner ${i}"] = {
+ try {
+ triggerTestNodeJob(currentNode[0], currentNode[1], currentNode[2], currentNode[3], currentNode[4])
+ } catch (Exception e) {
+ common.warningMsg("Test of ${currentNode[2]} failed : ${e}")
+ throw e
+ }
}
- }
}
- }
- if (branches) {
- parallel branches
- }
+ if (branches) {
+ common.runParallel(branches, PARALLEL_NODE_GROUP_SIZE.toInteger())
+ }
}
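
common.runParallel replaces the hand-rolled runner pool above; its exact semantics live in the shared library, but the assumed behavior - run the branches map with at most groupSize concurrent entries - could be sketched as:

// Hypothetical throttled runner, for illustration only; not the library's actual code.
def runParallelSketch(Map branches, int groupSize) {
    def names = branches.keySet().findAll { it != 'failFast' }.toList()
    for (int i = 0; i < names.size(); i += groupSize) {
        def chunk = ['failFast': branches['failFast'] ?: false]
        names.subList(i, Math.min(i + groupSize, names.size())).each { chunk[it] = branches[it] }
        parallel chunk
    }
}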
def triggerTestNodeJob(defaultGitUrl, defaultGitRef, clusterName, testTarget, formulasSource) {
@@ -102,7 +91,6 @@
[$class: 'StringParameterValue', name: 'CLUSTER_NAME', value: clusterName],
[$class: 'StringParameterValue', name: 'NODE_TARGET', value: testTarget],
[$class: 'StringParameterValue', name: 'FORMULAS_SOURCE', value: formulasSource],
- [$class: 'StringParameterValue', name: 'EXTRA_FORMULAS', value: EXTRA_FORMULAS],
[$class: 'StringParameterValue', name: 'FORMULAS_REVISION', value: FORMULAS_REVISION],
[$class: 'StringParameterValue', name: 'CREDENTIALS_ID', value: CREDENTIALS_ID],
[$class: 'StringParameterValue', name: 'SYSTEM_GIT_URL', value: SYSTEM_GIT_URL],
diff --git a/test-system-reclass-pipeline.groovy b/test-system-reclass-pipeline.groovy
index fa16739..04eafeb 100644
--- a/test-system-reclass-pipeline.groovy
+++ b/test-system-reclass-pipeline.groovy
@@ -1,103 +1,103 @@
def gerrit = new com.mirantis.mk.Gerrit()
def common = new com.mirantis.mk.Common()
-def gerritCredentials
-try {
- gerritCredentials = CREDENTIALS_ID
-} catch (MissingPropertyException e) {
- gerritCredentials = "gerrit"
+// extraVarsYaml contains GERRIT_ vars from gate job
+// or will contain GERRIT_ vars from reclass-system patch
+def extraVarsYaml = env.EXTRA_VARIABLES_YAML ?: ''
+if (extraVarsYaml != '') {
+ common.mergeEnv(env, extraVarsYaml)
+} else {
+ extraVarsYaml = '\n---'
+ for (envVar in env.getEnvironment()) {
+ if (envVar.key.startsWith("GERRIT_")) {
+ extraVarsYaml += "\n${envVar.key}: '${envVar.value}'"
+ }
+ }
}
-def gerritRef
-try {
- gerritRef = GERRIT_REFSPEC
-} catch (MissingPropertyException e) {
- gerritRef = null
-}
+def slaveNode = env.SLAVE_NODE ?: 'python&&docker'
+def gerritCredentials = env.CREDENTIALS_ID ?: 'gerrit'
-def defaultGitRef, defaultGitUrl
-try {
- defaultGitRef = DEFAULT_GIT_REF
- defaultGitUrl = DEFAULT_GIT_URL
-} catch (MissingPropertyException e) {
- defaultGitRef = null
- defaultGitUrl = null
-}
+def gerritRef = env.GERRIT_REFSPEC ?: null
+def defaultGitRef = env.DEFAULT_GIT_REF ?: null
+def defaultGitUrl = env.DEFAULT_GIT_URL ?: null
+
def checkouted = false
def merged = false
def systemRefspec = "HEAD"
-def formulasRevision = 'testing'
+
timeout(time: 12, unit: 'HOURS') {
- node() {
- try {
- stage("Checkout") {
- if (gerritRef) {
- // job is triggered by Gerrit
- // test if change aren't already merged
- def gerritChange = gerrit.getGerritChange(GERRIT_NAME, GERRIT_HOST, GERRIT_CHANGE_NUMBER, gerritCredentials)
- merged = gerritChange.status == "MERGED"
- if(!merged){
- checkouted = gerrit.gerritPatchsetCheckout ([
- credentialsId : gerritCredentials
- ])
- systemRefspec = GERRIT_REFSPEC
- }
- // change defaultGit variables if job triggered from Gerrit
- defaultGitUrl = "${GERRIT_SCHEME}://${GERRIT_NAME}@${GERRIT_HOST}:${GERRIT_PORT}/${GERRIT_PROJECT}"
- } else if(defaultGitRef && defaultGitUrl) {
- checkouted = gerrit.gerritPatchsetCheckout(defaultGitUrl, defaultGitRef, "HEAD", gerritCredentials)
+ node(slaveNode) {
+ try {
+ stage("Checkout") {
+ if (gerritRef) {
+ // job is triggered by Gerrit
+ // test if change aren't already merged
+ def gerritChange = gerrit.getGerritChange(GERRIT_NAME, GERRIT_HOST, GERRIT_CHANGE_NUMBER, gerritCredentials)
+ merged = gerritChange.status == "MERGED"
+ if (!merged) {
+ checkouted = gerrit.gerritPatchsetCheckout([
+ credentialsId: gerritCredentials
+ ])
+ systemRefspec = GERRIT_REFSPEC
+ }
+ // change defaultGit variables if job triggered from Gerrit
+ defaultGitUrl = "${GERRIT_SCHEME}://${GERRIT_NAME}@${GERRIT_HOST}:${GERRIT_PORT}/${GERRIT_PROJECT}"
+ } else if (defaultGitRef && defaultGitUrl) {
+ checkouted = gerrit.gerritPatchsetCheckout(defaultGitUrl, defaultGitRef, "HEAD", gerritCredentials)
+ }
+ }
+
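The Checkout stage asks Gerrit for the change status first, so already-merged changes are reported and skipped rather than re-tested. A hypothetical equivalent of that status check against the Gerrit REST API (the pipeline itself goes through com.mirantis.mk.Gerrit.getGerritChange; the httpRequest and readJSON plugin steps and anonymous read access are assumptions here):

    // GET /changes/<number> returns JSON with a status field (NEW, MERGED, ABANDONED).
    // Gerrit prefixes JSON responses with the )]}' anti-XSSI marker; strip it first.
    def resp = httpRequest(url: "https://${GERRIT_HOST}/changes/${GERRIT_CHANGE_NUMBER}")
    def change = readJSON(text: resp.content.replaceFirst(/^\)\]\}'/, ''))
    def merged = (change.status == 'MERGED')
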
+ stage("Test") {
+ if (merged) {
+ common.successMsg("Gerrit change is already merged, no need to test them")
+ } else {
+ if (checkouted) {
+
+ def documentationOnly = false
+ if (gerritRef) {
+ documentationOnly = sh(script: "git diff-tree --no-commit-id --name-only -r HEAD | grep -v .releasenotes", returnStatus: true) == 1
+ }
+
+ sh("git diff-tree --no-commit-id --diff-filter=d --name-only -r HEAD | grep .yml | xargs -I {} python -c \"import yaml; yaml.load(open('{}', 'r'))\" \\;")
+
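Two checks happen before fanning out builds: the diff-tree | grep -v .releasenotes pipeline exits 1 when every changed file lives under .releasenotes, so returnStatus == 1 flags a documentation-only change; the second sh call then feeds every changed .yml file through PyYAML, failing the build early on malformed YAML. The same logic in plain pipeline Groovy, as a sketch that lists the changed files once and reuses them:

    // Sketch only: equivalent of the two shell checks above.
    def changed = sh(script: 'git diff-tree --no-commit-id --name-only -r HEAD',
                     returnStdout: true).trim().split('\n')
    def docsOnly = changed.every { it.contains('.releasenotes') }
    changed.findAll { it.endsWith('.yml') }.each { f ->
        // yaml.load raises on malformed YAML, which fails the sh step.
        sh("python -c \"import yaml; yaml.load(open('${f}', 'r'))\"")
    }
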
+ def branches = [:]
+ def testModels = documentationOnly ? [] : TEST_MODELS.split(',')
+ if (['master'].contains(env.GERRIT_BRANCH)) {
+ for (int i = 0; i < testModels.size(); i++) {
+ def cluster = testModels[i]
+ def clusterGitUrl = defaultGitUrl.substring(0, defaultGitUrl.lastIndexOf("/") + 1) + cluster
+ branches["${cluster}"] = {
+ build job: "test-salt-model-${cluster}", parameters: [
+ [$class: 'StringParameterValue', name: 'DEFAULT_GIT_URL', value: clusterGitUrl],
+ [$class: 'StringParameterValue', name: 'DEFAULT_GIT_REF', value: "HEAD"],
+ [$class: 'StringParameterValue', name: 'SYSTEM_GIT_URL', value: defaultGitUrl],
+ [$class: 'StringParameterValue', name: 'SYSTEM_GIT_REF', value: systemRefspec]
+ ]
+ }
+ }
+ } else {
+ common.warningMsg("Tests for ${testModels} skipped!")
+ }
+ branches["cookiecutter"] = {
+ build job: "test-mk-cookiecutter-templates", parameters: [
+ [$class: 'StringParameterValue', name: 'RECLASS_SYSTEM_URL', value: defaultGitUrl],
+ [$class: 'StringParameterValue', name: 'RECLASS_SYSTEM_GIT_REF', value: systemRefspec],
+ [$class: 'TextParameterValue', name: 'EXTRA_VARIABLES_YAML', value: extraVarsYaml ]
+ ]
+ }
+ parallel branches
+ } else {
+ error("Cannot checkout gerrit patchset, GERRIT_REFSPEC and DEFAULT_GIT_REF is null")
+ }
+ }
+ }
+ } catch (Throwable e) {
+ currentBuild.result = "FAILURE"
+ currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+ throw e
+ } finally {
+ common.sendNotification(currentBuild.result, "", ["slack"])
}
- }
-
- stage("Test") {
- if(merged){
- common.successMsg("Gerrit change is already merged, no need to test them")
- }else{
- if(checkouted){
-
- def documentationOnly = false
- if (gerritRef) {
- documentationOnly = sh(script: "git diff-tree --no-commit-id --name-only -r HEAD | grep -v .releasenotes", returnStatus: true) == 1
- }
-
- sh("git diff-tree --no-commit-id --diff-filter=d --name-only -r HEAD | grep .yml | xargs -I {} python -c \"import yaml; yaml.load(open('{}', 'r'))\" \\;")
-
- def branches = [:]
- def testModels = documentationOnly ? [] : TEST_MODELS.split(',')
- for (int i = 0; i < testModels.size(); i++) {
- def cluster = testModels[i]
- def clusterGitUrl = defaultGitUrl.substring(0, defaultGitUrl.lastIndexOf("/") + 1) + cluster
- branches["${cluster}"] = {
- build job: "test-salt-model-${cluster}", parameters: [
- [$class: 'StringParameterValue', name: 'DEFAULT_GIT_URL', value: clusterGitUrl],
- [$class: 'StringParameterValue', name: 'DEFAULT_GIT_REF', value: "HEAD"],
- [$class: 'StringParameterValue', name: 'SYSTEM_GIT_URL', value: defaultGitUrl],
- [$class: 'StringParameterValue', name: 'SYSTEM_GIT_REF', value: systemRefspec],
- [$class: 'StringParameterValue', name: 'FORMULAS_REVISION', value: formulasRevision],
- ]
- }
- }
- branches["cookiecutter"] = {
- build job: "test-mk-cookiecutter-templates", parameters: [
- [$class: 'StringParameterValue', name: 'SYSTEM_GIT_URL', value: defaultGitUrl],
- [$class: 'StringParameterValue', name: 'SYSTEM_GIT_REF', value: systemRefspec],
- [$class: 'StringParameterValue', name: 'DISTRIB_REVISION', value: formulasRevision]
-
- ]
- }
- parallel branches
- }else{
- throw new Exception("Cannot checkout gerrit patchset, GERRIT_REFSPEC and DEFAULT_GIT_REF is null")
- }
- }
- }
- } catch (Throwable e) {
- // If there was an error or exception thrown, the build failed
- currentBuild.result = "FAILURE"
- currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
- throw e
- } finally {
- common.sendNotification(currentBuild.result,"",["slack"])
}
- }
}
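
The Test stage fans work out by collecting one closure per cluster model, plus the cookiecutter templates job, into a map and handing it to the parallel step. Note the "def cluster = testModels[i]" line inside the loop: each iteration gets a fresh local variable, so every closure captures its own cluster name instead of the loop's final value. A minimal fan-out sketch of that pattern (cluster and job names hypothetical):

    def clusters = ['aaa-ha', 'bbb-ha']
    def branches = [:]
    for (int i = 0; i < clusters.size(); i++) {
        def cluster = clusters[i] // fresh per-iteration binding, safe to capture
        branches[cluster] = {
            build job: "test-salt-model-${cluster}", parameters: [
                [$class: 'StringParameterValue', name: 'DEFAULT_GIT_REF', value: 'HEAD']
            ]
        }
    }
    parallel branches
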
diff --git a/validate-cloud.groovy b/validate-cloud.groovy
index 3c27dce..fa9a7a6 100644
--- a/validate-cloud.groovy
+++ b/validate-cloud.groovy
@@ -16,6 +16,7 @@
* RUN_TEMPEST_TESTS If not false, run Tempest tests
* RUN_RALLY_TESTS If not false, run Rally tests
* K8S_RALLY If not false, run Kubernetes Rally tests
+ * STACKLIGHT_RALLY If not false, run additional StackLight tests
* RUN_K8S_TESTS If not false, run Kubernetes e2e/conformance tests
* RUN_SPT_TESTS If not false, run SPT tests
* SPT_SSH_USER The name of the user which should be used for ssh to nodes
@@ -31,6 +32,7 @@
* RALLY_CONFIG_REPO Git repository with files for Rally
* RALLY_CONFIG_BRANCH Git branch which will be used during the checkout
* RALLY_SCENARIOS Path to file or directory with rally scenarios
+ * RALLY_SL_SCENARIOS Path to file or directory with StackLight rally scenarios
* RALLY_TASK_ARGS_FILE Path to file with rally tests arguments
* REPORT_DIR Path for reports outside docker image
* TEST_K8S_API_SERVER Kubernetes API address
@@ -81,20 +83,21 @@
stage('Run Rally tests') {
if (RUN_RALLY_TESTS.toBoolean() == true) {
def report_dir = env.REPORT_DIR ?: '/root/qa_results'
- def platform
- def rally_variables
+ def platform = ["type":"unknown", "stacklight_enabled":false]
+ def rally_variables = []
if (K8S_RALLY.toBoolean() == false) {
- platform = 'openstack'
+ platform['type'] = 'openstack'
rally_variables = ["floating_network=${FLOATING_NETWORK}",
"rally_image=${RALLY_IMAGE}",
"rally_flavor=${RALLY_FLAVOR}",
"availability_zone=${AVAILABILITY_ZONE}"]
} else {
- platform = 'k8s'
- rally_variables = ["plugins_repo":"${RALLY_PLUGINS_REPO}",
- "plugins_branch":"${RALLY_PLUGINS_BRANCH}"]
+ platform['type'] = 'k8s'
}
- validate.runRallyTests(pepperEnv, TARGET_NODE, TEST_IMAGE, platform, artifacts_dir, RALLY_CONFIG_REPO, RALLY_CONFIG_BRANCH, RALLY_SCENARIOS, RALLY_TASK_ARGS_FILE, rally_variables, report_dir, SKIP_LIST)
+ if (STACKLIGHT_RALLY.toBoolean() == true) {
+ platform['stacklight_enabled'] = true
+ }
+ validate.runRallyTests(pepperEnv, TARGET_NODE, TEST_IMAGE, platform, artifacts_dir, RALLY_CONFIG_REPO, RALLY_CONFIG_BRANCH, RALLY_PLUGINS_REPO, RALLY_PLUGINS_BRANCH, RALLY_SCENARIOS, RALLY_SL_SCENARIOS, RALLY_TASK_ARGS_FILE, rally_variables, report_dir, SKIP_LIST)
} else {
common.infoMsg("Skipping Rally tests")
}
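
This hunk turns the platform argument from a bare string into a map carrying both the platform type and a StackLight flag, so runRallyTests receives one structured value instead of a growing list of positional booleans. A sketch of how a consumer might branch on it (com.mirantis.mcp.Validate internals are not shown in this patch and are assumed here):

    def platform = [type: 'k8s', stacklight_enabled: true]
    switch (platform.type) {
        case 'openstack': echo 'prepare OpenStack Rally config'; break
        case 'k8s':       echo 'prepare Kubernetes Rally config'; break
        default:          error "Unknown platform type: ${platform.type}"
    }
    if (platform.stacklight_enabled) {
        echo 'append scenarios from RALLY_SL_SCENARIOS'
    }
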