Merge "Remove contrail third-party services after upgrade"
diff --git a/cloud-update.groovy b/cloud-update.groovy
index d58d1e0..56f9351 100644
--- a/cloud-update.groovy
+++ b/cloud-update.groovy
@@ -573,14 +573,12 @@
}
try {
salt.minionsReachable(pepperEnv, 'I@salt:master', tgt)
- // purge and setup previous repos
- salt.enforceState(pepperEnv, tgt, 'linux.system.repo')
} catch (Exception e) {
common.errorMsg(e)
if (INTERACTIVE.toBoolean()) {
- input message: "Salt state linux.system.repo on ${tgt} failed. Do you want to PROCEED?."
+ input message: "Not all minions ${tgt} returned after snapshot revert. Do you want to PROCEED?"
} else {
- throw new Exception("Salt state linux.system.repo on ${tgt} failed")
+ throw new Exception("Not all minions ${tgt} returned after snapshot revert")
}
}
}
@@ -848,7 +846,7 @@
timeout(time: 12, unit: 'HOURS') {
node() {
try {
- if(RUN_CVP_TESTS.toBoolean() == True){
+ if(RUN_CVP_TESTS.toBoolean() == true){
stage('Run CVP tests before upgrade.') {
build job: "cvp-sanity"
build job: "cvp-func"
@@ -1583,7 +1581,7 @@
// verification is already present in restore pipelines
}
- if(RUN_CVP_TESTS.toBoolean() == True){
+ if(RUN_CVP_TESTS.toBoolean() == true){
stage('Run CVP tests after upgrade.') {
build job: "cvp-sanity"
build job: "cvp-func"
diff --git a/cvp-ha.groovy b/cvp-ha.groovy
index 414ab46..b33cda6 100644
--- a/cvp-ha.groovy
+++ b/cvp-ha.groovy
@@ -44,7 +44,7 @@
if (!keystone_creds) {
keystone_creds = validate._get_keystone_creds_v2(saltMaster)
}
- validate.runContainer(saltMaster, TARGET_NODE, TEST_IMAGE, 'cvp', keystone_creds)
+ validate.runContainer(saltMaster, TEMPEST_TARGET_NODE, TEST_IMAGE, 'cvp', keystone_creds)
validate.configureContainer(saltMaster, TEMPEST_TARGET_NODE, PROXY, TOOLS_REPO, TEMPEST_REPO)
}
diff --git a/generate-cookiecutter-products.groovy b/generate-cookiecutter-products.groovy
index 7609103..25473fb 100644
--- a/generate-cookiecutter-products.groovy
+++ b/generate-cookiecutter-products.groovy
@@ -29,8 +29,6 @@
def clusterDomain = templateContext.default_context.cluster_domain
def clusterName = templateContext.default_context.cluster_name
def saltMaster = templateContext.default_context.salt_master_hostname
- def localRepositories = templateContext.default_context.local_repositories.toBoolean()
- def offlineDeployment = templateContext.default_context.offline_deployment.toBoolean()
def cutterEnv = "${env.WORKSPACE}/cutter"
def jinjaEnv = "${env.WORKSPACE}/jinja"
def outputDestination = "${modelEnv}/classes/cluster/${clusterName}"
@@ -99,63 +97,9 @@
git.commitGitChanges(modelEnv, "Added new shared reclass submodule", "${user}@localhost", "${user}")
}
- def productList = ["infra", "cicd", "opencontrail", "kubernetes", "openstack", "oss", "stacklight", "ceph"]
- for (product in productList) {
-
- // get templateOutputDir and productDir
- templateOutputDir = "${env.WORKSPACE}/output/${product}"
- productDir = product
- templateDir = "${templateEnv}/cluster_product/${productDir}"
- // Bw for 2018.8.1 and older releases
- if (product.startsWith("stacklight") && (!fileExists(templateDir))) {
- common.warningMsg("Old release detected! productDir => 'stacklight2' ")
- productDir = "stacklight2"
- templateDir = "${templateEnv}/cluster_product/${productDir}"
- }
-
- if (product == "infra" || (templateContext.default_context["${product}_enabled"]
- && templateContext.default_context["${product}_enabled"].toBoolean())) {
-
- common.infoMsg("Generating product " + product + " from " + templateDir + " to " + templateOutputDir)
-
- sh "rm -rf ${templateOutputDir} || true"
- sh "mkdir -p ${templateOutputDir}"
- sh "mkdir -p ${outputDestination}"
-
- python.setupCookiecutterVirtualenv(cutterEnv)
- python.buildCookiecutterTemplate(templateDir, COOKIECUTTER_TEMPLATE_CONTEXT, templateOutputDir, cutterEnv, templateBaseDir)
- sh "mv -v ${templateOutputDir}/${clusterName}/* ${outputDestination}"
- } else {
- common.warningMsg("Product " + product + " is disabled")
- }
- }
-
- if (localRepositories && !offlineDeployment) {
- def aptlyModelUrl = templateContext.default_context.local_model_url
- dir(path: modelEnv) {
- ssh.agentSh "git submodule add \"${aptlyModelUrl}\" \"classes/cluster/${clusterName}/cicd/aptly\""
- if (!(mcpVersion in ["nightly", "testing", "stable"])) {
- ssh.agentSh "cd \"classes/cluster/${clusterName}/cicd/aptly\";git fetch --tags;git checkout ${mcpVersion}"
- }
- }
- }
-
- stage('Generate new SaltMaster node') {
- def nodeFile = "${modelEnv}/nodes/${saltMaster}.${clusterDomain}.yml"
- def nodeString = """classes:
-- cluster.${clusterName}.infra.config
-parameters:
- _param:
- linux_system_codename: xenial
- reclass_data_revision: master
- linux:
- system:
- name: ${saltMaster}
- domain: ${clusterDomain}
- """
- sh "mkdir -p ${modelEnv}/nodes/"
- writeFile(file: nodeFile, text: nodeString)
-
+ stage('Generate model') {
+ python.setupCookiecutterVirtualenv(cutterEnv)
+ python.generateModel(COOKIECUTTER_TEMPLATE_CONTEXT, 'default_context', saltMaster, cutterEnv, modelEnv, templateEnv, false)
git.commitGitChanges(modelEnv, "Create model ${clusterName}", "${user}@localhost", "${user}")
}
diff --git a/k8s-upgrade-pipeline.groovy b/k8s-upgrade-pipeline.groovy
index 530a256..98a4338 100644
--- a/k8s-upgrade-pipeline.groovy
+++ b/k8s-upgrade-pipeline.groovy
@@ -95,6 +95,13 @@
stage("Upgrading Addons at ${target}") {
salt.enforceState(pepperEnv, target, "kubernetes.master.kube-addons")
+ }
+}
+
+def updateAddonManager(pepperEnv, target) {
+ def salt = new com.mirantis.mk.Salt()
+
+ stage("Upgrading AddonManager at ${target}") {
salt.enforceState(pepperEnv, target, "kubernetes.master.setup")
}
}
@@ -139,13 +146,19 @@
upgradeDocker(pepperEnv, t)
}
performKubernetesControlUpdate(pepperEnv, t)
- updateAddons(pepperEnv, t)
+ updateAddonManager(pepperEnv, t)
uncordonNode(pepperEnv, t)
}
}
} else {
performKubernetesControlUpdate(pepperEnv, target)
}
+ if (!SIMPLE_UPGRADE.toBoolean()) {
+ // Addons upgrade should be performed after all nodes have been upgraded
+ updateAddons(pepperEnv, target)
+ // Wait 90 sec for addons to reconcile
+ sleep(90)
+ }
}
if (updates.contains("cmp")) {
diff --git a/stacklight-upgrade.groovy b/stacklight-upgrade.groovy
index 8c445ac..7b79f4c 100644
--- a/stacklight-upgrade.groovy
+++ b/stacklight-upgrade.groovy
@@ -25,9 +25,11 @@
def commandKwargs
def probe = 1
def errorOccured = false
-def command = 'cmd.run'
def upgrade(master, target, service, pckg, state) {
+ def common = new com.mirantis.mk.Common()
+ def salt = new com.mirantis.mk.Salt()
+ def command = 'cmd.run'
stage("Change ${target} repos") {
salt.runSaltProcessStep(master, "${target}", 'saltutil.refresh_pillar', [], null, true, 5)
salt.enforceState(master, "${target}", 'linux.system.repo', true)
@@ -42,22 +44,25 @@
return
}
}
- stage("Run ${state} on ${target}") {
+ stage("Run ${state} state on ${target} nodes") {
try {
- salt.enforceState(master, '${target}', '${state}')
+ salt.enforceState(master, "${target}", ["${state}"], true)
} catch (Exception er) {
errorOccured = true
- common.errorMsg('${state} state was executed and failed. Please fix it manually.')
+ common.errorMsg("${state} state was executed and failed. Please fix it manually.")
}
}
- out = salt.runSaltCommand(master, 'local', ['expression': '${target}', 'type': 'compound'], command, null, 'systemctl status ${service}.service', null)
+ out = salt.runSaltCommand(master, 'local', ['expression': "${target}", 'type': 'compound'], command, null, "systemctl status ${service}.service", null)
salt.printSaltCommandResult(out)
- common.warningMsg('Please check \'systemctl status ${service}.service\' on ${target} nodes if ${service} is running.')
+ common.warningMsg("Please check \'systemctl status ${service}.service\' on ${target} nodes if ${service} is running.")
return
}
def upgrade_es_kibana(master) {
+ def common = new com.mirantis.mk.Common()
+ def salt = new com.mirantis.mk.Salt()
+ def command = 'cmd.run'
stage('Elasticsearch upgrade') {
try {
salt.runSaltProcessStep(master, 'I@elasticsearch:server', command, ["systemctl stop elasticsearch"], null, true)
@@ -76,6 +81,7 @@
def retries_wait = 20
def retries = 15
def elasticsearch_vip
+ def pillar = salt.getPillar(master, "I@elasticsearch:client", 'elasticsearch:client:server:host')
if(!pillar['return'].isEmpty()) {
elasticsearch_vip = pillar['return'][0].values()[0]
} else {
@@ -136,9 +142,10 @@
if (salt.testTarget(pepperEnv, "I@prometheus:exporters:jmx")) {
upgrade(pepperEnv, "I@prometheus:exporters:jmx", "jmx-exporter", "jmx-exporter", "prometheus")
}
- if (STAGE_UPGRADE_ES_KIBANA.toBoolean() == true && !errorOccured) {
- upgrade_es_kibana(pepperEnv)
- }
+ }
+
+ if (STAGE_UPGRADE_ES_KIBANA.toBoolean() == true && !errorOccured) {
+ upgrade_es_kibana(pepperEnv)
}
if (STAGE_UPGRADE_DOCKER_COMPONENTS.toBoolean() == true && !errorOccured) {
@@ -146,9 +153,9 @@
stage('Docker components upgrade') {
try {
- salt.runSaltProcessStep(pepperEnv, 'I@docker:swarm:role:master and I@prometheus:server', command, ["docker stack rm monitoring"], null, true)
+ salt.runSaltProcessStep(pepperEnv, 'I@docker:swarm:role:master and I@prometheus:server', 'cmd.run', ["docker stack rm monitoring"], null, true)
salt.enforceState(pepperEnv, 'I@docker:swarm and I@prometheus:server', 'prometheus')
- salt.runSaltProcessStep(pepperEnv, 'I@docker:swarm:role:master and I@prometheus:server', command, ["docker stack rm dashboard"], null, true)
+ salt.runSaltProcessStep(pepperEnv, 'I@docker:swarm:role:master and I@prometheus:server', 'cmd.run', ["docker stack rm dashboard"], null, true)
salt.enforceState(pepperEnv, 'I@docker:swarm:role:master and I@prometheus:server', 'docker')
salt.runSaltProcessStep(pepperEnv, '*', 'saltutil.sync_all', [], null, true)
salt.enforceState(pepperEnv, 'I@grafana:client', 'grafana.client')
diff --git a/test-cookiecutter-reclass-chunk.groovy b/test-cookiecutter-reclass-chunk.groovy
index b1266a3..cdc6e1e 100644
--- a/test-cookiecutter-reclass-chunk.groovy
+++ b/test-cookiecutter-reclass-chunk.groovy
@@ -34,6 +34,10 @@
'dockerContainerName': extraVars.DockerCName,
'testContext': extraVars.modelFile
]
+ if (extraVars.useExtraRepos) {
+ config['extraRepos'] = extraVars.extraRepos ? extraVars.extraRepos : [:]
+ config['extraRepoMergeStrategy'] = extraVars.extraRepoMergeStrategy ? extraVars.extraRepoMergeStrategy : ''
+ }
saltModelTesting.testNode(config)
} catch (Throwable e) {
// If there was an error or exception thrown, the build failed
diff --git a/test-cookiecutter-reclass.groovy b/test-cookiecutter-reclass.groovy
index 8826bc1..d93a618 100644
--- a/test-cookiecutter-reclass.groovy
+++ b/test-cookiecutter-reclass.groovy
@@ -17,7 +17,7 @@
git = new com.mirantis.mk.Git()
python = new com.mirantis.mk.Python()
-def extraVarsYAML = env.EXTRA_VARIABLES_YAML ?: false
+extraVarsYAML = env.EXTRA_VARIABLES_YAML.trim() ?: ''
if (extraVarsYAML) {
common.mergeEnv(env, extraVarsYAML)
}
@@ -55,83 +55,11 @@
// version of debRepos, aka formulas|reclass|ubuntu
testDistribRevision = env.DISTRIB_REVISION ?: 'nightly'
+
// Name of sub-test chunk job
chunkJobName = "test-mk-cookiecutter-templates-chunk"
testModelBuildsData = [:]
-def generateSaltMaster(modEnv, clusterDomain, clusterName) {
- def nodeFile = "${modEnv}/nodes/cfg01.${clusterDomain}.yml"
- def nodeString = """classes:
-- cluster.${clusterName}.infra.config
-parameters:
- _param:
- linux_system_codename: xenial
- reclass_data_revision: master
- linux:
- system:
- name: cfg01
- domain: ${clusterDomain}
-"""
- sh "mkdir -p ${modEnv}/nodes/"
- println "Create file ${nodeFile}"
- writeFile(file: nodeFile, text: nodeString)
-}
-
-/**
- *
- * @param contextFile - path to `contexts/XXX.yaml file`
- * @param virtualenv - pyvenv with CC and dep's
- * @param templateEnvDir - root of CookieCutter
- * @return
- */
-
-def generateModel(contextFile, virtualenv, templateEnvDir) {
- def modelEnv = "${templateEnvDir}/model"
- def basename = common.GetBaseName(contextFile, '.yml')
- def generatedModel = "${modelEnv}/${basename}"
- def content = readFile(file: "${templateEnvDir}/contexts/${contextFile}")
- def templateContext = readYaml text: content
- def clusterDomain = templateContext.default_context.cluster_domain
- def clusterName = templateContext.default_context.cluster_name
- def outputDestination = "${generatedModel}/classes/cluster/${clusterName}"
- def templateBaseDir = templateEnvDir
- def templateDir = "${templateEnvDir}/dir"
- def templateOutputDir = templateBaseDir
- dir(templateEnvDir) {
- sh(script: "rm -rf ${generatedModel} || true")
- common.infoMsg("Generating model from context ${contextFile}")
- def productList = ["infra", "cicd", "opencontrail", "kubernetes", "openstack", "oss", "stacklight", "ceph"]
- for (product in productList) {
-
- // get templateOutputDir and productDir
- templateOutputDir = "${templateEnvDir}/output/${product}"
- productDir = product
- templateDir = "${templateEnvDir}/cluster_product/${productDir}"
- // Bw for 2018.8.1 and older releases
- if (product.startsWith("stacklight") && (!fileExists(templateDir))) {
- common.warningMsg("Old release detected! productDir => 'stacklight2' ")
- productDir = "stacklight2"
- templateDir = "${templateEnvDir}/cluster_product/${productDir}"
- }
- if (product == "infra" || (templateContext.default_context["${product}_enabled"]
- && templateContext.default_context["${product}_enabled"].toBoolean())) {
-
- common.infoMsg("Generating product " + product + " from " + templateDir + " to " + templateOutputDir)
-
- sh "rm -rf ${templateOutputDir} || true"
- sh "mkdir -p ${templateOutputDir}"
- sh "mkdir -p ${outputDestination}"
-
- python.buildCookiecutterTemplate(templateDir, content, templateOutputDir, virtualenv, templateBaseDir)
- sh "mv -v ${templateOutputDir}/${clusterName}/* ${outputDestination}"
- } else {
- common.warningMsg("Product " + product + " is disabled")
- }
- }
- generateSaltMaster(generatedModel, clusterDomain, clusterName)
- }
-}
-
def getAndUnpackNodesInfoArtifact(jobName, copyTo, build) {
return {
dir(copyTo) {
@@ -142,18 +70,20 @@
}
}
-def testModel(modelFile, reclassArtifactName, artifactCopyPath) {
+def testModel(modelFile, reclassArtifactName, artifactCopyPath, useExtraRepos = false) {
// modelFile - `modelfiname` from model/modelfiname/modelfiname.yaml
//* Grub all models and send it to check in paralell - by one in thread.
def _uuid = "${env.JOB_NAME.toLowerCase()}_${env.BUILD_TAG.toLowerCase()}_${modelFile.toLowerCase()}_" + UUID.randomUUID().toString().take(8)
def _values_string = """
- ---
- MODELS_TARGZ: "${env.BUILD_URL}/artifact/${reclassArtifactName}"
- DockerCName: "${_uuid}"
- testReclassEnv: "model/${modelFile}/"
- modelFile: "contexts/${modelFile}.yml"
- DISTRIB_REVISION: "${testDistribRevision}"
- """
+---
+MODELS_TARGZ: "${env.BUILD_URL}/artifact/${reclassArtifactName}"
+DockerCName: "${_uuid}"
+testReclassEnv: "model/${modelFile}/"
+modelFile: "contexts/${modelFile}.yml"
+DISTRIB_REVISION: "${testDistribRevision}"
+useExtraRepos: ${useExtraRepos}
+${extraVarsYAML.replaceAll('---', '')}
+"""
def chunkJob = build job: chunkJobName, parameters: [
[$class: 'TextParameterValue', name: 'EXTRA_VARIABLES_YAML',
value : _values_string.stripIndent()],
@@ -164,7 +94,7 @@
'buildId' : "${chunkJob.number}"])
}
-def StepTestModel(basename, reclassArtifactName, artifactCopyPath) {
+def StepTestModel(basename, reclassArtifactName, artifactCopyPath, useExtraRepos = false) {
// We need to wrap what we return in a Groovy closure, or else it's invoked
// when this method is called, not when we pass it to parallel.
// To do this, you need to wrap the code below in { }, and either return
@@ -172,7 +102,7 @@
// return node object
return {
node(slaveNode) {
- testModel(basename, reclassArtifactName, artifactCopyPath)
+ testModel(basename, reclassArtifactName, artifactCopyPath, useExtraRepos)
}
}
}
@@ -212,7 +142,9 @@
def StepGenerateModels(_contextFileList, _virtualenv, _templateEnvDir) {
return {
for (contextFile in _contextFileList) {
- generateModel(contextFile, _virtualenv, _templateEnvDir)
+ def basename = common.GetBaseName(contextFile, '.yml')
+ def context = readFile(file: "${_templateEnvDir}/contexts/${contextFile}")
+ python.generateModel(context, basename, 'cfg01', _virtualenv, "${_templateEnvDir}/model", _templateEnvDir)
}
}
}
@@ -430,7 +362,7 @@
common.infoMsg("Found: ${contextFileListPatched.size()} patched contexts to test.")
for (String context : contextFileListPatched) {
def basename = common.GetBaseName(context, '.yml')
- stepsForParallel.put("ContextPatchedTest:${basename}", StepTestModel(basename, patchedReclassArtifactName, reclassInfoPatchedPath))
+ stepsForParallel.put("ContextPatchedTest:${basename}", StepTestModel(basename, patchedReclassArtifactName, reclassInfoPatchedPath, true))
}
parallel stepsForParallel
common.infoMsg('All TestContexts tests done')