Merge "Add RESTORE_TYPE to galera restore pipeline"
diff --git a/ceph-add-osd-upmap.groovy b/ceph-add-osd-upmap.groovy
new file mode 100644
index 0000000..96ca29d
--- /dev/null
+++ b/ceph-add-osd-upmap.groovy
@@ -0,0 +1,133 @@
+/**
+ *
+ * Add a Ceph node to an existing cluster using the upmap mechanism
+ *
+ * Required parameters:
+ * SALT_MASTER_URL URL of Salt master
+ * SALT_MASTER_CREDENTIALS Credentials to the Salt API
+ * HOST Host (minion id) to be added
+ *
+ */
+
+common = new com.mirantis.mk.Common()
+salt = new com.mirantis.mk.Salt()
+def python = new com.mirantis.mk.Python()
+orchestrate = new com.mirantis.mk.Orchestrate()
+
+def waitForHealthy(master, count=0, attempts=100) {
+ // wait for healthy cluster
+ while (count<attempts) {
+ def health = runCephCommand('ceph health')['return'][0].values()[0]
+ if (health.contains('HEALTH_OK')) {
+ common.infoMsg('Cluster is healthy')
+ break;
+ }
+ count++
+ sleep(10)
+ }
+}
+
+def runCephCommand(cmd) {
+ return salt.cmdRun("pepperEnv", "I@ceph:mon and I@ceph:common:keyring:admin", cmd, checkResponse=true, batch=null, output=false)
+}
+
+def getpgmap(master) {
+ return runCephCommand('ceph pg ls remapped --format=json')['return'][0].values()[0]
+}
+
+def generatemapping(master,pgmap,map) {
+ def pg_new
+ def pg_old
+
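+ // For every remapped PG, pin it back onto the OSDs it currently lives on:
+ // pg_new holds the newly assigned OSDs (up - acting), pg_old the OSDs that
+ // still hold the data (acting - up). The generated pg-upmap-items commands
+ // map each new OSD back to its old one, so data stays in place until the
+ // balancer moves it gradually.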
+ for ( pg in pgmap )
+ {
+
+ pg_new = pg["up"].minus(pg["acting"])
+ pg_old = pg["acting"].minus(pg["up"])
+
+ for ( i = 0; i < pg_new.size(); i++ )
+ {
+ def string = "ceph osd pg-upmap-items " + pg["pgid"].toString() + " " + pg_new[i] + " " + pg_old[i] + ";"
+ map.add(string)
+ }
+
+ }
+}
+
+def pepperEnv = "pepperEnv"
+
+timeout(time: 12, unit: 'HOURS') {
+ node("python") {
+
+ // create connection to salt master
+ python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+
+ stage ("verify client versions")
+ {
+ def nodes = salt.getMinions("pepperEnv", "I@ceph:common and not E@mon*")
+ for ( node in nodes )
+ {
+ def versions = salt.cmdRun("pepperEnv", node, "ceph features --format json", checkResponse=true, batch=null, output=false).values()[0]
+ versions = new groovy.json.JsonSlurperClassic().parseText(versions[0][node])
+ if ( versions['client']['group']['release'] != 'luminous' )
+ {
+ throw new Exception("client installed on " + node + " is not luminous. Update all clients to luminous before using this pipeline")
+ }
+ }
+ }
+
+ stage ("enable luminous compat")
+ {
+ runCephCommand('ceph osd set-require-min-compat-client luminous')['return'][0].values()[0]
+ }
+
+ stage ("enable upmap balancer")
+ {
+ runCephCommand('ceph balancer on')['return'][0].values()[0]
+ runCephCommand('ceph balancer mode upmap')['return'][0].values()[0]
+ }
+
+
+ stage ("set norebalance")
+ {
+ runCephCommand('ceph osd set norebalance')['return'][0].values()[0]
+ }
+
+ stage('Install Ceph OSD') {
+ orchestrate.installCephOsd(pepperEnv, HOST)
+ }
+
+ def mapping = []
+
+ stage ("update mappings")
+ {
+ def pgmap1 = getpgmap(pepperEnv)
+ if ( pgmap1 == '' )
+ {
+ return 1
+ }
+ else
+ {
+ def pgmap = new groovy.json.JsonSlurperClassic().parseText(pgmap1)
+ for(int x=1; x<=3; x++){
+ pgmap1 = getpgmap(pepperEnv)
+ // re-parse the refreshed pg map so each iteration works with the current remapped PGs
+ pgmap = new groovy.json.JsonSlurperClassic().parseText(pgmap1)
+ generatemapping(pepperEnv,pgmap,mapping)
+ mapping.each(this.&runCephCommand)
+ sleep(30)
+ }
+ }
+
+ }
+
+ stage ("unset norebalance")
+ {
+ runCephCommand('ceph osd unset norebalance')['return'][0].values()[0]
+ }
+
+ stage ("wait for healthy cluster")
+ {
+ waitForHealthy(pepperEnv)
+ }
+
+ }
+}
diff --git a/cloud-deploy-pipeline.groovy b/cloud-deploy-pipeline.groovy
index 1de45ee..0e0b106 100644
--- a/cloud-deploy-pipeline.groovy
+++ b/cloud-deploy-pipeline.groovy
@@ -342,6 +342,9 @@
// Install
//
+ // Check if all minions are reachable and ready
+ salt.checkTargetMinionsReady(['saltId': venvPepper, 'target': '*'])
+
if (common.checkContains('STACK_INSTALL', 'core')) {
stage('Install core infrastructure') {
def staticMgmtNetwork = false
diff --git a/generate-cookiecutter-products.groovy b/generate-cookiecutter-products.groovy
index ac6b918..72c5cba 100644
--- a/generate-cookiecutter-products.groovy
+++ b/generate-cookiecutter-products.groovy
@@ -163,7 +163,12 @@
writeFile file: 'failsafe-ssh-key.pub', text: context['cfg_failsafe_ssh_public_key']
}
if (!fileExists(new File(templateEnv, 'tox.ini').toString())) {
- python.setupCookiecutterVirtualenv(cutterEnv)
+ reqs = new File(templateEnv, 'requirements.txt').toString()
+ if (fileExists(reqs)) {
+ python.setupVirtualenv(cutterEnv, 'python2', [], reqs)
+ } else {
+ python.setupCookiecutterVirtualenv(cutterEnv)
+ }
python.generateModel(common2.dumpYAML(['default_context': context]), 'default_context', context['salt_master_hostname'], cutterEnv, modelEnv, templateEnv, false)
} else {
// tox-based CC generated structure of reclass,from the root. Otherwise for bw compat, modelEnv
@@ -232,7 +237,7 @@
sh "git clone --mirror https://github.com/Mirantis/mk-pipelines.git ${pipelineEnv}/mk-pipelines"
sh "git clone --mirror https://github.com/Mirantis/pipeline-library.git ${pipelineEnv}/pipeline-library"
args = [
- "--user-data user_data", , "--model ${modelEnv}",
+ "--user-data user_data", "--model ${modelEnv}",
"--mk-pipelines ${pipelineEnv}/mk-pipelines/", "--pipeline-library ${pipelineEnv}/pipeline-library/"
]
if (context['secrets_encryption_enabled'] == 'True') {
@@ -291,7 +296,7 @@
// create cfg config-drive
if (outdateGeneration) {
- args += [ "--hostname ${context['salt_master_hostname']}", "${context['salt_master_hostname']}.${context['cluster_domain']}-config.iso" ]
+ args += ["--hostname ${context['salt_master_hostname']}", "${context['salt_master_hostname']}.${context['cluster_domain']}-config.iso"]
sh "./create-config-drive ${args.join(' ')}"
} else {
args += [
@@ -372,14 +377,14 @@
// common.sendNotification(currentBuild.result,"",["slack"])
stage('Save artifacts to Artifactory') {
def artifactory = new com.mirantis.mcp.MCPArtifactory()
- def buildProps = [ "context=${context['cluster_name']}" ]
+ def buildProps = ["context=${context['cluster_name']}"]
if (RequesterEmail != '' && !RequesterEmail.contains('example')) {
buildProps.add("emailTo=${RequesterEmail}")
}
def artifactoryLink = artifactory.uploadJobArtifactsToArtifactory([
- 'artifactory': 'mcp-ci',
+ 'artifactory' : 'mcp-ci',
'artifactoryRepo': "drivetrain-local/${JOB_NAME}/${context['cluster_name']}-${BUILD_NUMBER}",
- 'buildProps': buildProps,
+ 'buildProps' : buildProps,
])
currentBuild.description += "<br/>${artifactoryLink}"
}
diff --git a/k8s-upgrade-pipeline.groovy b/k8s-upgrade-pipeline.groovy
index fe7d189..224040f 100644
--- a/k8s-upgrade-pipeline.groovy
+++ b/k8s-upgrade-pipeline.groovy
@@ -268,7 +268,7 @@
def nodeShortName = target.tokenize(".")[0]
firstTarget = salt.getFirstMinion(pepperEnv, originalTarget)
- status = salt.cmdRun(pepperEnv, firstTarget, "kubectl get no | grep ${nodeShortName} | awk '{print \$2}'"
+ status = salt.cmdRun(pepperEnv, firstTarget, "kubectl get no ${nodeShortName} | tail -n+2 | awk '{print \$2}'"
)['return'][0].values()[0].replaceAll('Salt command execution success',''
).replaceAll(',SchedulingDisabled','').trim()
@@ -285,6 +285,13 @@
stage("Rebooting ${target}") {
debian.osReboot(pepperEnv, target)
+ /*
+ * The Kubernetes controller manager marks a node as NotReady
+ * only after 40 seconds of downtime.
+ * Wait for 60 seconds to be sure the node reaches its
+ * correct status.
+ */
+ sleep(60)
common.retry(times, delay) {
if(!isNodeReady(pepperEnv, target)) {
error("Node still not in Ready state...")
@@ -345,11 +352,9 @@
}
def executeConformance(pepperEnv, target, k8s_api, mcp_repo) {
- stage("Running conformance tests") {
- def image = buildImageURL(pepperEnv, target, mcp_repo)
- print("Using image: " + image)
- runConformance(pepperEnv, target, k8s_api, image)
- }
+ def image = buildImageURL(pepperEnv, target, mcp_repo)
+ print("Using image: " + image)
+ runConformance(pepperEnv, target, k8s_api, image)
}
def containerDinstalled(pepperEnv, target) {
@@ -669,21 +674,23 @@
def daemonsetMap = buildDaemonsetMap(pepperEnv, ctl_node)
if (CONFORMANCE_RUN_BEFORE.toBoolean()) {
- def target = CTL_TARGET
- def mcp_repo = ARTIFACTORY_URL
- def k8s_api = TEST_K8S_API_SERVER
- firstTarget = salt.getFirstMinion(pepperEnv, target)
- def containerd_enabled = containerDenabled(pepperEnv, firstTarget)
- def containerd_installed = containerDinstalled(pepperEnv, firstTarget)
- def conformance_pod_ready = conformancePodDefExists(pepperEnv, firstTarget)
- if (containerd_enabled && containerd_installed && conformance_pod_ready) {
- def config = ['master': pepperEnv,
- 'target': firstTarget,
- 'junitResults': false,
- 'autodetect': true]
- test.executeConformance(config)
- } else {
- executeConformance(pepperEnv, firstTarget, k8s_api, mcp_repo)
+ stage("Perform conformance run before upgrade") {
+ def target = CTL_TARGET
+ def mcp_repo = ARTIFACTORY_URL
+ def k8s_api = TEST_K8S_API_SERVER
+ firstTarget = salt.getFirstMinion(pepperEnv, target)
+ def containerd_enabled = containerDenabled(pepperEnv, firstTarget)
+ def containerd_installed = containerDinstalled(pepperEnv, firstTarget)
+ def conformance_pod_ready = conformancePodDefExists(pepperEnv, firstTarget)
+ if (containerd_enabled && containerd_installed && conformance_pod_ready) {
+ def config = ['master': pepperEnv,
+ 'target': firstTarget,
+ 'junitResults': false,
+ 'autodetect': true]
+ test.executeConformance(config)
+ } else {
+ executeConformance(pepperEnv, firstTarget, k8s_api, mcp_repo)
+ }
}
}
@@ -812,21 +819,23 @@
printVersionInfo(pepperEnv, ctl_node)
if (CONFORMANCE_RUN_AFTER.toBoolean()) {
- def target = CTL_TARGET
- def mcp_repo = ARTIFACTORY_URL
- def k8s_api = TEST_K8S_API_SERVER
- firstTarget = salt.getFirstMinion(pepperEnv, target)
- def containerd_enabled = containerDenabled(pepperEnv, firstTarget)
- def containerd_installed = containerDinstalled(pepperEnv, firstTarget)
- def conformance_pod_ready = conformancePodDefExists(pepperEnv, firstTarget)
- if (containerd_enabled && containerd_installed && conformance_pod_ready) {
- def config = ['master': pepperEnv,
- 'target': firstTarget,
- 'junitResults': false,
- 'autodetect': true]
- test.executeConformance(config)
- } else {
- executeConformance(pepperEnv, firstTarget, k8s_api, mcp_repo)
+ stage("Perform conformance run after upgrade") {
+ def target = CTL_TARGET
+ def mcp_repo = ARTIFACTORY_URL
+ def k8s_api = TEST_K8S_API_SERVER
+ firstTarget = salt.getFirstMinion(pepperEnv, target)
+ def containerd_enabled = containerDenabled(pepperEnv, firstTarget)
+ def containerd_installed = containerDinstalled(pepperEnv, firstTarget)
+ def conformance_pod_ready = conformancePodDefExists(pepperEnv, firstTarget)
+ if (containerd_enabled && containerd_installed && conformance_pod_ready) {
+ def config = ['master': pepperEnv,
+ 'target': firstTarget,
+ 'junitResults': false,
+ 'autodetect': true]
+ test.executeConformance(config)
+ } else {
+ executeConformance(pepperEnv, firstTarget, k8s_api, mcp_repo)
+ }
}
}
} catch (Throwable e) {
diff --git a/networking-test-l2gw-bgpvpn.groovy b/networking-test-l2gw-bgpvpn.groovy
new file mode 100644
index 0000000..0fadbd7
--- /dev/null
+++ b/networking-test-l2gw-bgpvpn.groovy
@@ -0,0 +1,101 @@
+/**
+ *
+ * Deploy an environment with l2gw and bgpvpn enabled from a cookiecutter context
+ * using the create-mcp-env job, run tests on the environment and download artifacts
+ *
+ * Expected parameters:
+ * MCP_ENV_PIPELINES_REFSPEC Used by rollout-mcp-env and delete-heat-stack-for-mcp-env
+ * MCP_ENV_HEAT_TEMPLATES_REFSPEC Used by rollout-mcp-env
+ * OPENSTACK_API_PROJECT OpenStack project name
+ * OPENSTACK_HEAT_AZ OpenStack availability zone
+ * OPENSTACK_ENVIRONMENT OpenStack environment
+ * HEAT_STACK_CONTEXT Same as in rollout-mcp-env
+ * STACK_DELETE Remove stack after test
+ * COOKIECUTTER_TEMPLATE_CONTEXT_FILE Path to file with base context from heat-templates
+ * COOKIECUTTER_EXTRA_CONTEXT Overrides base kubernetes_testing context
+ * EXTRA_REPOS Yaml based extra repos metadata to be added during bootstrap phase
+ * STACK_NAME The name of a stack in openstack (will be generated if empty)
+ * CLUSTER_MODEL_OVERRIDES List of cluster model yaml files parameters overrides (same as in create-mcp-env)
+ * SALT_MASTER_URL Full Salt API address.
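+ * RUN_TESTS Run networking tests on the deployed environment (currently a placeholder)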
+ */
+
+common = new com.mirantis.mk.Common()
+
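+/**
+ * Convert a map of input parameters into Jenkins build-step parameter objects,
+ * keeping only the names listed in allowedParams and casting each value
+ * according to its declared type (string, boolean or text).
+ */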
+def setBuildParameters(inputParams, allowedParams){
+ def result = []
+ allowedParams.each { param ->
+ if (inputParams.containsKey(param.name)) {
+ def value = inputParams[param.name]
+ def value_class = 'StringParameterValue'
+ switch (param.type) {
+ case 'boolean':
+ value = value.toBoolean()
+ value_class = 'BooleanParameterValue'
+ break
+ case 'text':
+ value_class = 'TextParameterValue'
+ break
+ }
+ result.add([
+ $class: value_class,
+ name: param.name,
+ value: value,
+ ])
+ }
+ }
+ return result
+}
+
+node ('python') {
+ def stack_name
+ if (common.validInputParam('STACK_NAME')) {
+ stack_name = STACK_NAME
+ } else {
+ stack_name = BUILD_TAG
+ }
+
+ currentBuild.description = stack_name
+
+ try {
+ stage ('Deploy cluster') {
+ deploy_build = build (job: "create-mcp-env", parameters: [
+ [$class: 'StringParameterValue', name: 'REFSPEC', value: MCP_ENV_PIPELINES_REFSPEC],
+ [$class: 'StringParameterValue', name: 'HEAT_TEMPLATES_REFSPEC', value: MCP_ENV_HEAT_TEMPLATES_REFSPEC],
+ [$class: 'StringParameterValue', name: 'OS_PROJECT_NAME', value: OPENSTACK_API_PROJECT],
+ [$class: 'StringParameterValue', name: 'OS_AZ', value: OPENSTACK_HEAT_AZ],
+ [$class: 'StringParameterValue', name: 'OPENSTACK_ENVIRONMENT', value: OPENSTACK_ENVIRONMENT],
+ [$class: 'StringParameterValue', name: 'STACK_NAME', value: stack_name],
+ [$class: 'StringParameterValue', name: 'COOKIECUTTER_TEMPLATE_CONTEXT_FILE', value: COOKIECUTTER_TEMPLATE_CONTEXT_FILE],
+ [$class: 'TextParameterValue', name: 'HEAT_STACK_CONTEXT', value: HEAT_STACK_CONTEXT],
+ [$class: 'TextParameterValue', name: 'COOKIECUTTER_EXTRA_CONTEXT', value: COOKIECUTTER_EXTRA_CONTEXT],
+ [$class: 'TextParameterValue', name: 'EXTRA_REPOS', value: EXTRA_REPOS],
+ [$class: 'TextParameterValue', name: 'CLUSTER_MODEL_OVERRIDES', value: CLUSTER_MODEL_OVERRIDES],
+ ]
+ )
+ }
+
+ if (Boolean.valueOf(RUN_TESTS)) {
+ stage ('Run networking tests') {
+ common.infoMsg('TODO')
+ }
+ }
+
+ // get salt master url
+ saltMasterUrl = "http://${deploy_build.description.tokenize(' ')[1]}:6969"
+
+ } finally {
+ if (Boolean.valueOf(STACK_DELETE)) {
+ stage ('Delete stack') {
+ common.infoMsg("Trying to delete stack ${stack_name}")
+ build (job: 'delete-heat-stack-for-mcp-env', propagate: true, parameters: [
+ [$class: 'StringParameterValue', name: 'REFSPEC', value: MCP_ENV_PIPELINES_REFSPEC],
+ [$class: 'StringParameterValue', name: 'OS_PROJECT_NAME', value: OPENSTACK_API_PROJECT],
+ [$class: 'StringParameterValue', name: 'OPENSTACK_ENVIRONMENT', value: OPENSTACK_ENVIRONMENT],
+ [$class: 'StringParameterValue', name: 'STACK_NAME', value: stack_name],
+ ]
+ )
+ }
+ }
+ }
+}
diff --git a/test-cookiecutter-reclass.groovy b/test-cookiecutter-reclass.groovy
index e530bdc..67d5181 100644
--- a/test-cookiecutter-reclass.groovy
+++ b/test-cookiecutter-reclass.groovy
@@ -15,8 +15,8 @@
import groovy.json.JsonOutput
common = new com.mirantis.mk.Common()
+mcpCommon = new com.mirantis.mcp.Common()
gerrit = new com.mirantis.mk.Gerrit()
-git = new com.mirantis.mk.Git()
python = new com.mirantis.mk.Python()
extraVarsYAML = env.EXTRA_VARIABLES_YAML.trim() ?: ''
@@ -132,7 +132,14 @@
return {
for (contextFile in _contextFileList) {
def basename = common.GetBaseName(contextFile, '.yml')
- def context = readFile(file: "${_templateEnvDir}/contexts/${contextFile}")
+ def contextYaml = readYaml text: readFile(file: "${_templateEnvDir}/contexts/${contextFile}")
+ // secrets_encryption is overly complicated for the expected 'fast syntax tests',
+ // so disable it here. It is tested only in the generate-cookiecutter-products.groovy pipeline.
+ if (contextYaml['default_context'].get('secrets_encryption_enabled')) {
+ common.warningMsg('Disabling secrets_encryption_enabled for tests!')
+ contextYaml['default_context']['secrets_encryption_enabled'] = 'False'
+ }
+ context = mcpCommon.dumpYAML(contextYaml)
if (!fileExists(new File(_templateEnvDir, 'tox.ini').toString())) {
common.warningMsg('Forming NEW reclass-root structure...')
python.generateModel(context, basename, 'cfg01', _virtualenv, "${_templateEnvDir}/model", _templateEnvDir)
diff --git a/test-model-generator.groovy b/test-model-generator.groovy
index ee7c559..4134ca4 100644
--- a/test-model-generator.groovy
+++ b/test-model-generator.groovy
@@ -144,7 +144,7 @@
}
dir(uiProject) {
python.runVirtualenvCommand("${env.WORKSPACE}/venv",
- "export IMAGE=${uiImage.id}; docker-compose up -d")
+ "export IMAGE=${uiImage.id}; docker-compose -f docker-compose-test.yml up -d")
common.retry(5, 20) {
sh 'curl -v http://127.0.0.1:3000 > /dev/null'
}
diff --git a/upgrade-mcp-release.groovy b/upgrade-mcp-release.groovy
index de24a41..705eef8 100644
--- a/upgrade-mcp-release.groovy
+++ b/upgrade-mcp-release.groovy
@@ -67,11 +67,12 @@
def validateReclassModel(ArrayList saltMinions, String suffix) {
try {
- for(String minion in saltMinions) {
- common.infoMsg("Reclass model validation for minion ${minion}...")
- def ret = salt.cmdRun(venvPepper, 'I@salt:master', "reclass -n ${minion}", true, null, false)
- def reclassInv = ret.values()[0]
- writeFile file: "inventory-${minion}-${suffix}.out", text: reclassInv.toString()
+ dir(suffix) {
+ for(String minion in saltMinions) {
+ common.infoMsg("Reclass model validation for minion ${minion}...")
+ def ret = salt.cmdRun("${workspace}/${venvPepper}", 'I@salt:master', "reclass -n ${minion}", true, null, false).get('return')[0].values()[0]
+ writeFile file: minion, text: ret.toString()
+ }
}
} catch (Exception e) {
common.errorMsg('Can not validate current Reclass model. Inspect failed minion manually.')
@@ -79,12 +80,17 @@
}
}
-def archiveReclassModelChanges(ArrayList saltMinions, String oldSuffix='before', String newSuffix='after') {
- for(String minion in saltMinions) {
- def fileName = "reclass-model-${minion}-diff.out"
- sh "diff -u inventory-${minion}-${oldSuffix}.out inventory-${minion}-${newSuffix}.out > ${fileName} || true"
- archiveArtifacts artifacts: "${fileName}"
+def archiveReclassModelChanges(ArrayList saltMinions, String oldSuffix, String newSuffix) {
+ def diffDir = 'diff'
+ dir(diffDir) {
+ for(String minion in saltMinions) {
+ def fileName = "reclass-model-${minion}-diff.out"
+ sh "diff -u ${workspace}/${oldSuffix}/${minion} ${workspace}/${newSuffix}/${minion} > ${fileName} || true"
+ }
}
+ archiveArtifacts artifacts: "${workspace}/${oldSuffix}"
+ archiveArtifacts artifacts: "${workspace}/${newSuffix}"
+ archiveArtifacts artifacts: "${workspace}/${diffDir}"
}
if (common.validInputParam('PIPELINE_TIMEOUT')) {
@@ -96,9 +102,10 @@
}
timeout(time: pipelineTimeout, unit: 'HOURS') {
- node("python") {
+ node("python && docker") {
try {
workspace = common.getWorkspace()
+ deleteDir()
targetMcpVersion = null
if (!common.validInputParam('TARGET_MCP_VERSION') && !common.validInputParam('MCP_VERSION')) {
error('You must specify MCP version in TARGET_MCP_VERSION|MCP_VERSION variable')
@@ -129,6 +136,10 @@
def updatePipelines = ''
def updateLocalRepos = ''
def reclassSystemBranch = ''
+ def reclassSystemBranchDefault = gitTargetMcpVersion
+ if (gitTargetMcpVersion != 'proposed') {
+ reclassSystemBranchDefault = "origin/${gitTargetMcpVersion}"
+ }
def driteTrainParamsYaml = env.getProperty('DRIVE_TRAIN_PARAMS')
if (driteTrainParamsYaml) {
def driteTrainParams = readYaml text: driteTrainParamsYaml
@@ -138,7 +149,7 @@
updateClusterModel = driteTrainParams.get('UPDATE_CLUSTER_MODEL', false).toBoolean()
updatePipelines = driteTrainParams.get('UPDATE_PIPELINES', false).toBoolean()
updateLocalRepos = driteTrainParams.get('UPDATE_LOCAL_REPOS', false).toBoolean()
- reclassSystemBranch = driteTrainParams.get('RECLASS_SYSTEM_BRANCH', gitTargetMcpVersion)
+ reclassSystemBranch = driteTrainParams.get('RECLASS_SYSTEM_BRANCH', reclassSystemBranchDefault)
} else {
// backward compatibility for 2018.11.0
saltMastURL = env.getProperty('SALT_MASTER_URL')
@@ -147,20 +158,23 @@
updateClusterModel = env.getProperty('UPDATE_CLUSTER_MODEL').toBoolean()
updatePipelines = env.getProperty('UPDATE_PIPELINES').toBoolean()
updateLocalRepos = env.getProperty('UPDATE_LOCAL_REPOS').toBoolean()
- reclassSystemBranch = gitTargetMcpVersion
+ reclassSystemBranch = reclassSystemBranchDefault
}
python.setupPepperVirtualenv(venvPepper, saltMastURL, saltMastCreds)
+ def pillarsBeforeSuffix = 'pillarsBefore'
+ def pillarsAfterSuffix = 'pillarsAfter'
def inventoryBeforeFilename = "reclass-inventory-before.out"
def inventoryAfterFilename = "reclass-inventory-after.out"
def minions = salt.getMinions(venvPepper, '*')
+ def cluster_name = salt.getPillar(venvPepper, 'I@salt:master', "_param:cluster_name").get("return")[0].values()[0]
stage("Update Reclass and Salt-Formulas ") {
- validateReclassModel(minions, 'before')
+ validateReclassModel(minions, pillarsBeforeSuffix)
+ archiveReclassInventory(inventoryBeforeFilename)
- def cluster_name = salt.getPillar(venvPepper, 'I@salt:master', "_param:cluster_name").get("return")[0].values()[0]
try {
salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/ && git diff-index --quiet HEAD --")
}
@@ -211,9 +225,109 @@
"git add -u && git commit --allow-empty -m 'Cluster model update to the release $targetMcpVersion on $dateTime'")
}
+ salt.runSaltProcessStep(venvPepper, 'I@salt:master', 'saltutil.refresh_pillar')
+ try {
+ salt.enforceState(venvPepper, 'I@salt:master', 'linux.system.repo')
+ } catch (Exception e) {
+ common.errorMsg("Something wrong with model after UPDATE_CLUSTER_MODEL step. Please check model.")
+ throw e
+ }
+
+ common.infoMsg('Running a check for compatibility with new Reclass/Salt-Formulas packages')
+ def saltModelDir = 'salt-model'
+ def nodesArtifact = 'pillarsFromValidation.tar.gz'
+ def reclassModel = 'reclassModel.tar.gz'
+ def pillarsAfterValidation = 'pillarsFromValidation'
+ try {
+ def repos = salt.getPillar(venvPepper, 'I@salt:master', "linux:system:repo").get("return")[0].values()[0]
+ def cfgInfo = salt.getPillar(venvPepper, 'I@salt:master', "reclass:storage:node:infra_cfg01_node").get("return")[0].values()[0]
+ def docker_image_for_test = salt.getPillar(venvPepper, 'I@salt:master', "_param:docker_image_cvp_sanity_checks").get("return")[0].values()[0]
+ def saltModelTesting = new com.mirantis.mk.SaltModelTesting()
+ def config = [
+ 'dockerHostname': "cfg01",
+ 'distribRevision': "${targetMcpVersion}",
+ 'baseRepoPreConfig': true,
+ 'extraRepoMergeStrategy': 'override',
+ 'dockerContainerName': 'new-reclass-package-check',
+ 'dockerMaxCpus': 1,
+ 'image': docker_image_for_test,
+ 'dockerExtraOpts': [
+ "-v ${env.WORKSPACE}/${saltModelDir}:/srv/salt/reclass",
+ "--entrypoint ''",
+ ],
+ 'extraRepos': ['repo': repos, 'aprConfD': "APT::Get::AllowUnauthenticated 'true';" ],
+ 'envOpts': [ "CLUSTER_NAME=${cluster_name}", "NODES_ARTIFACT_NAME=${nodesArtifact}" ]
+ ]
+ def tarName = '/tmp/currentModel.tar.gz'
+ salt.cmdRun(venvPepper, 'I@salt:master', "tar -cf ${tarName} --mode='a+rwX' --directory=/srv/salt/reclass classes")
+ if (cfgInfo == '') {
+ // case for old setups when cfg01 node model was static
+ def node_name = salt.getPillar(venvPepper, 'I@salt:master', "linux:system:name").get("return")[0].values()[0]
+ def node_domain = salt.getPillar(venvPepper, 'I@salt:master', "linux:system:domain").get("return")[0].values()[0]
+ salt.cmdRun(venvPepper, 'I@salt:master', "tar -rf ${tarName} --mode='a+rwX' --directory=/srv/salt/reclass nodes/${node_name}.${node_domain}.yml")
+ config['envOpts'].add("CFG_NODE_NAME=${node_name}.${node_domain}")
+ }
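+ // 'modelHash' is not a hash: it holds the base64-encoded, gzipped model tarball
+ // streamed from the Salt master, which is then decoded and unpacked into the
+ // local workspace for the Docker-based compatibility check.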
+ def modelHash = salt.cmdRun(venvPepper, 'I@salt:master', "cat ${tarName} | gzip -9 -c | base64", false, null, false).get('return')[0].values()[0]
+ writeFile file: 'modelHash', text: modelHash
+ sh "cat modelHash | base64 -d | gzip -d > ${reclassModel}"
+ sh "mkdir ${saltModelDir} && tar -xf ${reclassModel} -C ${saltModelDir}"
+
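+ // Commands executed inside the test container: install the new Reclass and
+ // salt-formula packages, regenerate node definitions with nodegenerator and
+ // dump the resulting pillars so they can be diffed against the current ones.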
+ config['runCommands'] = [
+ '001_Install_Salt_Reclass_Packages': { sh('apt-get install -y reclass salt-formula-*') },
+ '002_Get_new_nodes': {
+ try {
+ sh('''#!/bin/bash
+ new_generated_dir=/srv/salt/_new_nodes
+ new_pillar_dir=/srv/salt/_new_pillar
+ reclass_classes=/srv/salt/reclass/classes/
+ mkdir -p ${new_generated_dir} ${new_pillar_dir}
+ nodegenerator -b ${reclass_classes} -o ${new_generated_dir} ${CLUSTER_NAME}
+ for node in $(ls ${new_generated_dir}); do
+ nodeName=$(basename -s .yml ${node})
+ reclass -n ${nodeName} -c ${reclass_classes} -u ${new_generated_dir} > ${new_pillar_dir}/${nodeName}
+ done
+ if [[ -n "${CFG_NODE_NAME}" ]]; then
+ reclass -n ${CFG_NODE_NAME} -c ${reclass_classes} -u /srv/salt/reclass/nodes > ${new_pillar_dir}/${CFG_NODE_NAME}
+ fi
+ tar -czf /tmp/${NODES_ARTIFACT_NAME} -C ${new_pillar_dir}/ .
+ ''')
+ } catch (Exception e) {
+ print "Test new nodegenerator tool is failed: ${e}"
+ throw e
+ }
+ },
+ ]
+ config['runFinally'] = [ '001_Archive_nodegenerator_artefact': {
+ sh(script: "mv /tmp/${nodesArtifact} ${env.WORKSPACE}/${nodesArtifact}")
+ archiveArtifacts artifacts: nodesArtifact
+ }]
+ saltModelTesting.setupDockerAndTest(config)
+ sh "mkdir -p ${pillarsAfterValidation} && tar -xf ${nodesArtifact} --dir ${pillarsAfterValidation}/"
+ def changesFound = false
+ for(String minion in minions) {
+ try {
+ sh (script:"diff -u -w -I '^Salt command execution success' -I '^ node: ' -I '^ uri: ' -I '^ timestamp: ' ${pillarsBeforeSuffix}/${minion} ${pillarsAfterValidation}/${minion}", returnStdout: true)
+ } catch(Exception e) {
+ changesFound = true
+ common.errorMsg("Found diff changes for ${minion} minion")
+ }
+ }
+ if (changesFound) {
+ common.warningMsg('Found differences between the current and updated pillar data. Inspect the logs above.')
+ input message: 'Continue anyway?'
+ } else {
+ common.infoMsg('No differences found between the current and updated pillar data.')
+ }
+ } catch (Exception updateErr) {
+ common.warningMsg(updateErr)
+ common.warningMsg('Failed to validate the updated Salt Formulas repos/packages.')
+ input message: 'Continue anyway?'
+ } finally {
+ sh "rm -rf ${saltModelDir} ${nodesArtifact} ${pillarsAfterValidation} ${reclassModel}"
+ }
+
try {
common.infoMsg('Perform: UPDATE Salt Formulas')
- salt.enforceState(venvPepper, 'I@salt:master', 'linux.system.repo')
def saltEnv = salt.getPillar(venvPepper, 'I@salt:master', "_param:salt_master_base_environment").get("return")[0].values()[0]
salt.runSaltProcessStep(venvPepper, 'I@salt:master', 'state.sls_id', ["salt_master_${saltEnv}_pkg_formulas",'salt.master.env'])
} catch (Exception updateErr) {
@@ -222,8 +336,6 @@
input message: 'Continue anyway?'
}
- archiveReclassInventory(inventoryBeforeFilename)
-
try {
common.infoMsg('Perform: UPDATE Reclass package')
salt.runSaltProcessStep(venvPepper, 'I@salt:master', 'pkg.install', ["reclass"])
@@ -256,12 +368,11 @@
sh "diff -u $inventoryBeforeFilename $inventoryAfterFilename > reclass-inventory-diff.out || true"
archiveArtifacts artifacts: "reclass-inventory-diff.out"
- validateReclassModel(minions, 'after')
- archiveReclassModelChanges(minions)
+ validateReclassModel(minions, pillarsAfterSuffix)
+ archiveReclassModelChanges(minions, pillarsBeforeSuffix, pillarsAfterSuffix)
}
if (updateLocalRepos) {
- def cluster_name = salt.getPillar(venvPepper, 'I@salt:master', "_param:cluster_name").get("return")[0].values()[0]
stage("Update local repos") {
common.infoMsg("Updating local repositories")