Merge "Add ability for elasticsearch major upgrade"
diff --git a/cloud-deploy-pipeline.groovy b/cloud-deploy-pipeline.groovy
index 8802cea..f05735a 100644
--- a/cloud-deploy-pipeline.groovy
+++ b/cloud-deploy-pipeline.groovy
@@ -447,6 +447,9 @@
}
}
+ // install docker swarm
+ orchestrate.installDockerSwarm(venvPepper, extra_tgt)
+
// install openstack
if (common.checkContains('STACK_INSTALL', 'openstack')) {
// install control, tests, ...
@@ -513,7 +516,6 @@
if (common.checkContains('STACK_INSTALL', 'cicd')) {
stage('Install Cicd') {
orchestrate.installInfra(venvPepper, extra_tgt)
- orchestrate.installDockerSwarm(venvPepper, extra_tgt)
orchestrate.installCicd(venvPepper, extra_tgt)
}
}
@@ -527,7 +529,6 @@
if (common.checkContains('STACK_INSTALL', 'stacklight')) {
stage('Install StackLight') {
- orchestrate.installDockerSwarm(venvPepper, extra_tgt)
orchestrate.installStacklight(venvPepper, extra_tgt)
}
}
@@ -536,7 +537,6 @@
stage('Install OSS') {
if (!common.checkContains('STACK_INSTALL', 'stacklight')) {
// If StackLightv2 is enabled, the containers were already started
- orchestrate.installDockerSwarm(venvPepper, extra_tgt)
salt.enforceState(venvPepper, "I@docker:swarm:role:master and I@devops_portal:config ${extra_tgt}", 'docker.client', true)
}
orchestrate.installOss(venvPepper, extra_tgt)
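
This hunk makes the Docker Swarm setup unconditional and moves it ahead of every STACK_INSTALL-gated stage, so the cicd, stacklight, and oss stages below no longer need their own installDockerSwarm calls. For orientation, a minimal sketch of the gating helper those stages rely on (assumed behaviour of com.mirantis.mk.Common.checkContains, simplified):

```groovy
// Sketch only: STACK_INSTALL is assumed to be a comma-separated list,
// e.g. 'core,kvm,cicd,openstack,stacklight'.
def checkContains(String varName, String keyword) {
    // case-insensitive substring match against the job parameter, if it is set
    return env[varName] ? env[varName].toLowerCase().contains(keyword.toLowerCase()) : false
}
```
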
diff --git a/cvp-runner.groovy b/cvp-runner.groovy
index 6b5c0e2..2858e81 100644
--- a/cvp-runner.groovy
+++ b/cvp-runner.groovy
@@ -12,21 +12,21 @@
* PROXY Proxy to use for cloning repo or for pip
* IMAGE Docker image to use for running container with test framework.
 * DEBUG_MODE If you need to debug (keep the container after the test), please enable this
- * To launch tests from cvp_spt docker images need to set IMAGE and left TESTS_REPO empty
+ * To launch tests from docker images, set IMAGE and leave TESTS_REPO empty
*/
common = new com.mirantis.mk.Common()
validate = new com.mirantis.mcp.Validate()
salt = new com.mirantis.mk.Salt()
salt_testing = new com.mirantis.mk.SaltModelTesting()
-def artifacts_dir = "validation_artifacts/"
+def artifacts_dir = "validation_artifacts"
def remote_dir = '/root/qa_results'
def container_workdir = '/var/lib'
-def name = 'cvp-spt'
-def xml_file = "${name}_report.xml"
+def container_name = "${env.JOB_NAME}"
+def xml_file = "${container_name}_report.xml"
def TARGET_NODE = "I@gerrit:client"
def reinstall_env = false
-def container_name = "${env.JOB_NAME}"
+
def saltMaster
def settings
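
Per the header comment, launching tests straight from an image means setting IMAGE and leaving TESTS_REPO empty. An illustrative, entirely hypothetical parameter set:

```groovy
// Hypothetical job parameters (the image path is illustrative, not a real registry entry):
def exampleParams = [
    IMAGE     : 'registry.example.com/cvp/cvp-sanity:latest',
    TESTS_REPO: '',      // left empty so the test framework baked into IMAGE is used
    DEBUG_MODE: 'false', // 'true' keeps the container after the run for debugging
]
```
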
@@ -76,7 +76,7 @@
def creds = common.getCredentials(SALT_MASTER_CREDENTIALS)
def username = creds.username
def password = creds.password
- def script = "pytest --junitxml ${container_workdir}/${artifacts_dir}/${xml_file} --tb=short -sv ${container_workdir}/${TESTS_SET}"
+ def script = "pytest --junitxml ${container_workdir}/${artifacts_dir}/${xml_file} --tb=short -sv ${container_workdir}/${TESTS_SET} -vv"
sh "mkdir -p ${artifacts_dir}"
@@ -86,12 +86,12 @@
'dockerMaxCpus': 2,
'dockerExtraOpts' : [
"-v /root/qa_results/:/root/qa_results/",
- "-v ${env.WORKSPACE}/validation_artifacts/:${container_workdir}/validation_artifacts/",
+ "-v ${env.WORKSPACE}/${artifacts_dir}/:${container_workdir}/${artifacts_dir}/",
+ // TODO: remove once all docker images with tests (like cvp-spt) are migrated to the new architecture (like cvp-sanity)
"--entrypoint=''", // to override ENTRYPOINT=/bin/bash in Dockerfile of image
],
'envOpts' : [
- "WORKSPACE=${container_workdir}/${name}",
"SALT_USERNAME=${username}",
"SALT_PASSWORD=${password}",
"SALT_URL=${SALT_MASTER_URL}"
diff --git a/gating-pipeline.groovy b/gating-pipeline.groovy
index d449cd8..eeb9f71 100644
--- a/gating-pipeline.groovy
+++ b/gating-pipeline.groovy
@@ -10,7 +10,7 @@
def gerrit = new com.mirantis.mk.Gerrit()
def ssh = new com.mirantis.mk.Ssh()
-slaveNode = env.SLAVE_NODE ?: 'docker'
+slaveNode = env.SLAVE_NODE ?: 'virtual'
giveVerify = false
@NonCPS
@@ -39,6 +39,7 @@
ssh.ensureKnownHosts(GERRIT_HOST)
def gerritChange = gerrit.getGerritChange(GERRIT_NAME, GERRIT_HOST, GERRIT_CHANGE_NUMBER, CREDENTIALS_ID, true)
def doSubmit = false
+ def skipProjectsVerify = ['mk/docker-jnlp-slave']
stage("test") {
if (gerritChange.status != "MERGED" && !SKIP_TEST.equals("true")) {
// test max CodeReview
@@ -46,30 +47,35 @@
doSubmit = true
def gerritProjectArray = GERRIT_PROJECT.tokenize("/")
def gerritProject = gerritProjectArray[gerritProjectArray.size() - 1]
- def jobsNamespace = JOBS_NAMESPACE
- def plural_namespaces = ['salt-formulas', 'salt-models']
- // remove plural s on the end of job namespace
- if (JOBS_NAMESPACE in plural_namespaces) {
- jobsNamespace = JOBS_NAMESPACE.substring(0, JOBS_NAMESPACE.length() - 1)
- }
- // salt-formulas tests have -latest on end of the name
- if (JOBS_NAMESPACE.equals("salt-formulas")) {
- gerritProject = gerritProject + "-latest"
- }
- def testJob = String.format("test-%s-%s", jobsNamespace, gerritProject)
- if (env.GERRIT_PROJECT == 'mk/cookiecutter-templates' || env.GERRIT_PROJECT == 'salt-models/reclass-system') {
- callJobWithExtraVars('test-salt-model-ci-wrapper')
+ if (gerritProject in skipProjectsVerify) {
+ common.successMsg("Project ${gerritProject} doesn't require verify, skipping...")
+ giveVerify = true
} else {
- if (isJobExists(testJob)) {
- common.infoMsg("Test job ${testJob} found, running")
- def patchsetVerified = gerrit.patchsetHasApproval(gerritChange.currentPatchSet, "Verified", "+")
- build job: testJob, parameters: [
- [$class: 'StringParameterValue', name: 'DEFAULT_GIT_URL', value: "${GERRIT_SCHEME}://${GERRIT_NAME}@${GERRIT_HOST}:${GERRIT_PORT}/${GERRIT_PROJECT}"],
- [$class: 'StringParameterValue', name: 'DEFAULT_GIT_REF', value: GERRIT_REFSPEC]
- ]
- giveVerify = true
+ def jobsNamespace = JOBS_NAMESPACE
+ def plural_namespaces = ['salt-formulas', 'salt-models']
+ // strip the trailing plural 's' from the job namespace
+ if (JOBS_NAMESPACE in plural_namespaces) {
+ jobsNamespace = JOBS_NAMESPACE.substring(0, JOBS_NAMESPACE.length() - 1)
+ }
+ // salt-formulas test jobs have '-latest' appended to the name
+ if (JOBS_NAMESPACE.equals("salt-formulas")) {
+ gerritProject = gerritProject + "-latest"
+ }
+ def testJob = String.format("test-%s-%s", jobsNamespace, gerritProject)
+ if (env.GERRIT_PROJECT == 'mk/cookiecutter-templates' || env.GERRIT_PROJECT == 'salt-models/reclass-system') {
+ callJobWithExtraVars('test-salt-model-ci-wrapper')
} else {
- common.infoMsg("Test job ${testJob} not found")
+ if (isJobExists(testJob)) {
+ common.infoMsg("Test job ${testJob} found, running")
+ def patchsetVerified = gerrit.patchsetHasApproval(gerritChange.currentPatchSet, "Verified", "+")
+ build job: testJob, parameters: [
+ [$class: 'StringParameterValue', name: 'DEFAULT_GIT_URL', value: "${GERRIT_SCHEME}://${GERRIT_NAME}@${GERRIT_HOST}:${GERRIT_PORT}/${GERRIT_PROJECT}"],
+ [$class: 'StringParameterValue', name: 'DEFAULT_GIT_REF', value: GERRIT_REFSPEC]
+ ]
+ giveVerify = true
+ } else {
+ common.infoMsg("Test job ${testJob} not found")
+ }
}
}
} else {
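
The restructure above is mostly re-indentation: the only behavioural change is the early giveVerify for projects listed in skipProjectsVerify. The isJobExists helper the remaining branch falls through to is defined elsewhere in this file under @NonCPS; a representative (assumed) shape:

```groovy
@NonCPS
def isJobExists(jobName) {
    // touches the Jenkins model directly, hence @NonCPS (not CPS-serializable)
    return Jenkins.instance.items.find { it.name == jobName } != null
}
```
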
diff --git a/generate-cookiecutter-products.groovy b/generate-cookiecutter-products.groovy
index 33a3f60..9df7611 100644
--- a/generate-cookiecutter-products.groovy
+++ b/generate-cookiecutter-products.groovy
@@ -132,10 +132,35 @@
}
stage('Generate model') {
- python.setupCookiecutterVirtualenv(cutterEnv)
- // FIXME refactor generateModel
- python.generateModel(common2.dumpYAML(['default_context': context]), 'default_context', context['salt_master_hostname'], cutterEnv, modelEnv, templateEnv, false)
- git.commitGitChanges(modelEnv, "Create model ${context['cluster_name']}", "${user}@localhost", "${user}")
+ // GNUPGHOME environment variable is required for all gpg commands
+ // and for python.generateModel execution
+ withEnv(["GNUPGHOME=${env.WORKSPACE}/gpghome"]) {
+ if (context['secrets_encryption_enabled'] == 'True') {
+ sh "mkdir gpghome; chmod 700 gpghome"
+ def secretKeyID = RequesterEmail ?: "salt@${context['cluster_domain']}".toString()
+ if (!context.get('secrets_encryption_private_key')) {
+ def batchData = """
+ Key-Type: 1
+ Key-Length: 4096
+ Expire-Date: 0
+ Name-Real: ${context['salt_master_hostname']}.${context['cluster_domain']}
+ Name-Email: ${secretKeyID}
+ """.stripIndent()
+ writeFile file:'gpg-batch.txt', text:batchData
+ sh "gpg --gen-key --batch < gpg-batch.txt"
+ sh "gpg --export-secret-key -a ${secretKeyID} > gpgkey.asc"
+ } else {
+ writeFile file:'gpgkey.asc', text:context['secrets_encryption_private_key']
+ sh "gpg --import gpgkey.asc"
+ secretKeyID = sh(returnStdout: true, script: 'gpg --list-secret-keys --with-colons | awk -F: -e "/^sec/{print \\$5; exit}"').trim()
+ }
+ context['secrets_encryption_key_id'] = secretKeyID
+ }
+ python.setupCookiecutterVirtualenv(cutterEnv)
+ // FIXME refactor generateModel
+ python.generateModel(common2.dumpYAML(['default_context': context]), 'default_context', context['salt_master_hostname'], cutterEnv, modelEnv, templateEnv, false)
+ git.commitGitChanges(modelEnv, "Create model ${context['cluster_name']}", "${user}@localhost", "${user}")
+ }
}
stage("Test") {
@@ -183,6 +208,9 @@
sh "git clone --mirror https://github.com/Mirantis/mk-pipelines.git ${pipelineEnv}/mk-pipelines"
sh "git clone --mirror https://github.com/Mirantis/pipeline-library.git ${pipelineEnv}/pipeline-library"
args = "--user-data user_data --hostname ${context['salt_master_hostname']} --model ${modelEnv} --mk-pipelines ${pipelineEnv}/mk-pipelines/ --pipeline-library ${pipelineEnv}/pipeline-library/ ${context['salt_master_hostname']}.${context['cluster_domain']}-config.iso"
+ if (context['secrets_encryption_enabled'] == 'True') {
+ args = "--gpg-key gpgkey.asc " + args
+ }
// load data from model
def smc = [:]
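
The secrets-encryption branch above either batch-generates a 4096-bit key or imports a supplied one, exports it to gpgkey.asc, and hands it to the config-drive build via --gpg-key. A hedged sketch of how the resulting key could be double-checked in the same GNUPGHOME, reusing the diff's own awk extraction (the long key ID is field 5 of the colon-delimited sec record):

```groovy
// Sketch, assuming the gpghome/ layout created in the 'Generate model' stage above:
withEnv(["GNUPGHOME=${env.WORKSPACE}/gpghome"]) {
    // the long key ID lives in field 5 of the 'sec' record of --with-colons output
    def keyID = sh(returnStdout: true,
        script: "gpg --list-secret-keys --with-colons | awk -F: '/^sec/{print \$5; exit}'").trim()
    echo "secrets_encryption_key_id resolved to: ${keyID}"
}
```
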
diff --git a/k8s-upgrade-pipeline.groovy b/k8s-upgrade-pipeline.groovy
index a09ae85..1c168f3 100644
--- a/k8s-upgrade-pipeline.groovy
+++ b/k8s-upgrade-pipeline.groovy
@@ -139,11 +139,15 @@
}
}
-def performCalicoConfigurationUpdateAndServicesRestart(pepperEnv, target) {
+def performCalicoConfigurationUpdateAndServicesRestart(pepperEnv, target, ctl_node) {
def salt = new com.mirantis.mk.Salt()
stage("Performing Calico configuration update and services restart") {
- salt.enforceState(pepperEnv, target, "kubernetes.pool.calico")
+ if (containerDenabled(pepperEnv, ctl_node)) {
+ salt.enforceState(pepperEnv, target, "kubernetes.pool")
+ } else {
+ salt.enforceState(pepperEnv, target, "kubernetes.pool.calico")
+ }
salt.runSaltProcessStep(pepperEnv, target, 'service.restart', ['kubelet'])
}
}
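
containerDenabled (read: "containerd enabled") is referenced here but is not part of this diff; it is presumably a pillar probe in the same style as the calicoEnabled helper added below. A sketch under that assumption (the pillar path is a guess):

```groovy
def containerDenabled(pepperEnv, target) {
    def salt = new com.mirantis.mk.Salt()
    // hypothetical pillar path, mirroring the calicoEnabled() probe added further down
    return salt.getPillar(pepperEnv, target, "kubernetes:common:containerd:enabled"
    )["return"][0].values()[0].toBoolean()
}
```
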
@@ -279,14 +283,141 @@
)['return'][0].values()[0].replaceAll('Salt command execution success','').trim().toBoolean()
}
-def checkCalicoUpgradeSuccessful(pepperEnv, target) {
+def calicoEnabled(pepperEnv, target) {
+ def salt = new com.mirantis.mk.Salt()
+ return salt.getPillar(pepperEnv, target, "kubernetes:pool:network:calico:enabled"
+ )["return"][0].values()[0].toBoolean()
+}
+
+def checkCalicoClusterState(pepperEnv, target) {
+ def common = new com.mirantis.mk.Common()
def salt = new com.mirantis.mk.Salt()
- stage("Checking cluster state after Calico upgrade") {
- // TODO add auto-check of results
- salt.cmdRun(pepperEnv, target, "calicoctl version | grep -i version")
- salt.cmdRun(pepperEnv, target, "calicoctl node status")
- salt.cmdRun(pepperEnv, target, "calicoctl node checksystem")
+ stage("Checking Calico cluster state after upgrade") {
+ // check the Calico cluster and CLI client versions
+ def checkVer = [
+ "Client Version:": [verStr: "", dif: false, wrong: false],
+ "Cluster Version:": [verStr: "", dif: false, wrong: false]
+ ]
+ def checkVerPassed = true
+ def versionResults = salt.cmdRun(pepperEnv, target, "calicoctl version | grep -i version")['return'][0]
+ versionResults.each { k, v ->
+ // println("Node:\n${k}\nResult:\n${v}")
+ for (verLine in v.split("\n")) {
+ for (verType in checkVer.keySet()) {
+ if (verLine.contains(verType)) {
+ def verRec = checkVer[verType]
+ ver = (verLine - verType).trim()
+ if (!verRec.verStr) {
+ verRec.verStr = ver
+ }
+ if (verRec.verStr != ver) {
+ verRec.dif = true
+ checkVerPassed = false
+ }
+ version = ver.tokenize(".")
+ if ((version.size() < 3) || (version[0] != "v3")) {
+ verRec.wrong = true
+ checkVerPassed = false
+ }
+ checkVer[verType] = verRec
+ }
+ }
+ }
+ }
+ if (checkVerPassed) {
+ common.infoMsg("Calico version verification passed")
+ }
+ else {
+ def warningMsg = "Calico version verification failed.\n"
+ checkVer.each { k, rec ->
+ if (rec.dif) {
+ warningMsg += "${k} versions are different across nodes.\n"
+ }
+ if (rec.wrong) {
+ warningMsg += "${k} (some) versions are wrong, expected v3.x.\n"
+ }
+ }
+ common.warningMsg(warningMsg)
+ currentBuild.description += "<br><b>${warningMsg}</b><br><br>"
+ }
+
+ // check Calico nodes' statuses
+ def nodeStatusResults = salt.cmdRun(pepperEnv, target, "calicoctl node status")['return'][0]
+ def nodesRunning = true
+ def peersNotFound = []
+ def peersNotOnline = []
+ nodeStatusResults.each { k, v ->
+ // println("Node:\n${k}\nResult:\n${v}")
+ if (!v.contains("Calico process is running")) {
+ nodesRunning = false
+ def warningMsg = "Node ${k}: Calico node is not running."
+ common.warningMsg(warningMsg)
+ currentBuild.description += "<br><b>${warningMsg}</b><br><br>"
+ }
+ def nodePeersFound = false
+ def nodePeersOnline = true
+ for (nodeLine in v.split("\n")) {
+ if (nodeLine.contains("|") && (!nodeLine.contains("STATE"))) {
+ def col = nodeLine.tokenize("|").collect{it.trim()}
+ if (col.size() == 5) {
+ nodePeersFound = true
+ if ((col[2] != "up") || (col[4] != "Established")) {
+ def warningMsg = "Node ${k}: BGP peer '${col[0]}' is out of reach. Peer state: '${col[2]}', connection info: '${col[4]}'."
+ common.warningMsg(warningMsg)
+ currentBuild.description += "<br><b>${warningMsg}</b><br><br>"
+ nodePeersOnline = false
+ }
+ }
+ }
+ }
+ if (!nodePeersFound) {
+ peersNotFound += k
+ }
+ if (!nodePeersOnline) {
+ peersNotOnline += k
+ }
+ }
+ if (nodesRunning) {
+ common.infoMsg("All the Calico nodes are running")
+ }
+ if (peersNotFound) {
+ def warningMsg = "BGP peers not found for the node(s): " + peersNotFound.join(', ') + "."
+ common.warningMsg(warningMsg)
+ currentBuild.description += "<br><b>${warningMsg}</b><br><br>"
+ } else {
+ common.infoMsg("BGP peers were found for all the nodes")
+ }
+ if (!peersNotOnline) {
+ common.infoMsg("All reported BGP peers are reachable")
+ }
+
+ // check that 'calico-kube-controllers' is running
+ // one CTL node is used to query the pod state via kubectl
+ def ctl_node = salt.getMinionsSorted(pepperEnv, CTL_TARGET)[0]
+ def kubeCtrlResult = salt.cmdRun(
+ pepperEnv, ctl_node, "kubectl get pod -n kube-system --selector=k8s-app=calico-kube-controllers"
+ )['return'][0].values()[0].toString()
+ if (kubeCtrlResult.contains("calico-kube-controllers")) {
+ for (line in kubeCtrlResult.split("\n")) {
+ if (line.contains("calico-kube-controllers")) {
+ col = line.tokenize(" ")
+ if ((col[1] != "1/1") || (col[2] != "Running")) {
+ def warningMsg = "Calico kube-controllers pod is not running properly."
+ common.warningMsg(warningMsg)
+ currentBuild.description += "<br><b>${warningMsg}</b><br><br>"
+ }
+ else {
+ common.infoMsg("Calico kube-controllers pod is running.")
+ }
+ break
+ }
+ }
+ } else {
+ def warningMsg = "Calico kube-controllers pod was not scheduled."
+ common.warningMsg(warningMsg)
+ currentBuild.description += "<br><b>${warningMsg}</b><br><br>"
+ }
}
}
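
The peer-row parsing above depends on the table layout of calicoctl node status. A self-contained illustration against a representative v3.x row (format taken from typical Calico output; treat the sample values as assumptions):

```groovy
// One data row of the 'calicoctl node status' BGP table:
def nodeLine = '| 10.0.0.2     | node-to-node mesh | up    | 23:30:04 | Established |'
def col = nodeLine.tokenize('|').collect { it.trim() }
assert col.size() == 5           // PEER ADDRESS, PEER TYPE, STATE, SINCE, INFO
assert col[2] == 'up'            // STATE column checked by the pipeline
assert col[4] == 'Established'   // INFO column checked by the pipeline
```
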
@@ -494,11 +625,9 @@
// this sequence implies workloads operations downtime
startCalicoUpgrade(pepperEnv, ctl_node)
- performCalicoConfigurationUpdateAndServicesRestart(pepperEnv, POOL)
+ performCalicoConfigurationUpdateAndServicesRestart(pepperEnv, POOL, ctl_node)
completeCalicoUpgrade(pepperEnv, ctl_node)
- // after that no downtime is expected
-
- checkCalicoUpgradeSuccessful(pepperEnv, POOL)
+ // no downtime is expected after this point
}
/*
@@ -561,6 +690,11 @@
}
}
+ def ctl_node = salt.getMinionsSorted(pepperEnv, CTL_TARGET)[0]
+ if (calicoEnabled(pepperEnv, ctl_node)) {
+ checkCalicoClusterState(pepperEnv, POOL)
+ }
+
if (CONFORMANCE_RUN_AFTER.toBoolean()) {
def target = CTL_TARGET
def mcp_repo = ARTIFACTORY_URL
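
Likewise, the version check in checkCalicoClusterState keys on the "Client Version:" and "Cluster Version:" lines of calicoctl version. A small worked example of that extraction (the sample value is illustrative):

```groovy
// Representative 'calicoctl version' lines the parser matches on:
//   Client Version:    v3.1.3
//   Cluster Version:   v3.1.3
def verLine = 'Cluster Version:   v3.1.3'
def ver = (verLine - 'Cluster Version:').trim() // String minus strips the first occurrence
assert ver == 'v3.1.3'
assert ver.tokenize('.')[0] == 'v3'             // the upgrade expects a v3.x cluster
```
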
diff --git a/test-salt-formulas-pipeline.groovy b/test-salt-formulas-pipeline.groovy
index 45eeef8..52c7d79 100644
--- a/test-salt-formulas-pipeline.groovy
+++ b/test-salt-formulas-pipeline.groovy
@@ -62,7 +62,7 @@
]
}
-timeout(time: 2, unit: 'HOURS') {
+timeout(time: 4, unit: 'HOURS') {
node(slaveNode) {
try {
if (fileExists("tests/build")) {