Merge "Updates for stacklight upgrade pipeline"
diff --git a/aptly-promote-pipeline.groovy b/aptly-promote-pipeline.groovy
index b68bc81..1d12d97 100644
--- a/aptly-promote-pipeline.groovy
+++ b/aptly-promote-pipeline.groovy
@@ -28,22 +28,24 @@
timeout(time: 12, unit: 'HOURS') {
node("docker&&hardware") {
try {
+ if ("testing" in TARGET && !jenkinsUtils.currentUserInGroup(["release-engineering", "aptly-promote-users"])) {
+ insufficientPermissions = true
+ throw new Exception("Only release-engineering or aptly-promote-users can perform promote to testing.")
+ } else if (!jenkinsUtils.currentUserInGroup(["release-engineering"])) {
+ insufficientPermissions = true
+ throw new Exception("Only release-engineering team can perform promote.")
+ }
stage("promote") {
// promote is restricted to users in aptly-promote-users LDAP group
- if (jenkinsUtils.currentUserInGroups(["mcp-cicd-admins", "aptly-promote-users"])) {
- lock("aptly-api") {
- for (storage in storages) {
- if (storage == "local") {
- storage = ""
- }
- retry(2) {
- aptly.promotePublish(APTLY_URL, SOURCE, TARGET, RECREATE, components, packages, DIFF_ONLY, '-d --timeout 600', DUMP_PUBLISH.toBoolean(), storage)
- }
+ lock("aptly-api") {
+ for (storage in storages) {
+ if (storage == "local") {
+ storage = ""
+ }
+ retry(2) {
+ aptly.promotePublish(APTLY_URL, SOURCE, TARGET, RECREATE, components, packages, DIFF_ONLY, '-d --timeout 600', DUMP_PUBLISH.toBoolean(), storage)
}
}
- } else {
- insufficientPermissions = true
- throw new Exception(String.format("You don't have permissions to make aptly promote from source:%s to target:%s! Only CI/CD and QA team can perform aptly promote.", SOURCE, TARGET))
}
}
} catch (Throwable e) {
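Note on the new permission gate in aptly-promote-pipeline.groovy: per the exception messages, a promote to a testing target is open to the release-engineering and aptly-promote-users LDAP groups, while any other promote requires release-engineering. A minimal sketch of that rule, where canPromote() and userGroups are illustrative stand-ins for the jenkinsUtils.currentUserInGroup() checks:

    // Illustrative only: the intended gate, with userGroups standing in for the
    // LDAP groups resolved by jenkinsUtils.currentUserInGroup() in the pipeline.
    def canPromote(String target, List<String> userGroups) {
        if ('testing' in target) {
            // testing targets: release-engineering or aptly-promote-users
            return !userGroups.intersect(['release-engineering', 'aptly-promote-users']).isEmpty()
        }
        // every other target: release-engineering only
        return 'release-engineering' in userGroups
    }
    assert canPromote('testing', ['aptly-promote-users'])
    assert !canPromote('stable', ['aptly-promote-users'])
    assert canPromote('stable', ['release-engineering'])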
diff --git a/cloud-update.groovy b/cloud-update.groovy
index 8dea65b..92701bd 100644
--- a/cloud-update.groovy
+++ b/cloud-update.groovy
@@ -10,14 +10,12 @@
* PER_NODE Target nodes will be managed one by one (bool)
* ROLLBACK_BY_REDEPLOY Omit taking live snapshots. Rollback is planned to be done by redeployment (bool)
* STOP_SERVICES Stop API services before update (bool)
- * TARGET_KERNEL_UPDATES Comma separated list of nodes to update kernel if newer version is available (Valid values are cfg,ctl,prx,msg,dbs,log,mon,mtr,ntw,nal,gtw-virtual,cmn,rgw,cid,cmp,kvm,osd,gtw-physical)
- * TARGET_REBOOT Comma separated list of nodes to reboot after update or physical machine rollback (Valid values are cfg,ctl,prx,msg,dbs,log,mon,mtr,ntw,nal,gtw-virtual,cmn,rgw,cid,cmp,kvm,osd,gtw-physical)
- * TARGET_HIGHSTATE Comma separated list of nodes to run Salt Highstate on after update or physical machine rollback (Valid values are cfg,ctl,prx,msg,dbs,log,mon,mtr,ntw,nal,gtw-virtual,cmn,rgw,cid,cmp,kvm,osd,gtw-physical)
- * TARGET_UPDATES Comma separated list of nodes to update (Valid values are cfg,ctl,prx,msg,dbs,log,mon,mtr,ntw,nal,gtw-virtual,cmn,rgw,cid,cmp,kvm,osd,gtw-physical)
- * TARGET_ROLLBACKS Comma separated list of nodes to rollback (Valid values are ctl,prx,msg,dbs,log,mon,mtr,ntw,nal,gtw-virtual,cmn,rgw,cmp,kvm,osd,gtw-physical)
- * TARGET_SNAPSHOT_MERGES Comma separated list of nodes to merge live snapshot for (Valid values are cfg,ctl,prx,msg,dbs,log,mon,mtr,ntw,nal,gtw-virtual,cmn,rgw,cid)
- * CTL_TARGET Salt targeted CTL nodes (ex. ctl*)
- * PRX_TARGET Salt targeted PRX nodes (ex. prx*)
+ * TARGET_KERNEL_UPDATES Comma separated list of nodes to update kernel if newer version is available (Valid values are cfg,msg,dbs,log,mon,mtr,ntw,nal,cmn,rgw,cid,kvm,osd)
+ * TARGET_REBOOT Comma separated list of nodes to reboot after update or physical machine rollback (Valid values are cfg,msg,dbs,log,mon,mtr,ntw,nal,cmn,rgw,cid,kvm,osd)
+ * TARGET_HIGHSTATE Comma separated list of nodes to run Salt Highstate on after update or physical machine rollback (Valid values are cfg,msg,dbs,log,mon,mtr,ntw,nal,cmn,rgw,cid,kvm,osd)
+ * TARGET_UPDATES Comma separated list of nodes to update (Valid values are cfg,msg,dbs,log,mon,mtr,ntw,nal,cmn,rgw,cid,kvm,osd)
+ * TARGET_ROLLBACKS Comma separated list of nodes to rollback (Valid values are msg,dbs,log,mon,mtr,ntw,nal,cmn,rgw,kvm,osd)
+ * TARGET_SNAPSHOT_MERGES Comma separated list of nodes to merge live snapshot for (Valid values are cfg,msg,dbs,log,mon,mtr,ntw,nal,cmn,rgw,cid)
* MSG_TARGET Salt targeted MSG nodes (ex. msg*)
* DBS_TARGET Salt targeted DBS nodes (ex. dbs*)
* LOG_TARGET Salt targeted LOG nodes (ex. log*)
@@ -28,10 +26,8 @@
* CMN_TARGET Salt targeted CMN nodes (ex. cmn*)
* RGW_TARGET Salt targeted RGW nodes (ex. rgw*)
* CID_TARGET Salt targeted CID nodes (ex. cid*)
- * CMP_TARGET Salt targeted physical compute nodes (ex. cmp001*)
* KVM_TARGET Salt targeted physical KVM nodes (ex. kvm01*)
* CEPH_OSD_TARGET Salt targeted physical Ceph OSD nodes (ex. osd001*)
- * GTW_TARGET Salt targeted physical or virtual GTW nodes (ex. gtw01*)
* ROLLBACK_PKG_VERSIONS Space separated list of pkgs=versions to rollback to on physical targeted machines (ex. pkg_name1=pkg_version1 pkg_name2=pkg_version2)
* PURGE_PKGS Space separated list of pkgs=versions to be purged on physical targeted machines (ex. pkg_name1=pkg_version1 pkg_name2=pkg_version2)
* REMOVE_PKGS Space separated list of pkgs=versions to be removed on physical targeted machines (ex. pkg_name1=pkg_version1 pkg_name2=pkg_version2)
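The TARGET_* parameters above stay plain comma-separated strings; further down the pipeline they are consumed through contains() checks per node type. A minimal sketch of that flow (the tokenize step is an assumption; only the contains() checks appear in this diff):

    // Illustrative only: split the job parameter into node types and dispatch per type,
    // mirroring the updates.contains(...) blocks kept below.
    def updates = TARGET_UPDATES.tokenize(',').collect { it.trim() }
    if (updates.contains('msg')) {
        // snapshot, update packages, run highstate and verify MSG_TARGET nodes
    }
    if (updates.contains('kvm')) {
        // update packages and run highstate on physical KVM_TARGET nodes
    }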
@@ -107,7 +103,7 @@
if (targetPackages != "") {
// list installed versions of pkgs that will be upgraded
- if (targetType == 'kvm' || targetType == 'cmp' || targetType == 'osd' || targetType == 'gtw-physical') {
+ if (targetType == 'kvm' || targetType == 'osd') {
def installedPkgs = []
def newPkgs = []
def targetPkgList = targetPackages.tokenize(',')
@@ -893,48 +889,6 @@
}
}
- if (updates.contains("ctl")) {
- def target = CTL_TARGET
- def type = 'ctl'
- if (salt.testTarget(pepperEnv, target)) {
- if (!ROLLBACK_BY_REDEPLOY.toBoolean()) {
- liveSnapshot(pepperEnv, target, type)
- }
- if (PER_NODE.toBoolean()) {
- def targetHosts = salt.getMinionsSorted(pepperEnv, target)
- for (t in targetHosts) {
- updatePkgs(pepperEnv, t, type)
- highstate(pepperEnv, t, type)
- }
- } else {
- updatePkgs(pepperEnv, target, type)
- highstate(pepperEnv, target, type)
- }
- verifyAPIs(pepperEnv, target)
- }
- }
-
- if (updates.contains("prx")) {
- def target = PRX_TARGET
- def type = 'prx'
- if (salt.testTarget(pepperEnv, target)) {
- if (!ROLLBACK_BY_REDEPLOY.toBoolean()) {
- liveSnapshot(pepperEnv, target, type)
- }
- if (PER_NODE.toBoolean()) {
- def targetHosts = salt.getMinionsSorted(pepperEnv, target)
- for (t in targetHosts) {
- updatePkgs(pepperEnv, t, type)
- highstate(pepperEnv, t, type)
- }
- } else {
- updatePkgs(pepperEnv, target, type)
- highstate(pepperEnv, target, type)
- }
- verifyService(pepperEnv, target, 'nginx')
- }
- }
-
if (updates.contains("msg")) {
def target = MSG_TARGET
def type = 'msg'
@@ -1023,27 +977,6 @@
}
}
- if (updates.contains("gtw-virtual")) {
- def target = GTW_TARGET
- def type = 'gtw-virtual'
- if (salt.testTarget(pepperEnv, target)) {
- if (!ROLLBACK_BY_REDEPLOY.toBoolean()) {
- liveSnapshot(pepperEnv, target, type)
- }
- if (PER_NODE.toBoolean()) {
- def targetHosts = salt.getMinionsSorted(pepperEnv, target)
- for (t in targetHosts) {
- updatePkgs(pepperEnv, t, type)
- highstate(pepperEnv, t, type)
- }
- } else {
- updatePkgs(pepperEnv, target, type)
- highstate(pepperEnv, target, type)
- }
- verifyService(pepperEnv, target, 'neutron-dhcp-agent')
- }
- }
-
if (updates.contains("cmn")) {
def target = CMN_TARGET
def type = 'cmn'
@@ -1161,27 +1094,6 @@
}
}
- //
- //physical machines update CMP_TARGET
- //
- if (updates.contains("cmp")) {
- def target = CMP_TARGET
- def type = 'cmp'
- if (salt.testTarget(pepperEnv, target)) {
- if (PER_NODE.toBoolean()) {
- def targetHosts = salt.getMinionsSorted(pepperEnv, target)
- for (t in targetHosts) {
- updatePkgs(pepperEnv, t, type)
- highstate(pepperEnv, t, type)
- }
- } else {
- updatePkgs(pepperEnv, target, type)
- highstate(pepperEnv, target, type)
- }
- verifyService(pepperEnv, target, 'nova-compute')
- }
- }
-
if (updates.contains("kvm")) {
def target = KVM_TARGET
def type = 'kvm'
@@ -1218,24 +1130,6 @@
}
}
- if (updates.contains("gtw-physical")) {
- def target = GTW_TARGET
- def type = 'gtw-physical'
- if (salt.testTarget(pepperEnv, target)) {
- if (PER_NODE.toBoolean()) {
- def targetHosts = salt.getMinionsSorted(pepperEnv, target)
- for (t in targetHosts) {
- updatePkgs(pepperEnv, t, type)
- highstate(pepperEnv, t, type)
- }
- } else {
- updatePkgs(pepperEnv, target, type)
- highstate(pepperEnv, target, type)
- }
- verifyService(pepperEnv, target, 'neutron-dhcp-agent')
- }
- }
-
/*
* Rollback section
*/
@@ -1249,30 +1143,6 @@
}
} */
- if (rollbacks.contains("ctl")) {
- def target = CTL_TARGET
- if (salt.testTarget(pepperEnv, target)) {
- if (!ROLLBACK_BY_REDEPLOY.toBoolean()) {
- rollback(pepperEnv, target, 'ctl')
- verifyAPIs(pepperEnv, target)
- } else {
- removeNode(pepperEnv, target, 'ctl')
- }
- }
- }
-
- if (rollbacks.contains("prx")) {
- def target = PRX_TARGET
- if (salt.testTarget(pepperEnv, target)) {
- if (!ROLLBACK_BY_REDEPLOY.toBoolean()) {
- rollback(pepperEnv, target, 'prx')
- verifyService(pepperEnv, target, 'nginx')
- } else {
- removeNode(pepperEnv, target, 'prx')
- }
- }
- }
-
if (rollbacks.contains("msg")) {
def target = MSG_TARGET
if (salt.testTarget(pepperEnv, target)) {
@@ -1323,18 +1193,6 @@
}
}
- if (rollbacks.contains("gtw-virtual")) {
- def target = GTW_TARGET
- if (salt.testTarget(pepperEnv, target)) {
- if (!ROLLBACK_BY_REDEPLOY.toBoolean()) {
- rollback(pepperEnv, target, 'gtw')
- verifyService(pepperEnv, target, 'neutron-dhcp-agent')
- } else {
- removeNode(pepperEnv, target, 'gtw')
- }
- }
- }
-
if (rollbacks.contains("cmn")) {
def target = CMN_TARGET
if (salt.testTarget(pepperEnv, target)) {
@@ -1401,27 +1259,6 @@
}
} */
- //
- //physical machines rollback CMP_TARGET
- //
- if (rollbacks.contains("cmp")) {
- def target = CMP_TARGET
- def type = 'cmp'
- if (salt.testTarget(pepperEnv, target)) {
- if (PER_NODE.toBoolean()) {
- def targetHosts = salt.getMinionsSorted(pepperEnv, target)
- for (t in targetHosts) {
- rollbackPkgs(pepperEnv, t)
- highstate(pepperEnv, t, type)
- }
- } else {
- rollbackPkgs(pepperEnv, target, target)
- highstate(pepperEnv, target, type)
- }
- verifyService(pepperEnv, target, 'nova-compute')
- }
- }
-
if (rollbacks.contains("kvm")) {
def target = KVM_TARGET
def type = 'kvm'
@@ -1458,24 +1295,6 @@
}
}
- if (rollbacks.contains("gtw-physical")) {
- def target = GTW_TARGET
- def type = 'gtw-physical'
- if (salt.testTarget(pepperEnv, target)) {
- if (PER_NODE.toBoolean()) {
- def targetHosts = salt.getMinionsSorted(pepperEnv, target)
- for (t in targetHosts) {
- rollbackPkgs(pepperEnv, t)
- highstate(pepperEnv, t, type)
- }
- } else {
- rollbackPkgs(pepperEnv, target, target)
- highstate(pepperEnv, target, type)
- }
- verifyService(pepperEnv, target, 'neutron-dhcp-agent')
- }
- }
-
/*
* Merge snapshots section
*/
@@ -1485,20 +1304,6 @@
}
}
- if (merges.contains("ctl")) {
- if (salt.testTarget(pepperEnv, CTL_TARGET)) {
- mergeSnapshot(pepperEnv, CTL_TARGET, 'ctl')
- verifyService(pepperEnv, CTL_TARGET, 'nova-api')
- }
- }
-
- if (merges.contains("prx")) {
- if (salt.testTarget(pepperEnv, PRX_TARGET)) {
- mergeSnapshot(pepperEnv, PRX_TARGET, 'prx')
- verifyService(pepperEnv, PRX_TARGET, 'nginx')
- }
- }
-
if (merges.contains("msg")) {
if (salt.testTarget(pepperEnv, MSG_TARGET)) {
mergeSnapshot(pepperEnv, MSG_TARGET, 'msg')
@@ -1529,13 +1334,6 @@
}
}
- if (merges.contains("gtw-virtual")) {
- if (salt.testTarget(pepperEnv, GTW_TARGET)) {
- mergeSnapshot(pepperEnv, GTW_TARGET, 'gtw')
- verifyService(pepperEnv, GTW_TARGET, 'neutron-dhcp-agent')
- }
- }
-
if (merges.contains("cmn")) {
if (salt.testTarget(pepperEnv, CMN_TARGET)) {
mergeSnapshot(pepperEnv, CMN_TARGET, 'cmn')
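Each per-type block that remains in cloud-update.groovy (and each removed ctl/prx/cmp/gtw block above) follows the same shape; roughly:

    // Common shape of one update block (sketch; type, snapshot handling and the final
    // verification step differ per node type).
    if (salt.testTarget(pepperEnv, target)) {
        if (!ROLLBACK_BY_REDEPLOY.toBoolean()) {
            liveSnapshot(pepperEnv, target, type)      // virtual nodes only
        }
        if (PER_NODE.toBoolean()) {
            for (t in salt.getMinionsSorted(pepperEnv, target)) {
                updatePkgs(pepperEnv, t, type)
                highstate(pepperEnv, t, type)
            }
        } else {
            updatePkgs(pepperEnv, target, type)
            highstate(pepperEnv, target, type)
        }
        verifyService(pepperEnv, target, 'some-service')   // or verifyAPIs(...), per type
    }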
diff --git a/cvp-runner.groovy b/cvp-runner.groovy
index 7cf8e28..6b5c0e2 100644
--- a/cvp-runner.groovy
+++ b/cvp-runner.groovy
@@ -10,27 +10,34 @@
* TESTS_REPO Repo to clone
 * TESTS_SETTINGS Additional environment variables to apply
* PROXY Proxy to use for cloning repo or for pip
- * TEST_IMAGE Docker image link or name to use for running container with test framework.
+ * IMAGE Docker image to use for running container with test framework.
 * DEBUG_MODE If you need to debug (keep container after test), please enable this
- *
+ * To launch tests from the cvp-spt docker image, set IMAGE and leave TESTS_REPO empty
*/
common = new com.mirantis.mk.Common()
validate = new com.mirantis.mcp.Validate()
salt = new com.mirantis.mk.Salt()
-def artifacts_dir = 'validation_artifacts/'
-def remote_dir = '/root/qa_results/'
+salt_testing = new com.mirantis.mk.SaltModelTesting()
+def artifacts_dir = "validation_artifacts/"
+def remote_dir = '/root/qa_results'
def container_workdir = '/var/lib'
+def name = 'cvp-spt'
+def xml_file = "${name}_report.xml"
def TARGET_NODE = "I@gerrit:client"
def reinstall_env = false
def container_name = "${env.JOB_NAME}"
def saltMaster
def settings
-node() {
+slaveNode = (env.getProperty('SLAVE_NODE')) ?: 'docker'
+imageName = (env.getProperty('IMAGE')) ?: 'docker-prod-local.docker.mirantis.net/mirantis/cvp/cvp-spt:stable'
+
+node(slaveNode) {
try{
stage('Initialization') {
sh "rm -rf ${artifacts_dir}"
+ // TODO collapse TESTS_SETTINGS flow into an EXTRA variables map
if ( TESTS_SETTINGS != "" ) {
for (var in TESTS_SETTINGS.tokenize(";")) {
key = var.tokenize("=")[0].trim()
@@ -50,11 +57,11 @@
validate.prepareVenv(TESTS_REPO, PROXY)
} else {
saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
- salt.cmdRun(saltMaster, TARGET_NODE, "rm -rf ${remote_dir}")
- salt.cmdRun(saltMaster, TARGET_NODE, "mkdir -p ${remote_dir}")
+ salt.cmdRun(saltMaster, TARGET_NODE, "rm -rf ${remote_dir}/")
+ salt.cmdRun(saltMaster, TARGET_NODE, "mkdir -p ${remote_dir}/")
validate.runContainer(saltMaster, TARGET_NODE, IMAGE, container_name)
if ( TESTS_REPO != "") {
- salt.cmdRun(saltMaster, TARGET_NODE, "docker exec ${container_name} rm -rf ${container_workdir}/cvp*")
+ salt.cmdRun(saltMaster, TARGET_NODE, "docker exec ${container_name} rm -rf ${container_workdir}/${container_name}")
salt.cmdRun(saltMaster, TARGET_NODE, "docker exec ${container_name} git clone ${TESTS_REPO} ${container_workdir}/${container_name}")
TESTS_SET = container_workdir + '/' + container_name + '/' + TESTS_SET
if ( reinstall_env ) {
@@ -66,8 +73,36 @@
}
stage('Run Tests') {
+ def creds = common.getCredentials(SALT_MASTER_CREDENTIALS)
+ def username = creds.username
+ def password = creds.password
+ def script = "pytest --junitxml ${container_workdir}/${artifacts_dir}/${xml_file} --tb=short -sv ${container_workdir}/${TESTS_SET}"
+
sh "mkdir -p ${artifacts_dir}"
- validate.runPyTests(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS, TESTS_SET, TESTS_SETTINGS.tokenize(";"), container_name, TARGET_NODE, remote_dir, artifacts_dir)
+
+ def configRun = [
+ 'image': imageName,
+ 'baseRepoPreConfig': false,
+ 'dockerMaxCpus': 2,
+ 'dockerExtraOpts' : [
+ "-v /root/qa_results/:/root/qa_results/",
+ "-v ${env.WORKSPACE}/validation_artifacts/:${container_workdir}/validation_artifacts/",
+ "--entrypoint=''", // to override ENTRYPOINT=/bin/bash in Dockerfile of image
+ ],
+
+ 'envOpts' : [
+ "WORKSPACE=${container_workdir}/${name}",
+ "SALT_USERNAME=${username}",
+ "SALT_PASSWORD=${password}",
+ "SALT_URL=${SALT_MASTER_URL}"
+ ] + TESTS_SETTINGS.replaceAll('\\"', '').tokenize(";"),
+ 'runCommands' : [
+ '010_start_tests' : {
+ sh("cd ${container_workdir} && ${script}")
+ }
+ ]
+ ]
+ salt_testing.setupDockerAndTest(configRun)
}
stage ('Publish results') {
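TESTS_SETTINGS in cvp-runner.groovy is a semicolon-separated KEY=VALUE string: the Initialization stage splits it on ';' and '=', and the new Run Tests stage strips double quotes and appends the pairs directly to envOpts. A short sketch with placeholder keys and values:

    // Illustrative TESTS_SETTINGS value (placeholder keys and values):
    def TESTS_SETTINGS = 'skipped_nodes=mon01.local;image_version=stable'
    def envOpts = TESTS_SETTINGS.replaceAll('\\"', '').tokenize(';')
    assert envOpts == ['skipped_nodes=mon01.local', 'image_version=stable']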
diff --git a/docker-mirror-images.groovy b/docker-mirror-images.groovy
index 636c666..0de5590 100644
--- a/docker-mirror-images.groovy
+++ b/docker-mirror-images.groovy
@@ -37,6 +37,30 @@
}
}
+def getImageInfo(String imageName) {
+ String unique_image_id = sh(
+ script: "docker inspect --format='{{index .RepoDigests 0}}' '${imageName}'",
+ returnStdout: true,
+ ).trim()
+ String imageSha256 = unique_image_id.tokenize(':')[1]
+ common.infoMsg("Docker ${imageName} image sha256 is ${imageSha256}")
+ return [ 'id': unique_image_id, 'sha256': imageSha256 ]
+}
+
+def imageURL(String registry, String imageName, String sha256) {
+ def ret = new URL("https://${registry}/artifactory/api/search/checksum?sha256=${sha256}").getText()
+ // The checksum search will most likely return many images, especially for external ones.
+ // We need to pick exactly the one we are pushing now.
+ def tgtGuessImage = imageName.replace(':', '/').replace(registry, '')
+ ArrayList img_data = new JsonSlurper().parseText(ret)['results']
+ def tgtImgUrl = img_data*.uri.find { it.contains(tgtGuessImage) }
+ if (tgtImgUrl) {
+ return tgtImgUrl
+ } else {
+ error("Can't find image ${imageName} in registry ${registry} with sha256: ${sha256}!")
+ }
+}
+
timeout(time: 4, unit: 'HOURS') {
node(slaveNode) {
def user = ''
@@ -78,6 +102,7 @@
common.retry(3, 5) {
srcImage.pull()
}
+ source_image_sha256 = getImageInfo(sourceImage)['sha256']
// Use sh-docker call for tag, due magic code in plugin:
// https://github.com/jenkinsci/docker-workflow-plugin/blob/docker-workflow-1.17/src/main/resources/org/jenkinsci/plugins/docker/workflow/Docker.groovy#L168-L170
sh("docker tag ${srcImage.id} ${targetImageFull}")
@@ -92,18 +117,10 @@
if (setDefaultArtifactoryProperties) {
common.infoMsg("Processing artifactory props for : ${targetImageFull}")
LinkedHashMap artifactoryProperties = [:]
- // Get digest of pushed image
- String unique_image_id = sh(
- script: "docker inspect --format='{{index .RepoDigests 0}}' '${targetImageFull}'",
- returnStdout: true,
- ).trim()
- def image_sha256 = unique_image_id.tokenize(':')[1]
- def ret = new URL("https://${targetRegistry}/artifactory/api/search/checksum?sha256=${image_sha256}").getText()
- // Most probably, we would get many images, especially for external images. We need to guess
- // exactly one, which we pushing now
- guessImage = targetImageFull.replace(':', '/').replace(targetRegistry, '')
- ArrayList img_data = new JsonSlurper().parseText(ret)['results']
- def imgUrl = img_data*.uri.find { it.contains(guessImage) } - '/manifest.json'
+ def tgtImageInfo = getImageInfo(targetImageFull)
+ def tgt_image_sha256 = tgtImageInfo['sha256']
+ def unique_image_id = tgtImageInfo['id']
+ def tgtImgUrl = imageURL(targetRegistry, targetImageFull, tgt_image_sha256) - '/manifest.json'
artifactoryProperties = [
'com.mirantis.targetTag' : env.IMAGE_TAG,
'com.mirantis.uniqueImageId': unique_image_id,
@@ -111,9 +128,11 @@
if (external) {
artifactoryProperties << ['com.mirantis.externalImage': external]
}
- def existingProps = mcp_artifactory.getPropertiesForArtifact(imgUrl)
+ def sourceRegistry = sourceImage.split('/')[0]
+ def sourceImgUrl = imageURL(sourceRegistry, sourceImage, source_image_sha256) - '/manifest.json'
+ def existingProps = mcp_artifactory.getPropertiesForArtifact(sourceImgUrl)
def historyProperties = []
- // check does image have already some props
+ // check whether the source image already has history props
if (existingProps) {
historyProperties = existingProps.get('com.mirantis.versionHistory', [])
}
@@ -122,7 +141,7 @@
artifactoryProperties << [ 'com.mirantis.versionHistory': historyProperties.join(',') ]
common.infoMsg("artifactoryProperties=> ${artifactoryProperties}")
common.retry(3, 5) {
- mcp_artifactory.setProperties(imgUrl, artifactoryProperties)
+ mcp_artifactory.setProperties(tgtImgUrl, artifactoryProperties)
}
}
}
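The two helpers added to docker-mirror-images.groovy factor out what the inline block used to do: getImageInfo() reads the image's first RepoDigests entry via docker inspect, and imageURL() resolves the matching artifact URI through the Artifactory checksum-search API. A sketch of the call pattern (the image name is only an example):

    def registry = 'docker-prod-local.docker.mirantis.net'
    def image = "${registry}/mirantis/cvp/cvp-spt:stable"   // example image
    def info = getImageInfo(image)                 // [id: '<repo>@sha256:<digest>', sha256: '<digest>']
    def artifactUrl = imageURL(registry, image, info['sha256']) - '/manifest.json'
    mcp_artifactory.setProperties(artifactUrl, ['com.mirantis.targetTag': env.IMAGE_TAG])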
diff --git a/k8s-upgrade-pipeline.groovy b/k8s-upgrade-pipeline.groovy
index 980aa2f..f9a16e0 100644
--- a/k8s-upgrade-pipeline.groovy
+++ b/k8s-upgrade-pipeline.groovy
@@ -24,6 +24,8 @@
* CALICO_UPGRADE_VERSION Version of "calico-upgrade" utility to be used ("v1.0.5" for Calico v3.1.3 target).
*
**/
+import groovy.json.JsonSlurper
+
def common = new com.mirantis.mk.Common()
def salt = new com.mirantis.mk.Salt()
def python = new com.mirantis.mk.Python()
@@ -323,6 +325,85 @@
}
}
+def checkCalicoPolicySetting(pepperEnv, target) {
+ def common = new com.mirantis.mk.Common()
+ def salt = new com.mirantis.mk.Salt()
+
+ stage("Checking of Calico network policy setting") {
+ // check Calico policy enabled
+ def cniPolicy = false
+ def addonsPolicy = false
+ def kubeCtrlRunning = false
+
+ // check CNI config
+ def cniCfgResult = salt.cmdRun(
+ pepperEnv, target, "cat /etc/cni/net.d/10-calico.conf"
+ )['return'][0].values()[0].toString()
+ def cniCfg = new JsonSlurper().parseText(cniCfgResult)
+ if (cniCfg.get("policy") != null) {
+ if (cniCfg["policy"].get("type") == "k8s") {
+ cniPolicy = true
+ } else {
+ common.warningMsg("Calico policy type is unknown or not set.")
+ }
+ }
+
+ // check k8s addons
+ def addonsResult = salt.cmdRun(
+ pepperEnv, target, "ls /etc/kubernetes/addons"
+ )['return'][0].values()[0].toString()
+ if (addonsResult.contains("calico_policy")) {
+ addonsPolicy = true
+ }
+
+ // check kube-controllers is running
+ def kubeCtrlResult = salt.cmdRun(
+ pepperEnv, target, "kubectl get pod -n kube-system --selector=k8s-app=calico-kube-controllers"
+ )['return'][0].values()[0].toString()
+ if (kubeCtrlResult.contains("Running")) {
+ kubeCtrlRunning = true
+ }
+
+ // It's safe to enable Calico policy any time, but it may be unsafe to disable it.
+ // So, no need to disable Calico policy for v3.x if it's not in use currently.
+ // But if Calico policy is in use already, it should be enabled after upgrade as well.
+
+ // check for consistency
+ if ((cniPolicy != addonsPolicy) || (addonsPolicy != kubeCtrlRunning)) {
+ caution = "ATTENTION. Calico policy setting cannot be determined reliably (enabled in CNI config: ${cniPolicy}, " +
+ "presence in k8s addons: ${addonsPolicy}, kube-controllers is running: ${kubeCtrlRunning})."
+ currentBuild.description += "<br><b>${caution}</b><br><br>"
+ common.warningMsg(caution)
+ } else {
+ common.infoMsg("Current Calico policy state is detected as: ${cniPolicy}")
+ if (cniPolicy) {
+ // Calico policy is in use. Check policy setting for v3.x.
+ common.infoMsg("Calico policy is in use. It should be enabled for v3.x as well.")
+ def saltPolicyResult = salt.getPillar(
+ pepperEnv, target, "kubernetes:pool:network:calico:policy"
+ )["return"][0].values()[0].toString()
+
+ common.infoMsg("kubernetes.pool.network.calico.policy: ${saltPolicyResult}")
+ if (saltPolicyResult.toLowerCase().contains("true")) {
+ common.infoMsg("Calico policy setting for v3.x is detected as: true")
+ } else {
+ caution = "ATTENTION. Currently, Calico is running with policy switched on. " +
+ "Calico policy setting for v3.x is not set to true. " +
+ "After upgrade is completed, Calico policy will be switched off. " +
+ "You will need to switch it on manually if required."
+ currentBuild.description += "<br><b>${caution}</b><br><br>"
+ common.warningMsg(caution)
+ }
+ }
+ }
+
+ if (addonsPolicy) {
+ // Remove v2.6.x policy-related addons on masters to not interfere with v3.x kube-controllers
+ salt.cmdRun(pepperEnv, CTL_TARGET, "rm -rf /etc/kubernetes/addons/calico_policy")
+ }
+ }
+}
+
timeout(time: 12, unit: 'HOURS') {
node() {
try {
@@ -378,6 +459,9 @@
pullCalicoImages(pepperEnv, POOL)
}
+ // check and adjust Calico policy setting
+ checkCalicoPolicySetting(pepperEnv, ctl_node)
+
// this sequence implies workloads operations downtime
startCalicoUpgrade(pepperEnv, ctl_node)
performCalicoConfigurationUpdateAndServicesRestart(pepperEnv, POOL)
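checkCalicoPolicySetting() in k8s-upgrade-pipeline.groovy treats Calico policy as "in use" only when three signals agree: a policy.type of "k8s" in /etc/cni/net.d/10-calico.conf, a calico_policy entry under /etc/kubernetes/addons, and a running calico-kube-controllers pod. A minimal sketch of the CNI-config part, with an assumed, abridged config body:

    import groovy.json.JsonSlurper

    // Assumed example content of /etc/cni/net.d/10-calico.conf (abridged);
    // only the "policy" section matters for the check above.
    def cniCfgResult = '{"name": "calico-k8s-network", "type": "calico", "policy": {"type": "k8s"}}'
    def cniCfg = new JsonSlurper().parseText(cniCfgResult)
    def cniPolicy = (cniCfg.get('policy') != null && cniCfg['policy'].get('type') == 'k8s')
    assert cniPolicy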
diff --git a/test-openscap-pipeline.groovy b/test-openscap-pipeline.groovy
index b886467..7134cfb 100644
--- a/test-openscap-pipeline.groovy
+++ b/test-openscap-pipeline.groovy
@@ -15,6 +15,7 @@
* For OVAL definitions, paths to OVAL definition files separated by semicolon, profile is ignored.
* XCCDF_VERSION The XCCDF version (default 1.2)
* XCCDF_TAILORING_ID The tailoring id (default None)
+ * XCCDF_CPE CPE dictionary or language for applicability checks (default None)
*
* TARGET_SERVERS The target Salt nodes (default *)
*
@@ -149,6 +150,7 @@
def benchmarksAndProfilesArray = XCCDF_BENCHMARKS.tokenize(';')
def xccdfVersion = XCCDF_VERSION ?: '1.2'
def xccdfTailoringId = XCCDF_TAILORING_ID ?: 'None'
+ def xccdfCPE = XCCDF_CPE ?: ''
def targetServers = TARGET_SERVERS ?: '*'
// To have an ability to work in heavy concurrency conditions
@@ -203,7 +205,7 @@
salt.runSaltProcessStep(pepperEnv, targetServers, 'oscap.eval', [
benchmarkType, benchmarkFile, "results_dir=${resultsDir}",
"profile=${profileName}", "xccdf_version=${xccdfVersion}",
- "tailoring_id=${xccdfTailoringId}"
+ "tailoring_id=${xccdfTailoringId}", "cpe=${xccdfCPE}"
])
salt.cmdRun(pepperEnv, targetServers, "rm -f /tmp/${scanUUID}.tar.xz; tar -cJf /tmp/${scanUUID}.tar.xz -C ${resultsBaseDir} .")
diff --git a/test-salt-model-wrapper.groovy b/test-salt-model-wrapper.groovy
index 3ef577b..95b6819 100644
--- a/test-salt-model-wrapper.groovy
+++ b/test-salt-model-wrapper.groovy
@@ -26,14 +26,26 @@
*/
import groovy.json.JsonOutput
+gerrit = new com.mirantis.mk.Gerrit()
cookiecutterTemplatesRepo='mk/cookiecutter-templates'
reclassSystemRepo='salt-models/reclass-system'
slaveNode = env.getProperty('SLAVE_NODE') ?: 'python&&docker'
+voteMatrix = [
+ 'test-mk-cookiecutter-templates': true,
+ 'test-drivetrain': true,
+ 'oscore-test-cookiecutter-models': false,
+ 'test-salt-model-infra': true,
+ 'test-salt-model-mcp-virtual-lab': true,
+]
+
+baseGerritConfig = [:]
+jobResultComments = [:]
+commentLock = false
+
LinkedHashMap getManualRefParams(LinkedHashMap map) {
LinkedHashMap manualParams = [:]
- String defaultGitRef = 'HEAD'
if (map.containsKey('RECLASS_SYSTEM_GIT_REF') && map.containsKey('RECLASS_SYSTEM_URL')) {
manualParams[reclassSystemRepo] = [
'url': map.get('RECLASS_SYSTEM_URL'),
@@ -51,33 +63,60 @@
return manualParams
}
-def runTests(String jobName, String extraVars, Boolean propagateStatus=true) {
- return {
- try {
- build job: "${jobName}", parameters: [
- [$class: 'TextParameterValue', name: 'EXTRA_VARIABLES_YAML', value: extraVars ]
- ]
- } catch (Exception e) {
- if (propagateStatus) {
- throw e
+def setGerritReviewComment(Boolean initComment = false) {
+ if (baseGerritConfig) {
+ while(commentLock) {
+ sleep 5
+ }
+ commentLock = true
+ LinkedHashMap config = baseGerritConfig.clone()
+ String jobResultComment = ''
+ jobResultComments.each { job, info ->
+ String skipped = ''
+ if (!initComment) {
skipped = voteMatrix.get(job, true) ? '' : '(skipped)'
}
+ jobResultComment += "- ${job} ${info.url}console : ${info.status} ${skipped}".trim() + '\n'
+ }
+ config['message'] = sh(script: "echo '${jobResultComment}'", returnStdout: true).trim()
+ gerrit.postGerritComment(config)
+ commentLock = false
+ }
+}
+
+def runTests(String jobName, String extraVars) {
+ def propagateStatus = voteMatrix.get(jobName, true)
+ return {
+ def jobBuild = build job: jobName, propagate: false, parameters: [
+ [$class: 'TextParameterValue', name: 'EXTRA_VARIABLES_YAML', value: extraVars ]
+ ]
+ jobResultComments[jobName] = [ 'url': jobBuild.absoluteUrl, 'status': jobBuild.result ]
+ setGerritReviewComment()
+ if (propagateStatus && jobBuild.result == 'FAILURE') {
+ throw new Exception("Build ${jobName} is failed!")
}
}
}
-def runTestSaltModelReclass(String cluster, String defaultGitUrl, String clusterGitUrl, String refSpec) {
+def runTestSaltModelReclass(String jobName, String defaultGitUrl, String clusterGitUrl, String refSpec) {
+ def propagateStatus = voteMatrix.get(jobName, true)
return {
- build job: "test-salt-model-${cluster}", parameters: [
+ def jobBuild = build job: jobName, propagate: false, parameters: [
[$class: 'StringParameterValue', name: 'DEFAULT_GIT_URL', value: clusterGitUrl],
[$class: 'StringParameterValue', name: 'DEFAULT_GIT_REF', value: "HEAD"],
[$class: 'StringParameterValue', name: 'SYSTEM_GIT_URL', value: defaultGitUrl],
[$class: 'StringParameterValue', name: 'SYSTEM_GIT_REF', value: refSpec ],
]
+ jobResultComments[jobName] = [ 'url': jobBuild.absoluteUrl, 'status': jobBuild.result ]
+ setGerritReviewComment()
+ if (propagateStatus && jobBuild.result == 'FAILURE') {
+ throw new Exception("Build ${jobName} is failed!")
+ }
}
}
-def checkReclassSystemDocumentationCommit(gerritLib, gerritCredentials) {
- gerritLib.gerritPatchsetCheckout([
+def checkReclassSystemDocumentationCommit(gerritCredentials) {
+ gerrit.gerritPatchsetCheckout([
credentialsId: gerritCredentials
])
@@ -90,14 +129,16 @@
timeout(time: 12, unit: 'HOURS') {
node(slaveNode) {
def common = new com.mirantis.mk.Common()
- def gerrit = new com.mirantis.mk.Gerrit()
def git = new com.mirantis.mk.Git()
def python = new com.mirantis.mk.Python()
// Var TEST_PARAMETERS_YAML contains any additional parameters for tests,
// like manually specified Gerrit Refs/URLs, additional parameters and so on
- if (env.getProperty('TEST_PARAMETERS_YAML')) {
- common.mergeEnv(env, env.getProperty('TEST_PARAMETERS_YAML'))
+ def buildTestParams = [:]
+ def buildTestParamsYaml = env.getProperty('TEST_PARAMETERS_YAML')
+ if (buildTestParamsYaml) {
+ common.mergeEnv(env, buildTestParamsYaml)
+ buildTestParams = readYaml text: buildTestParamsYaml
}
// init required job variables
@@ -128,6 +169,7 @@
gerritHost = job_env.get('GERRIT_HOST')
gerritPort = job_env.get('GERRIT_PORT')
gerritChangeNumber = job_env.get('GERRIT_CHANGE_NUMBER')
+ gerritPatchSetNumber = job_env.get('GERRIT_PATCHSET_NUMBER')
gerritBranch = job_env.get('GERRIT_BRANCH')
// check if the change isn't already merged
@@ -144,6 +186,14 @@
'branch': gerritBranch,
]
buildType = 'Gerrit Trigger'
+ buildTestParams << job_env.findAll { k,v -> k ==~ /GERRIT_.+/ }
+ baseGerritConfig = [
+ 'gerritName': gerritName,
+ 'gerritHost': gerritHost,
+ 'gerritChangeNumber': gerritChangeNumber,
+ 'credentialsId': gerritCredentials,
+ 'gerritPatchSetNumber': gerritPatchSetNumber,
+ ]
} else {
projectsMap = getManualRefParams(job_env)
if (!projectsMap) {
@@ -157,40 +207,43 @@
descriptionMsgs.add("Branch for ${project} => ${projectsMap[project]['branch']}")
}
descriptionMsgs.add("Distrib revision => ${distribRevision}")
- currentBuild.description = descriptionMsgs.join('\n')
+ currentBuild.description = descriptionMsgs.join('<br/>')
}
stage("Run tests") {
def branches = [:]
- branches.failFast = true
+ String branchJobName = ''
if (projectsMap.containsKey(reclassSystemRepo)) {
- def documentationOnly = checkReclassSystemDocumentationCommit(gerrit, gerritCredentials)
+ def documentationOnly = checkReclassSystemDocumentationCommit(gerritCredentials)
if (['master'].contains(gerritBranch) && !documentationOnly) {
for (int i = 0; i < testModels.size(); i++) {
def cluster = testModels[i]
- //def clusterGitUrl = projectsMap[reclassSystemRepo]['url'].substring(0, defaultGitUrl.lastIndexOf("/") + 1) + cluster
- def clusterGitUrl = ''
- branches["reclass-system-${cluster}"] = runTestSaltModelReclass(cluster, projectsMap[reclassSystemRepo]['url'], clusterGitUrl, projectsMap[reclassSystemRepo]['ref'])
+ def clusterGitUrl = projectsMap[reclassSystemRepo]['url'].substring(0, projectsMap[reclassSystemRepo]['url'].lastIndexOf("/") + 1) + cluster
+ branchJobName = "test-salt-model-${cluster}"
+ branches[branchJobName] = runTestSaltModelReclass(branchJobName, projectsMap[reclassSystemRepo]['url'], clusterGitUrl, projectsMap[reclassSystemRepo]['ref'])
}
} else {
common.warningMsg("Tests for ${testModels} skipped!")
}
}
if (projectsMap.containsKey(reclassSystemRepo) || projectsMap.containsKey(cookiecutterTemplatesRepo)) {
- branches['cookiecutter-templates'] = runTests('test-mk-cookiecutter-templates', JsonOutput.toJson(job_env))
+ branchJobName = 'test-mk-cookiecutter-templates'
+ branches[branchJobName] = runTests(branchJobName, JsonOutput.toJson(buildTestParams))
}
if (projectsMap.containsKey(cookiecutterTemplatesRepo)) {
- branches['test-drivetrain'] = runTests('test-drivetrain', JsonOutput.toJson(job_env))
- branches['oscore-test-cookiecutter-models'] = runTests('oscore-test-cookiecutter-models', JsonOutput.toJson(job_env))
+ branchJobName = 'test-drivetrain'
+ branches[branchJobName] = runTests(branchJobName, JsonOutput.toJson(buildTestParams))
+ branchJobName = 'oscore-test-cookiecutter-models'
+ branches[branchJobName] = runTests(branchJobName, JsonOutput.toJson(buildTestParams))
}
- // temp block to disable test run until job is stable
- print branches.keySet()
- currentBuild.result = 'SUCCESS'
- return
- // ----
-
+ branches.keySet().each { key ->
+ if (branches[key] instanceof Closure) {
+ jobResultComments[key] = [ 'url': job_env.get('BUILD_URL'), 'status': 'WAITING' ]
+ }
+ }
+ setGerritReviewComment(true)
parallel branches
}
}
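All child jobs in test-salt-model-wrapper.groovy are now started with propagate: false so their result can be recorded for the Gerrit comment first; the wrapper itself only fails when voteMatrix marks the job as voting. Condensed, the pattern used by runTests() and runTestSaltModelReclass() is:

    // Condensed sketch of the voting/non-voting dispatch above.
    def runAndVote = { String jobName, List jobParams ->
        def jobBuild = build job: jobName, propagate: false, parameters: jobParams
        jobResultComments[jobName] = ['url': jobBuild.absoluteUrl, 'status': jobBuild.result]
        setGerritReviewComment()
        if (voteMatrix.get(jobName, true) && jobBuild.result == 'FAILURE') {
            throw new Exception("Build ${jobName} has failed!")
        }
    }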