Merge "Revert "Fix properties setting in `docker-images-mirror`""
diff --git a/docker-mirror-images.groovy b/docker-mirror-images.groovy
index 636c666..522d044 100644
--- a/docker-mirror-images.groovy
+++ b/docker-mirror-images.groovy
@@ -101,9 +101,9 @@
def ret = new URL("https://${targetRegistry}/artifactory/api/search/checksum?sha256=${image_sha256}").getText()
// Most probably, we would get many images, especially for external images. We need to guess
// exactly one, which we pushing now
- guessImage = targetImageFull.replace(':', '/').replace(targetRegistry, '')
+ def tgtGuessImage = targetImageFull.replace(':', '/').replace(targetRegistry, '')
ArrayList img_data = new JsonSlurper().parseText(ret)['results']
- def imgUrl = img_data*.uri.find { it.contains(guessImage) } - '/manifest.json'
+ def tgtImgUrl = img_data*.uri.find { it.contains(tgtGuessImage) } - '/manifest.json'
artifactoryProperties = [
'com.mirantis.targetTag' : env.IMAGE_TAG,
'com.mirantis.uniqueImageId': unique_image_id,
@@ -111,9 +111,12 @@
if (external) {
artifactoryProperties << ['com.mirantis.externalImage': external]
}
- def existingProps = mcp_artifactory.getPropertiesForArtifact(imgUrl)
+ def sourceRegistry = sourceImage.split('/')[0]
+ def sourceGuessImage = sourceImage.replace(':', '/').replace(sourceRegistry, '')
+ def sourceImgUrl = img_data*.uri.find { it.contains(sourceGuessImage) } - '/manifest.json'
+ def existingProps = mcp_artifactory.getPropertiesForArtifact(sourceImgUrl)
def historyProperties = []
- // check does image have already some props
+ // check whether the source image already has history props
if (existingProps) {
historyProperties = existingProps.get('com.mirantis.versionHistory', [])
}
@@ -122,7 +125,7 @@
artifactoryProperties << [ 'com.mirantis.versionHistory': historyProperties.join(',') ]
common.infoMsg("artifactoryProperties=> ${artifactoryProperties}")
common.retry(3, 5) {
- mcp_artifactory.setProperties(imgUrl, artifactoryProperties)
+ mcp_artifactory.setProperties(tgtImgUrl, artifactoryProperties)
}
}
}
diff --git a/k8s-upgrade-pipeline.groovy b/k8s-upgrade-pipeline.groovy
index 980aa2f..f9a16e0 100644
--- a/k8s-upgrade-pipeline.groovy
+++ b/k8s-upgrade-pipeline.groovy
@@ -24,6 +24,8 @@
* CALICO_UPGRADE_VERSION Version of "calico-upgrade" utility to be used ("v1.0.5" for Calico v3.1.3 target).
*
**/
+import groovy.json.JsonSlurper
+
def common = new com.mirantis.mk.Common()
def salt = new com.mirantis.mk.Salt()
def python = new com.mirantis.mk.Python()
@@ -323,6 +325,85 @@
}
}
+def checkCalicoPolicySetting(pepperEnv, target) {
+ def common = new com.mirantis.mk.Common()
+ def salt = new com.mirantis.mk.Salt()
+
+ stage("Checking of Calico network policy setting") {
+ // check whether Calico policy is enabled
+ def cniPolicy = false
+ def addonsPolicy = false
+ def kubeCtrlRunning = false
+
+ // check CNI config
+ def cniCfgResult = salt.cmdRun(
+ pepperEnv, target, "cat /etc/cni/net.d/10-calico.conf"
+ )['return'][0].values()[0].toString()
+ def cniCfg = new JsonSlurper().parseText(cniCfgResult)
+ if (cniCfg.get("policy") != null) {
+ if (cniCfg["policy"].get("type") == "k8s") {
+ cniPolicy = true
+ } else {
+ common.warningMsg("Calico policy type is unknown or not set.")
+ }
+ }
+
+ // check k8s addons
+ def addonsResult = salt.cmdRun(
+ pepperEnv, target, "ls /etc/kubernetes/addons"
+ )['return'][0].values()[0].toString()
+ if (addonsResult.contains("calico_policy")) {
+ addonsPolicy = true
+ }
+
+ // check whether kube-controllers is running
+ def kubeCtrlResult = salt.cmdRun(
+ pepperEnv, target, "kubectl get pod -n kube-system --selector=k8s-app=calico-kube-controllers"
+ )['return'][0].values()[0].toString()
+ if (kubeCtrlResult.contains("Running")) {
+ kubeCtrlRunning = true
+ }
+
+ // It's safe to enable Calico policy any time, but it may be unsafe to disable it.
+ // So, no need to disable Calico policy for v3.x if it's not in use currently.
+ // But if Calico policy is in use already, it should be enabled after upgrade as well.
+
+ // check for consistency
+ if ((cniPolicy != addonsPolicy) || (addonsPolicy != kubeCtrlRunning)) {
+ caution = "ATTENTION. Calico policy setting cannot be determined reliably (enabled in CNI config: ${cniPolicy}, " +
+ "presence in k8s addons: ${addonsPolicy}, kube-controllers is running: ${kubeCtrlRunning})."
+ currentBuild.description += "<br><b>${caution}</b><br><br>"
+ common.warningMsg(caution)
+ } else {
+ common.infoMsg("Current Calico policy state is detected as: ${cniPolicy}")
+ if (cniPolicy) {
+ // Calico policy is in use. Check policy setting for v3.x.
+ common.infoMsg("Calico policy is in use. It should be enabled for v3.x as well.")
+ def saltPolicyResult = salt.getPillar(
+ pepperEnv, target, "kubernetes:pool:network:calico:policy"
+ )["return"][0].values()[0].toString()
+
+ common.infoMsg("kubernetes.pool.network.calico.policy: ${saltPolicyResult}")
+ if (saltPolicyResult.toLowerCase().contains("true")) {
+ common.infoMsg("Calico policy setting for v3.x is detected as: true")
+ } else {
+ caution = "ATTENTION. Currently, Calico is running with policy switched on. " +
+ "Calico policy setting for v3.x is not set to true. " +
+ "After upgrade is completed, Calico policy will be switched off. " +
+ "You will need to switch it on manually if required."
+ currentBuild.description += "<br><b>${caution}</b><br><br>"
+ common.warningMsg(caution)
+ }
+ }
+ }
+
+ if (addonsPolicy) {
+ // Remove v2.6.x policy-related addons on masters to not interfere with v3.x kube-controllers
+ salt.cmdRun(pepperEnv, CTL_TARGET, "rm -rf /etc/kubernetes/addons/calico_policy")
+ }
+ }
+}
+
timeout(time: 12, unit: 'HOURS') {
node() {
try {
@@ -378,6 +459,9 @@
pullCalicoImages(pepperEnv, POOL)
}
+ // check and adjust Calico policy setting
+ checkCalicoPolicySetting(pepperEnv, ctl_node)
+
// this sequence implies workloads operations downtime
startCalicoUpgrade(pepperEnv, ctl_node)
performCalicoConfigurationUpdateAndServicesRestart(pepperEnv, POOL)
diff --git a/test-openscap-pipeline.groovy b/test-openscap-pipeline.groovy
index b886467..7134cfb 100644
--- a/test-openscap-pipeline.groovy
+++ b/test-openscap-pipeline.groovy
@@ -15,6 +15,7 @@
* For OVAL definitions, paths to OVAL definition files separated by semicolon, profile is ignored.
* XCCDF_VERSION The XCCDF version (default 1.2)
* XCCDF_TAILORING_ID The tailoring id (default None)
+ * XCCDF_CPE CPE dictionary or language for applicability checks (default None)
*
* TARGET_SERVERS The target Salt nodes (default *)
*
@@ -149,6 +150,7 @@
def benchmarksAndProfilesArray = XCCDF_BENCHMARKS.tokenize(';')
def xccdfVersion = XCCDF_VERSION ?: '1.2'
def xccdfTailoringId = XCCDF_TAILORING_ID ?: 'None'
+ def xccdfCPE = XCCDF_CPE ?: ''
def targetServers = TARGET_SERVERS ?: '*'
// To have an ability to work in heavy concurrency conditions
@@ -203,7 +205,7 @@
salt.runSaltProcessStep(pepperEnv, targetServers, 'oscap.eval', [
benchmarkType, benchmarkFile, "results_dir=${resultsDir}",
"profile=${profileName}", "xccdf_version=${xccdfVersion}",
- "tailoring_id=${xccdfTailoringId}"
+ "tailoring_id=${xccdfTailoringId}", "cpe=${xccdfCPE}"
])
salt.cmdRun(pepperEnv, targetServers, "rm -f /tmp/${scanUUID}.tar.xz; tar -cJf /tmp/${scanUUID}.tar.xz -C ${resultsBaseDir} .")
diff --git a/test-salt-model-wrapper.groovy b/test-salt-model-wrapper.groovy
index 700ba24..95b6819 100644
--- a/test-salt-model-wrapper.groovy
+++ b/test-salt-model-wrapper.groovy
@@ -234,8 +234,8 @@
if (projectsMap.containsKey(cookiecutterTemplatesRepo)) {
branchJobName = 'test-drivetrain'
branches[branchJobName] = runTests(branchJobName, JsonOutput.toJson(buildTestParams))
- // TODO: enable oscore-test job once it's ready to consume EXTRA_VARIABLES_YAML
- //branches['oscore-test-cookiecutter-models'] = runTests('oscore-test-cookiecutter-models', JsonOutput.toJson(buildTestParams))
+ branchJobName = 'oscore-test-cookiecutter-models'
+ branches[branchJobName] = runTests(branchJobName, JsonOutput.toJson(buildTestParams))
}
branches.keySet().each { key ->
@@ -244,12 +244,7 @@
}
}
setGerritReviewComment(true)
- try {
- parallel branches
- } catch (Exception e) {
- println e
- println 'Job is in non-voting mode for now. Skipping fails.'
- }
+ parallel branches
}
}
}