Merge "Add pipeline for stacklight upgrade"
diff --git a/.gitreview b/.gitreview
index 9075ea3..ce0aa41 100644
--- a/.gitreview
+++ b/.gitreview
@@ -1,4 +1,4 @@
[gerrit]
-host=gerrit.mcp.mirantis.net
+host=gerrit.mcp.mirantis.com
port=29418
project=mk/mk-pipelines.git
diff --git a/build-debian-packages-prometheus-relay.groovy b/build-debian-packages-prometheus-relay.groovy
index f101f57..ea19c9d 100644
--- a/build-debian-packages-prometheus-relay.groovy
+++ b/build-debian-packages-prometheus-relay.groovy
@@ -13,7 +13,7 @@
sh("rm -rf * || true")
}
- def workingDir = "src/gerrit.mcp.mirantis.net/debian"
+ def workingDir = "src/gerrit.mcp.mirantis.com/debian"
stage("checkout") {
git.checkoutGitRepository(
"${workingDir}/prometheus-relay",
@@ -53,7 +53,7 @@
export GOROOT=\$PWD/go &&
export GOPATH=\$PWD &&
export PATH=\$PATH:\$GOPATH/bin:\$GOROOT/bin &&
- cd src/gerrit.mcp.mirantis.net/debian/prometheus-relay &&
+ cd src/gerrit.mcp.mirantis.com/debian/prometheus-relay &&
make""")
}
archiveArtifacts artifacts: "${workingDir}/prometheus-relay/build/*.deb"
diff --git a/cloud-deploy-pipeline.groovy b/cloud-deploy-pipeline.groovy
index 74e4629..aadc7c9 100644
--- a/cloud-deploy-pipeline.groovy
+++ b/cloud-deploy-pipeline.groovy
@@ -423,6 +423,8 @@
}
orchestrate.installKubernetesCompute(venvPepper, extra_tgt)
+ // Setup kubernetes addons for opencontrail. More info in the definition of the func.
+ orchestrate.setupKubeAddonForContrail(venvPepper, extra_tgt)
}
}
diff --git a/cloud-update.groovy b/cloud-update.groovy
index 8802c1b..d58d1e0 100644
--- a/cloud-update.groovy
+++ b/cloud-update.groovy
@@ -458,7 +458,8 @@
stage("Apply highstate on ${target} nodes") {
try {
common.retry(3){
- salt.enforceHighstate(pepperEnv, target)
+ out = salt.enforceHighstate(pepperEnv, target)
+ salt.printSaltCommandResult(out)
}
} catch (Exception e) {
common.errorMsg(e)
diff --git a/docker-mirror-images.groovy b/docker-mirror-images.groovy
index d88c9d1..4ccc74a 100644
--- a/docker-mirror-images.groovy
+++ b/docker-mirror-images.groovy
@@ -59,12 +59,18 @@
imageName = getImageName(sourceImage)
targetImageFull = "${targetRegistryPath}/${imageName}:${env.IMAGE_TAG}"
srcImage = docker.image(sourceImage)
- srcImage.pull()
+ common.retry(3, 5) {
+ srcImage.pull()
+ }
// Use sh-docker call for tag, due magic code in plugin:
// https://github.com/jenkinsci/docker-workflow-plugin/blob/docker-workflow-1.17/src/main/resources/org/jenkinsci/plugins/docker/workflow/Docker.groovy#L168-L170
sh("docker tag ${srcImage.id} ${targetImageFull}")
common.infoMsg("Attempt to push docker image into remote registry: ${env.REGISTRY_URL}")
- sh("docker push ${targetImageFull}")
+ common.retry(3, 5) {
+ docker.withRegistry(env.REGISTRY_URL, env.TARGET_REGISTRY_CREDENTIALS_ID) {
+ sh("docker push ${targetImageFull}")
+ }
+ }
if (targetImageFull.contains(externalMarker)) {
external = true
}
@@ -95,7 +101,9 @@
common.infoMsg("artifactoryProperties=> ${artifactoryProperties}")
// Call pipeline-library routine to set properties
def mcp_artifactory = new com.mirantis.mcp.MCPArtifactory()
- mcp_artifactory.setProperties(imgUrl - '/manifest.json', artifactoryProperties)
+ common.retry(3, 5) {
+ mcp_artifactory.setProperties(imgUrl - '/manifest.json', artifactoryProperties)
+ }
}
}
}
diff --git a/gating-pipeline.groovy b/gating-pipeline.groovy
index 99f487d..aeaee9a 100644
--- a/gating-pipeline.groovy
+++ b/gating-pipeline.groovy
@@ -9,13 +9,30 @@
def gerrit = new com.mirantis.mk.Gerrit()
def ssh = new com.mirantis.mk.Ssh()
+slaveNode = env.SLAVE_NODE ?: 'docker'
+giveVerify = false
@NonCPS
def isJobExists(jobName) {
return Jenkins.instance.items.find { it -> it.name.equals(jobName) }
}
-slaveNode = env.SLAVE_NODE ?: 'docker'
+def callJobWithExtraVars(String jobName) {
+ def gerritVars = '\n---'
+ for (envVar in env.getEnvironment()) {
+ if (envVar.key.startsWith("GERRIT_")) {
+ gerritVars += "\n${envVar.key}: '${envVar.value}'"
+ }
+ }
+ testJob = build job: jobName, parameters: [
+ [$class: 'TextParameterValue', name: 'EXTRA_VARIABLES_YAML', value: gerritVars]
+ ]
+ if (testJob.getResult() != 'SUCCESS') {
+ error("Gate job ${testJob.getBuildUrl().toString()} finished with ${testJob.getResult()} !")
+ }
+ giveVerify = true
+}
+
timeout(time: 12, unit: 'HOURS') {
node(slaveNode) {
@@ -25,7 +42,6 @@
ssh.ensureKnownHosts(GERRIT_HOST)
def gerritChange = gerrit.getGerritChange(GERRIT_NAME, GERRIT_HOST, GERRIT_CHANGE_NUMBER, CREDENTIALS_ID, true)
def doSubmit = false
- def giveVerify = false
stage("test") {
if (gerritChange.status != "MERGED" && !SKIP_TEST.equals("true")) {
// test max CodeReview
@@ -44,16 +60,22 @@
gerritProject = gerritProject + "-latest"
}
def testJob = String.format("test-%s-%s", jobsNamespace, gerritProject)
- if (isJobExists(testJob)) {
- common.infoMsg("Test job ${testJob} found, running")
- def patchsetVerified = gerrit.patchsetHasApproval(gerritChange.currentPatchSet, "Verified", "+")
- build job: testJob, parameters: [
- [$class: 'StringParameterValue', name: 'DEFAULT_GIT_URL', value: "${GERRIT_SCHEME}://${GERRIT_NAME}@${GERRIT_HOST}:${GERRIT_PORT}/${GERRIT_PROJECT}"],
- [$class: 'StringParameterValue', name: 'DEFAULT_GIT_REF', value: GERRIT_REFSPEC]
- ]
- giveVerify = true
+ if (env.GERRIT_PROJECT == 'mk/cookiecutter-templates') {
+ callJobWithExtraVars('test-mk-cookiecutter-templates')
+ } else if (env.GERRIT_PROJECT == 'salt-models/reclass-system') {
+ callJobWithExtraVars('test-salt-model-reclass-system')
} else {
- common.infoMsg("Test job ${testJob} not found")
+ if (isJobExists(testJob)) {
+ common.infoMsg("Test job ${testJob} found, running")
+ def patchsetVerified = gerrit.patchsetHasApproval(gerritChange.currentPatchSet, "Verified", "+")
+ build job: testJob, parameters: [
+ [$class: 'StringParameterValue', name: 'DEFAULT_GIT_URL', value: "${GERRIT_SCHEME}://${GERRIT_NAME}@${GERRIT_HOST}:${GERRIT_PORT}/${GERRIT_PROJECT}"],
+ [$class: 'StringParameterValue', name: 'DEFAULT_GIT_REF', value: GERRIT_REFSPEC]
+ ]
+ giveVerify = true
+ } else {
+ common.infoMsg("Test job ${testJob} not found")
+ }
}
} else {
common.errorMsg("Change don't have a CodeReview, skipping gate")
@@ -82,4 +104,4 @@
throw e
}
}
-}
\ No newline at end of file
+}
diff --git a/generate-cookiecutter-products.groovy b/generate-cookiecutter-products.groovy
index e1cd638..5e31d36 100644
--- a/generate-cookiecutter-products.groovy
+++ b/generate-cookiecutter-products.groovy
@@ -199,7 +199,7 @@
}
}
- def commonScriptsRepoUrl = 'https://gerrit.mcp.mirantis.net/mcp/mcp-common-scripts'
+ def commonScriptsRepoUrl = 'https://gerrit.mcp.mirantis.com/mcp/mcp-common-scripts'
checkout([
$class : 'GitSCM',
branches : [[name: 'FETCH_HEAD'],],
diff --git a/k8s-upgrade-pipeline.groovy b/k8s-upgrade-pipeline.groovy
index 3e7828b..530a256 100644
--- a/k8s-upgrade-pipeline.groovy
+++ b/k8s-upgrade-pipeline.groovy
@@ -10,6 +10,8 @@
* CTL_TARGET Salt targeted kubernetes CTL nodes (ex. I@kubernetes:master). Kubernetes control plane
* CMP_TARGET Salt targeted compute nodes (ex. cmp* and 'I@kubernetes:pool') Kubernetes computes
* PER_NODE Target nodes will be managed one by one (bool)
+ * SIMPLE_UPGRADE Use previous version of upgrade without cordon/drain abilities
+ * UPGRADE_DOCKER Upgrade docker component
*
**/
def common = new com.mirantis.mk.Common()
@@ -50,6 +52,60 @@
}
}
+def cordonNode(pepperEnv, target) {
+ def salt = new com.mirantis.mk.Salt()
+ def originalTarget = "I@kubernetes:master and not ${target}"
+
+ stage("Cordoning ${target} kubernetes node") {
+ def nodeShortName = target.tokenize(".")[0]
+ salt.cmdRun(pepperEnv, originalTarget, "kubectl cordon ${nodeShortName}", true, 1)
+ }
+}
+
+def uncordonNode(pepperEnv, target) {
+ def salt = new com.mirantis.mk.Salt()
+ def originalTarget = "I@kubernetes:master and not ${target}"
+
+ stage("Uncordoning ${target} kubernetes node") {
+ def nodeShortName = target.tokenize(".")[0]
+ salt.cmdRun(pepperEnv, originalTarget, "kubectl uncordon ${nodeShortName}", true, 1)
+ }
+}
+
+def drainNode(pepperEnv, target) {
+ def salt = new com.mirantis.mk.Salt()
+ def originalTarget = "I@kubernetes:master and not ${target}"
+
+ stage("Draining ${target} kubernetes node") {
+ def nodeShortName = target.tokenize(".")[0]
+ salt.cmdRun(pepperEnv, originalTarget, "kubectl drain --force --ignore-daemonsets --grace-period 100 --timeout 300s --delete-local-data ${nodeShortName}", true, 1)
+ }
+}
+
+def regenerateCerts(pepperEnv, target) {
+ def salt = new com.mirantis.mk.Salt()
+
+ stage("Regenerate certs for ${target}") {
+ salt.enforceState(pepperEnv, target, 'salt.minion.cert')
+ }
+}
+
+def updateAddons(pepperEnv, target) {
+ def salt = new com.mirantis.mk.Salt()
+
+ stage("Upgrading Addons at ${target}") {
+ salt.enforceState(pepperEnv, target, "kubernetes.master.kube-addons")
+ salt.enforceState(pepperEnv, target, "kubernetes.master.setup")
+ }
+}
+
+def upgradeDocker(pepperEnv, target) {
+ def salt = new com.mirantis.mk.Salt()
+
+ stage("Upgrading docker at ${target}") {
+ salt.enforceState(pepperEnv, target, 'docker.host')
+ }
+}
timeout(time: 12, unit: 'HOURS') {
node() {
@@ -73,7 +129,19 @@
def targetHosts = salt.getMinionsSorted(pepperEnv, target)
for (t in targetHosts) {
- performKubernetesControlUpdate(pepperEnv, t)
+ if (SIMPLE_UPGRADE.toBoolean()) {
+ performKubernetesControlUpdate(pepperEnv, t)
+ } else {
+ cordonNode(pepperEnv, t)
+ drainNode(pepperEnv, t)
+ regenerateCerts(pepperEnv, t)
+ if (UPGRADE_DOCKER.toBoolean()) {
+ upgradeDocker(pepperEnv, t)
+ }
+ performKubernetesControlUpdate(pepperEnv, t)
+ updateAddons(pepperEnv, t)
+ uncordonNode(pepperEnv, t)
+ }
}
} else {
performKubernetesControlUpdate(pepperEnv, target)
@@ -87,7 +155,18 @@
def targetHosts = salt.getMinionsSorted(pepperEnv, target)
for (t in targetHosts) {
- performKubernetesComputeUpdate(pepperEnv, t)
+ if (SIMPLE_UPGRADE.toBoolean()) {
+ performKubernetesComputeUpdate(pepperEnv, t)
+ } else {
+ cordonNode(pepperEnv, t)
+ drainNode(pepperEnv, t)
+ regenerateCerts(pepperEnv, t)
+ if (UPGRADE_DOCKER.toBoolean()) {
+ upgradeDocker(pepperEnv, t)
+ }
+ performKubernetesComputeUpdate(pepperEnv, t)
+ uncordonNode(pepperEnv, t)
+ }
}
} else {
performKubernetesComputeUpdate(pepperEnv, target)
diff --git a/opencontrail-upgrade.groovy b/opencontrail-upgrade.groovy
index af96600..4d9d498 100644
--- a/opencontrail-upgrade.groovy
+++ b/opencontrail-upgrade.groovy
@@ -33,7 +33,7 @@
def CONTROL_PKGS = 'contrail-config contrail-config-openstack contrail-control contrail-dns contrail-lib contrail-nodemgr contrail-utils contrail-web-controller contrail-web-core neutron-plugin-contrail python-contrail'
def ANALYTIC_PKGS = 'contrail-analytics contrail-lib contrail-nodemgr contrail-utils python-contrail'
def CMP_PKGS = 'contrail-lib contrail-nodemgr contrail-utils contrail-vrouter-agent contrail-vrouter-utils python-contrail python-contrail-vrouter-api python-opencontrail-vrouter-netns contrail-vrouter-dkms contrail-nova-driver'
-def KERNEL_MODULE_RELOAD = 'service supervisor-vrouter stop;ifdown vhost0;rmmod vrouter;modprobe vrouter;ifup vhost0;service supervisor-vrouter start;'
+def KERNEL_MODULE_RELOAD = 'service supervisor-vrouter stop; rmmod vrouter; sync && echo 3 > /proc/sys/vm/drop_caches && echo 1 > /proc/sys/vm/compact_memory; service supervisor-vrouter start'
def void runCommonCommands(target, command, args, check, salt, pepperEnv, common) {
diff --git a/opencontrail40-upgrade.groovy b/opencontrail40-upgrade.groovy
index bf35d97..52a0d23 100644
--- a/opencontrail40-upgrade.groovy
+++ b/opencontrail40-upgrade.groovy
@@ -31,7 +31,7 @@
def thirdPartyAnalyticsPkgsToRemove = 'redis-server,supervisor'
//def cmpPkgs = ['contrail-lib', 'contrail-nodemgr', 'contrail-utils', 'contrail-vrouter-agent', 'contrail-vrouter-utils', 'python-contrail', 'python-contrail-vrouter-api', 'python-opencontrail-vrouter-netns', 'contrail-vrouter-dkms']
def CMP_PKGS = 'contrail-lib contrail-nodemgr contrail-utils contrail-vrouter-agent contrail-vrouter-utils python-contrail python-contrail-vrouter-api python-opencontrail-vrouter-netns contrail-vrouter-dkms'
-def KERNEL_MODULE_RELOAD = 'service supervisor-vrouter stop;ifdown vhost0;rmmod vrouter;modprobe vrouter;ifup vhost0;service supervisor-vrouter start;'
+def KERNEL_MODULE_RELOAD = 'service supervisor-vrouter stop; rmmod vrouter; sync && echo 3 > /proc/sys/vm/drop_caches && echo 1 > /proc/sys/vm/compact_memory; service contrail-vrouter-agent start; service contrail-vrouter-nodemgr start'
def analyticsServices = ['supervisor-analytics', 'supervisor-database', 'zookeeper', 'redis-server']
def configServices = ['contrail-webui-jobserver', 'contrail-webui-webserver', 'supervisor-config', 'supervisor-database', 'zookeeper']
def controlServices = ['ifmap-server', 'supervisor-control', 'redis-server']
diff --git a/openstack-control-upgrade.groovy b/openstack-control-upgrade.groovy
index 6a6eea2..5febb3c 100644
--- a/openstack-control-upgrade.groovy
+++ b/openstack-control-upgrade.groovy
@@ -159,6 +159,7 @@
for (target in upgradeTargets){
common.stageWrapper(upgradeStageMap, "Pre upgrade", target, interactive) {
openstack.runOpenStackUpgradePhase(env, target, 'pre')
+ openstack.runOpenStackUpgradePhase(env, target, 'verify')
}
}
diff --git a/promote-vcp-images.groovy b/promote-vcp-images.groovy
index 181eafa..7b4f80e 100644
--- a/promote-vcp-images.groovy
+++ b/promote-vcp-images.groovy
@@ -17,6 +17,8 @@
slaveNode = env.SLAVE_NODE ?: 'jsl23.mcp.mirantis.net'
def job_env = env.getEnvironment().findAll { k, v -> v }
def verify = job_env.VERIFY_DOWNLOAD ?: true
+def overwrite = job_env.FORCE_OVERWRITE.toBoolean() ?: false
+
timeout(time: 6, unit: 'HOURS') {
@@ -91,7 +93,7 @@
remoteImageStatus = ''
remoteImageStatus = sh(script: "wget --auth-no-challenge --spider ${targetImageUrl} 2>/dev/null", returnStatus: true)
// wget return code 8 ,if file not exist
- if (remoteImageStatus != '8') {
+ if (remoteImageStatus != 8 && !overwrite) {
error("Attempt to overwrite existing release! Target: ${targetImage} already exist!")
}
}
diff --git a/release-mcp-version.groovy b/release-mcp-version.groovy
index 4cae93c..470f338 100644
--- a/release-mcp-version.groovy
+++ b/release-mcp-version.groovy
@@ -46,7 +46,7 @@
[$class: 'StringParameterValue', name: 'TARGET_REGISTRY_CREDENTIALS_ID', value: dockerCredentials],
[$class: 'StringParameterValue', name: 'REGISTRY_URL', value: dockerRegistryUrl],
[$class: 'StringParameterValue', name: 'IMAGE_TAG', value: targetTag],
- [$class: 'StringParameterValue', name: 'IMAGE_LIST', value: imageList],
+ [$class: 'TextParameterValue', name: 'IMAGE_LIST', value: imageList],
[$class: 'StringParameterValue', name: 'SOURCE_IMAGE_TAG', value: sourceImageTag],
]
}
@@ -67,7 +67,7 @@
def triggerGitTagJob(gitRepoList, gitCredentials, tag, sourceTag) {
build job: "tag-git-repos-all", parameters: [
- [$class: 'StringParameterValue', name: 'GIT_REPO_LIST', value: gitRepoList],
+ [$class: 'TextParameterValue', name: 'GIT_REPO_LIST', value: gitRepoList],
[$class: 'StringParameterValue', name: 'GIT_CREDENTIALS', value: gitCredentials],
[$class: 'StringParameterValue', name: 'TAG', value: tag],
[$class: 'StringParameterValue', name: 'SOURCE_TAG', value: sourceTag],
@@ -76,9 +76,10 @@
def triggerPromoteVCPJob(VcpImageList, tag, sourceTag) {
build job: "promote-vcp-images-all", parameters: [
- [$class: 'StringParameterValue', name: 'VCP_IMAGE_LIST', value: VcpImageList],
+ [$class: 'TextParameterValue', name: 'VCP_IMAGE_LIST', value: VcpImageList],
[$class: 'StringParameterValue', name: 'TAG', value: tag],
- [$class: 'StringParameterValue', name: 'SOURCE_TAG', value: sourceTag]
+ [$class: 'StringParameterValue', name: 'SOURCE_TAG', value: sourceTag],
+ [$class: 'BooleanParameterValue', name: 'FORCE_OVERWRITE', value: true],
]
}
diff --git a/sync-http-to-s3.groovy b/sync-http-to-s3.groovy
new file mode 100644
index 0000000..108a394
--- /dev/null
+++ b/sync-http-to-s3.groovy
@@ -0,0 +1,29 @@
+def common = new com.mirantis.mk.Common()
+
+
+node("docker") {
+ stage('Prepare') {
+ img = docker.image(IMAGE)
+ img.pull()
+ }
+ stage('Upload') {
+ FILENAMES.split().each { filename ->
+ url = "${SOURCE}/${filename}"
+ img.withRun("--entrypoint='/bin/bash'") { c ->
+ withCredentials([[$class : 'UsernamePasswordMultiBinding', credentialsId: 'aws-s3',
+ usernameVariable: 'S3_ACCESS_KEY', passwordVariable: 'S3_SECRET_KEY']]) {
+ img.inside("-e S3_ACCESS_KEY=${S3_ACCESS_KEY} -e S3_SECRET_KEY=${S3_SECRET_KEY}") {
+ common.retry(3, 5) {
+ sh(script: "wget --progress=dot:giga -O ${filename} ${url}", returnStdout: true)
+ sh(script: "/usr/local/bin/s4cmd put ${filename} ${DEST}/${filename}", returnStdout: true)
+ }
+ }
+ }
+
+
+ }
+ sh("rm ${filename}")
+ }
+ }
+ deleteDir()
+}
diff --git a/test-cookiecutter-reclass.groovy b/test-cookiecutter-reclass.groovy
index 6813be9..6f73570 100644
--- a/test-cookiecutter-reclass.groovy
+++ b/test-cookiecutter-reclass.groovy
@@ -17,6 +17,11 @@
git = new com.mirantis.mk.Git()
python = new com.mirantis.mk.Python()
+def extraVarsYAML = env.EXTRA_VARIABLES_YAML ?: false
+if (extraVarsYAML) {
+ common.mergeEnv(env, extraVarsYAML)
+}
+
slaveNode = env.SLAVE_NODE ?: 'docker'
checkIncludeOrder = env.CHECK_INCLUDE_ORDER ?: false
@@ -24,7 +29,7 @@
alreadyMerged = false
gerritConData = [credentialsId : env.CREDENTIALS_ID,
gerritName : env.GERRIT_NAME ?: 'mcp-jenkins',
- gerritHost : env.GERRIT_HOST ?: 'gerrit.mcp.mirantis.net',
+ gerritHost : env.GERRIT_HOST ?: 'gerrit.mcp.mirantis.com',
gerritScheme : env.GERRIT_SCHEME ?: 'ssh',
gerritPort : env.GERRIT_PORT ?: '29418',
gerritRefSpec : null,
@@ -32,7 +37,7 @@
withWipeOut : true,
GERRIT_CHANGE_NUMBER: null]
//
-//ccTemplatesRepo = env.COOKIECUTTER_TEMPLATE_URL ?: 'ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/mk/cookiecutter-templates'
+//ccTemplatesRepo = env.COOKIECUTTER_TEMPLATE_URL ?: 'ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/mk/cookiecutter-templates'
gerritDataCCHEAD = [:]
gerritDataCC = [:]
gerritDataCC << gerritConData
@@ -40,7 +45,7 @@
gerritDataCC['gerritRefSpec'] = env.COOKIECUTTER_TEMPLATE_REF ?: null
gerritDataCC['gerritProject'] = 'mk/cookiecutter-templates'
//
-//reclassSystemRepo = env.RECLASS_SYSTEM_URL ?: 'ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/salt-models/reclass-system'
+//reclassSystemRepo = env.RECLASS_SYSTEM_URL ?: 'ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/salt-models/reclass-system'
gerritDataRSHEAD = [:]
gerritDataRS = [:]
gerritDataRS << gerritConData
diff --git a/test-openscap-pipeline.groovy b/test-openscap-pipeline.groovy
new file mode 100644
index 0000000..c57e67d
--- /dev/null
+++ b/test-openscap-pipeline.groovy
@@ -0,0 +1,282 @@
+/**
+ *
+ * Run openscap xccdf evaluation on given nodes
+ *
+ * Expected parameters:
+ * SALT_MASTER_URL Full Salt API address.
+ * SALT_MASTER_CREDENTIALS Credentials to the Salt API.
+ *
+ * XCCDF_BENCHMARKS_DIR The XCCDF benchmarks base directory (default /usr/share/xccdf-benchmarks/mirantis/)
+ * XCCDF_BENCHMARKS List of pairs XCCDF benchmark filename and corresponding profile separated with ','
+ * these pairs are separated with semicolon.
+ * (e.g. manila/openstack_manila-xccdf.xml,profilename;horizon/openstack_horizon-xccdf.xml,profile)
+ * XCCDF_VERSION The XCCDF version (default 1.2)
+ * XCCDF_TAILORING_ID The tailoring id (default None)
+ *
+ * TARGET_SERVERS The target Salt nodes (default *)
+ *
+ * ARTIFACTORY_URL The artifactory URL
+ * ARTIFACTORY_NAMESPACE The artifactory namespace (default 'mirantis/openscap')
+ * ARTIFACTORY_REPO The artifactory repo (default 'binary-dev-local')
+ *
+ * UPLOAD_TO_DASHBOARD Boolean. Upload results to the WORP or not
+ * DASHBOARD_API_URL The WORP api base url. Mandatory if UPLOAD_TO_DASHBOARD is true
+ */
+
+
+
+/**
+ * Upload results to the `WORP` dashboard
+ *
+ * @param apiUrl The base dashboard api url
+ * @param cloudName The cloud name (mostly, the given node's domain name)
+ * @param nodeName The node name
+ * @param reportType Type of the report to create/use, either 'openscap' or 'cve'
+ * @param reportId Report Id to re-use, if empty report will be created
+ * @param results The scanning results as a json file content (string)
+ * @return reportId The Id of the report created if incoming reportId was empty, otherwise incoming reportId
+ */
+def uploadResultToDashboard(apiUrl, cloudName, nodeName, reportType, reportId, results) {
+ def common = new com.mirantis.mk.Common()
+ def http = new com.mirantis.mk.Http()
+
+ // Yes, we do not care of performance and will create at least 4 requests per each result
+ def requestData = [:]
+
+ def cloudId
+ def nodeId
+
+ def worpApi = [:]
+ worpApi["url"] = apiUrl
+
+ // Let's take a look, maybe our minion is already present on the dashboard
+ // Get available environments
+ common.infoMsg("Making GET to ${worpApi.url}/environment/")
+ environments = http.restGet(worpApi, "/environment/")
+ for (environment in environments) {
+ if (environment['name'] == cloudName) {
+ cloudId = environment['uuid']
+ break
+ }
+ }
+ // Cloud wasn't present, let's create it
+ if (! cloudId ) {
+ // Create cloud
+ requestData = [:]
+ requestData['name'] = cloudName
+ common.infoMsg("Making POST to ${worpApi.url}/environment/ with ${requestData}")
+ cloudId = http.restPost(worpApi, "/environment/", requestData)['env']['uuid']
+
+ // And the node
+ // It was done here to reduce count of requests to the api.
+ // Because if there was not cloud presented on the dashboard, then the node was not presented as well.
+ requestData = [:]
+ requestData['nodes'] = [nodeName]
+ common.infoMsg("Making PUT to ${worpApi.url}/environment/${cloudId}/nodes/ with ${requestData}")
+ nodeId = http.restCall(worpApi, "/environment/${cloudId}/nodes/", "PUT", requestData)['uuid']
+ }
+
+ if (! nodeId ) {
+ // Get available nodes in our environment
+ common.infoMsg("Making GET to ${worpApi.url}/environment/${cloudId}/nodes/")
+ nodes = http.restGet(worpApi, "/environment/${cloudId}/nodes/")
+ for (node in nodes) {
+ if (node['name'] == nodeName) {
+ nodeId = node['uuid']
+ break
+ }
+ }
+ }
+
+ // Node wasn't present, let's create it
+ if (! nodeId ) {
+ // Create node
+ requestData = [:]
+ requestData['nodes'] = [nodeName]
+ common.infoMsg("Making PUT to ${worpApi.url}/environment/${cloudId}/nodes/ with ${requestData}")
+ nodeId = http.restCall(worpApi, "/environment/${cloudId}/nodes/", "PUT", requestData)['uuid']
+ }
+
+ // Create report if needed
+ if (! reportId ) {
+ requestData = [:]
+ requestData['env_uuid'] = cloudId
+ common.infoMsg("Making POST to ${worpApi.url}/reports/${reportType}/ with ${requestData}")
+ reportId = http.restPost(worpApi, "/reports/${reportType}/", requestData)['report']['uuid']
+ }
+
+ // Upload results
+ // NOTE(pas-ha) results should already be a dict with 'results' key
+ requestData = common.parseJSON(results)
+ requestData['node_name'] = nodeName
+ common.infoMsg("First result in results to PUT is ${requestData['results'][0]}")
+ // NOTE(pas-ha) not logging whole results to be sent, is too large and just spams the logs
+ common.infoMsg("Making PUT to ${worpApi.url}/reports/${reportType}/${reportId}/ with node name ${requestData['node_name']} and results")
+ http.restCall(worpApi, "/reports/${reportType}/${reportId}/", "PUT", requestData)
+ return reportId
+}
+
+
+node('python') {
+ def pepperEnv = 'pepperEnv'
+
+ // XCCDF related variables
+ def benchmarksAndProfilesArray = XCCDF_BENCHMARKS.tokenize(';')
+ def benchmarksDir = XCCDF_BENCHMARKS_DIR ?: '/usr/share/xccdf-benchmarks/mirantis/'
+ def xccdfVersion = XCCDF_VERSION ?: '1.2'
+ def xccdfTailoringId = XCCDF_TAILORING_ID ?: 'None'
+ def targetServers = TARGET_SERVERS ?: '*'
+
+ def salt = new com.mirantis.mk.Salt()
+ def python = new com.mirantis.mk.Python()
+ def common = new com.mirantis.mk.Common()
+ def http = new com.mirantis.mk.Http()
+
+ // To have an ability to work in heavy concurrency conditions
+ def scanUUID = UUID.randomUUID().toString()
+
+ def artifactsArchiveName = "openscap-${scanUUID}.zip"
+ def resultsBaseDir = "/var/log/openscap/${scanUUID}"
+ def artifactsDir = "openscap"
+
+ def liveMinions
+
+
+ stage ('Setup virtualenv for Pepper') {
+ python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+ }
+
+ stage ('Run openscap xccdf evaluation and attempt to upload the results to a dashboard') {
+ liveMinions = salt.getMinions(pepperEnv, targetServers)
+
+ if (liveMinions.isEmpty()) {
+ throw new Exception('There are no alive minions')
+ }
+
+ common.infoMsg("Scan UUID: ${scanUUID}")
+
+ // Clean all results before proceeding with results from every minion
+ dir(artifactsDir) {
+ deleteDir()
+ }
+
+ def reportId
+ for (minion in liveMinions) {
+
+ // Iterate oscap evaluation over the benchmarks
+ for (benchmark in benchmarksAndProfilesArray) {
+ def (benchmarkFilePath, profile) = benchmark.tokenize(',').collect({it.trim()})
+
+ // Remove extension from the benchmark name
+ def benchmarkPathWithoutExtension = benchmarkFilePath.replaceFirst('[.][^.]+$', '')
+
+ // Get benchmark name
+ def benchmarkName = benchmarkPathWithoutExtension.tokenize('/')[-1]
+
+ // And build resultsDir based on this path
+ def resultsDir = "${resultsBaseDir}/${benchmarkPathWithoutExtension}"
+
+ def benchmarkFile = "${benchmarksDir}${benchmarkFilePath}"
+
+ def nodeShortName = minion.tokenize('.')[0]
+
+ def archiveName = "${scanUUID}_${nodeShortName}_${benchmarkName}.tar"
+
+ // Evaluate the benchmark
+ salt.runSaltProcessStep(pepperEnv, minion, 'oscap.eval', [
+ 'xccdf', benchmarkFile, "results_dir=${resultsDir}",
+ "profile=${profile}", "xccdf_version=${xccdfVersion}",
+ "tailoring_id=${xccdfTailoringId}"
+ ])
+
+ salt.cmdRun(pepperEnv, minion, "tar -cf /tmp/${archiveName} -C ${resultsBaseDir} .")
+ fileContents = salt.cmdRun(pepperEnv, minion, "cat /tmp/${archiveName}", true, null, false)['return'][0].values()[0].replaceAll('Salt command execution success', '')
+
+ sh "mkdir -p ${artifactsDir}/${scanUUID}/${nodeShortName}"
+ writeFile file: "${archiveName}", text: fileContents
+ sh "tar --strip-components 1 -xf ${archiveName} --directory ${artifactsDir}/${scanUUID}/${nodeShortName}; rm -f ${archiveName}"
+
+ // Remove archive which is not needed anymore
+ salt.runSaltProcessStep(pepperEnv, minion, 'file.remove', "/tmp/${archiveName}")
+
+ // Attempt to upload the scanning results to the dashboard
+ if (UPLOAD_TO_DASHBOARD.toBoolean()) {
+ if (common.validInputParam('DASHBOARD_API_URL')) {
+ def cloudName = salt.getGrain(pepperEnv, minion, 'domain')['return'][0].values()[0].values()[0]
+ reportId = uploadResultToDashboard(DASHBOARD_API_URL, cloudName, minion, "openscap", reportId, salt.getFileContent(pepperEnv, minion, "${resultsDir}/results.json"))
+ } else {
+ throw new Exception('Uploading to the dashboard is enabled but the DASHBOARD_API_URL was not set')
+ }
+ }
+ }
+ }
+
+ // Prepare archive
+ sh "tar -cJf ${artifactsDir}.tar.xz ${artifactsDir}"
+
+ // Archive the build output artifacts
+ archiveArtifacts artifacts: "*.xz"
+ }
+
+/* // Will be implemented later
+ stage ('Attempt to upload results to an artifactory') {
+ if (common.validInputParam('ARTIFACTORY_URL')) {
+ for (minion in liveMinions) {
+ def destDir = "${artifactsDir}/${minion}"
+ def archiveName = "openscap-${scanUUID}.tar.gz"
+ def tempArchive = "/tmp/${archiveName}"
+ def destination = "${destDir}/${archiveName}"
+
+ dir(destDir) {
+ // Archive scanning results on the remote target
+ salt.runSaltProcessStep(pepperEnv, minion, 'archive.tar', ['czf', tempArchive, resultsBaseDir])
+
+ // Get it content and save it
+ writeFile file: destination, text: salt.getFileContent(pepperEnv, minion, tempArchive)
+
+ // Remove scanning results and the temp archive on the remote target
+ salt.runSaltProcessStep(pepperEnv, minion, 'file.remove', resultsBaseDir)
+ salt.runSaltProcessStep(pepperEnv, minion, 'file.remove', tempArchive)
+ }
+ }
+
+ def artifactory = new com.mirantis.mcp.MCPArtifactory()
+ def artifactoryName = 'mcp-ci'
+ def artifactoryRepo = ARTIFACTORY_REPO ?: 'binary-dev-local'
+ def artifactoryNamespace = ARTIFACTORY_NAMESPACE ?: 'mirantis/openscap'
+ def artifactoryServer = Artifactory.server(artifactoryName)
+ def publishInfo = true
+ def buildInfo = Artifactory.newBuildInfo()
+ def zipName = "${env.WORKSPACE}/openscap/${scanUUID}/results.zip"
+
+ // Zip scan results
+ zip zipFile: zipName, archive: false, dir: artifactsDir
+
+ // Mandatory and additional properties
+ def properties = artifactory.getBinaryBuildProperties([
+ "scanUuid=${scanUUID}",
+ "project=openscap"
+ ])
+
+ // Build Artifactory spec object
+ def uploadSpec = """{
+ "files":
+ [
+ {
+ "pattern": "${zipName}",
+ "target": "${artifactoryRepo}/${artifactoryNamespace}/openscap",
+ "props": "${properties}"
+ }
+ ]
+ }"""
+
+ // Upload artifacts to the given Artifactory
+ artifactory.uploadBinariesToArtifactory(artifactoryServer, buildInfo, uploadSpec, publishInfo)
+
+ } else {
+ common.warningMsg('ARTIFACTORY_URL was not given, skip uploading to artifactory')
+ }
+ }
+*/
+
+}
diff --git a/test-reclass-package.groovy b/test-reclass-package.groovy
new file mode 100644
index 0000000..109d986
--- /dev/null
+++ b/test-reclass-package.groovy
@@ -0,0 +1,45 @@
+/**
+ * Check new Reclass version against current model.
+ *
+ * Expected parameters:
+ * SALT_MASTER_CREDENTIALS Credentials to the Salt API.
+ * SALT_MASTER_URL Full Salt API address [http://10.10.10.1:8000].
+ * DISTRIB_REVISION Mirror version to use
+ * EXTRA_REPO_PREDEFINED Use mcp extra repo defined on host
+ * EXTRA_REPO Extra repo to use in format (for example, deb [arch=amd64] http://apt.mirantis.com/xenial/ nightly extra)
+ * EXTRA_REPO_GPG_KEY_URL GPG key URL for extra repo
+ * TARGET_NODES Target specification, e.g. 'I@openssh:server'
+**/
+
+def common = new com.mirantis.mk.Common()
+def salt = new com.mirantis.mk.Salt()
+def saltModel = new com.mirantis.mk.SaltModelTesting()
+def python = new com.mirantis.mk.Python()
+
+def env = "env"
+def extraRepo = env.EXTRA_REPO
+def extraRepoKey = env.EXTRA_REPO_GPG_KEY_URL
+def targetNodes = env.TARGET_NODES
+def distribRevision = env.DISTRIB_REVISION
+def usePredefinedExtra = env.EXTRA_REPO_PREDEFINED
+node('cfg') {
+
+ stage('Setup virtualenv for Pepper') {
+ python.setupPepperVirtualenv(env, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+ }
+
+ def minions = salt.getMinionsSorted(env, targetNodes)
+ if (usePredefinedExtra) {
+ def mcp_extra = salt.getPillar(env, 'I@salt:master', "linux:system:repo:mcp_extra").get("return")[0].values()[0]
+ extraRepoKey = mcp_extra['key_url']
+ extraRepo = mcp_extra['source']
+ }
+ def config = [
+ 'distribRevision': distribRevision,
+ 'targetNodes': minions,
+ 'extraRepo': extraRepo,
+ 'extraRepoKey': extraRepoKey,
+ 'venv': env
+ ]
+ saltModel.compareReclassVersions(config)
+}
diff --git a/test-salt-models-pipeline.groovy b/test-salt-models-pipeline.groovy
index f4467c1..729fdb4 100644
--- a/test-salt-models-pipeline.groovy
+++ b/test-salt-models-pipeline.groovy
@@ -61,36 +61,26 @@
common = new com.mirantis.mk.Common()
def setupRunner() {
-
- def branches = [:]
- for (int i = 0; i < PARALLEL_NODE_GROUP_SIZE.toInteger() && i < futureNodes.size(); i++) {
- branches["Runner ${i}"] = {
- while (futureNodes && !failedNodes) {
- def currentNode = futureNodes[0] ? futureNodes[0] : null
+ def branches = [:]
+ branches.failFast = true
+ for(int i = 0; i < futureNodes.size(); i++) {
+ def currentNode = futureNodes[i] ? futureNodes[i] : null
if (!currentNode) {
- continue
+ continue
}
-
- def clusterName = currentNode[2]
- futureNodes.remove(currentNode)
- try {
- triggerTestNodeJob(currentNode[0], currentNode[1], currentNode[2], currentNode[3], currentNode[4])
- } catch (Exception e) {
- if (e.getMessage().contains("completed with status ABORTED")) {
- common.warningMsg("Test of ${clusterName} failed because the test was aborted : ${e}")
- futureNodes << currentNode
- } else {
- common.warningMsg("Test of ${clusterName} failed : ${e}")
- failedNodes = true
- }
+ branches["Runner ${i}"] = {
+ try {
+ triggerTestNodeJob(currentNode[0], currentNode[1], currentNode[2], currentNode[3], currentNode[4])
+ } catch (Exception e) {
+ common.warningMsg("Test of ${currentNode[2]} failed : ${e}")
+ throw e
+ }
}
- }
}
- }
- if (branches) {
- parallel branches
- }
+ if (branches) {
+ common.runParallel(branches, PARALLEL_NODE_GROUP_SIZE.toInteger())
+ }
}
def triggerTestNodeJob(defaultGitUrl, defaultGitRef, clusterName, testTarget, formulasSource) {
diff --git a/test-system-reclass-pipeline.groovy b/test-system-reclass-pipeline.groovy
index 47dde97..04eafeb 100644
--- a/test-system-reclass-pipeline.groovy
+++ b/test-system-reclass-pipeline.groovy
@@ -1,6 +1,19 @@
def gerrit = new com.mirantis.mk.Gerrit()
def common = new com.mirantis.mk.Common()
+// extraVarsYaml contains GERRIT_ vars from gate job
+// or will contain GERRIT_ vars from reclass-system patch
+def extraVarsYaml = env.EXTRA_VARIABLES_YAML ?: ''
+if (extraVarsYaml != '') {
+ common.mergeEnv(env, extraVarsYaml)
+} else {
+ extraVarsYaml = '\n---'
+ for (envVar in env.getEnvironment()) {
+ if (envVar.key.startsWith("GERRIT_")) {
+ extraVarsYaml += "\n${envVar.key}: '${envVar.value}'"
+ }
+ }
+}
def slaveNode = env.SLAVE_NODE ?: 'python&&docker'
def gerritCredentials = env.CREDENTIALS_ID ?: 'gerrit'
@@ -69,7 +82,8 @@
branches["cookiecutter"] = {
build job: "test-mk-cookiecutter-templates", parameters: [
[$class: 'StringParameterValue', name: 'RECLASS_SYSTEM_URL', value: defaultGitUrl],
- [$class: 'StringParameterValue', name: 'RECLASS_SYSTEM_GIT_REF', value: systemRefspec]
+ [$class: 'StringParameterValue', name: 'RECLASS_SYSTEM_GIT_REF', value: systemRefspec],
+ [$class: 'TextParameterValue', name: 'EXTRA_VARIABLES_YAML', value: extraVarsYaml ]
]
}
parallel branches