Merge "Update test-openscap-pipeline.groovy pipeline"
diff --git a/cloud-update.groovy b/cloud-update.groovy
index 8dea65b..92701bd 100644
--- a/cloud-update.groovy
+++ b/cloud-update.groovy
@@ -10,14 +10,12 @@
* PER_NODE Target nodes will be managed one by one (bool)
* ROLLBACK_BY_REDEPLOY Omit taking live snapshots. Rollback is planned to be done by redeployment (bool)
* STOP_SERVICES Stop API services before update (bool)
- * TARGET_KERNEL_UPDATES Comma separated list of nodes to update kernel if newer version is available (Valid values are cfg,ctl,prx,msg,dbs,log,mon,mtr,ntw,nal,gtw-virtual,cmn,rgw,cid,cmp,kvm,osd,gtw-physical)
- * TARGET_REBOOT Comma separated list of nodes to reboot after update or physical machine rollback (Valid values are cfg,ctl,prx,msg,dbs,log,mon,mtr,ntw,nal,gtw-virtual,cmn,rgw,cid,cmp,kvm,osd,gtw-physical)
- * TARGET_HIGHSTATE Comma separated list of nodes to run Salt Highstate on after update or physical machine rollback (Valid values are cfg,ctl,prx,msg,dbs,log,mon,mtr,ntw,nal,gtw-virtual,cmn,rgw,cid,cmp,kvm,osd,gtw-physical)
- * TARGET_UPDATES Comma separated list of nodes to update (Valid values are cfg,ctl,prx,msg,dbs,log,mon,mtr,ntw,nal,gtw-virtual,cmn,rgw,cid,cmp,kvm,osd,gtw-physical)
- * TARGET_ROLLBACKS Comma separated list of nodes to rollback (Valid values are ctl,prx,msg,dbs,log,mon,mtr,ntw,nal,gtw-virtual,cmn,rgw,cmp,kvm,osd,gtw-physical)
- * TARGET_SNAPSHOT_MERGES Comma separated list of nodes to merge live snapshot for (Valid values are cfg,ctl,prx,msg,dbs,log,mon,mtr,ntw,nal,gtw-virtual,cmn,rgw,cid)
- * CTL_TARGET Salt targeted CTL nodes (ex. ctl*)
- * PRX_TARGET Salt targeted PRX nodes (ex. prx*)
+ * TARGET_KERNEL_UPDATES Comma separated list of nodes to update kernel if newer version is available (Valid values are cfg,msg,dbs,log,mon,mtr,ntw,nal,cmn,rgw,cid,kvm,osd)
+ * TARGET_REBOOT Comma separated list of nodes to reboot after update or physical machine rollback (Valid values are cfg,msg,dbs,log,mon,mtr,ntw,nal,cmn,rgw,cid,kvm,osd)
+ * TARGET_HIGHSTATE Comma separated list of nodes to run Salt Highstate on after update or physical machine rollback (Valid values are cfg,msg,dbs,log,mon,mtr,ntw,nal,cmn,rgw,cid,kvm,osd)
+ * TARGET_UPDATES Comma separated list of nodes to update (Valid values are cfg,msg,dbs,log,mon,mtr,ntw,nal,cmn,rgw,cid,kvm,osd)
+ * TARGET_ROLLBACKS Comma separated list of nodes to rollback (Valid values are msg,dbs,log,mon,mtr,ntw,nal,cmn,rgw,kvm,osd)
+ * TARGET_SNAPSHOT_MERGES Comma separated list of nodes to merge live snapshot for (Valid values are cfg,msg,dbs,log,mon,mtr,ntw,nal,cmn,rgw,cid)
* MSG_TARGET Salt targeted MSG nodes (ex. msg*)
* DBS_TARGET Salt targeted DBS nodes (ex. dbs*)
* LOG_TARGET Salt targeted LOG nodes (ex. log*)
@@ -28,10 +26,8 @@
* CMN_TARGET Salt targeted CMN nodes (ex. cmn*)
* RGW_TARGET Salt targeted RGW nodes (ex. rgw*)
* CID_TARGET Salt targeted CID nodes (ex. cid*)
- * CMP_TARGET Salt targeted physical compute nodes (ex. cmp001*)
* KVM_TARGET Salt targeted physical KVM nodes (ex. kvm01*)
* CEPH_OSD_TARGET Salt targeted physical Ceph OSD nodes (ex. osd001*)
- * GTW_TARGET Salt targeted physical or virtual GTW nodes (ex. gtw01*)
* ROLLBACK_PKG_VERSIONS Space separated list of pkgs=versions to rollback to on physical targeted machines (ex. pkg_name1=pkg_version1 pkg_name2=pkg_version2)
* PURGE_PKGS Space separated list of pkgs=versions to be purged on physical targeted machines (ex. pkg_name1=pkg_version1 pkg_name2=pkg_version2)
* REMOVE_PKGS Space separated list of pkgs=versions to be removed on physical targeted machines (ex. pkg_name1=pkg_version1 pkg_name2=pkg_version2)
@@ -107,7 +103,7 @@
if (targetPackages != "") {
// list installed versions of pkgs that will be upgraded
- if (targetType == 'kvm' || targetType == 'cmp' || targetType == 'osd' || targetType == 'gtw-physical') {
+ if (targetType == 'kvm' || targetType == 'osd') {
def installedPkgs = []
def newPkgs = []
def targetPkgList = targetPackages.tokenize(',')
@@ -893,48 +889,6 @@
}
}
- if (updates.contains("ctl")) {
- def target = CTL_TARGET
- def type = 'ctl'
- if (salt.testTarget(pepperEnv, target)) {
- if (!ROLLBACK_BY_REDEPLOY.toBoolean()) {
- liveSnapshot(pepperEnv, target, type)
- }
- if (PER_NODE.toBoolean()) {
- def targetHosts = salt.getMinionsSorted(pepperEnv, target)
- for (t in targetHosts) {
- updatePkgs(pepperEnv, t, type)
- highstate(pepperEnv, t, type)
- }
- } else {
- updatePkgs(pepperEnv, target, type)
- highstate(pepperEnv, target, type)
- }
- verifyAPIs(pepperEnv, target)
- }
- }
-
- if (updates.contains("prx")) {
- def target = PRX_TARGET
- def type = 'prx'
- if (salt.testTarget(pepperEnv, target)) {
- if (!ROLLBACK_BY_REDEPLOY.toBoolean()) {
- liveSnapshot(pepperEnv, target, type)
- }
- if (PER_NODE.toBoolean()) {
- def targetHosts = salt.getMinionsSorted(pepperEnv, target)
- for (t in targetHosts) {
- updatePkgs(pepperEnv, t, type)
- highstate(pepperEnv, t, type)
- }
- } else {
- updatePkgs(pepperEnv, target, type)
- highstate(pepperEnv, target, type)
- }
- verifyService(pepperEnv, target, 'nginx')
- }
- }
-
if (updates.contains("msg")) {
def target = MSG_TARGET
def type = 'msg'
@@ -1023,27 +977,6 @@
}
}
- if (updates.contains("gtw-virtual")) {
- def target = GTW_TARGET
- def type = 'gtw-virtual'
- if (salt.testTarget(pepperEnv, target)) {
- if (!ROLLBACK_BY_REDEPLOY.toBoolean()) {
- liveSnapshot(pepperEnv, target, type)
- }
- if (PER_NODE.toBoolean()) {
- def targetHosts = salt.getMinionsSorted(pepperEnv, target)
- for (t in targetHosts) {
- updatePkgs(pepperEnv, t, type)
- highstate(pepperEnv, t, type)
- }
- } else {
- updatePkgs(pepperEnv, target, type)
- highstate(pepperEnv, target, type)
- }
- verifyService(pepperEnv, target, 'neutron-dhcp-agent')
- }
- }
-
if (updates.contains("cmn")) {
def target = CMN_TARGET
def type = 'cmn'
@@ -1161,27 +1094,6 @@
}
}
- //
- //physical machines update CMP_TARGET
- //
- if (updates.contains("cmp")) {
- def target = CMP_TARGET
- def type = 'cmp'
- if (salt.testTarget(pepperEnv, target)) {
- if (PER_NODE.toBoolean()) {
- def targetHosts = salt.getMinionsSorted(pepperEnv, target)
- for (t in targetHosts) {
- updatePkgs(pepperEnv, t, type)
- highstate(pepperEnv, t, type)
- }
- } else {
- updatePkgs(pepperEnv, target, type)
- highstate(pepperEnv, target, type)
- }
- verifyService(pepperEnv, target, 'nova-compute')
- }
- }
-
if (updates.contains("kvm")) {
def target = KVM_TARGET
def type = 'kvm'
@@ -1218,24 +1130,6 @@
}
}
- if (updates.contains("gtw-physical")) {
- def target = GTW_TARGET
- def type = 'gtw-physical'
- if (salt.testTarget(pepperEnv, target)) {
- if (PER_NODE.toBoolean()) {
- def targetHosts = salt.getMinionsSorted(pepperEnv, target)
- for (t in targetHosts) {
- updatePkgs(pepperEnv, t, type)
- highstate(pepperEnv, t, type)
- }
- } else {
- updatePkgs(pepperEnv, target, type)
- highstate(pepperEnv, target, type)
- }
- verifyService(pepperEnv, target, 'neutron-dhcp-agent')
- }
- }
-
/*
* Rollback section
*/
@@ -1249,30 +1143,6 @@
}
} */
- if (rollbacks.contains("ctl")) {
- def target = CTL_TARGET
- if (salt.testTarget(pepperEnv, target)) {
- if (!ROLLBACK_BY_REDEPLOY.toBoolean()) {
- rollback(pepperEnv, target, 'ctl')
- verifyAPIs(pepperEnv, target)
- } else {
- removeNode(pepperEnv, target, 'ctl')
- }
- }
- }
-
- if (rollbacks.contains("prx")) {
- def target = PRX_TARGET
- if (salt.testTarget(pepperEnv, target)) {
- if (!ROLLBACK_BY_REDEPLOY.toBoolean()) {
- rollback(pepperEnv, target, 'prx')
- verifyService(pepperEnv, target, 'nginx')
- } else {
- removeNode(pepperEnv, target, 'prx')
- }
- }
- }
-
if (rollbacks.contains("msg")) {
def target = MSG_TARGET
if (salt.testTarget(pepperEnv, target)) {
@@ -1323,18 +1193,6 @@
}
}
- if (rollbacks.contains("gtw-virtual")) {
- def target = GTW_TARGET
- if (salt.testTarget(pepperEnv, target)) {
- if (!ROLLBACK_BY_REDEPLOY.toBoolean()) {
- rollback(pepperEnv, target, 'gtw')
- verifyService(pepperEnv, target, 'neutron-dhcp-agent')
- } else {
- removeNode(pepperEnv, target, 'gtw')
- }
- }
- }
-
if (rollbacks.contains("cmn")) {
def target = CMN_TARGET
if (salt.testTarget(pepperEnv, target)) {
@@ -1401,27 +1259,6 @@
}
} */
- //
- //physical machines rollback CMP_TARGET
- //
- if (rollbacks.contains("cmp")) {
- def target = CMP_TARGET
- def type = 'cmp'
- if (salt.testTarget(pepperEnv, target)) {
- if (PER_NODE.toBoolean()) {
- def targetHosts = salt.getMinionsSorted(pepperEnv, target)
- for (t in targetHosts) {
- rollbackPkgs(pepperEnv, t)
- highstate(pepperEnv, t, type)
- }
- } else {
- rollbackPkgs(pepperEnv, target, target)
- highstate(pepperEnv, target, type)
- }
- verifyService(pepperEnv, target, 'nova-compute')
- }
- }
-
if (rollbacks.contains("kvm")) {
def target = KVM_TARGET
def type = 'kvm'
@@ -1458,24 +1295,6 @@
}
}
- if (rollbacks.contains("gtw-physical")) {
- def target = GTW_TARGET
- def type = 'gtw-physical'
- if (salt.testTarget(pepperEnv, target)) {
- if (PER_NODE.toBoolean()) {
- def targetHosts = salt.getMinionsSorted(pepperEnv, target)
- for (t in targetHosts) {
- rollbackPkgs(pepperEnv, t)
- highstate(pepperEnv, t, type)
- }
- } else {
- rollbackPkgs(pepperEnv, target, target)
- highstate(pepperEnv, target, type)
- }
- verifyService(pepperEnv, target, 'neutron-dhcp-agent')
- }
- }
-
/*
* Merge snapshots section
*/
@@ -1485,20 +1304,6 @@
}
}
- if (merges.contains("ctl")) {
- if (salt.testTarget(pepperEnv, CTL_TARGET)) {
- mergeSnapshot(pepperEnv, CTL_TARGET, 'ctl')
- verifyService(pepperEnv, CTL_TARGET, 'nova-api')
- }
- }
-
- if (merges.contains("prx")) {
- if (salt.testTarget(pepperEnv, PRX_TARGET)) {
- mergeSnapshot(pepperEnv, PRX_TARGET, 'prx')
- verifyService(pepperEnv, PRX_TARGET, 'nginx')
- }
- }
-
if (merges.contains("msg")) {
if (salt.testTarget(pepperEnv, MSG_TARGET)) {
mergeSnapshot(pepperEnv, MSG_TARGET, 'msg')
@@ -1529,13 +1334,6 @@
}
}
- if (merges.contains("gtw-virtual")) {
- if (salt.testTarget(pepperEnv, GTW_TARGET)) {
- mergeSnapshot(pepperEnv, GTW_TARGET, 'gtw')
- verifyService(pepperEnv, GTW_TARGET, 'neutron-dhcp-agent')
- }
- }
-
if (merges.contains("cmn")) {
if (salt.testTarget(pepperEnv, CMN_TARGET)) {
mergeSnapshot(pepperEnv, CMN_TARGET, 'cmn')
diff --git a/cvp-runner.groovy b/cvp-runner.groovy
index 7cf8e28..6b5c0e2 100644
--- a/cvp-runner.groovy
+++ b/cvp-runner.groovy
@@ -10,27 +10,34 @@
* TESTS_REPO Repo to clone
* TESTS_SETTINGS Additional environment varibales to apply
* PROXY Proxy to use for cloning repo or for pip
- * TEST_IMAGE Docker image link or name to use for running container with test framework.
+ * IMAGE Docker image to use for running container with test framework.
* DEBUG_MODE If you need to debug (keep container after test), please enabled this
- *
+ * To launch tests from the cvp_spt Docker image, set IMAGE and leave TESTS_REPO empty
*/
common = new com.mirantis.mk.Common()
validate = new com.mirantis.mcp.Validate()
salt = new com.mirantis.mk.Salt()
-def artifacts_dir = 'validation_artifacts/'
-def remote_dir = '/root/qa_results/'
+salt_testing = new com.mirantis.mk.SaltModelTesting()
+def artifacts_dir = "validation_artifacts/"
+def remote_dir = '/root/qa_results'
def container_workdir = '/var/lib'
+def name = 'cvp-spt'
+def xml_file = "${name}_report.xml"
def TARGET_NODE = "I@gerrit:client"
def reinstall_env = false
def container_name = "${env.JOB_NAME}"
def saltMaster
def settings
-node() {
+slaveNode = (env.getProperty('SLAVE_NODE')) ?: 'docker'
+imageName = (env.getProperty('IMAGE')) ?: 'docker-prod-local.docker.mirantis.net/mirantis/cvp/cvp-spt:stable'
+
+node(slaveNode) {
try{
stage('Initialization') {
sh "rm -rf ${artifacts_dir}"
+ // TODO: collapse TESTS_SETTINGS flow into EXTRA variables map
if ( TESTS_SETTINGS != "" ) {
for (var in TESTS_SETTINGS.tokenize(";")) {
key = var.tokenize("=")[0].trim()
@@ -50,11 +57,11 @@
validate.prepareVenv(TESTS_REPO, PROXY)
} else {
saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
- salt.cmdRun(saltMaster, TARGET_NODE, "rm -rf ${remote_dir}")
- salt.cmdRun(saltMaster, TARGET_NODE, "mkdir -p ${remote_dir}")
+ salt.cmdRun(saltMaster, TARGET_NODE, "rm -rf ${remote_dir}/")
+ salt.cmdRun(saltMaster, TARGET_NODE, "mkdir -p ${remote_dir}/")
validate.runContainer(saltMaster, TARGET_NODE, IMAGE, container_name)
if ( TESTS_REPO != "") {
- salt.cmdRun(saltMaster, TARGET_NODE, "docker exec ${container_name} rm -rf ${container_workdir}/cvp*")
+ salt.cmdRun(saltMaster, TARGET_NODE, "docker exec ${container_name} rm -rf ${container_workdir}/${container_name}")
salt.cmdRun(saltMaster, TARGET_NODE, "docker exec ${container_name} git clone ${TESTS_REPO} ${container_workdir}/${container_name}")
TESTS_SET = container_workdir + '/' + container_name + '/' + TESTS_SET
if ( reinstall_env ) {
@@ -66,8 +73,36 @@
}
stage('Run Tests') {
+ def creds = common.getCredentials(SALT_MASTER_CREDENTIALS)
+ def username = creds.username
+ def password = creds.password
+ def script = "pytest --junitxml ${container_workdir}/${artifacts_dir}/${xml_file} --tb=short -sv ${container_workdir}/${TESTS_SET}"
+
sh "mkdir -p ${artifacts_dir}"
- validate.runPyTests(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS, TESTS_SET, TESTS_SETTINGS.tokenize(";"), container_name, TARGET_NODE, remote_dir, artifacts_dir)
+
+ def configRun = [
+ 'image': imageName,
+ 'baseRepoPreConfig': false,
+ 'dockerMaxCpus': 2,
+ 'dockerExtraOpts' : [
+ "-v /root/qa_results/:/root/qa_results/",
+ "-v ${env.WORKSPACE}/validation_artifacts/:${container_workdir}/validation_artifacts/",
+ "--entrypoint=''", // to override ENTRYPOINT=/bin/bash in Dockerfile of image
+ ],
+
+ 'envOpts' : [
+ "WORKSPACE=${container_workdir}/${name}",
+ "SALT_USERNAME=${username}",
+ "SALT_PASSWORD=${password}",
+ "SALT_URL=${SALT_MASTER_URL}"
+ ] + TESTS_SETTINGS.replaceAll('\\"', '').tokenize(";"),
+ 'runCommands' : [
+ '010_start_tests' : {
+ sh("cd ${container_workdir} && ${script}")
+ }
+ ]
+ ]
+ salt_testing.setupDockerAndTest(configRun)
}
stage ('Publish results') {
diff --git a/test-salt-model-wrapper.groovy b/test-salt-model-wrapper.groovy
index 3ef577b..700ba24 100644
--- a/test-salt-model-wrapper.groovy
+++ b/test-salt-model-wrapper.groovy
@@ -26,14 +26,26 @@
*/
import groovy.json.JsonOutput
+gerrit = new com.mirantis.mk.Gerrit()
cookiecutterTemplatesRepo='mk/cookiecutter-templates'
reclassSystemRepo='salt-models/reclass-system'
slaveNode = env.getProperty('SLAVE_NODE') ?: 'python&&docker'
+voteMatrix = [
+ 'test-mk-cookiecutter-templates': true,
+ 'test-drivetrain': true,
+ 'oscore-test-cookiecutter-models': false,
+ 'test-salt-model-infra': true,
+ 'test-salt-model-mcp-virtual-lab': true,
+]
+
+baseGerritConfig = [:]
+jobResultComments = [:]
+commentLock = false
+
LinkedHashMap getManualRefParams(LinkedHashMap map) {
LinkedHashMap manualParams = [:]
- String defaultGitRef = 'HEAD'
if (map.containsKey('RECLASS_SYSTEM_GIT_REF') && map.containsKey('RECLASS_SYSTEM_URL')) {
manualParams[reclassSystemRepo] = [
'url': map.get('RECLASS_SYSTEM_URL'),
@@ -51,33 +63,60 @@
return manualParams
}
-def runTests(String jobName, String extraVars, Boolean propagateStatus=true) {
- return {
- try {
- build job: "${jobName}", parameters: [
- [$class: 'TextParameterValue', name: 'EXTRA_VARIABLES_YAML', value: extraVars ]
- ]
- } catch (Exception e) {
- if (propagateStatus) {
- throw e
+def setGerritReviewComment(Boolean initComment = false) {
+ if (baseGerritConfig) {
+ while(commentLock) {
+ sleep 5
+ }
+ commentLock = true
+ LinkedHashMap config = baseGerritConfig.clone()
+ String jobResultComment = ''
+ jobResultComments.each { job, info ->
+ String skipped = ''
+ if (!initComment) {
+ skipped = voteMatrix.get(job, 'true') ? '' : '(skipped)'
}
+ jobResultComment += "- ${job} ${info.url}console : ${info.status} ${skipped}".trim() + '\n'
+ }
+ config['message'] = sh(script: "echo '${jobResultComment}'", returnStdout: true).trim()
+ gerrit.postGerritComment(config)
+ commentLock = false
+ }
+}
+
+def runTests(String jobName, String extraVars) {
+ def propagateStatus = voteMatrix.get(jobName, true)
+ return {
+ def jobBuild = build job: jobName, propagate: false, parameters: [
+ [$class: 'TextParameterValue', name: 'EXTRA_VARIABLES_YAML', value: extraVars ]
+ ]
+ jobResultComments[jobName] = [ 'url': jobBuild.absoluteUrl, 'status': jobBuild.result ]
+ setGerritReviewComment()
+ if (propagateStatus && jobBuild.result == 'FAILURE') {
+ throw new Exception("Build ${jobName} is failed!")
}
}
}
-def runTestSaltModelReclass(String cluster, String defaultGitUrl, String clusterGitUrl, String refSpec) {
+def runTestSaltModelReclass(String jobName, String defaultGitUrl, String clusterGitUrl, String refSpec) {
+ def propagateStatus = voteMatrix.get(jobName, true)
return {
- build job: "test-salt-model-${cluster}", parameters: [
+ def jobBuild = build job: jobName, propagate: false, parameters: [
[$class: 'StringParameterValue', name: 'DEFAULT_GIT_URL', value: clusterGitUrl],
[$class: 'StringParameterValue', name: 'DEFAULT_GIT_REF', value: "HEAD"],
[$class: 'StringParameterValue', name: 'SYSTEM_GIT_URL', value: defaultGitUrl],
[$class: 'StringParameterValue', name: 'SYSTEM_GIT_REF', value: refSpec ],
]
+ jobResultComments[jobName] = [ 'url': jobBuild.absoluteUrl, 'status': jobBuild.result ]
+ setGerritReviewComment()
+ if (propagateStatus && jobBuild.result == 'FAILURE') {
+ throw new Exception("Build ${jobName} is failed!")
+ }
}
}
-def checkReclassSystemDocumentationCommit(gerritLib, gerritCredentials) {
- gerritLib.gerritPatchsetCheckout([
+def checkReclassSystemDocumentationCommit(gerritCredentials) {
+ gerrit.gerritPatchsetCheckout([
credentialsId: gerritCredentials
])
@@ -90,14 +129,16 @@
timeout(time: 12, unit: 'HOURS') {
node(slaveNode) {
def common = new com.mirantis.mk.Common()
- def gerrit = new com.mirantis.mk.Gerrit()
def git = new com.mirantis.mk.Git()
def python = new com.mirantis.mk.Python()
// Var TEST_PARAMETERS_YAML contains any additional parameters for tests,
// like manually specified Gerrit Refs/URLs, additional parameters and so on
- if (env.getProperty('TEST_PARAMETERS_YAML')) {
- common.mergeEnv(env, env.getProperty('TEST_PARAMETERS_YAML'))
+ def buildTestParams = [:]
+ def buildTestParamsYaml = env.getProperty('TEST_PARAMETERS_YAML')
+ if (buildTestParamsYaml) {
+ common.mergeEnv(env, buildTestParamsYaml)
+ buildTestParams = readYaml text: buildTestParamsYaml
}
// init required job variables
@@ -128,6 +169,7 @@
gerritHost = job_env.get('GERRIT_HOST')
gerritPort = job_env.get('GERRIT_PORT')
gerritChangeNumber = job_env.get('GERRIT_CHANGE_NUMBER')
+ gerritPatchSetNumber = job_env.get('GERRIT_PATCHSET_NUMBER')
gerritBranch = job_env.get('GERRIT_BRANCH')
// check if change aren't already merged
@@ -144,6 +186,14 @@
'branch': gerritBranch,
]
buildType = 'Gerrit Trigger'
+ buildTestParams << job_env.findAll { k,v -> k ==~ /GERRIT_.+/ }
+ baseGerritConfig = [
+ 'gerritName': gerritName,
+ 'gerritHost': gerritHost,
+ 'gerritChangeNumber': gerritChangeNumber,
+ 'credentialsId': gerritCredentials,
+ 'gerritPatchSetNumber': gerritPatchSetNumber,
+ ]
} else {
projectsMap = getManualRefParams(job_env)
if (!projectsMap) {
@@ -157,41 +207,49 @@
descriptionMsgs.add("Branch for ${project} => ${projectsMap[project]['branch']}")
}
descriptionMsgs.add("Distrib revision => ${distribRevision}")
- currentBuild.description = descriptionMsgs.join('\n')
+ currentBuild.description = descriptionMsgs.join('<br/>')
}
stage("Run tests") {
def branches = [:]
- branches.failFast = true
+ String branchJobName = ''
if (projectsMap.containsKey(reclassSystemRepo)) {
- def documentationOnly = checkReclassSystemDocumentationCommit(gerrit, gerritCredentials)
+ def documentationOnly = checkReclassSystemDocumentationCommit(gerritCredentials)
if (['master'].contains(gerritBranch) && !documentationOnly) {
for (int i = 0; i < testModels.size(); i++) {
def cluster = testModels[i]
- //def clusterGitUrl = projectsMap[reclassSystemRepo]['url'].substring(0, defaultGitUrl.lastIndexOf("/") + 1) + cluster
- def clusterGitUrl = ''
- branches["reclass-system-${cluster}"] = runTestSaltModelReclass(cluster, projectsMap[reclassSystemRepo]['url'], clusterGitUrl, projectsMap[reclassSystemRepo]['ref'])
+ def clusterGitUrl = projectsMap[reclassSystemRepo]['url'].substring(0, projectsMap[reclassSystemRepo]['url'].lastIndexOf("/") + 1) + cluster
+ branchJobName = "test-salt-model-${cluster}"
+ branches[branchJobName] = runTestSaltModelReclass(branchJobName, projectsMap[reclassSystemRepo]['url'], clusterGitUrl, projectsMap[reclassSystemRepo]['ref'])
}
} else {
common.warningMsg("Tests for ${testModels} skipped!")
}
}
if (projectsMap.containsKey(reclassSystemRepo) || projectsMap.containsKey(cookiecutterTemplatesRepo)) {
- branches['cookiecutter-templates'] = runTests('test-mk-cookiecutter-templates', JsonOutput.toJson(job_env))
+ branchJobName = 'test-mk-cookiecutter-templates'
+ branches[branchJobName] = runTests(branchJobName, JsonOutput.toJson(buildTestParams))
}
if (projectsMap.containsKey(cookiecutterTemplatesRepo)) {
- branches['test-drivetrain'] = runTests('test-drivetrain', JsonOutput.toJson(job_env))
- branches['oscore-test-cookiecutter-models'] = runTests('oscore-test-cookiecutter-models', JsonOutput.toJson(job_env))
+ branchJobName = 'test-drivetrain'
+ branches[branchJobName] = runTests(branchJobName, JsonOutput.toJson(buildTestParams))
+ // TODO: enable oscore-test job once it's ready to consume EXTRA_VARIABLES_YAML
+ //branches['oscore-test-cookiecutter-models'] = runTests('oscore-test-cookiecutter-models', JsonOutput.toJson(buildTestParams))
}
- // temp block to disable test run until job is stable
- print branches.keySet()
- currentBuild.result = 'SUCCESS'
- return
- // ----
-
- parallel branches
+ branches.keySet().each { key ->
+ if (branches[key] instanceof Closure) {
+ jobResultComments[key] = [ 'url': job_env.get('BUILD_URL'), 'status': 'WAITING' ]
+ }
+ }
+ setGerritReviewComment(true)
+ try {
+ parallel branches
+ } catch (Exception e) {
+ println e
+ println 'Job is in non-voting mode for now. Skipping fails.'
+ }
}
}
}