Merge "Pass GERRIT_BRANCH parameter from gating job to salt-formulas test job"
diff --git a/cloud-deploy-pipeline.groovy b/cloud-deploy-pipeline.groovy
index 3b6e831..de8d8fd 100644
--- a/cloud-deploy-pipeline.groovy
+++ b/cloud-deploy-pipeline.groovy
@@ -370,8 +370,7 @@
stage('Install infra') {
if (common.checkContains('STACK_INSTALL', 'core') ||
- common.checkContains('STACK_INSTALL', 'openstack') ||
- common.checkContains('STACK_INSTALL', 'oss')) {
+ common.checkContains('STACK_INSTALL', 'openstack')) {
orchestrate.installInfra(venvPepper, extra_tgt)
}
}
@@ -559,12 +558,6 @@
}
}
- if (common.checkContains('STACK_INSTALL', 'oss')) {
- stage('Install Oss infra') {
- orchestrate.installOssInfra(venvPepper, extra_tgt)
- }
- }
-
if (common.checkContains('STACK_INSTALL', 'cicd')) {
stage('Install Cicd') {
extra_tgt_bckp = extra_tgt
@@ -598,16 +591,6 @@
}
}
- if (common.checkContains('STACK_INSTALL', 'oss')) {
- stage('Install OSS') {
- if (!common.checkContains('STACK_INSTALL', 'stacklight')) {
- // In case if StackLightv2 enabled containers already started
- salt.enforceState(venvPepper, "I@docker:swarm:role:master and I@devops_portal:config ${extra_tgt}", 'docker.client', true)
- }
- orchestrate.installOss(venvPepper, extra_tgt)
- }
- }
-
//
// Test
//
@@ -652,47 +635,6 @@
}
}
- if (common.checkContains('STACK_TEST', 'openstack')) {
- if (common.checkContains('TEST_DOCKER_INSTALL', 'true')) {
- test.install_docker(venvPepper, TEST_TEMPEST_TARGET)
- }
- stage('Run OpenStack tests') {
- test.runTempestTests(venvPepper, TEST_TEMPEST_IMAGE, TEST_TEMPEST_TARGET, TEST_TEMPEST_PATTERN)
- }
-
- stage('Copy Tempest results to config node') {
- test.copyTempestResults(venvPepper, TEST_TEMPEST_TARGET)
- }
-
- stage('Archive rally artifacts') {
- test.archiveRallyArtifacts(venvPepper, TEST_TEMPEST_TARGET)
- }
-
- if (common.validInputParam('TESTRAIL_REPORT') && TESTRAIL_REPORT.toBoolean()) {
- stage('Upload test results to TestRail') {
- def date = sh(script: 'date +%Y-%m-%d', returnStdout: true).trim()
- def plan = TESTRAIL_PLAN ?: "[${TESTRAIL_MILESTONE}]System-Devcloud-${date}"
- def group = TESTRAIL_GROUP ?: STACK_TEMPLATE
-
- salt.cmdRun(venvPepper, TEST_TEMPEST_TARGET, "cd /root/rally_reports && cp \$(ls -t *xml | head -n1) report.xml")
- test.uploadResultsTestrail("/root/rally_reports/report.xml",
- TESTRAIL_REPORTER_IMAGE, group, TESTRAIL_QA_CREDENTIALS,
- plan, TESTRAIL_MILESTONE, TESTRAIL_SUITE)
- }
- }
- }
-
-
- if (common.checkContains('STACK_TEST', 'ceph')) {
- stage('Run infra tests') {
- sleep(120)
- def cmd = "apt-get install -y python-pip && pip install -r /usr/share/salt-formulas/env/ceph/files/testinfra/requirements.txt && python -m pytest --junitxml=/root/report.xml /usr/share/salt-formulas/env/ceph/files/testinfra/"
- salt.cmdRun(venvPepper, 'I@salt:master', cmd, false)
- writeFile(file: 'report.xml', text: salt.getFileContent(venvPepper, 'I@salt:master', '/root/report.xml'))
- junit(keepLongStdio: true, testResults: 'report.xml')
- }
- }
-
if (common.checkContains('STACK_TEST', 'opencontrail')) {
stage('Run opencontrail tests') {
def opencontrail_tests_dir = "/opt/opencontrail_test/fuel-plugin-contrail/plugin_test/vapor/"
diff --git a/cvp-tempest.groovy b/cvp-tempest.groovy
new file mode 100644
index 0000000..c6fca0a
--- /dev/null
+++ b/cvp-tempest.groovy
@@ -0,0 +1,168 @@
+/**
+ *
+ * Launch CVP Tempest verification of the cloud
+ *
+ * Expected parameters:
+
+ * SALT_MASTER_URL URL of Salt master
+ * SALT_MASTER_CREDENTIALS Credentials that are used in this Jenkins for accessing Salt master (usually "salt")
+ * SERVICE_NODE Node, where runtest formula and some other states will be executed
+ * VERBOSE Show salt output in Jenkins console
+ * DEBUG_MODE                  If true, keep the test container after the run (the cleanup in 'finally' is skipped)
+ * STOP_ON_ERROR Stop pipeline if error during salt run occurs
+ * GENERATE_CONFIG Run runtest formula / generate Tempest config
+ * SKIP_LIST_PATH Path to skip list (not in use right now)
+ * TEST_IMAGE Docker image link to use for running container with testing tools.
+ * TARGET_NODE Node to run container with Tempest/Rally
+ * PREPARE_RESOURCES           Prepare OpenStack resources (keystone/glance/nova clients) before the test run
+ * TEMPEST_TEST_PATTERN Tests to run
+ * TEMPEST_ENDPOINT_TYPE Type of OS endpoint to use during test run (not in use right now)
+ * concurrency Number of threads to use for Tempest test run
+ * remote_artifacts_dir Folder to use for artifacts on remote node
+ * report_prefix Some prefix to put to report name
+ *
+ */
+
+
+common = new com.mirantis.mk.Common()
+salt = new com.mirantis.mk.Salt()
+validate = new com.mirantis.mcp.Validate()
+
+def saltMaster
+extraYamlContext = env.getProperty('EXTRA_PARAMS')
+if (extraYamlContext) {
+ common.mergeEnv(env, extraYamlContext) }
+def SALT_MASTER_CREDENTIALS=(env.SALT_MASTER_CREDENTIALS) ?: 'salt'
+def VERBOSE = (env.VERBOSE) ?: true
+def DEBUG_MODE = (env.DEBUG_MODE) ?: false
+def STOP_ON_ERROR = (env.STOP_ON_ERROR) ?: false
+def GENERATE_CONFIG = (env.GENERATE_CONFIG) ?: true
+def remote_artifacts_dir = (env.remote_artifacts_dir) ?: '/root/test/'
+def report_prefix = (env.report_prefix) ?: ''
+def args = ''
+node() {
+ try{
+ stage('Initialization') {
+ deleteDir()
+ saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+ cluster_name=salt.getPillar(saltMaster, 'I@salt:master', '_param:cluster_name')['return'][0].values()[0]
+ os_version=salt.getPillar(saltMaster, 'I@salt:master', '_param:openstack_version')['return'][0].values()[0]
+ if (os_version == '') {
+ throw new Exception("Openstack is not found on this env. Exiting")
+ }
+ TEST_IMAGE = (env.TEST_IMAGE) ?: "docker-prod-virtual.docker.mirantis.net/mirantis/cicd/ci-tempest:${os_version}"
+ runtest_node = salt.runSaltProcessStep(saltMaster, 'I@runtest:*', 'test.ping')['return'][0]
+ if (runtest_node.values()[0]) {
+ // Let's use Service node that was defined in reclass. If several nodes are defined
+ // we will use the first from salt output
+ common.infoMsg("Service node ${runtest_node.keySet()[0]} is defined in reclass")
+ SERVICE_NODE = runtest_node.keySet()[0]
+ }
+ else {
+ common.infoMsg("Service node is not defined in reclass")
+ SERVICE_NODE = (env.SERVICE_NODE) ?: 'I@salt:master'
+ common.infoMsg("${SERVICE_NODE} will be used as Service node")
+ def classes_to_add = ["cluster.${cluster_name}.infra.runtest"]
+ fullnodename = salt.getMinions(saltMaster, SERVICE_NODE).get(0)
+ common.infoMsg("Full service node name ${fullnodename}")
+ result = salt.runSaltCommand(saltMaster, 'local', ['expression': SERVICE_NODE, 'type': 'compound'], 'reclass.node_update',
+ null, null, ['name': fullnodename, 'classes': classes_to_add])
+ salt.checkResult(result)
+ }
+ common.infoMsg('Refreshing pillars on service node')
+ salt.runSaltProcessStep(saltMaster, SERVICE_NODE, 'saltutil.refresh_pillar', [], null, VERBOSE)
+ tempest_node=salt.getPillar(saltMaster, SERVICE_NODE, '_param:tempest_test_target')['return'][0].values()[0] ?: 'I@gerrit:client'
+ }
+ stage('Preparing resources') {
+ if ( PREPARE_RESOURCES.toBoolean() ) {
+ common.infoMsg('Running salt.minion state on service node')
+ salt.enforceState(saltMaster, SERVICE_NODE, ['salt.minion'], VERBOSE, STOP_ON_ERROR, null, false, 300, 2, true, [], 60)
+ common.infoMsg('Running keystone.client on service node')
+ salt.enforceState(saltMaster, SERVICE_NODE, 'keystone.client', VERBOSE, STOP_ON_ERROR)
+ common.infoMsg('Running glance.client on service node')
+ salt.enforceState(saltMaster, SERVICE_NODE, 'glance.client', VERBOSE, STOP_ON_ERROR)
+ common.infoMsg('Running nova.client on service node')
+ salt.enforceState(saltMaster, SERVICE_NODE, 'nova.client', VERBOSE, STOP_ON_ERROR)
+ }
+ else {
+ common.infoMsg('Skipping resources preparation')
+ }
+ }
+ stage('Generate config') {
+ if ( GENERATE_CONFIG ) {
+ salt.runSaltProcessStep(saltMaster, SERVICE_NODE, 'file.remove', ["${remote_artifacts_dir}"])
+ salt.runSaltProcessStep(saltMaster, SERVICE_NODE, 'file.mkdir', ["${remote_artifacts_dir}"])
+ fullnodename = salt.getMinions(saltMaster, SERVICE_NODE).get(0)
+ TARGET_NODE = (env.TARGET_NODE) ?: tempest_node
+ if (TARGET_NODE != tempest_node) {
+ common.infoMsg("TARGET_NODE is defined in Jenkins")
+ def params_to_update = ['tempest_test_target': "${TARGET_NODE}"]
+ common.infoMsg("Overriding default ${tempest_node} value of tempest_test_target parameter")
+ result = salt.runSaltCommand(saltMaster, 'local', ['expression': SERVICE_NODE, 'type': 'compound'], 'reclass.node_update',
+ null, null, ['name': fullnodename, 'parameters': ['tempest_test_target': "${TARGET_NODE}"]])
+ salt.checkResult(result)
+ }
+ common.infoMsg("TARGET_NODE is ${TARGET_NODE}")
+ salt.runSaltProcessStep(saltMaster, TARGET_NODE, 'file.remove', ["${remote_artifacts_dir}"])
+ salt.runSaltProcessStep(saltMaster, TARGET_NODE, 'file.mkdir', ["${remote_artifacts_dir}"])
+ salt.enforceState(saltMaster, SERVICE_NODE, 'runtest', VERBOSE, STOP_ON_ERROR)
+ // we need to refresh pillars on target node after runtest state
+ salt.runSaltProcessStep(saltMaster, TARGET_NODE, 'saltutil.refresh_pillar', [], null, VERBOSE)
+ if (TARGET_NODE != tempest_node) {
+ common.infoMsg("Reverting tempest_test_target parameter")
+ result = salt.runSaltCommand(saltMaster, 'local', ['expression': SERVICE_NODE, 'type': 'compound'], 'reclass.node_update',
+ null, null, ['name': fullnodename, 'parameters': ['tempest_test_target': "${tempest_node}"]])
+ }
+ SKIP_LIST_PATH = (env.SKIP_LIST_PATH) ?: salt.getPillar(saltMaster, SERVICE_NODE, '_param:tempest_skip_list_path')['return'][0].values()[0]
+ runtest_tempest_cfg_dir = salt.getPillar(saltMaster, SERVICE_NODE, '_param:runtest_tempest_cfg_dir')['return'][0].values()[0] ?: '/root/test/'
+ if (SKIP_LIST_PATH) {
+ salt.cmdRun(saltMaster, SERVICE_NODE, "salt-cp ${TARGET_NODE} ${SKIP_LIST_PATH} ${runtest_tempest_cfg_dir}/skip.list")
+ args += ' --blacklist-file /root/tempest/skip.list '
+ }
+ }
+ else {
+ common.infoMsg('Skipping Tempest config generation')
+ salt.cmdRun(saltMaster, TARGET_NODE, "rm -rf ${remote_artifacts_dir}/reports")
+ }
+ }
+
+ stage('Run Tempest tests') {
+ // parameters: master, target, dockerImageLink, name, env_var, entrypoint, tempestConfLocalPath
+ validate.runContainer(saltMaster, TARGET_NODE, TEST_IMAGE, 'cvp', [], true,
+ '/root/test/tempest_generated.conf')
+ report_prefix += 'tempest_'
+ if (env.concurrency) {
+ args += ' -w ' + env.concurrency
+ }
+ if (TEMPEST_TEST_PATTERN == 'set=smoke') {
+ args += ' -s '
+ report_prefix += 'smoke'
+ }
+ else {
+ if (TEMPEST_TEST_PATTERN != 'set=full') {
+ args += " -r ${TEMPEST_TEST_PATTERN} "
+ report_prefix += 'full'
+ }
+ }
+ salt.cmdRun(saltMaster, TARGET_NODE, "docker exec -e ARGS=\'${args}\' cvp /bin/bash -c 'run-tempest'")
+ }
+ stage('Collect results') {
+ report_prefix += "_report_${env.BUILD_NUMBER}"
+ // will be removed after changing runtest-formula logic
+ salt.cmdRun(saltMaster, TARGET_NODE, "mkdir -p ${remote_artifacts_dir}/reports; mv ${remote_artifacts_dir}/report_* ${remote_artifacts_dir}/reports")
+ validate.addFiles(saltMaster, TARGET_NODE, "${remote_artifacts_dir}/reports", '')
+ sh "mv report_*.xml ${report_prefix}.xml"
+ sh "mv report_*.log ${report_prefix}.log"
+ archiveArtifacts artifacts: "${report_prefix}.*"
+ junit "${report_prefix}.xml"
+ }
+ } catch (Throwable e) {
+ // If there was an error or exception thrown, the build failed
+ currentBuild.result = "FAILURE"
+ throw e
+ } finally {
+ if (DEBUG_MODE == 'false') {
+ validate.runCleanup(saltMaster, TARGET_NODE)
+ }
+ }
+}
diff --git a/lab-pipeline.groovy b/lab-pipeline.groovy
deleted file mode 100644
index 61015f5..0000000
--- a/lab-pipeline.groovy
+++ /dev/null
@@ -1,390 +0,0 @@
-/**
-
-
-
-
-
-
-
-
-
-* This pipeline is deprecated, please use cloud-deploy-pipeline
-
-
-
-
-
-
-
-
-
-
-
-
- *
- * Launch heat stack with basic k8s
- * Flow parameters:
- * STACK_NAME Heat stack name
- * STACK_TYPE Orchestration engine: heat, ''
- * STACK_INSTALL What should be installed (k8s, openstack, ...)
- * STACK_TEST What should be tested (k8s, openstack, ...)
- *
- * STACK_TEMPLATE_URL URL to git repo with stack templates
- * STACK_TEMPLATE_BRANCH Stack templates repo branch
- * STACK_TEMPLATE_CREDENTIALS Credentials to the stack templates repo
- * STACK_TEMPLATE Heat stack HOT template
- * STACK_RECLASS_ADDRESS Stack reclass address
- * STACK_RECLASS_BRANCH Stack reclass repo branch
- * STACK_DELETE Delete stack when finished (bool)
- * STACK_REUSE Reuse stack (don't create one)
- * STACK_CLEANUP_JOB Name of job for deleting Heat stack
- *
- * Expected parameters:
- * required for STACK_TYPE=heat
- * HEAT_STACK_ENVIRONMENT Heat stack environmental parameters
- * HEAT_STACK_ZONE Heat stack availability zone
- * HEAT_STACK_PUBLIC_NET Heat stack floating IP pool
- * OPENSTACK_API_URL OpenStack API address
- * OPENSTACK_API_CREDENTIALS Credentials to the OpenStack API
- * OPENSTACK_API_PROJECT OpenStack project to connect to
- * OPENSTACK_API_PROJECT_DOMAIN Domain for OpenStack project
- * OPENSTACK_API_PROJECT_ID ID for OpenStack project
- * OPENSTACK_API_USER_DOMAIN Domain for OpenStack user
- * OPENSTACK_API_CLIENT Versions of OpenStack python clients
- * OPENSTACK_API_VERSION Version of the OpenStack API (2/3)
- *
- * SALT_MASTER_CREDENTIALS Credentials to the Salt API
- *
- * required for STACK_TYPE=NONE or empty string
- * SALT_MASTER_URL URL of Salt-API
- *
- * Test settings:
- * TEST_K8S_API_SERVER Kubernetes API address
- * TEST_K8S_CONFORMANCE_IMAGE Path to docker image with conformance e2e tests
- *
- * TEST_TEMPEST_IMAGE Tempest image link
- * TEST_DOCKER_INSTALL Install docker on the target if tue
- * TEST_TEMPEST_PATTERN If not false, run tests matched to pattern only
- * TEST_TEMPEST_TARGET Salt target for tempest node
- *
- * optional parameters for overwriting soft params
- * SALT_OVERRIDES YAML with overrides for Salt deployment
- *
- */
-
-common = new com.mirantis.mk.Common()
-git = new com.mirantis.mk.Git()
-openstack = new com.mirantis.mk.Openstack()
-orchestrate = new com.mirantis.mk.Orchestrate()
-salt = new com.mirantis.mk.Salt()
-test = new com.mirantis.mk.Test()
-
-_MAX_PERMITTED_STACKS = 2
-
-node {
- // try to get STACK_INSTALL or fallback to INSTALL if exists
- try {
- def temporary = STACK_INSTALL
- } catch (MissingPropertyException e) {
- try {
- STACK_INSTALL = INSTALL
- env['STACK_INSTALL'] = INSTALL
- } catch (MissingPropertyException e2) {
- common.errorMsg("Property STACK_INSTALL or INSTALL not found!")
- }
- }
- try {
- //
- // Prepare machines
- //
- stage ('Create infrastructure') {
-
- if (STACK_TYPE == 'heat') {
- // value defaults
- def openstackCloud
- def openstackVersion = OPENSTACK_API_CLIENT ? OPENSTACK_API_CLIENT : 'liberty'
- def openstackEnv = "${env.WORKSPACE}/venv"
-
- if (STACK_REUSE.toBoolean() == true && STACK_NAME == '') {
- error("If you want to reuse existing stack you need to provide it's name")
- }
-
- if (STACK_REUSE.toBoolean() == false) {
- // Don't allow to set custom heat stack name
- wrap([$class: 'BuildUser']) {
- if (env.BUILD_USER_ID) {
- STACK_NAME = "${env.BUILD_USER_ID}-${JOB_NAME}-${BUILD_NUMBER}"
- } else {
- STACK_NAME = "jenkins-${JOB_NAME}-${BUILD_NUMBER}"
- }
- currentBuild.description = STACK_NAME
- }
- }
-
- // set description
- currentBuild.description = "${STACK_NAME}"
-
- // get templates
- git.checkoutGitRepository('template', STACK_TEMPLATE_URL, STACK_TEMPLATE_BRANCH, STACK_TEMPLATE_CREDENTIALS)
-
- // create openstack env
- openstack.setupOpenstackVirtualenv(openstackEnv, openstackVersion)
- openstackCloud = openstack.createOpenstackEnv(
- OPENSTACK_API_URL, OPENSTACK_API_CREDENTIALS,
- OPENSTACK_API_PROJECT, OPENSTACK_API_PROJECT_DOMAIN,
- OPENSTACK_API_PROJECT_ID, OPENSTACK_API_USER_DOMAIN,
- OPENSTACK_API_VERSION)
- openstack.getKeystoneToken(openstackCloud, openstackEnv)
- //
- // Verify possibility of create stack for given user and stack type
- //
- wrap([$class: 'BuildUser']) {
- if (env.BUILD_USER_ID && !env.BUILD_USER_ID.equals("jenkins") && !env.BUILD_USER_ID.equals("mceloud") && !STACK_REUSE.toBoolean()) {
- def existingStacks = openstack.getStacksForNameContains(openstackCloud, "${env.BUILD_USER_ID}-${JOB_NAME}", openstackEnv)
- if(existingStacks.size() >= _MAX_PERMITTED_STACKS){
- STACK_DELETE = "false"
- throw new Exception("You cannot create new stack, you already have ${_MAX_PERMITTED_STACKS} stacks of this type (${JOB_NAME}). \nStack names: ${existingStacks}")
- }
- }
- }
- // launch stack
- if (STACK_REUSE.toBoolean() == false) {
- stage('Launch new Heat stack') {
- // create stack
- envParams = [
- 'instance_zone': HEAT_STACK_ZONE,
- 'public_net': HEAT_STACK_PUBLIC_NET
- ]
- try {
- envParams.put('cfg_reclass_branch', STACK_RECLASS_BRANCH)
- envParams.put('cfg_reclass_address', STACK_RECLASS_ADDRESS)
- } catch (MissingPropertyException e) {
- common.infoMsg("Property STACK_RECLASS_BRANCH or STACK_RECLASS_ADDRESS not found! Using default values from template.")
- }
- openstack.createHeatStack(openstackCloud, STACK_NAME, STACK_TEMPLATE, envParams, HEAT_STACK_ENVIRONMENT, openstackEnv)
- }
- }
-
- // get SALT_MASTER_URL
- saltMasterHost = openstack.getHeatStackOutputParam(openstackCloud, STACK_NAME, 'salt_master_ip', openstackEnv)
- currentBuild.description = "${STACK_NAME}: ${saltMasterHost}"
-
- SALT_MASTER_URL = "http://${saltMasterHost}:6969"
- }
- }
-
- //
- // Connect to Salt master
- //
-
- def saltMaster
- stage('Connect to Salt API') {
- saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
- }
-
- // Set up override params
- if (env.getEnvironment().containsKey('SALT_OVERRIDES')) {
- stage('Set Salt overrides') {
- salt.setSaltOverrides(saltMaster, SALT_OVERRIDES)
- }
- }
-
- //
- // Install
- //
-
- if (common.checkContains('STACK_INSTALL', 'core')) {
- stage('Install core infrastructure') {
- orchestrate.installFoundationInfra(saltMaster)
-
- if (common.checkContains('STACK_INSTALL', 'kvm')) {
- orchestrate.installInfraKvm(saltMaster)
- orchestrate.installFoundationInfra(saltMaster)
- }
-
- orchestrate.validateFoundationInfra(saltMaster)
- }
- }
-
- // install k8s
- if (common.checkContains('STACK_INSTALL', 'k8s')) {
-
- // install infra libs for k8s
- stage('Install Kubernetes infra') {
- orchestrate.installKubernetesInfra(saltMaster)
- }
-
- // If k8s install with contrail network manager then contrail need to be install first
- if (common.checkContains('STACK_INSTALL', 'contrail')) {
- stage('Install Contrail for Kubernetes') {
- orchestrate.installContrailNetwork(saltMaster)
- orchestrate.installContrailCompute(saltMaster)
- orchestrate.installKubernetesContrailCompute(saltMaster)
- }
- }
-
- stage('Install Kubernetes control') {
- orchestrate.installKubernetesControl(saltMaster)
- }
- }
-
- // install openstack
- if (common.checkContains('STACK_INSTALL', 'openstack')) {
- // install Infra and control, tests, ...
-
- stage('Install OpenStack infra') {
- orchestrate.installOpenstackInfra(saltMaster)
- }
-
- stage('Install OpenStack control') {
- orchestrate.installOpenstackControl(saltMaster)
- }
-
- stage('Install OpenStack network') {
-
- if (common.checkContains('STACK_INSTALL', 'contrail')) {
- orchestrate.installContrailNetwork(saltMaster)
- } else if (common.checkContains('STACK_INSTALL', 'ovs')) {
- orchestrate.installOpenstackNetwork(saltMaster)
- }
-
- salt.cmdRun(saltMaster, 'I@keystone:server', '. /root/keystonerc; neutron net-list')
- salt.cmdRun(saltMaster, 'I@keystone:server', '. /root/keystonerc; nova net-list')
- }
-
- if (salt.testTarget(saltMaster, 'I@ironic:conductor')){
- stage('Install OpenStack Ironic conductor') {
- orchestrate.installIronicConductor(saltMaster)
- }
- }
-
-
- stage('Install OpenStack compute') {
- orchestrate.installOpenstackCompute(saltMaster)
-
- if (common.checkContains('STACK_INSTALL', 'contrail')) {
- orchestrate.installContrailCompute(saltMaster)
- }
- }
-
- }
-
-
- if (common.checkContains('STACK_INSTALL', 'sl-legacy')) {
- stage('Install StackLight v1') {
- orchestrate.installStacklightv1Control(saltMaster)
- orchestrate.installStacklightv1Client(saltMaster)
- }
- }
-
- if (common.checkContains('STACK_INSTALL', 'stacklight')) {
- stage('Install StackLight') {
- orchestrate.installDockerSwarm(saltMaster)
- orchestrate.installStacklight(saltMaster)
- }
- }
-
- //
- // Test
- //
- def artifacts_dir = '_artifacts/'
-
- if (common.checkContains('STACK_TEST', 'k8s')) {
- stage('Run k8s bootstrap tests') {
- def image = 'tomkukral/k8s-scripts'
- def output_file = image.replaceAll('/', '-') + '.output'
-
- // run image
- test.runConformanceTests(saltMaster, 'ctl01*', TEST_K8S_API_SERVER, image)
-
- // collect output
- sh "mkdir -p ${artifacts_dir}"
- file_content = salt.getFileContent(saltMaster, 'ctl01*', '/tmp/' + output_file)
- writeFile file: "${artifacts_dir}${output_file}", text: file_content
- sh "cat ${artifacts_dir}${output_file}"
-
- // collect artifacts
- archiveArtifacts artifacts: "${artifacts_dir}${output_file}"
- }
-
- stage('Run k8s conformance e2e tests') {
- //test.runConformanceTests(saltMaster, 'ctl01*', TEST_K8S_API_SERVER, TEST_K8S_CONFORMANCE_IMAGE)
-
- def image = TEST_K8S_CONFORMANCE_IMAGE
- def output_file = image.replaceAll('/', '-') + '.output'
-
- // run image
- test.runConformanceTests(saltMaster, 'ctl01*', TEST_K8S_API_SERVER, image)
-
- // collect output
- sh "mkdir -p ${artifacts_dir}"
- file_content = salt.getFileContent(saltMaster, 'ctl01*', '/tmp/' + output_file)
- writeFile file: "${artifacts_dir}${output_file}", text: file_content
- sh "cat ${artifacts_dir}${output_file}"
-
- // collect artifacts
- archiveArtifacts artifacts: "${artifacts_dir}${output_file}"
- }
- }
-
- if (common.checkContains('STACK_TEST', 'openstack')) {
- if (common.checkContains('TEST_DOCKER_INSTALL', 'true')) {
- test.install_docker(saltMaster, TEST_TEMPEST_TARGET)
- }
- stage('Run OpenStack tests') {
- test.runTempestTests(saltMaster, TEST_TEMPEST_IMAGE, TEST_TEMPEST_TARGET, TEST_TEMPEST_PATTERN)
- }
-
- stage('Copy Tempest results to config node') {
- test.copyTempestResults(saltMaster, TEST_TEMPEST_TARGET)
- }
- }
-
- if (common.checkContains('STACK_INSTALL', 'finalize')) {
- stage('Finalize') {
- salt.runSaltProcessStep(saltMaster, '*', 'state.apply', [], null, true)
- }
- }
- } catch (Throwable e) {
- currentBuild.result = 'FAILURE'
- throw e
- } finally {
-
-
- //
- // Clean
- //
-
- if (STACK_TYPE == 'heat') {
- // send notification
- common.sendNotification(currentBuild.result, STACK_NAME, ["slack"])
-
- if (STACK_DELETE.toBoolean() == true) {
- common.errorMsg('Heat job cleanup triggered')
- stage('Trigger cleanup job') {
- build(job: STACK_CLEANUP_JOB, parameters: [
- [$class: 'StringParameterValue', name: 'STACK_NAME', value: STACK_NAME],
- [$class: 'StringParameterValue', name: 'STACK_TYPE', value: STACK_TYPE],
- [$class: 'StringParameterValue', name: 'OPENSTACK_API_URL', value: OPENSTACK_API_URL],
- [$class: 'StringParameterValue', name: 'OPENSTACK_API_CREDENTIALS', value: OPENSTACK_API_CREDENTIALS],
- [$class: 'StringParameterValue', name: 'OPENSTACK_API_PROJECT', value: OPENSTACK_API_PROJECT],
- [$class: 'StringParameterValue', name: 'OPENSTACK_API_PROJECT_DOMAIN', value: OPENSTACK_API_PROJECT_DOMAIN],
- [$class: 'StringParameterValue', name: 'OPENSTACK_API_PROJECT_ID', value: OPENSTACK_API_PROJECT_ID],
- [$class: 'StringParameterValue', name: 'OPENSTACK_API_USER_DOMAIN', value: OPENSTACK_API_USER_DOMAIN],
- [$class: 'StringParameterValue', name: 'OPENSTACK_API_CLIENT', value: OPENSTACK_API_CLIENT],
- [$class: 'StringParameterValue', name: 'OPENSTACK_API_VERSION', value: OPENSTACK_API_VERSION]
- ])
- }
- } else {
- if (currentBuild.result == 'FAILURE') {
- common.errorMsg("Deploy job FAILED and was not deleted. Please fix the problem and delete stack on you own.")
-
- if (SALT_MASTER_URL) {
- common.errorMsg("Salt master URL: ${SALT_MASTER_URL}")
- }
- }
-
- }
- }
- }
-}
diff --git a/test-cookiecutter-reclass.groovy b/test-cookiecutter-reclass.groovy
index a07f7fb..aa695f2 100644
--- a/test-cookiecutter-reclass.groovy
+++ b/test-cookiecutter-reclass.groovy
@@ -97,7 +97,7 @@
'buildId' : "${chunkJob.number}"])
}
-def StepTestModel(basename, reclassArtifactName, artifactCopyPath, useExtraRepos = false) {
+def StepTestModel(_basename, _reclassArtifactName, _artifactCopyPath, _useExtraRepos = false) {
// We need to wrap what we return in a Groovy closure, or else it's invoked
// when this method is called, not when we pass it to parallel.
// To do this, you need to wrap the code below in { }, and either return
@@ -105,7 +105,7 @@
// return node object
return {
node(slaveNode) {
- testModel(basename, reclassArtifactName, artifactCopyPath, useExtraRepos)
+ testModel(_basename, _reclassArtifactName, _artifactCopyPath, _useExtraRepos)
}
}
}
@@ -130,6 +130,12 @@
def StepGenerateModels(_contextFileList, _virtualenv, _templateEnvDir) {
return {
+ if (fileExists(new File(_templateEnvDir, 'tox.ini').toString())) {
+ // Merge contexts for nice base.yml based diff
+ dir(_templateEnvDir) {
+ sh('tox -ve merge_contexts')
+ }
+ }
for (contextFile in _contextFileList) {
def basename = common.GetBaseName(contextFile, '.yml')
def contextYaml = readYaml text: readFile(file: "${_templateEnvDir}/contexts/${contextFile}")
@@ -139,7 +145,8 @@
common.warningMsg('Disabling secrets_encryption_enabled for tests!')
contextYaml['default_context']['secrets_encryption_enabled'] = 'False'
}
- context = mcpCommon.dumpYAML(contextYaml)
+
+ def context = mcpCommon.dumpYAML(contextYaml)
if (!fileExists(new File(_templateEnvDir, 'tox.ini').toString())) {
common.warningMsg('Forming NEW reclass-root structure...')
python.generateModel(context, basename, 'cfg01', _virtualenv, "${_templateEnvDir}/model", _templateEnvDir)
@@ -149,10 +156,6 @@
// temp dir, and then copy it over initial structure.
def reclassTempRootDir = sh(script: "mktemp -d -p ${env.WORKSPACE}", returnStdout: true).trim()
python.generateModel(context, basename, 'cfg01', _virtualenv, reclassTempRootDir, _templateEnvDir)
- // Merge contexts for nice base.yml based diff
- dir(_templateEnvDir) {
- sh('tox -ve merge_contexts')
- }
dir("${_templateEnvDir}/model/${basename}/") {
if (fileExists(new File(reclassTempRootDir, 'reclass').toString())) {
common.warningMsg('Forming NEW reclass-root structure...')
@@ -261,8 +264,8 @@
// copy reclass system under envPath with -R and trailing / to support symlinks direct copy
sh("cp -R ${archiveBaseName}/ ${envPath}/${classesSystemDir}")
dir(envPath) {
- for (String context : contextList) {
- def basename = common.GetBaseName(context, '.yml')
+ for (String _context : contextList) {
+ def basename = common.GetBaseName(_context, '.yml')
dir("${envPath}/model/${basename}/classes") {
sh(script: "ln -sfv ../../../${classesSystemDir} system ")
}
diff --git a/test-run-rally.groovy b/test-run-rally.groovy
deleted file mode 100644
index 3f2339f..0000000
--- a/test-run-rally.groovy
+++ /dev/null
@@ -1,61 +0,0 @@
-/**
- *
- * Service test pipeline
- *
- * Expected parameters:
- * SALT_MASTER_URL URL of Salt master
- * SALT_MASTER_CREDENTIALS Credentials to the Salt API
- * Test settings:
- * IMAGE_LINK Link to docker image with Rally
- * RALLY_SCENARIO Rally test scenario
- * TEST_TARGET Salt target for Rally node
- * CONTAINER_NAME Name of the Docker container which runs Rally
- * CLEANUP_REPORTS_AND_CONTAINER Cleanup reports from rally,tempest container, remove all containers started the IMAGE_LINK
- * DO_CLEANUP_RESOURCES If "true": runs clean-up script for removing Rally and Tempest resources
- */
-
-
-common = new com.mirantis.mk.Common()
-salt = new com.mirantis.mk.Salt()
-test = new com.mirantis.mk.Test()
-def python = new com.mirantis.mk.Python()
-
-def pepperEnv = "pepperEnv"
-timeout(time: 12, unit: 'HOURS') {
- node("python") {
- try {
-
- //
- // Prepare connection
- //
- stage('Setup virtualenv for Pepper') {
- python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
- }
-
- //
- // Test
- //
-
- stage('Run OpenStack Rally scenario') {
- test.runRallyScenarios(pepperEnv, IMAGE_LINK, TEST_TARGET, RALLY_SCENARIO, "/home/rally/rally_reports/",
- DO_CLEANUP_RESOURCES)
- }
- stage('Copy test reports') {
- test.copyTempestResults(pepperEnv, TEST_TARGET)
- }
- stage('Archiving test artifacts') {
- test.archiveRallyArtifacts(pepperEnv, TEST_TARGET)
- }
- } catch (Throwable e) {
- currentBuild.result = 'FAILURE'
- throw e
- } finally {
- if (CLEANUP_REPORTS_AND_CONTAINER.toBoolean()) {
- stage('Cleanup reports and container') {
- test.removeReports(pepperEnv, TEST_TARGET, "rally_reports", 'rally_reports.tar')
- test.removeDockerContainer(pepperEnv, TEST_TARGET, CONTAINER_NAME)
- }
- }
- }
- }
-}
diff --git a/test-run-tempest.groovy b/test-run-tempest.groovy
deleted file mode 100644
index 6edb276..0000000
--- a/test-run-tempest.groovy
+++ /dev/null
@@ -1,59 +0,0 @@
-/**
- *
- * Service test pipeline
- *
- * Expected parameters:
- * SALT_MASTER_URL URL of Salt master
- * SALT_MASTER_CREDENTIALS Credentials to the Salt API
- * Test settings:
- * IMAGE_LINK Link to docker image with Rally and Tempest
- * TEST_TEMPEST_PATTERN If not false, run tests matched to pattern only
- * TEST_TARGET Salt target for tempest node
- * CLEANUP_REPORTS Cleanup reports from rally,tempest container, remove all containers started the IMAGE_LINK
- * SET Predefined set for tempest tests
- * CONCURRENCY How many processes to use to run Tempest tests
- * DO_CLEANUP_RESOURCES If "true": runs clean-up script for removing Rally and Tempest resources
- */
-
-
-common = new com.mirantis.mk.Common()
-salt = new com.mirantis.mk.Salt()
-test = new com.mirantis.mk.Test()
-def python = new com.mirantis.mk.Python()
-
-def pepperEnv = "pepperEnv"
-timeout(time: 12, unit: 'HOURS') {
- node("python") {
- try {
-
- stage('Setup virtualenv for Pepper') {
- python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
- }
-
- //
- // Test
- //
-
- stage('Run OpenStack Tempest tests') {
- test.runTempestTests(pepperEnv, IMAGE_LINK, TEST_TARGET, TEST_TEMPEST_PATTERN, "/home/rally/rally_reports/",
- "/home/rally/keystonercv3", SET, CONCURRENCY, "mcp.conf", "mcp_skip.list", "/root/keystonercv3",
- "/root/rally_reports", DO_CLEANUP_RESOURCES)
- }
- stage('Copy test reports') {
- test.copyTempestResults(pepperEnv, TEST_TARGET)
- }
- stage('Archiving test artifacts') {
- test.archiveRallyArtifacts(pepperEnv, TEST_TARGET)
- }
- } catch (Throwable e) {
- currentBuild.result = 'FAILURE'
- throw e
- } finally {
- if (CLEANUP_REPORTS.toBoolean()) {
- stage('Cleanup reports') {
- test.removeReports(pepperEnv, TEST_TARGET, "rally_reports", 'rally_reports.tar')
- }
- }
- }
- }
-}
diff --git a/test-salt-model-wrapper.groovy b/test-salt-model-wrapper.groovy
deleted file mode 100644
index 118431a..0000000
--- a/test-salt-model-wrapper.groovy
+++ /dev/null
@@ -1,235 +0,0 @@
-/*
- Global CI wrapper for testing next projects:
- - salt-models/reclass-system
- - mk/cookiecutter-templates
-
- Wrapper allows to test cross-project patches, based on
- 'Depends-On: http://<gerrit_address>/<change_number>' key phrase
- */
-
-import groovy.json.JsonOutput
-
-gerrit = new com.mirantis.mk.Gerrit()
-
-cookiecutterTemplatesRepo = 'mk/cookiecutter-templates'
-reclassSystemRepo = 'salt-models/reclass-system'
-slaveNode = env.getProperty('SLAVE_NODE') ?: 'virtual'
-
-voteMatrix = [
- 'test-mk-cookiecutter-templates' : true,
- 'test-drivetrain' : true,
- 'oscore-test-cookiecutter-models': false,
- 'test-salt-model-infra' : true,
- 'test-salt-model-mcp-virtual-lab': false,
-]
-
-baseGerritConfig = [:]
-buildTestParams = [:]
-jobResultComments = [:]
-commentLock = false
-
-// post Gerrit review comment to patch
-def setGerritReviewComment() {
- if (baseGerritConfig) {
- while (commentLock) {
- sleep 5
- }
- commentLock = true
- LinkedHashMap config = baseGerritConfig.clone()
- String jobResultComment = ''
- jobResultComments.each { threadName, info ->
- String skipped = voteMatrix.get(info.job, 'true') ? '' : '(non-voting)'
- jobResultComment += "- ${threadName} ${info.url}console : ${info.status} ${skipped}".trim() + '\n'
- }
- config['message'] = sh(script: "echo '${jobResultComment}'", returnStdout: true).trim()
- gerrit.postGerritComment(config)
- commentLock = false
- }
-}
-
-// get job parameters for YAML-based job parametrization
-def yamlJobParameters(LinkedHashMap jobParams) {
- return [
- [$class: 'TextParameterValue', name: 'EXTRA_VARIABLES_YAML', value: JsonOutput.toJson(jobParams)]
- ]
-}
-
-// run needed job with params
-def runTests(String jobName, ArrayList jobParams, String threadName = '', Boolean voteOverride = null) {
- threadName = threadName ? threadName : jobName
- def propagateStatus = voteOverride != null ? voteOverride : voteMatrix.get(jobName, true)
- return {
- def jobBuild = build job: jobName, propagate: false, parameters: jobParams
- jobResultComments[threadName] = ['url': jobBuild.absoluteUrl, 'status': jobBuild.result, 'job': jobName]
- setGerritReviewComment()
- if (propagateStatus && jobBuild.result == 'FAILURE') {
- throw new Exception("Build ${threadName} is failed!")
- }
- }
-}
-
-// set params based on depending patches
-def setupDependingVars(LinkedHashMap dependingProjects) {
- if (dependingProjects) {
- if (dependingProjects.containsKey(reclassSystemRepo)) {
- buildTestParams['RECLASS_SYSTEM_GIT_REF'] = dependingProjects[reclassSystemRepo].ref
- buildTestParams['RECLASS_SYSTEM_BRANCH'] = dependingProjects[reclassSystemRepo].branch
- }
- if (dependingProjects.containsKey(cookiecutterTemplatesRepo)) {
- buildTestParams['COOKIECUTTER_TEMPLATE_REF'] = dependingProjects[cookiecutterTemplatesRepo].ref
- buildTestParams['COOKIECUTTER_TEMPLATE_BRANCH'] = dependingProjects[cookiecutterTemplatesRepo].branch
- }
- }
-}
-
-timeout(time: 12, unit: 'HOURS') {
- node(slaveNode) {
- def common = new com.mirantis.mk.Common()
-
- // Var EXTRA_VARIABLES_YAML contains any additional parameters for tests,
- // like manually specified Gerrit Refs/URLs, additional parameters and so on
- def buildTestParamsYaml = env.getProperty('EXTRA_VARIABLES_YAML')
- if (buildTestParamsYaml) {
- common.mergeEnv(env, buildTestParamsYaml)
- buildTestParams = readYaml text: buildTestParamsYaml
- }
-
- // init required job variables
- LinkedHashMap job_env = env.getEnvironment().findAll { k, v -> v }
-
- // Gerrit parameters
- String gerritCredentials = job_env.get('CREDENTIALS_ID', 'gerrit')
- String gerritRef = job_env.get('GERRIT_REFSPEC')
- String gerritProject = job_env.get('GERRIT_PROJECT')
- String gerritName = job_env.get('GERRIT_NAME')
- String gerritScheme = job_env.get('GERRIT_SCHEME')
- String gerritHost = job_env.get('GERRIT_HOST')
- String gerritPort = job_env.get('GERRIT_PORT')
- String gerritChangeNumber = job_env.get('GERRIT_CHANGE_NUMBER')
- String gerritPatchSetNumber = job_env.get('GERRIT_PATCHSET_NUMBER')
- String gerritBranch = job_env.get('GERRIT_BRANCH')
- Boolean gateMode = job_env.get('GERRIT_CI_MERGE_TRIGGER', false).toBoolean()
-
- // Common and manual build parameters
- LinkedHashMap projectsMap = [:]
- String distribRevision = 'nightly'
- //checking if the branch is from release
- if (gerritBranch.startsWith('release')) {
- distribRevision = gerritBranch.tokenize('/')[-1]
- // Check if we are going to test bleeding-edge release, which doesn't have binary release yet
- // After 2018q4 releases, need to also check 'static' repo, for example ubuntu.
- binTest = common.checkRemoteBinary(['mcp_version': distribRevision])
- if (!binTest.linux_system_repo_url || !binTest.linux_system_repo_ubuntu_url) {
- common.errorMsg("Binary release: ${distribRevision} not exist or not full. Fallback to 'proposed'! ")
- distribRevision = 'proposed'
- }
- }
- ArrayList testModels = job_env.get('TEST_MODELS', 'mcp-virtual-lab,infra').split(',')
-
- stage('Gerrit prepare') {
- // check if change aren't already merged
- def gerritChange = gerrit.getGerritChange(gerritName, gerritHost, gerritChangeNumber, gerritCredentials)
- if (gerritChange.status == "MERGED") {
- common.successMsg('Patch set is alredy merged, no need to test it')
- currentBuild.result = 'SUCCESS'
- return
- }
- buildTestParams << job_env.findAll { k, v -> k ==~ /GERRIT_.+/ }
- baseGerritConfig = [
- 'gerritName' : gerritName,
- 'gerritHost' : gerritHost,
- 'gerritPort' : gerritPort,
- 'gerritChangeNumber' : gerritChangeNumber,
- 'credentialsId' : gerritCredentials,
- 'gerritPatchSetNumber': gerritPatchSetNumber,
- ]
- LinkedHashMap gerritDependingProjects = gerrit.getDependentPatches(baseGerritConfig)
- setupDependingVars(gerritDependingProjects)
- ArrayList descriptionMsgs = [
- "Running with next parameters:",
- "Ref for ${gerritProject} => ${gerritRef}",
- "Branch for ${gerritProject} => ${gerritBranch}"
- ]
- descriptionMsgs.add("Distrib revision => ${distribRevision}")
- for (String project in gerritDependingProjects.keySet()) {
- descriptionMsgs.add("---")
- descriptionMsgs.add("Depending patch to ${project} found:")
- descriptionMsgs.add("Ref for ${project} => ${gerritDependingProjects[project]['ref']}")
- descriptionMsgs.add("Branch for ${project} => ${gerritDependingProjects[project]['branch']}")
- }
- currentBuild.description = descriptionMsgs.join('<br/>')
- gerrit.gerritPatchsetCheckout([
- credentialsId: gerritCredentials
- ])
- }
-
- stage("Run tests") {
- def documentationOnly = sh(script: "git diff-tree --no-commit-id --name-only -r HEAD | grep -v .releasenotes", returnStatus: true) == 1
- if (documentationOnly) {
- common.infoMsg("Tests skipped, documenation only changed!")
- currentBuild.result = 'SUCCESS'
- return
- }
-
- def branches = [:]
- branches.failFast = false
- String branchJobName = ''
-
- if (gerritProject == reclassSystemRepo && gerritBranch == 'master') {
- sh("git diff-tree --no-commit-id --diff-filter=d --name-only -r HEAD | grep .yml | xargs -I {} python -c \"import yaml; yaml.load(open('{}', 'r'))\" \\;")
- def defaultSystemURL = "${gerritScheme}://${gerritName}@${gerritHost}:${gerritPort}/${gerritProject}"
- for (int i = 0; i < testModels.size(); i++) {
- def cluster = testModels[i]
- def clusterGitUrl = defaultSystemURL.substring(0, defaultSystemURL.lastIndexOf("/") + 1) + cluster
- branchJobName = "test-salt-model-${cluster}"
- def jobParams = [
- [$class: 'StringParameterValue', name: 'DEFAULT_GIT_URL', value: clusterGitUrl],
- [$class: 'StringParameterValue', name: 'DEFAULT_GIT_REF', value: "HEAD"],
- [$class: 'StringParameterValue', name: 'SYSTEM_GIT_URL', value: defaultSystemURL],
- [$class: 'StringParameterValue', name: 'SYSTEM_GIT_REF', value: gerritRef],
- ]
- branches[branchJobName] = runTests(branchJobName, jobParams)
- }
- }
- if (gerritProject == reclassSystemRepo || gerritProject == cookiecutterTemplatesRepo) {
- branchJobName = 'test-mk-cookiecutter-templates'
- branches[branchJobName] = runTests(branchJobName, yamlJobParameters(buildTestParams))
- }
-
- if (!gateMode) {
- // testing backward compatibility
- if (gerritBranch == 'master' && gerritProject == reclassSystemRepo) {
- def backwardCompatibilityRefsToTest = ['proposed', 'release/2018.11.0', 'release/2019.2.0']
- for (String oldRef in backwardCompatibilityRefsToTest) {
- LinkedHashMap buildTestParamsOld = buildTestParams.clone()
- buildTestParamsOld['COOKIECUTTER_TEMPLATE_REF'] = ''
- buildTestParamsOld['COOKIECUTTER_TEMPLATE_BRANCH'] = oldRef
- String threadName = "${branchJobName}-${oldRef}"
- // disable votes for release/2018.11.0 branch
- overrideVote = oldRef == 'release/2018.11.0' ? false : null
- branches[threadName] = runTests(branchJobName, yamlJobParameters(buildTestParamsOld), threadName, overrideVote)
- }
- }
- if (gerritProject == cookiecutterTemplatesRepo) {
- branchJobName = 'test-drivetrain'
- branches[branchJobName] = runTests(branchJobName, yamlJobParameters(buildTestParams))
- branchJobName = 'oscore-test-cookiecutter-models'
- branches[branchJobName] = runTests(branchJobName, yamlJobParameters(buildTestParams))
- }
- if (env['GERRIT_EVENT_COMMENT_TEXT'] && new String(env['GERRIT_EVENT_COMMENT_TEXT'].decodeBase64()) =~ /\ntest_schemas.*/) {
- if (gerritProject == reclassSystemRepo) {
- branchJobName = 'oscore-test-cookiecutter-models'
- branches[branchJobName] = runTests(branchJobName, yamlJobParameters(buildTestParams))
- }
- }
- }
- branches.keySet().each { key ->
- if (branches[key] instanceof Closure) {
- jobResultComments[key] = ['url': job_env.get('BUILD_URL'), 'status': 'WAITING']
- }
- }
- setGerritReviewComment()
- parallel branches
- }
- }
-}