Merge "add cfg01 restore pipeline"
diff --git a/ceph-add-osd-upmap.groovy b/ceph-add-osd-upmap.groovy
new file mode 100644
index 0000000..96ca29d
--- /dev/null
+++ b/ceph-add-osd-upmap.groovy
@@ -0,0 +1,133 @@
+/**
+ *
+ * Add Ceph node to existing cluster using upmap mechanism
+ *
+ * Required parameters:
+ * SALT_MASTER_URL URL of Salt master
+ * SALT_MASTER_CREDENTIALS Credentials to the Salt API
+ * HOST Host (minion id) to be added
+ *
+ */
+
+common = new com.mirantis.mk.Common()
+salt = new com.mirantis.mk.Salt()
+def python = new com.mirantis.mk.Python()
+orchestrate = new com.mirantis.mk.Orchestrate()
+
+def waitForHealthy(master, count=0, attempts=100) {
+ // wait for healthy cluster
+ while (count<attempts) {
+ def health = runCephCommand('ceph health')['return'][0].values()[0]
+ if (health.contains('HEALTH_OK')) {
+ common.infoMsg('Cluster is healthy')
+ break;
+ }
+ count++
+ sleep(10)
+ }
+}
+
+def runCephCommand(cmd) {
+ return salt.cmdRun("pepperEnv", "I@ceph:mon and I@ceph:common:keyring:admin", cmd, checkResponse=true, batch=null, output=false)
+}
+
+def getpgmap(master) {
+ return runCephCommand('ceph pg ls remapped --format=json')['return'][0].values()[0]
+}
+
+def generatemapping(master,pgmap,map) {
+ def pg_new
+ def pg_old
+
+ for ( pg in pgmap )
+ {
+
+ pg_new = pg["up"].minus(pg["acting"])
+ pg_old = pg["acting"].minus(pg["up"])
+
+ for ( i = 0; i < pg_new.size(); i++ )
+ {
+ def string = "ceph osd pg-upmap-items " + pg["pgid"].toString() + " " + pg_new[i] + " " + pg_old[i] + ";"
+ map.add(string)
+ }
+
+ }
+}
+
+def pepperEnv = "pepperEnv"
+
+timeout(time: 12, unit: 'HOURS') {
+ node("python") {
+
+ // create connection to salt master
+ python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+
+ stage ("verify client versions")
+ {
+ def nodes = salt.getMinions("pepperEnv", "I@ceph:common and not E@mon*")
+ for ( node in nodes )
+ {
+ def versions = salt.cmdRun("pepperEnv", node, "ceph features --format json", checkResponse=true, batch=null, output=false).values()[0]
+ versions = new groovy.json.JsonSlurperClassic().parseText(versions[0][node])
+ if ( versions['client']['group']['release'] != 'luminous' )
+ {
+ throw new Exception("client installed on " + node + " is not luminous. Update all clients to luminous before using this pipeline")
+ }
+ }
+ }
+
+ stage ("enable luminous compat")
+ {
+ runCephCommand('ceph osd set-require-min-compat-client luminous')['return'][0].values()[0]
+ }
+
+ stage ("enable upmap balancer")
+ {
+ runCephCommand('ceph balancer on')['return'][0].values()[0]
+ runCephCommand('ceph balancer mode upmap')['return'][0].values()[0]
+ }
+
+
+ stage ("set norebalance")
+ {
+ runCephCommand('ceph osd set norebalance')['return'][0].values()[0]
+ }
+
+ stage('Install Ceph OSD') {
+ orchestrate.installCephOsd(pepperEnv, HOST)
+ }
+
+ def mapping = []
+
+ stage ("update mappings")
+ {
+ def pgmap1 = getpgmap(pepperEnv)
+ if ( pgmap1 == '' )
+ {
+ return 1
+ }
+ else
+ {
+ def pgmap = new groovy.json.JsonSlurperClassic().parseText(pgmap1)
+ for(int x=1; x<=3; x++){
+ pgmap1 = getpgmap(pepperEnv)
+ generatemapping(pepperEnv,pgmap,mapping)
+ mapping.each(this.&runCephCommand)
+ sleep(30)
+ }
+ }
+
+ }
+
+ stage ("unset norebalance")
+ {
+ runCephCommand('ceph osd unset norebalance')['return'][0].values()[0]
+ }
+
+ stage ("wait for healthy cluster")
+ {
+ waitForHealthy(pepperEnv)
+ }
+
+ }
+}
diff --git a/cloud-deploy-pipeline.groovy b/cloud-deploy-pipeline.groovy
index 1412a76..de8d8fd 100644
--- a/cloud-deploy-pipeline.groovy
+++ b/cloud-deploy-pipeline.groovy
@@ -199,6 +199,12 @@
common.infoMsg("Property STACK_RECLASS_BRANCH or STACK_RECLASS_ADDRESS not found! Using default values from template.")
}
+    // put reclass-system repo to a branch/tag/ref. If empty, the reclass pinned commit in the AIO model will be used.
+ if (common.validInputParam('RECLASS_SYSTEM_BRANCH')) {
+ common.infoMsg("Setting reclass-system repo to ${RECLASS_SYSTEM_BRANCH} branch/tag/ref")
+ envParams.put('cfg_reclass_system_branch', RECLASS_SYSTEM_BRANCH)
+ }
+
// put formulas revision - stable, testing or nightly
if (common.validInputParam('FORMULA_PKG_REVISION')) {
common.infoMsg("Setting formulas revision to ${FORMULA_PKG_REVISION}")
@@ -342,6 +348,9 @@
// Install
//
+ // Check if all minions are reachable and ready
+ salt.checkTargetMinionsReady(['saltId': venvPepper, 'target': '*'])
+
if (common.checkContains('STACK_INSTALL', 'core')) {
stage('Install core infrastructure') {
def staticMgmtNetwork = false
@@ -361,8 +370,7 @@
stage('Install infra') {
if (common.checkContains('STACK_INSTALL', 'core') ||
- common.checkContains('STACK_INSTALL', 'openstack') ||
- common.checkContains('STACK_INSTALL', 'oss')) {
+ common.checkContains('STACK_INSTALL', 'openstack')) {
orchestrate.installInfra(venvPepper, extra_tgt)
}
}
@@ -376,7 +384,7 @@
// install k8s
if (common.checkContains('STACK_INSTALL', 'k8s')) {
extra_tgt_bckp = extra_tgt
- extra_tgt = 'and not kdt* ' + extra_tgt_bckp
+ extra_tgt = 'and not kdt* and not cfg* ' + extra_tgt_bckp
stage('Install Kubernetes infra') {
if (STACK_TYPE == 'aws') {
// configure kubernetes_control_address - save loadbalancer
@@ -460,8 +468,10 @@
stage('Install Kubernetes control for kdt') {
salt.enforceStateWithTest([saltId: venvPepper, target: "I@kubernetes:master ${extra_tgt}", state: 'kubernetes.master.kube-addons'])
+ salt.enforceStateWithTest([saltId: venvPepper, target: "I@kubernetes:master ${extra_tgt}", state: 'kubernetes.pool.images'])
orchestrate.installKubernetesControl(venvPepper, extra_tgt)
+ salt.enforceStateWithTest([saltId: venvPepper, target: "I@kubernetes:master ${extra_tgt}", state: 'nginx.server'])
// collect artifacts (kubeconfig)
writeFile(file: 'kubeconfig-kdt', text: salt.getFileContent(venvPepper, "I@kubernetes:master and *01* ${extra_tgt}", '/etc/kubernetes/admin-kube-config'))
archiveArtifacts(artifacts: 'kubeconfig-kdt')
@@ -548,12 +558,6 @@
}
}
- if (common.checkContains('STACK_INSTALL', 'oss')) {
- stage('Install Oss infra') {
- orchestrate.installOssInfra(venvPepper, extra_tgt)
- }
- }
-
if (common.checkContains('STACK_INSTALL', 'cicd')) {
stage('Install Cicd') {
extra_tgt_bckp = extra_tgt
@@ -587,16 +591,6 @@
}
}
- if (common.checkContains('STACK_INSTALL', 'oss')) {
- stage('Install OSS') {
- if (!common.checkContains('STACK_INSTALL', 'stacklight')) {
- // In case if StackLightv2 enabled containers already started
- salt.enforceState(venvPepper, "I@docker:swarm:role:master and I@devops_portal:config ${extra_tgt}", 'docker.client', true)
- }
- orchestrate.installOss(venvPepper, extra_tgt)
- }
- }
-
//
// Test
//
@@ -641,47 +635,6 @@
}
}
- if (common.checkContains('STACK_TEST', 'openstack')) {
- if (common.checkContains('TEST_DOCKER_INSTALL', 'true')) {
- test.install_docker(venvPepper, TEST_TEMPEST_TARGET)
- }
- stage('Run OpenStack tests') {
- test.runTempestTests(venvPepper, TEST_TEMPEST_IMAGE, TEST_TEMPEST_TARGET, TEST_TEMPEST_PATTERN)
- }
-
- stage('Copy Tempest results to config node') {
- test.copyTempestResults(venvPepper, TEST_TEMPEST_TARGET)
- }
-
- stage('Archive rally artifacts') {
- test.archiveRallyArtifacts(venvPepper, TEST_TEMPEST_TARGET)
- }
-
- if (common.validInputParam('TESTRAIL_REPORT') && TESTRAIL_REPORT.toBoolean()) {
- stage('Upload test results to TestRail') {
- def date = sh(script: 'date +%Y-%m-%d', returnStdout: true).trim()
- def plan = TESTRAIL_PLAN ?: "[${TESTRAIL_MILESTONE}]System-Devcloud-${date}"
- def group = TESTRAIL_GROUP ?: STACK_TEMPLATE
-
- salt.cmdRun(venvPepper, TEST_TEMPEST_TARGET, "cd /root/rally_reports && cp \$(ls -t *xml | head -n1) report.xml")
- test.uploadResultsTestrail("/root/rally_reports/report.xml",
- TESTRAIL_REPORTER_IMAGE, group, TESTRAIL_QA_CREDENTIALS,
- plan, TESTRAIL_MILESTONE, TESTRAIL_SUITE)
- }
- }
- }
-
-
- if (common.checkContains('STACK_TEST', 'ceph')) {
- stage('Run infra tests') {
- sleep(120)
- def cmd = "apt-get install -y python-pip && pip install -r /usr/share/salt-formulas/env/ceph/files/testinfra/requirements.txt && python -m pytest --junitxml=/root/report.xml /usr/share/salt-formulas/env/ceph/files/testinfra/"
- salt.cmdRun(venvPepper, 'I@salt:master', cmd, false)
- writeFile(file: 'report.xml', text: salt.getFileContent(venvPepper, 'I@salt:master', '/root/report.xml'))
- junit(keepLongStdio: true, testResults: 'report.xml')
- }
- }
-
if (common.checkContains('STACK_TEST', 'opencontrail')) {
stage('Run opencontrail tests') {
def opencontrail_tests_dir = "/opt/opencontrail_test/fuel-plugin-contrail/plugin_test/vapor/"
diff --git a/cvp-sanity.groovy b/cvp-sanity.groovy
deleted file mode 100644
index 7adca5a..0000000
--- a/cvp-sanity.groovy
+++ /dev/null
@@ -1,40 +0,0 @@
-/**
- *
- * Launch sanity verification of the cloud
- *
- * Expected parameters:
- * SALT_MASTER_URL URL of Salt master
- * SALT_MASTER_CREDENTIALS Credentials to the Salt API
- *
- * SANITY_TESTS_SET Leave empty for full run or choose a file (test), e.g. test_mtu.py
- * SANITY_TESTS_REPO CVP-sanity-checks repo to clone
- * SANITY_TESTS_SETTINGS Additional envrionment variables for cvp-sanity-checks
- * PROXY Proxy to use for cloning repo or for pip
- *
- */
-
-validate = new com.mirantis.mcp.Validate()
-
-def artifacts_dir = 'validation_artifacts/'
-timeout(time: 12, unit: 'HOURS') {
- node() {
- try{
- stage('Initialization') {
- validate.prepareVenv(SANITY_TESTS_REPO, PROXY)
- }
-
- stage('Run Infra tests') {
- sh "mkdir -p ${artifacts_dir}"
- validate.runSanityTests(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS, SANITY_TESTS_SET, artifacts_dir, SANITY_TESTS_SETTINGS)
- }
- stage ('Publish results') {
- archiveArtifacts artifacts: "${artifacts_dir}/*"
- junit "${artifacts_dir}/*.xml"
- }
- } catch (Throwable e) {
- // If there was an error or exception thrown, the build failed
- currentBuild.result = "FAILURE"
- throw e
- }
- }
-}
diff --git a/cvp-spt.groovy b/cvp-spt.groovy
deleted file mode 100644
index b9d53d5..0000000
--- a/cvp-spt.groovy
+++ /dev/null
@@ -1,63 +0,0 @@
-/**
- *
- * Launch pytest frameworks in Jenkins
- *
- * Expected parameters:
- * SALT_MASTER_URL URL of Salt master
- * SALT_MASTER_CREDENTIALS Credentials to the Salt API
- *
- * TESTS_SET Leave empty for full run or choose a file (test)
- * TESTS_REPO Repo to clone
- * TESTS_SETTINGS Additional environment varibales to apply
- * PROXY Proxy to use for cloning repo or for pip
- *
- */
-
-validate = new com.mirantis.mcp.Validate()
-
-node() {
- try{
- stage('Initialization') {
- validate.prepareVenv(TESTS_REPO, PROXY)
- }
-
- stage('Run Tests') {
- validate.runTests(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS, TESTS_SET, '', TESTS_SETTINGS)
- }
- stage ('Publish results') {
- archiveArtifacts artifacts: "*"
- junit "*.xml"
- plot csvFileName: 'plot-8634d2fe-dc48-4713-99f9-b69a381483aa.csv',
- group: 'SPT',
- style: 'line',
- title: 'SPT Glance results',
- xmlSeries: [[
- file: "report.xml",
- nodeType: 'NODESET',
- url: '',
- xpath: '/testsuite/testcase[@name="test_speed_glance"]/properties/property']]
- plot csvFileName: 'plot-8634d2fe-dc48-4713-99f9-b69a381483bb.csv',
- group: 'SPT',
- style: 'line',
- title: 'SPT HW2HW results',
- xmlSeries: [[
- file: "report.xml",
- nodeType: 'NODESET',
- url: '',
- xpath: '/testsuite/testcase[@classname="cvp_spt.tests.test_hw2hw"]/properties/property']]
- plot csvFileName: 'plot-8634d2fe-dc48-4713-99f9-b69a381483bc.csv',
- group: 'SPT',
- style: 'line',
- title: 'SPT VM2VM results',
- xmlSeries: [[
- file: "report.xml",
- nodeType: 'NODESET',
- url: '',
- xpath: '/testsuite/testcase[@classname="cvp_spt.tests.test_vm2vm"]/properties/property']]
- }
- } catch (Throwable e) {
- // If there was an error or exception thrown, the build failed
- currentBuild.result = "FAILURE"
- throw e
- }
-}
diff --git a/cvp-stacklight.groovy b/cvp-stacklight.groovy
deleted file mode 100644
index e7ce974..0000000
--- a/cvp-stacklight.groovy
+++ /dev/null
@@ -1,33 +0,0 @@
-/**
- *
- * Temporary pipeline for running cvp-stacklight job
- *
- * Expected parameters:
- * SALT_MASTER_URL URL of Salt master
- * SALT_MASTER_CREDENTIALS Credentials to the Salt API
- *
- * TESTS_SET Leave empty for full run or choose a file (test)
- * TESTS_REPO Repo to clone
- * TESTS_SETTINGS Additional environment varibales to apply
- * PROXY Proxy to use for cloning repo or for pip
- *
- */
-
-validate = new com.mirantis.mcp.Validate()
-
-def artifacts_dir = 'validation_artifacts/'
-
-node() {
- stage('Initialization') {
- validate.prepareVenv(TESTS_REPO, PROXY)
- }
-
- stage('Run Tests') {
- sh "mkdir -p ${artifacts_dir}"
- validate.runTests(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS, TESTS_SET, artifacts_dir, TESTS_SETTINGS)
- }
- stage ('Publish results') {
- archiveArtifacts artifacts: "${artifacts_dir}/*"
- junit "${artifacts_dir}/*.xml"
- }
-}
diff --git a/cvp-tempest.groovy b/cvp-tempest.groovy
new file mode 100644
index 0000000..c6fca0a
--- /dev/null
+++ b/cvp-tempest.groovy
@@ -0,0 +1,168 @@
+/**
+ *
+ * Launch CVP Tempest verification of the cloud
+ *
+ * Expected parameters:
+
+ * SALT_MASTER_URL URL of Salt master
+ * SALT_MASTER_CREDENTIALS Credentials that are used in this Jenkins for accessing Salt master (usually "salt")
+ * SERVICE_NODE Node, where runtest formula and some other states will be executed
+ * VERBOSE Show salt output in Jenkins console
+ * DEBUG_MODE Remove or keep container after the test
+ * STOP_ON_ERROR Stop pipeline if error during salt run occurs
+ * GENERATE_CONFIG Run runtest formula / generate Tempest config
+ * SKIP_LIST_PATH Path to skip list (not in use right now)
+ * TEST_IMAGE Docker image link to use for running container with testing tools.
+ * TARGET_NODE Node to run container with Tempest/Rally
+ * PREPARE_RESOURCES Prepare Openstack resources before test run
+ * TEMPEST_TEST_PATTERN Tests to run
+ * TEMPEST_ENDPOINT_TYPE Type of OS endpoint to use during test run (not in use right now)
+ * concurrency Number of threads to use for Tempest test run
+ * remote_artifacts_dir Folder to use for artifacts on remote node
+ * report_prefix Some prefix to put to report name
+ *
+ */
+
+
+common = new com.mirantis.mk.Common()
+salt = new com.mirantis.mk.Salt()
+validate = new com.mirantis.mcp.Validate()
+
+def saltMaster
+extraYamlContext = env.getProperty('EXTRA_PARAMS')
+if (extraYamlContext) {
+ common.mergeEnv(env, extraYamlContext) }
+def SALT_MASTER_CREDENTIALS=(env.SALT_MASTER_CREDENTIALS) ?: 'salt'
+def VERBOSE = (env.VERBOSE) ?: true
+def DEBUG_MODE = (env.DEBUG_MODE) ?: false
+def STOP_ON_ERROR = (env.STOP_ON_ERROR) ?: false
+def GENERATE_CONFIG = (env.GENERATE_CONFIG) ?: true
+def remote_artifacts_dir = (env.remote_artifacts_dir) ?: '/root/test/'
+def report_prefix = (env.report_prefix) ?: ''
+def args = ''
+node() {
+ try{
+ stage('Initialization') {
+ deleteDir()
+ saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+ cluster_name=salt.getPillar(saltMaster, 'I@salt:master', '_param:cluster_name')['return'][0].values()[0]
+ os_version=salt.getPillar(saltMaster, 'I@salt:master', '_param:openstack_version')['return'][0].values()[0]
+ if (os_version == '') {
+ throw new Exception("Openstack is not found on this env. Exiting")
+ }
+ TEST_IMAGE = (env.TEST_IMAGE) ?: "docker-prod-virtual.docker.mirantis.net/mirantis/cicd/ci-tempest:${os_version}"
+ runtest_node = salt.runSaltProcessStep(saltMaster, 'I@runtest:*', 'test.ping')['return'][0]
+ if (runtest_node.values()[0]) {
+ // Let's use Service node that was defined in reclass. If several nodes are defined
+ // we will use the first from salt output
+ common.infoMsg("Service node ${runtest_node.keySet()[0]} is defined in reclass")
+ SERVICE_NODE = runtest_node.keySet()[0]
+ }
+ else {
+ common.infoMsg("Service node is not defined in reclass")
+ SERVICE_NODE = (env.SERVICE_NODE) ?: 'I@salt:master'
+ common.infoMsg("${SERVICE_NODE} will be used as Service node")
+ def classes_to_add = ["cluster.${cluster_name}.infra.runtest"]
+ fullnodename = salt.getMinions(saltMaster, SERVICE_NODE).get(0)
+ common.infoMsg("Full service node name ${fullnodename}")
+ result = salt.runSaltCommand(saltMaster, 'local', ['expression': SERVICE_NODE, 'type': 'compound'], 'reclass.node_update',
+ null, null, ['name': fullnodename, 'classes': classes_to_add])
+ salt.checkResult(result)
+ }
+ common.infoMsg('Refreshing pillars on service node')
+ salt.runSaltProcessStep(saltMaster, SERVICE_NODE, 'saltutil.refresh_pillar', [], null, VERBOSE)
+ tempest_node=salt.getPillar(saltMaster, SERVICE_NODE, '_param:tempest_test_target')['return'][0].values()[0] ?: 'I@gerrit:client'
+ }
+ stage('Preparing resources') {
+ if ( PREPARE_RESOURCES.toBoolean() ) {
+ common.infoMsg('Running salt.minion state on service node')
+ salt.enforceState(saltMaster, SERVICE_NODE, ['salt.minion'], VERBOSE, STOP_ON_ERROR, null, false, 300, 2, true, [], 60)
+ common.infoMsg('Running keystone.client on service node')
+ salt.enforceState(saltMaster, SERVICE_NODE, 'keystone.client', VERBOSE, STOP_ON_ERROR)
+ common.infoMsg('Running glance.client on service node')
+ salt.enforceState(saltMaster, SERVICE_NODE, 'glance.client', VERBOSE, STOP_ON_ERROR)
+ common.infoMsg('Running nova.client on service node')
+ salt.enforceState(saltMaster, SERVICE_NODE, 'nova.client', VERBOSE, STOP_ON_ERROR)
+ }
+ else {
+ common.infoMsg('Skipping resources preparation')
+ }
+ }
+ stage('Generate config') {
+ if ( GENERATE_CONFIG ) {
+ salt.runSaltProcessStep(saltMaster, SERVICE_NODE, 'file.remove', ["${remote_artifacts_dir}"])
+ salt.runSaltProcessStep(saltMaster, SERVICE_NODE, 'file.mkdir', ["${remote_artifacts_dir}"])
+ fullnodename = salt.getMinions(saltMaster, SERVICE_NODE).get(0)
+ TARGET_NODE = (env.TARGET_NODE) ?: tempest_node
+ if (TARGET_NODE != tempest_node) {
+ common.infoMsg("TARGET_NODE is defined in Jenkins")
+ def params_to_update = ['tempest_test_target': "${TARGET_NODE}"]
+ common.infoMsg("Overriding default ${tempest_node} value of tempest_test_target parameter")
+ result = salt.runSaltCommand(saltMaster, 'local', ['expression': SERVICE_NODE, 'type': 'compound'], 'reclass.node_update',
+ null, null, ['name': fullnodename, 'parameters': ['tempest_test_target': "${TARGET_NODE}"]])
+ salt.checkResult(result)
+ }
+ common.infoMsg("TARGET_NODE is ${TARGET_NODE}")
+ salt.runSaltProcessStep(saltMaster, TARGET_NODE, 'file.remove', ["${remote_artifacts_dir}"])
+ salt.runSaltProcessStep(saltMaster, TARGET_NODE, 'file.mkdir', ["${remote_artifacts_dir}"])
+ salt.enforceState(saltMaster, SERVICE_NODE, 'runtest', VERBOSE, STOP_ON_ERROR)
+ // we need to refresh pillars on target node after runtest state
+ salt.runSaltProcessStep(saltMaster, TARGET_NODE, 'saltutil.refresh_pillar', [], null, VERBOSE)
+ if (TARGET_NODE != tempest_node) {
+ common.infoMsg("Reverting tempest_test_target parameter")
+ result = salt.runSaltCommand(saltMaster, 'local', ['expression': SERVICE_NODE, 'type': 'compound'], 'reclass.node_update',
+ null, null, ['name': fullnodename, 'parameters': ['tempest_test_target': "${tempest_node}"]])
+ }
+ SKIP_LIST_PATH = (env.SKIP_LIST_PATH) ?: salt.getPillar(saltMaster, SERVICE_NODE, '_param:tempest_skip_list_path')['return'][0].values()[0]
+ runtest_tempest_cfg_dir = salt.getPillar(saltMaster, SERVICE_NODE, '_param:runtest_tempest_cfg_dir')['return'][0].values()[0] ?: '/root/test/'
+ if (SKIP_LIST_PATH) {
+ salt.cmdRun(saltMaster, SERVICE_NODE, "salt-cp ${TARGET_NODE} ${SKIP_LIST_PATH} ${runtest_tempest_cfg_dir}/skip.list")
+ args += ' --blacklist-file /root/tempest/skip.list '
+ }
+ }
+ else {
+ common.infoMsg('Skipping Tempest config generation')
+ salt.cmdRun(saltMaster, TARGET_NODE, "rm -rf ${remote_artifacts_dir}/reports")
+ }
+ }
+
+ stage('Run Tempest tests') {
+ // parameters: master, target, dockerImageLink, name, env_var, entrypoint, tempestConfLocalPath
+ validate.runContainer(saltMaster, TARGET_NODE, TEST_IMAGE, 'cvp', [], true,
+ '/root/test/tempest_generated.conf')
+ report_prefix += 'tempest_'
+ if (env.concurrency) {
+ args += ' -w ' + env.concurrency
+ }
+ if (TEMPEST_TEST_PATTERN == 'set=smoke') {
+ args += ' -s '
+ report_prefix += 'smoke'
+ }
+ else {
+ if (TEMPEST_TEST_PATTERN != 'set=full') {
+ args += " -r ${TEMPEST_TEST_PATTERN} "
+ report_prefix += 'full'
+ }
+ }
+ salt.cmdRun(saltMaster, TARGET_NODE, "docker exec -e ARGS=\'${args}\' cvp /bin/bash -c 'run-tempest'")
+ }
+ stage('Collect results') {
+ report_prefix += "_report_${env.BUILD_NUMBER}"
+ // will be removed after changing runtest-formula logic
+ salt.cmdRun(saltMaster, TARGET_NODE, "mkdir -p ${remote_artifacts_dir}/reports; mv ${remote_artifacts_dir}/report_* ${remote_artifacts_dir}/reports")
+ validate.addFiles(saltMaster, TARGET_NODE, "${remote_artifacts_dir}/reports", '')
+ sh "mv report_*.xml ${report_prefix}.xml"
+ sh "mv report_*.log ${report_prefix}.log"
+ archiveArtifacts artifacts: "${report_prefix}.*"
+ junit "${report_prefix}.xml"
+ }
+ } catch (Throwable e) {
+ // If there was an error or exception thrown, the build failed
+ currentBuild.result = "FAILURE"
+ throw e
+ } finally {
+ if (DEBUG_MODE == 'false') {
+ validate.runCleanup(saltMaster, TARGET_NODE)
+ }
+ }
+}
diff --git a/galera-cluster-verify-restore.groovy b/galera-cluster-verify-restore.groovy
index 7a908cb..054c0cc 100644
--- a/galera-cluster-verify-restore.groovy
+++ b/galera-cluster-verify-restore.groovy
@@ -7,6 +7,7 @@
* ASK_CONFIRMATION Ask confirmation for restore
* CHECK_TIME_SYNC Set to true to check time synchronization accross selected nodes.
* VERIFICATION_RETRIES Number of restries to verify the restoration.
+ * RESTORE_TYPE Sets restoration method
*
**/
@@ -16,6 +17,9 @@
def python = new com.mirantis.mk.Python()
def pepperEnv = "pepperEnv"
def resultCode = 99
+def restoreType = env.RESTORE_TYPE
+def runRestoreDb = false
+def runBackupDb = false
askConfirmation = (env.getProperty('ASK_CONFIRMATION') ?: true).toBoolean()
checkTimeSync = (env.getProperty('CHECK_TIME_SYNC') ?: true).toBoolean()
@@ -24,31 +28,42 @@
} else {
verificationRetries = 5
}
+if (restoreType.equals("BACKUP_AND_RESTORE") || restoreType.equals("ONLY_RESTORE")) {
+ runRestoreDb = true
+}
+if (restoreType.equals("BACKUP_AND_RESTORE")) {
+ runBackupDb = true
+}
timeout(time: 12, unit: 'HOURS') {
node() {
stage('Setup virtualenv for Pepper') {
python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
}
- stage('Verify status')
+ stage('Verify status') {
resultCode = galera.verifyGaleraStatus(pepperEnv, false, checkTimeSync)
- stage('Restore') {
if (resultCode == 128) {
common.errorMsg("Unable to connect to Galera Master. Trying slaves...")
resultCode = galera.verifyGaleraStatus(pepperEnv, true, checkTimeSync)
if (resultCode == 129) {
- common.errorMsg("Unable to obtain Galera slave minions list". "Without fixing this issue, pipeline cannot continue in verification and restoration.")
+                    common.errorMsg("Unable to obtain Galera slave minions list. Without fixing this issue, pipeline cannot continue in verification, backup and restoration.")
currentBuild.result = "FAILURE"
return
} else if (resultCode == 130) {
- common.errorMsg("Neither master or slaves are reachable. Without fixing this issue, pipeline cannot continue in verification and restoration.")
+ common.errorMsg("Neither master or slaves are reachable. Without fixing this issue, pipeline cannot continue in verification, backup and restoration.")
currentBuild.result = "FAILURE"
return
}
}
if (resultCode == 131) {
- common.errorMsg("Time desynced - Click proceed when the issue is fixed or abort.")
+ common.errorMsg("Time desynced - Please fix this issue and rerun the pipeline.")
currentBuild.result = "FAILURE"
+ return
+ }
+ if (resultCode == 140 || resultCode == 141) {
+ common.errorMsg("Disk utilization check failed - Please fix this issue and rerun the pipeline.")
+ currentBuild.result = "FAILURE"
+ return
}
if (resultCode == 1) {
if(askConfirmation){
@@ -58,23 +73,38 @@
}
} else if (resultCode > 1) {
if(askConfirmation){
- common.warningMsg("There's something wrong with the cluster, do you want to run a restore?")
+ common.warningMsg("There's something wrong with the cluster, do you want to continue with backup and/or restore?")
} else {
- common.warningMsg("There's something wrong with the cluster, try to restore.")
+ common.warningMsg("There's something wrong with the cluster, try to backup and/or restore.")
}
} else {
if(askConfirmation){
- common.warningMsg("There seems to be everything alright with the cluster, do you still want to run a restore?")
+ common.warningMsg("There seems to be everything alright with the cluster, do you still want to continue with backup and/or restore?")
} else {
- common.warningMsg("There seems to be everything alright with the cluster, do nothing")
+ common.warningMsg("There seems to be everything alright with the cluster, no backup and no restoration will be done.")
+ currentBuild.result = "SUCCESS"
+ return
}
}
+ }
+ if (runBackupDb) {
+ stage('Backup') {
+ common.infoMsg("Running backup job.")
+ deployBuild = build( job: "galera-database-backup-pipeline", parameters: [
+ [$class: 'StringParameterValue', name: 'SALT_MASTER_URL', value: SALT_MASTER_URL],
+ [$class: 'StringParameterValue', name: 'SALT_MASTER_CREDENTIALS', value: SALT_MASTER_CREDENTIALS],
+ [$class: 'StringParameterValue', name: 'OVERRIDE_BACKUP_NODE', value: "none"],
+ ]
+ )
+ }
+ }
+ stage('Restore') {
if(askConfirmation){
input message: "Are you sure you want to run a restore? Click to confirm"
}
try {
if((!askConfirmation && resultCode > 0) || askConfirmation){
- galera.restoreGaleraDb(pepperEnv)
+ galera.restoreGaleraCluster(pepperEnv, runRestoreDb)
}
} catch (Exception e) {
common.errorMsg("Restoration process has failed.")
diff --git a/gating-pipeline.groovy b/gating-pipeline.groovy
index b40bfec..e7887f9 100644
--- a/gating-pipeline.groovy
+++ b/gating-pipeline.groovy
@@ -70,10 +70,18 @@
if (isJobExists(testJob)) {
common.infoMsg("Test job ${testJob} found, running")
def patchsetVerified = gerrit.patchsetHasApproval(gerritChange.currentPatchSet, "Verified", "+")
- build job: testJob, parameters: [
- [$class: 'StringParameterValue', name: 'DEFAULT_GIT_URL', value: "${GERRIT_SCHEME}://${GERRIT_NAME}@${GERRIT_HOST}:${GERRIT_PORT}/${GERRIT_PROJECT}"],
- [$class: 'StringParameterValue', name: 'DEFAULT_GIT_REF', value: GERRIT_REFSPEC]
- ]
+ if (JOBS_NAMESPACE.equals("salt-formulas")) {
+ build job: testJob, parameters: [
+ [$class: 'StringParameterValue', name: 'DEFAULT_GIT_URL', value: "${GERRIT_SCHEME}://${GERRIT_NAME}@${GERRIT_HOST}:${GERRIT_PORT}/${GERRIT_PROJECT}"],
+ [$class: 'StringParameterValue', name: 'DEFAULT_GIT_REF', value: GERRIT_REFSPEC],
+ [$class: 'StringParameterValue', name: 'GATING_GERRIT_BRANCH', value: GERRIT_BRANCH]
+ ]
+ } else {
+ build job: testJob, parameters: [
+ [$class: 'StringParameterValue', name: 'DEFAULT_GIT_URL', value: "${GERRIT_SCHEME}://${GERRIT_NAME}@${GERRIT_HOST}:${GERRIT_PORT}/${GERRIT_PROJECT}"],
+ [$class: 'StringParameterValue', name: 'DEFAULT_GIT_REF', value: GERRIT_REFSPEC]
+ ]
+ }
giveVerify = true
} else {
common.infoMsg("Test job ${testJob} not found")
diff --git a/generate-cookiecutter-products.groovy b/generate-cookiecutter-products.groovy
index b672c4e..99ee3ea 100644
--- a/generate-cookiecutter-products.groovy
+++ b/generate-cookiecutter-products.groovy
@@ -8,6 +8,7 @@
**/
import static groovy.json.JsonOutput.toJson
import static groovy.json.JsonOutput.prettyPrint
+import org.apache.commons.net.util.SubnetUtils
common = new com.mirantis.mk.Common()
common2 = new com.mirantis.mcp.Common()
@@ -163,7 +164,12 @@
writeFile file: 'failsafe-ssh-key.pub', text: context['cfg_failsafe_ssh_public_key']
}
if (!fileExists(new File(templateEnv, 'tox.ini').toString())) {
- python.setupCookiecutterVirtualenv(cutterEnv)
+ reqs = new File(templateEnv, 'requirements.txt').toString()
+ if (fileExists(reqs)) {
+ python.setupVirtualenv(cutterEnv, 'python2', [], reqs)
+ } else {
+ python.setupCookiecutterVirtualenv(cutterEnv)
+ }
python.generateModel(common2.dumpYAML(['default_context': context]), 'default_context', context['salt_master_hostname'], cutterEnv, modelEnv, templateEnv, false)
} else {
// tox-based CC generated structure of reclass,from the root. Otherwise for bw compat, modelEnv
@@ -232,7 +238,7 @@
sh "git clone --mirror https://github.com/Mirantis/mk-pipelines.git ${pipelineEnv}/mk-pipelines"
sh "git clone --mirror https://github.com/Mirantis/pipeline-library.git ${pipelineEnv}/pipeline-library"
args = [
- "--user-data user_data", , "--model ${modelEnv}",
+ "--user-data user_data", "--model ${modelEnv}",
"--mk-pipelines ${pipelineEnv}/mk-pipelines/", "--pipeline-library ${pipelineEnv}/pipeline-library/"
]
if (context['secrets_encryption_enabled'] == 'True') {
@@ -242,7 +248,10 @@
if (outdateGeneration) {
args.add('--ssh-key failsafe-ssh-key.pub')
} else {
- args.add('--ssh-keys failsafe-ssh-key.pub')
+ if (context.get('cfg_failsafe_user')) {
+ args.add('--ssh-keys failsafe-ssh-key.pub')
+ args.add("--cloud-user-name ${context.get('cfg_failsafe_user')}")
+ }
}
}
// load data from model
@@ -286,17 +295,27 @@
sh "sed -i 's,${i[0]}=.*,${i[0]}=${i[1]},' user_data"
}
+ // calculate netmask
+ def deployNetworkSubnet = ''
+ if (context.get('deploy_network_subnet')) {
+ def subnet = new SubnetUtils(context['deploy_network_subnet'])
+ deployNetworkSubnet = subnet.getInfo().getNetmask()
+ } else if (context.get('deploy_network_netmask')) { // case for 2018.4.0
+ deployNetworkSubnet = context['deploy_network_netmask']
+ } else {
+ error('Neither context parameter deploy_network_subnet or deploy_network_netmask should be set!')
+ }
// create cfg config-drive
if (outdateGeneration) {
- args += [ "--hostname ${context['salt_master_hostname']}", "${context['salt_master_hostname']}.${context['cluster_domain']}-config.iso" ]
+ args += ["--hostname ${context['salt_master_hostname']}", "${context['salt_master_hostname']}.${context['cluster_domain']}-config.iso"]
sh "./create-config-drive ${args.join(' ')}"
} else {
args += [
"--name ${context['salt_master_hostname']}", "--hostname ${context['salt_master_hostname']}.${context['cluster_domain']}", "--clean-up",
- "--ip ${context['salt_master_management_address']}", "--netmask ${context['deploy_network_netmask']}", "--gateway ${context['deploy_network_gateway']}",
+ "--ip ${context['salt_master_management_address']}", "--netmask ${deployNetworkSubnet}", "--gateway ${context['deploy_network_gateway']}",
"--dns-nameservers ${context['dns_server01']},${context['dns_server02']}"
]
- sh "python ./create-config-drive.py ${args.join(' ')}"
+ sh "chmod 0755 create-config-drive.py ; ./create-config-drive.py ${args.join(' ')}"
}
sh("mkdir output-${context['cluster_name']} && mv ${context['salt_master_hostname']}.${context['cluster_domain']}-config.iso output-${context['cluster_name']}/")
@@ -324,7 +343,7 @@
sh "./create-config-drive --user-data mirror_config --hostname ${aptlyServerHostname} ${aptlyServerHostname}.${context['cluster_domain']}-config.iso"
} else {
args = [
- "--ip ${context['aptly_server_deploy_address']}", "--netmask ${context['deploy_network_netmask']}", "--gateway ${context['deploy_network_gateway']}",
+ "--ip ${context['aptly_server_deploy_address']}", "--netmask ${deployNetworkSubnet}", "--gateway ${context['deploy_network_gateway']}",
"--user-data mirror_config", "--hostname ${aptlyServerHostname}.${context['cluster_domain']}", "--name ${aptlyServerHostname}", "--clean-up",
"--dns-nameservers ${context['dns_server01']},${context['dns_server02']}"
]
@@ -367,6 +386,19 @@
sh(script: 'find . -mindepth 1 -delete > /dev/null || true')
}
// common.sendNotification(currentBuild.result,"",["slack"])
+ stage('Save artifacts to Artifactory') {
+ def artifactory = new com.mirantis.mcp.MCPArtifactory()
+ def buildProps = ["context=${context['cluster_name']}"]
+ if (RequesterEmail != '' && !RequesterEmail.contains('example')) {
+ buildProps.add("emailTo=${RequesterEmail}")
+ }
+ def artifactoryLink = artifactory.uploadJobArtifactsToArtifactory([
+ 'artifactory' : 'mcp-ci',
+ 'artifactoryRepo': "drivetrain-local/${JOB_NAME}/${context['cluster_name']}-${BUILD_NUMBER}",
+ 'buildProps' : buildProps,
+ ])
+ currentBuild.description += "<br/>${artifactoryLink}"
+ }
}
}
}
diff --git a/git-mirror-pipeline.groovy b/git-mirror-pipeline.groovy
index 8bfe467..8766678 100644
--- a/git-mirror-pipeline.groovy
+++ b/git-mirror-pipeline.groovy
@@ -4,7 +4,11 @@
timeout(time: 12, unit: 'HOURS') {
node() {
try{
- def branches = BRANCHES.tokenize(',')
+ if (BRANCHES.equals("*") || BRANCHES.contains('*')) {
+ branches = git.getBranchesForGitRepo(SOURCE_URL, BRANCHES)
+ } else {
+ branches = BRANCHES.tokenize(',')
+ }
def pollBranches = []
for (i=0; i < branches.size(); i++) {
pollBranches.add([name:branches[i]])
diff --git a/k8s-upgrade-pipeline.groovy b/k8s-upgrade-pipeline.groovy
index fe7d189..224040f 100644
--- a/k8s-upgrade-pipeline.groovy
+++ b/k8s-upgrade-pipeline.groovy
@@ -268,7 +268,7 @@
def nodeShortName = target.tokenize(".")[0]
firstTarget = salt.getFirstMinion(pepperEnv, originalTarget)
- status = salt.cmdRun(pepperEnv, firstTarget, "kubectl get no | grep ${nodeShortName} | awk '{print \$2}'"
+ status = salt.cmdRun(pepperEnv, firstTarget, "kubectl get no ${nodeShortName} | tail -n+2 | awk '{print \$2}'"
)['return'][0].values()[0].replaceAll('Salt command execution success',''
).replaceAll(',SchedulingDisabled','').trim()
@@ -285,6 +285,13 @@
stage("Rebooting ${target}") {
debian.osReboot(pepperEnv, target)
+ /*
+ * Kubernetes controller manager will mark kubernetes node as NotReady
+ * only after 40 seconds of its downtime.
+ * Let's wait for 60 sec to be sure that node will reach its
+ * correct status.
+ */
+ sleep(60)
common.retry(times, delay) {
if(!isNodeReady(pepperEnv, target)) {
error("Node still not in Ready state...")
@@ -345,11 +352,9 @@
}
def executeConformance(pepperEnv, target, k8s_api, mcp_repo) {
- stage("Running conformance tests") {
- def image = buildImageURL(pepperEnv, target, mcp_repo)
- print("Using image: " + image)
- runConformance(pepperEnv, target, k8s_api, image)
- }
+ def image = buildImageURL(pepperEnv, target, mcp_repo)
+ print("Using image: " + image)
+ runConformance(pepperEnv, target, k8s_api, image)
}
def containerDinstalled(pepperEnv, target) {
@@ -669,21 +674,23 @@
def daemonsetMap = buildDaemonsetMap(pepperEnv, ctl_node)
if (CONFORMANCE_RUN_BEFORE.toBoolean()) {
- def target = CTL_TARGET
- def mcp_repo = ARTIFACTORY_URL
- def k8s_api = TEST_K8S_API_SERVER
- firstTarget = salt.getFirstMinion(pepperEnv, target)
- def containerd_enabled = containerDenabled(pepperEnv, firstTarget)
- def containerd_installed = containerDinstalled(pepperEnv, firstTarget)
- def conformance_pod_ready = conformancePodDefExists(pepperEnv, firstTarget)
- if (containerd_enabled && containerd_installed && conformance_pod_ready) {
- def config = ['master': pepperEnv,
- 'target': firstTarget,
- 'junitResults': false,
- 'autodetect': true]
- test.executeConformance(config)
- } else {
- executeConformance(pepperEnv, firstTarget, k8s_api, mcp_repo)
+ stage("Perform conformance run before upgrade") {
+ def target = CTL_TARGET
+ def mcp_repo = ARTIFACTORY_URL
+ def k8s_api = TEST_K8S_API_SERVER
+ firstTarget = salt.getFirstMinion(pepperEnv, target)
+ def containerd_enabled = containerDenabled(pepperEnv, firstTarget)
+ def containerd_installed = containerDinstalled(pepperEnv, firstTarget)
+ def conformance_pod_ready = conformancePodDefExists(pepperEnv, firstTarget)
+ if (containerd_enabled && containerd_installed && conformance_pod_ready) {
+ def config = ['master': pepperEnv,
+ 'target': firstTarget,
+ 'junitResults': false,
+ 'autodetect': true]
+ test.executeConformance(config)
+ } else {
+ executeConformance(pepperEnv, firstTarget, k8s_api, mcp_repo)
+ }
}
}
@@ -812,21 +819,23 @@
printVersionInfo(pepperEnv, ctl_node)
if (CONFORMANCE_RUN_AFTER.toBoolean()) {
- def target = CTL_TARGET
- def mcp_repo = ARTIFACTORY_URL
- def k8s_api = TEST_K8S_API_SERVER
- firstTarget = salt.getFirstMinion(pepperEnv, target)
- def containerd_enabled = containerDenabled(pepperEnv, firstTarget)
- def containerd_installed = containerDinstalled(pepperEnv, firstTarget)
- def conformance_pod_ready = conformancePodDefExists(pepperEnv, firstTarget)
- if (containerd_enabled && containerd_installed && conformance_pod_ready) {
- def config = ['master': pepperEnv,
- 'target': firstTarget,
- 'junitResults': false,
- 'autodetect': true]
- test.executeConformance(config)
- } else {
- executeConformance(pepperEnv, firstTarget, k8s_api, mcp_repo)
+ stage("Perform conformance run after upgrade") {
+ def target = CTL_TARGET
+ def mcp_repo = ARTIFACTORY_URL
+ def k8s_api = TEST_K8S_API_SERVER
+ firstTarget = salt.getFirstMinion(pepperEnv, target)
+ def containerd_enabled = containerDenabled(pepperEnv, firstTarget)
+ def containerd_installed = containerDinstalled(pepperEnv, firstTarget)
+ def conformance_pod_ready = conformancePodDefExists(pepperEnv, firstTarget)
+ if (containerd_enabled && containerd_installed && conformance_pod_ready) {
+ def config = ['master': pepperEnv,
+ 'target': firstTarget,
+ 'junitResults': false,
+ 'autodetect': true]
+ test.executeConformance(config)
+ } else {
+ executeConformance(pepperEnv, firstTarget, k8s_api, mcp_repo)
+ }
}
}
} catch (Throwable e) {
diff --git a/lab-pipeline.groovy b/lab-pipeline.groovy
deleted file mode 100644
index 61015f5..0000000
--- a/lab-pipeline.groovy
+++ /dev/null
@@ -1,390 +0,0 @@
-/**
-
-
-
-
-
-
-
-
-
-* This pipeline is deprecated, please use cloud-deploy-pipeline
-
-
-
-
-
-
-
-
-
-
-
-
- *
- * Launch heat stack with basic k8s
- * Flow parameters:
- * STACK_NAME Heat stack name
- * STACK_TYPE Orchestration engine: heat, ''
- * STACK_INSTALL What should be installed (k8s, openstack, ...)
- * STACK_TEST What should be tested (k8s, openstack, ...)
- *
- * STACK_TEMPLATE_URL URL to git repo with stack templates
- * STACK_TEMPLATE_BRANCH Stack templates repo branch
- * STACK_TEMPLATE_CREDENTIALS Credentials to the stack templates repo
- * STACK_TEMPLATE Heat stack HOT template
- * STACK_RECLASS_ADDRESS Stack reclass address
- * STACK_RECLASS_BRANCH Stack reclass repo branch
- * STACK_DELETE Delete stack when finished (bool)
- * STACK_REUSE Reuse stack (don't create one)
- * STACK_CLEANUP_JOB Name of job for deleting Heat stack
- *
- * Expected parameters:
- * required for STACK_TYPE=heat
- * HEAT_STACK_ENVIRONMENT Heat stack environmental parameters
- * HEAT_STACK_ZONE Heat stack availability zone
- * HEAT_STACK_PUBLIC_NET Heat stack floating IP pool
- * OPENSTACK_API_URL OpenStack API address
- * OPENSTACK_API_CREDENTIALS Credentials to the OpenStack API
- * OPENSTACK_API_PROJECT OpenStack project to connect to
- * OPENSTACK_API_PROJECT_DOMAIN Domain for OpenStack project
- * OPENSTACK_API_PROJECT_ID ID for OpenStack project
- * OPENSTACK_API_USER_DOMAIN Domain for OpenStack user
- * OPENSTACK_API_CLIENT Versions of OpenStack python clients
- * OPENSTACK_API_VERSION Version of the OpenStack API (2/3)
- *
- * SALT_MASTER_CREDENTIALS Credentials to the Salt API
- *
- * required for STACK_TYPE=NONE or empty string
- * SALT_MASTER_URL URL of Salt-API
- *
- * Test settings:
- * TEST_K8S_API_SERVER Kubernetes API address
- * TEST_K8S_CONFORMANCE_IMAGE Path to docker image with conformance e2e tests
- *
- * TEST_TEMPEST_IMAGE Tempest image link
- * TEST_DOCKER_INSTALL Install docker on the target if tue
- * TEST_TEMPEST_PATTERN If not false, run tests matched to pattern only
- * TEST_TEMPEST_TARGET Salt target for tempest node
- *
- * optional parameters for overwriting soft params
- * SALT_OVERRIDES YAML with overrides for Salt deployment
- *
- */
-
-common = new com.mirantis.mk.Common()
-git = new com.mirantis.mk.Git()
-openstack = new com.mirantis.mk.Openstack()
-orchestrate = new com.mirantis.mk.Orchestrate()
-salt = new com.mirantis.mk.Salt()
-test = new com.mirantis.mk.Test()
-
-_MAX_PERMITTED_STACKS = 2
-
-node {
- // try to get STACK_INSTALL or fallback to INSTALL if exists
- try {
- def temporary = STACK_INSTALL
- } catch (MissingPropertyException e) {
- try {
- STACK_INSTALL = INSTALL
- env['STACK_INSTALL'] = INSTALL
- } catch (MissingPropertyException e2) {
- common.errorMsg("Property STACK_INSTALL or INSTALL not found!")
- }
- }
- try {
- //
- // Prepare machines
- //
- stage ('Create infrastructure') {
-
- if (STACK_TYPE == 'heat') {
- // value defaults
- def openstackCloud
- def openstackVersion = OPENSTACK_API_CLIENT ? OPENSTACK_API_CLIENT : 'liberty'
- def openstackEnv = "${env.WORKSPACE}/venv"
-
- if (STACK_REUSE.toBoolean() == true && STACK_NAME == '') {
- error("If you want to reuse existing stack you need to provide it's name")
- }
-
- if (STACK_REUSE.toBoolean() == false) {
- // Don't allow to set custom heat stack name
- wrap([$class: 'BuildUser']) {
- if (env.BUILD_USER_ID) {
- STACK_NAME = "${env.BUILD_USER_ID}-${JOB_NAME}-${BUILD_NUMBER}"
- } else {
- STACK_NAME = "jenkins-${JOB_NAME}-${BUILD_NUMBER}"
- }
- currentBuild.description = STACK_NAME
- }
- }
-
- // set description
- currentBuild.description = "${STACK_NAME}"
-
- // get templates
- git.checkoutGitRepository('template', STACK_TEMPLATE_URL, STACK_TEMPLATE_BRANCH, STACK_TEMPLATE_CREDENTIALS)
-
- // create openstack env
- openstack.setupOpenstackVirtualenv(openstackEnv, openstackVersion)
- openstackCloud = openstack.createOpenstackEnv(
- OPENSTACK_API_URL, OPENSTACK_API_CREDENTIALS,
- OPENSTACK_API_PROJECT, OPENSTACK_API_PROJECT_DOMAIN,
- OPENSTACK_API_PROJECT_ID, OPENSTACK_API_USER_DOMAIN,
- OPENSTACK_API_VERSION)
- openstack.getKeystoneToken(openstackCloud, openstackEnv)
- //
- // Verify possibility of create stack for given user and stack type
- //
- wrap([$class: 'BuildUser']) {
- if (env.BUILD_USER_ID && !env.BUILD_USER_ID.equals("jenkins") && !env.BUILD_USER_ID.equals("mceloud") && !STACK_REUSE.toBoolean()) {
- def existingStacks = openstack.getStacksForNameContains(openstackCloud, "${env.BUILD_USER_ID}-${JOB_NAME}", openstackEnv)
- if(existingStacks.size() >= _MAX_PERMITTED_STACKS){
- STACK_DELETE = "false"
- throw new Exception("You cannot create new stack, you already have ${_MAX_PERMITTED_STACKS} stacks of this type (${JOB_NAME}). \nStack names: ${existingStacks}")
- }
- }
- }
- // launch stack
- if (STACK_REUSE.toBoolean() == false) {
- stage('Launch new Heat stack') {
- // create stack
- envParams = [
- 'instance_zone': HEAT_STACK_ZONE,
- 'public_net': HEAT_STACK_PUBLIC_NET
- ]
- try {
- envParams.put('cfg_reclass_branch', STACK_RECLASS_BRANCH)
- envParams.put('cfg_reclass_address', STACK_RECLASS_ADDRESS)
- } catch (MissingPropertyException e) {
- common.infoMsg("Property STACK_RECLASS_BRANCH or STACK_RECLASS_ADDRESS not found! Using default values from template.")
- }
- openstack.createHeatStack(openstackCloud, STACK_NAME, STACK_TEMPLATE, envParams, HEAT_STACK_ENVIRONMENT, openstackEnv)
- }
- }
-
- // get SALT_MASTER_URL
- saltMasterHost = openstack.getHeatStackOutputParam(openstackCloud, STACK_NAME, 'salt_master_ip', openstackEnv)
- currentBuild.description = "${STACK_NAME}: ${saltMasterHost}"
-
- SALT_MASTER_URL = "http://${saltMasterHost}:6969"
- }
- }
-
- //
- // Connect to Salt master
- //
-
- def saltMaster
- stage('Connect to Salt API') {
- saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
- }
-
- // Set up override params
- if (env.getEnvironment().containsKey('SALT_OVERRIDES')) {
- stage('Set Salt overrides') {
- salt.setSaltOverrides(saltMaster, SALT_OVERRIDES)
- }
- }
-
- //
- // Install
- //
-
- if (common.checkContains('STACK_INSTALL', 'core')) {
- stage('Install core infrastructure') {
- orchestrate.installFoundationInfra(saltMaster)
-
- if (common.checkContains('STACK_INSTALL', 'kvm')) {
- orchestrate.installInfraKvm(saltMaster)
- orchestrate.installFoundationInfra(saltMaster)
- }
-
- orchestrate.validateFoundationInfra(saltMaster)
- }
- }
-
- // install k8s
- if (common.checkContains('STACK_INSTALL', 'k8s')) {
-
- // install infra libs for k8s
- stage('Install Kubernetes infra') {
- orchestrate.installKubernetesInfra(saltMaster)
- }
-
- // If k8s install with contrail network manager then contrail need to be install first
- if (common.checkContains('STACK_INSTALL', 'contrail')) {
- stage('Install Contrail for Kubernetes') {
- orchestrate.installContrailNetwork(saltMaster)
- orchestrate.installContrailCompute(saltMaster)
- orchestrate.installKubernetesContrailCompute(saltMaster)
- }
- }
-
- stage('Install Kubernetes control') {
- orchestrate.installKubernetesControl(saltMaster)
- }
- }
-
- // install openstack
- if (common.checkContains('STACK_INSTALL', 'openstack')) {
- // install Infra and control, tests, ...
-
- stage('Install OpenStack infra') {
- orchestrate.installOpenstackInfra(saltMaster)
- }
-
- stage('Install OpenStack control') {
- orchestrate.installOpenstackControl(saltMaster)
- }
-
- stage('Install OpenStack network') {
-
- if (common.checkContains('STACK_INSTALL', 'contrail')) {
- orchestrate.installContrailNetwork(saltMaster)
- } else if (common.checkContains('STACK_INSTALL', 'ovs')) {
- orchestrate.installOpenstackNetwork(saltMaster)
- }
-
- salt.cmdRun(saltMaster, 'I@keystone:server', '. /root/keystonerc; neutron net-list')
- salt.cmdRun(saltMaster, 'I@keystone:server', '. /root/keystonerc; nova net-list')
- }
-
- if (salt.testTarget(saltMaster, 'I@ironic:conductor')){
- stage('Install OpenStack Ironic conductor') {
- orchestrate.installIronicConductor(saltMaster)
- }
- }
-
-
- stage('Install OpenStack compute') {
- orchestrate.installOpenstackCompute(saltMaster)
-
- if (common.checkContains('STACK_INSTALL', 'contrail')) {
- orchestrate.installContrailCompute(saltMaster)
- }
- }
-
- }
-
-
- if (common.checkContains('STACK_INSTALL', 'sl-legacy')) {
- stage('Install StackLight v1') {
- orchestrate.installStacklightv1Control(saltMaster)
- orchestrate.installStacklightv1Client(saltMaster)
- }
- }
-
- if (common.checkContains('STACK_INSTALL', 'stacklight')) {
- stage('Install StackLight') {
- orchestrate.installDockerSwarm(saltMaster)
- orchestrate.installStacklight(saltMaster)
- }
- }
-
- //
- // Test
- //
- def artifacts_dir = '_artifacts/'
-
- if (common.checkContains('STACK_TEST', 'k8s')) {
- stage('Run k8s bootstrap tests') {
- def image = 'tomkukral/k8s-scripts'
- def output_file = image.replaceAll('/', '-') + '.output'
-
- // run image
- test.runConformanceTests(saltMaster, 'ctl01*', TEST_K8S_API_SERVER, image)
-
- // collect output
- sh "mkdir -p ${artifacts_dir}"
- file_content = salt.getFileContent(saltMaster, 'ctl01*', '/tmp/' + output_file)
- writeFile file: "${artifacts_dir}${output_file}", text: file_content
- sh "cat ${artifacts_dir}${output_file}"
-
- // collect artifacts
- archiveArtifacts artifacts: "${artifacts_dir}${output_file}"
- }
-
- stage('Run k8s conformance e2e tests') {
- //test.runConformanceTests(saltMaster, 'ctl01*', TEST_K8S_API_SERVER, TEST_K8S_CONFORMANCE_IMAGE)
-
- def image = TEST_K8S_CONFORMANCE_IMAGE
- def output_file = image.replaceAll('/', '-') + '.output'
-
- // run image
- test.runConformanceTests(saltMaster, 'ctl01*', TEST_K8S_API_SERVER, image)
-
- // collect output
- sh "mkdir -p ${artifacts_dir}"
- file_content = salt.getFileContent(saltMaster, 'ctl01*', '/tmp/' + output_file)
- writeFile file: "${artifacts_dir}${output_file}", text: file_content
- sh "cat ${artifacts_dir}${output_file}"
-
- // collect artifacts
- archiveArtifacts artifacts: "${artifacts_dir}${output_file}"
- }
- }
-
- if (common.checkContains('STACK_TEST', 'openstack')) {
- if (common.checkContains('TEST_DOCKER_INSTALL', 'true')) {
- test.install_docker(saltMaster, TEST_TEMPEST_TARGET)
- }
- stage('Run OpenStack tests') {
- test.runTempestTests(saltMaster, TEST_TEMPEST_IMAGE, TEST_TEMPEST_TARGET, TEST_TEMPEST_PATTERN)
- }
-
- stage('Copy Tempest results to config node') {
- test.copyTempestResults(saltMaster, TEST_TEMPEST_TARGET)
- }
- }
-
- if (common.checkContains('STACK_INSTALL', 'finalize')) {
- stage('Finalize') {
- salt.runSaltProcessStep(saltMaster, '*', 'state.apply', [], null, true)
- }
- }
- } catch (Throwable e) {
- currentBuild.result = 'FAILURE'
- throw e
- } finally {
-
-
- //
- // Clean
- //
-
- if (STACK_TYPE == 'heat') {
- // send notification
- common.sendNotification(currentBuild.result, STACK_NAME, ["slack"])
-
- if (STACK_DELETE.toBoolean() == true) {
- common.errorMsg('Heat job cleanup triggered')
- stage('Trigger cleanup job') {
- build(job: STACK_CLEANUP_JOB, parameters: [
- [$class: 'StringParameterValue', name: 'STACK_NAME', value: STACK_NAME],
- [$class: 'StringParameterValue', name: 'STACK_TYPE', value: STACK_TYPE],
- [$class: 'StringParameterValue', name: 'OPENSTACK_API_URL', value: OPENSTACK_API_URL],
- [$class: 'StringParameterValue', name: 'OPENSTACK_API_CREDENTIALS', value: OPENSTACK_API_CREDENTIALS],
- [$class: 'StringParameterValue', name: 'OPENSTACK_API_PROJECT', value: OPENSTACK_API_PROJECT],
- [$class: 'StringParameterValue', name: 'OPENSTACK_API_PROJECT_DOMAIN', value: OPENSTACK_API_PROJECT_DOMAIN],
- [$class: 'StringParameterValue', name: 'OPENSTACK_API_PROJECT_ID', value: OPENSTACK_API_PROJECT_ID],
- [$class: 'StringParameterValue', name: 'OPENSTACK_API_USER_DOMAIN', value: OPENSTACK_API_USER_DOMAIN],
- [$class: 'StringParameterValue', name: 'OPENSTACK_API_CLIENT', value: OPENSTACK_API_CLIENT],
- [$class: 'StringParameterValue', name: 'OPENSTACK_API_VERSION', value: OPENSTACK_API_VERSION]
- ])
- }
- } else {
- if (currentBuild.result == 'FAILURE') {
- common.errorMsg("Deploy job FAILED and was not deleted. Please fix the problem and delete stack on you own.")
-
- if (SALT_MASTER_URL) {
- common.errorMsg("Salt master URL: ${SALT_MASTER_URL}")
- }
- }
-
- }
- }
- }
-}
diff --git a/networking-test-l2gw-bgpvpn.groovy b/networking-test-l2gw-bgpvpn.groovy
new file mode 100644
index 0000000..0fadbd7
--- /dev/null
+++ b/networking-test-l2gw-bgpvpn.groovy
@@ -0,0 +1,101 @@
+/**
+ *
+ * Deploy env with l2gw and bgpvpn enabled from cc context
+ * using create-mcp-env job and run test on environment and download artifacts
+ *
+ * Expected parameters:
+ * MCP_ENV_PIPELINES_REFSPEC Used by rollout-mcp-env and delete-heat-stack-for-mcp-env
+ * MCP_ENV_HEAT_TEMPLATES_REFSPEC Used by rollout-mcp-env
+ * OPENSTACK_API_PROJECT OpenStack project name
+ * OPENSTACK_HEAT_AZ OpenStack availability zone
+ * OPENSTACK_ENVIRONMENT OpenStack environment
+ * HEAT_STACK_CONTEXT Same as in rollout-mcp-env
+ * STACK_DELETE Remove stack after test
+ * COOKIECUTTER_TEMPLATE_CONTEXT_FILE Path to file with base context from heat-templates
+ * COOKIECUTTER_EXTRA_CONTEXT Overrides base kubernetes_testing context
+ * EXTRA_REPOS Yaml based extra repos metadata to be added during bootstrap phase
+ * STACK_NAME The name of a stack in openstack (will be generated if empty)
+ * CLUSTER_MODEL_OVERRIDES List of cluster model yaml files parameters overrides (same as in create-mcp-env)
+ * SALT_MASTER_URL Full Salt API address.
+ * RUN_TESTS If true, run networking tests on the deployed environment
+ */
+
+common = new com.mirantis.mk.Common()
+
+def setBuildParameters(inputParams, allowedParams){
+ def result = []
+ allowedParams.each { param ->
+ if (inputParams.containsKey(param.name)) {
+ def value = inputParams[param.name]
+ def value_class = 'StringParameterValue'
+ switch (param.type) {
+ case 'boolean':
+ value = value.toBoolean()
+ value_class = 'BooleanParameterValue'
+ break
+ case 'text':
+ value_class = 'TextParameterValue'
+ break
+ }
+ result.add([
+ $class: value_class,
+ name: param.name,
+ value: value,
+ ])
+ }
+ }
+ return result
+}
+
+node ('python') {
+ def stack_name
+ if (common.validInputParam('STACK_NAME')) {
+ stack_name = STACK_NAME
+ } else {
+ stack_name = BUILD_TAG
+ }
+
+ currentBuild.description = stack_name
+
+ try {
+ stage ('Deploy cluster') {
+ deploy_build = build (job: "create-mcp-env", parameters: [
+ [$class: 'StringParameterValue', name: 'REFSPEC', value: MCP_ENV_PIPELINES_REFSPEC],
+ [$class: 'StringParameterValue', name: 'HEAT_TEMPLATES_REFSPEC', value: MCP_ENV_HEAT_TEMPLATES_REFSPEC],
+ [$class: 'StringParameterValue', name: 'OS_PROJECT_NAME', value: OPENSTACK_API_PROJECT],
+ [$class: 'StringParameterValue', name: 'OS_AZ', value: OPENSTACK_HEAT_AZ],
+ [$class: 'StringParameterValue', name: 'OPENSTACK_ENVIRONMENT', value: OPENSTACK_ENVIRONMENT],
+ [$class: 'StringParameterValue', name: 'STACK_NAME', value: stack_name],
+ [$class: 'StringParameterValue', name: 'COOKIECUTTER_TEMPLATE_CONTEXT_FILE', value: COOKIECUTTER_TEMPLATE_CONTEXT_FILE],
+ [$class: 'TextParameterValue', name: 'HEAT_STACK_CONTEXT', value: HEAT_STACK_CONTEXT],
+ [$class: 'TextParameterValue', name: 'COOKIECUTTER_EXTRA_CONTEXT', value: COOKIECUTTER_EXTRA_CONTEXT],
+ [$class: 'TextParameterValue', name: 'EXTRA_REPOS', value: EXTRA_REPOS],
+ [$class: 'TextParameterValue', name: 'CLUSTER_MODEL_OVERRIDES', value: CLUSTER_MODEL_OVERRIDES],
+ ]
+ )
+ }
+
+ if (Boolean.valueOf(RUN_TESTS)) {
+ stage ('Run networking tests') {
+ common.infoMsg('TODO')
+ }
+ }
+
+ // get salt master url
+ saltMasterUrl = "http://${deploy_build.description.tokenize(' ')[1]}:6969"
+
+ } finally {
+ if (Boolean.valueOf(STACK_DELETE)) {
+ stage ('Delete stack') {
+ common.infoMsg("Trying to delete stack ${stack_name}")
+ build (job: 'delete-heat-stack-for-mcp-env', propagate: true, parameters: [
+ [$class: 'StringParameterValue', name: 'REFSPEC', value: MCP_ENV_PIPELINES_REFSPEC],
+ [$class: 'StringParameterValue', name: 'OS_PROJECT_NAME', value: OPENSTACK_API_PROJECT],
+ [$class: 'StringParameterValue', name: 'OPENSTACK_ENVIRONMENT', value: OPENSTACK_ENVIRONMENT],
+ [$class: 'StringParameterValue', name: 'STACK_NAME', value: stack_name],
+ ]
+ )
+ }
+ }
+ }
+}
diff --git a/test-cookiecutter-reclass-chunk.groovy b/test-cookiecutter-reclass-chunk.groovy
index e0c9710..8c804a1 100644
--- a/test-cookiecutter-reclass-chunk.groovy
+++ b/test-cookiecutter-reclass-chunk.groovy
@@ -1,5 +1,3 @@
-package com.mirantis.mk
-
def common = new com.mirantis.mk.Common()
def saltModelTesting = new com.mirantis.mk.SaltModelTesting()
@@ -14,8 +12,8 @@
timeout(time: 1, unit: 'HOURS') {
node(slaveNode) {
stage("RunTest") {
+ extraVars = readYaml text: EXTRA_VARIABLES_YAML
try {
- extraVars = readYaml text: EXTRA_VARIABLES_YAML
currentBuild.description = extraVars.modelFile
sh(script: 'find . -mindepth 1 -delete || true', returnStatus: true)
sh(script: """
@@ -36,6 +34,9 @@
'testContext': extraVars.modelFile,
'dockerExtraOpts': [ '--memory=3g' ]
]
+ if (extraVars.DISTRIB_REVISION == 'nightly') {
+ config['nodegenerator'] = true
+ }
if (extraVars.useExtraRepos) {
config['extraRepos'] = extraVars.extraRepos ? extraVars.extraRepos : [:]
config['extraRepoMergeStrategy'] = extraVars.extraRepoMergeStrategy ? extraVars.extraRepoMergeStrategy : ''
@@ -46,6 +47,17 @@
currentBuild.result = "FAILURE"
currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
throw e
+ } finally {
+ stage('Save artifacts to Artifactory') {
+ def artifactory = new com.mirantis.mcp.MCPArtifactory()
+ def envGerritVars = [ "GERRIT_PROJECT=${extraVars.get('GERRIT_PROJECT', '')}", "GERRIT_CHANGE_NUMBER=${extraVars.get('GERRIT_CHANGE_NUMBER', '')}",
+ "GERRIT_PATCHSET_NUMBER=${extraVars.get('GERRIT_PATCHSET_NUMBER', '')}", "GERRIT_CHANGE_ID=${extraVars.get('GERRIT_CHANGE_ID', '')}",
+ "GERRIT_PATCHSET_REVISION=${extraVars.get('GERRIT_PATCHSET_REVISION', '')}" ]
+ withEnv(envGerritVars) {
+ def artifactoryLink = artifactory.uploadJobArtifactsToArtifactory(['artifactory': 'mcp-ci', 'artifactoryRepo': "drivetrain-local/${JOB_NAME}/${BUILD_NUMBER}"])
+ currentBuild.description += "<br/>${artifactoryLink}"
+ }
+ }
}
}
}
diff --git a/test-cookiecutter-reclass.groovy b/test-cookiecutter-reclass.groovy
index bd5ec1e..aa695f2 100644
--- a/test-cookiecutter-reclass.groovy
+++ b/test-cookiecutter-reclass.groovy
@@ -15,8 +15,8 @@
import groovy.json.JsonOutput
common = new com.mirantis.mk.Common()
+mcpCommon = new com.mirantis.mcp.Common()
gerrit = new com.mirantis.mk.Gerrit()
-git = new com.mirantis.mk.Git()
python = new com.mirantis.mk.Python()
extraVarsYAML = env.EXTRA_VARIABLES_YAML.trim() ?: ''
@@ -97,7 +97,7 @@
'buildId' : "${chunkJob.number}"])
}
-def StepTestModel(basename, reclassArtifactName, artifactCopyPath, useExtraRepos = false) {
+def StepTestModel(_basename, _reclassArtifactName, _artifactCopyPath, _useExtraRepos = false) {
// We need to wrap what we return in a Groovy closure, or else it's invoked
// when this method is called, not when we pass it to parallel.
// To do this, you need to wrap the code below in { }, and either return
@@ -105,7 +105,7 @@
// return node object
return {
node(slaveNode) {
- testModel(basename, reclassArtifactName, artifactCopyPath, useExtraRepos)
+ testModel(_basename, _reclassArtifactName, _artifactCopyPath, _useExtraRepos)
}
}
}
@@ -130,9 +130,23 @@
def StepGenerateModels(_contextFileList, _virtualenv, _templateEnvDir) {
return {
+ if (fileExists(new File(_templateEnvDir, 'tox.ini').toString())) {
+ // Merge contexts for nice base.yml based diff
+ dir(_templateEnvDir) {
+ sh('tox -ve merge_contexts')
+ }
+ }
for (contextFile in _contextFileList) {
def basename = common.GetBaseName(contextFile, '.yml')
- def context = readFile(file: "${_templateEnvDir}/contexts/${contextFile}")
+ def contextYaml = readYaml text: readFile(file: "${_templateEnvDir}/contexts/${contextFile}")
+ // secrets_encryption is overcomplicated for the expected 'fast syntax tests'
+ // So, lets disable it. It would be tested only in generate-cookiecutter-products.groovy pipeline
+ if (contextYaml['default_context'].get('secrets_encryption_enabled')) {
+ common.warningMsg('Disabling secrets_encryption_enabled for tests!')
+ contextYaml['default_context']['secrets_encryption_enabled'] = 'False'
+ }
+
+ def context = mcpCommon.dumpYAML(contextYaml)
if (!fileExists(new File(_templateEnvDir, 'tox.ini').toString())) {
common.warningMsg('Forming NEW reclass-root structure...')
python.generateModel(context, basename, 'cfg01', _virtualenv, "${_templateEnvDir}/model", _templateEnvDir)
@@ -250,8 +264,8 @@
// copy reclass system under envPath with -R and trailing / to support symlinks direct copy
sh("cp -R ${archiveBaseName}/ ${envPath}/${classesSystemDir}")
dir(envPath) {
- for (String context : contextList) {
- def basename = common.GetBaseName(context, '.yml')
+ for (String _context : contextList) {
+ def basename = common.GetBaseName(_context, '.yml')
dir("${envPath}/model/${basename}/classes") {
sh(script: "ln -sfv ../../../${classesSystemDir} system ")
}
@@ -458,7 +472,11 @@
currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
throw e
} finally {
- def dummy = "dummy"
+ stage('Save artifacts to Artifactory') {
+ def artifactory = new com.mirantis.mcp.MCPArtifactory()
+ def artifactoryLink = artifactory.uploadJobArtifactsToArtifactory(['artifactory': 'mcp-ci', 'artifactoryRepo': "drivetrain-local/${JOB_NAME}/${BUILD_NUMBER}"])
+ currentBuild.description += "<br/>${artifactoryLink}"
+ }
}
}
}
diff --git a/test-model-generator.groovy b/test-model-generator.groovy
index ee7c559..39723c6 100644
--- a/test-model-generator.groovy
+++ b/test-model-generator.groovy
@@ -144,7 +144,7 @@
}
dir(uiProject) {
python.runVirtualenvCommand("${env.WORKSPACE}/venv",
- "export IMAGE=${uiImage.id}; docker-compose up -d")
+ "export IMAGE=${uiImage.id}; docker-compose -f docker-compose-test.yml up -d")
common.retry(5, 20) {
sh 'curl -v http://127.0.0.1:3000 > /dev/null'
}
@@ -168,6 +168,10 @@
currentBuild.result = "FAILURE"
throw e
} finally {
+ sh (script: """map=\$(docker ps --format '{{.Names}}:{{.ID}}' --filter name=operations);\
+ for cont in \$map ; do NAME="\${cont%%:*}";ID="\${cont##*:}"; docker logs \$ID > \$NAME.log 2>&1 ; done""")
+ archiveArtifacts "*.log"
+
if (fileExists(testReportHTMLFile)) {
archiveArtifacts artifacts: testReportHTMLFile
}
@@ -192,7 +196,7 @@
}
// Remove everything what is owned by root
testImage.inside(testImageOptions) {
- sh("rm -rf /var/lib/qa_reports/* ${env.WORKSPACE}/${apiProject} ${env.WORKSPACE}/${uiProject}")
+ sh("rm -rf /var/lib/qa_reports/* ${env.WORKSPACE}/${apiProject} ${env.WORKSPACE}/${uiProject} ${env.WORKSPACE}/*.log")
}
}
}
diff --git a/test-run-rally.groovy b/test-run-rally.groovy
deleted file mode 100644
index 3f2339f..0000000
--- a/test-run-rally.groovy
+++ /dev/null
@@ -1,61 +0,0 @@
-/**
- *
- * Service test pipeline
- *
- * Expected parameters:
- * SALT_MASTER_URL URL of Salt master
- * SALT_MASTER_CREDENTIALS Credentials to the Salt API
- * Test settings:
- * IMAGE_LINK Link to docker image with Rally
- * RALLY_SCENARIO Rally test scenario
- * TEST_TARGET Salt target for Rally node
- * CONTAINER_NAME Name of the Docker container which runs Rally
- * CLEANUP_REPORTS_AND_CONTAINER Cleanup reports from rally,tempest container, remove all containers started the IMAGE_LINK
- * DO_CLEANUP_RESOURCES If "true": runs clean-up script for removing Rally and Tempest resources
- */
-
-
-common = new com.mirantis.mk.Common()
-salt = new com.mirantis.mk.Salt()
-test = new com.mirantis.mk.Test()
-def python = new com.mirantis.mk.Python()
-
-def pepperEnv = "pepperEnv"
-timeout(time: 12, unit: 'HOURS') {
- node("python") {
- try {
-
- //
- // Prepare connection
- //
- stage('Setup virtualenv for Pepper') {
- python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
- }
-
- //
- // Test
- //
-
- stage('Run OpenStack Rally scenario') {
- test.runRallyScenarios(pepperEnv, IMAGE_LINK, TEST_TARGET, RALLY_SCENARIO, "/home/rally/rally_reports/",
- DO_CLEANUP_RESOURCES)
- }
- stage('Copy test reports') {
- test.copyTempestResults(pepperEnv, TEST_TARGET)
- }
- stage('Archiving test artifacts') {
- test.archiveRallyArtifacts(pepperEnv, TEST_TARGET)
- }
- } catch (Throwable e) {
- currentBuild.result = 'FAILURE'
- throw e
- } finally {
- if (CLEANUP_REPORTS_AND_CONTAINER.toBoolean()) {
- stage('Cleanup reports and container') {
- test.removeReports(pepperEnv, TEST_TARGET, "rally_reports", 'rally_reports.tar')
- test.removeDockerContainer(pepperEnv, TEST_TARGET, CONTAINER_NAME)
- }
- }
- }
- }
-}
diff --git a/test-run-tempest.groovy b/test-run-tempest.groovy
deleted file mode 100644
index 6edb276..0000000
--- a/test-run-tempest.groovy
+++ /dev/null
@@ -1,59 +0,0 @@
-/**
- *
- * Service test pipeline
- *
- * Expected parameters:
- * SALT_MASTER_URL URL of Salt master
- * SALT_MASTER_CREDENTIALS Credentials to the Salt API
- * Test settings:
- * IMAGE_LINK Link to docker image with Rally and Tempest
- * TEST_TEMPEST_PATTERN If not false, run tests matched to pattern only
- * TEST_TARGET Salt target for tempest node
- * CLEANUP_REPORTS Cleanup reports from rally,tempest container, remove all containers started the IMAGE_LINK
- * SET Predefined set for tempest tests
- * CONCURRENCY How many processes to use to run Tempest tests
- * DO_CLEANUP_RESOURCES If "true": runs clean-up script for removing Rally and Tempest resources
- */
-
-
-common = new com.mirantis.mk.Common()
-salt = new com.mirantis.mk.Salt()
-test = new com.mirantis.mk.Test()
-def python = new com.mirantis.mk.Python()
-
-def pepperEnv = "pepperEnv"
-timeout(time: 12, unit: 'HOURS') {
- node("python") {
- try {
-
- stage('Setup virtualenv for Pepper') {
- python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
- }
-
- //
- // Test
- //
-
- stage('Run OpenStack Tempest tests') {
- test.runTempestTests(pepperEnv, IMAGE_LINK, TEST_TARGET, TEST_TEMPEST_PATTERN, "/home/rally/rally_reports/",
- "/home/rally/keystonercv3", SET, CONCURRENCY, "mcp.conf", "mcp_skip.list", "/root/keystonercv3",
- "/root/rally_reports", DO_CLEANUP_RESOURCES)
- }
- stage('Copy test reports') {
- test.copyTempestResults(pepperEnv, TEST_TARGET)
- }
- stage('Archiving test artifacts') {
- test.archiveRallyArtifacts(pepperEnv, TEST_TARGET)
- }
- } catch (Throwable e) {
- currentBuild.result = 'FAILURE'
- throw e
- } finally {
- if (CLEANUP_REPORTS.toBoolean()) {
- stage('Cleanup reports') {
- test.removeReports(pepperEnv, TEST_TARGET, "rally_reports", 'rally_reports.tar')
- }
- }
- }
- }
-}
diff --git a/test-salt-formulas-env.groovy b/test-salt-formulas-env.groovy
index e007fe9..de631bf 100644
--- a/test-salt-formulas-env.groovy
+++ b/test-salt-formulas-env.groovy
@@ -22,6 +22,11 @@
openstack_credentials_id = OPENSTACK_API_CREDENTIALS
}
+env.GERRIT_BRANCH = 'master'
+if (common.validInputParam('GERRIT_PARENT_BRANCH')) {
+ env.GERRIT_BRANCH = GERRIT_PARENT_BRANCH
+}
+
def checkouted = false
def openstackTest = false
def travisLess = false /** TODO: Remove once formulas are witched to new config */
diff --git a/test-salt-formulas-pipeline.groovy b/test-salt-formulas-pipeline.groovy
index 4326433..640cfed 100644
--- a/test-salt-formulas-pipeline.groovy
+++ b/test-salt-formulas-pipeline.groovy
@@ -9,13 +9,21 @@
common = new com.mirantis.mk.Common()
def gerrit = new com.mirantis.mk.Gerrit()
def ruby = new com.mirantis.mk.Ruby()
+def dockerLib = new com.mirantis.mk.Docker()
def gerritRef = env.GERRIT_REFSPEC ?: null
def defaultGitRef = env.DEFAULT_GIT_REF ?: null
def defaultGitUrl = env.DEFAULT_GIT_URL ?: null
def slaveNode = env.SLAVE_NODE ?: 'virtual'
def saltVersion = env.SALT_VERSION ?: ""
-def dockerLib = new com.mirantis.mk.Docker()
+
+gerritBranch = 'master'
+if (common.validInputParam('GERRIT_BRANCH')) {
+ gerritBranch = env.GERRIT_BRANCH
+} else if (common.validInputParam('GATING_GERRIT_BRANCH')) {
+ gerritBranch = env.GATING_GERRIT_BRANCH
+ }
+
def checkouted = false
@@ -59,7 +67,8 @@
[$class: 'StringParameterValue', name: 'DEFAULT_GIT_REF', value: defaultGitRef],
[$class: 'StringParameterValue', name: 'DEFAULT_GIT_URL', value: defaultGitUrl],
[$class: 'StringParameterValue', name: 'SALT_OPTS', value: SALT_OPTS],
- [$class: 'StringParameterValue', name: 'SALT_VERSION', value: SALT_VERSION]
+ [$class: 'StringParameterValue', name: 'SALT_VERSION', value: SALT_VERSION],
+ [$class: 'StringParameterValue', name: 'GERRIT_PARENT_BRANCH', value: gerritBranch]
]
}
diff --git a/test-salt-model-wrapper.groovy b/test-salt-model-wrapper.groovy
deleted file mode 100644
index 42aa4e9..0000000
--- a/test-salt-model-wrapper.groovy
+++ /dev/null
@@ -1,230 +0,0 @@
-/*
- Global CI wrapper for testing next projects:
- - salt-models/reclass-system
- - mk/cookiecutter-templates
-
- Wrapper allows to test cross-project patches, based on
- 'Depends-On: http://<gerrit_address>/<change_number>' key phrase
- */
-
-import groovy.json.JsonOutput
-
-gerrit = new com.mirantis.mk.Gerrit()
-
-cookiecutterTemplatesRepo = 'mk/cookiecutter-templates'
-reclassSystemRepo = 'salt-models/reclass-system'
-slaveNode = env.getProperty('SLAVE_NODE') ?: 'virtual'
-
-voteMatrix = [
- 'test-mk-cookiecutter-templates' : true,
- 'test-drivetrain' : true,
- 'oscore-test-cookiecutter-models': false,
- 'test-salt-model-infra' : true,
- 'test-salt-model-mcp-virtual-lab': false,
-]
-
-baseGerritConfig = [:]
-buildTestParams = [:]
-jobResultComments = [:]
-commentLock = false
-
-// post Gerrit review comment to patch
-def setGerritReviewComment() {
- if (baseGerritConfig) {
- while (commentLock) {
- sleep 5
- }
- commentLock = true
- LinkedHashMap config = baseGerritConfig.clone()
- String jobResultComment = ''
- jobResultComments.each { threadName, info ->
- String skipped = voteMatrix.get(info.job, 'true') ? '' : '(non-voting)'
- jobResultComment += "- ${threadName} ${info.url}console : ${info.status} ${skipped}".trim() + '\n'
- }
- config['message'] = sh(script: "echo '${jobResultComment}'", returnStdout: true).trim()
- gerrit.postGerritComment(config)
- commentLock = false
- }
-}
-
-// get job parameters for YAML-based job parametrization
-def yamlJobParameters(LinkedHashMap jobParams) {
- return [
- [$class: 'TextParameterValue', name: 'EXTRA_VARIABLES_YAML', value: JsonOutput.toJson(jobParams)]
- ]
-}
-
-// run needed job with params
-def runTests(String jobName, ArrayList jobParams, String threadName = '', Boolean voteOverride = null) {
- threadName = threadName ? threadName : jobName
- def propagateStatus = voteOverride != null ? voteOverride : voteMatrix.get(jobName, true)
- return {
- def jobBuild = build job: jobName, propagate: false, parameters: jobParams
- jobResultComments[threadName] = ['url': jobBuild.absoluteUrl, 'status': jobBuild.result, 'job': jobName]
- setGerritReviewComment()
- if (propagateStatus && jobBuild.result == 'FAILURE') {
- throw new Exception("Build ${threadName} is failed!")
- }
- }
-}
-
-// set params based on depending patches
-def setupDependingVars(LinkedHashMap dependingProjects) {
- if (dependingProjects) {
- if (dependingProjects.containsKey(reclassSystemRepo)) {
- buildTestParams['RECLASS_SYSTEM_GIT_REF'] = dependingProjects[reclassSystemRepo].ref
- buildTestParams['RECLASS_SYSTEM_BRANCH'] = dependingProjects[reclassSystemRepo].branch
- }
- if (dependingProjects.containsKey(cookiecutterTemplatesRepo)) {
- buildTestParams['COOKIECUTTER_TEMPLATE_REF'] = dependingProjects[cookiecutterTemplatesRepo].ref
- buildTestParams['COOKIECUTTER_TEMPLATE_BRANCH'] = dependingProjects[cookiecutterTemplatesRepo].branch
- }
- }
-}
-
-timeout(time: 12, unit: 'HOURS') {
- node(slaveNode) {
- def common = new com.mirantis.mk.Common()
-
- // Var EXTRA_VARIABLES_YAML contains any additional parameters for tests,
- // like manually specified Gerrit Refs/URLs, additional parameters and so on
- def buildTestParamsYaml = env.getProperty('EXTRA_VARIABLES_YAML')
- if (buildTestParamsYaml) {
- common.mergeEnv(env, buildTestParamsYaml)
- buildTestParams = readYaml text: buildTestParamsYaml
- }
-
- // init required job variables
- LinkedHashMap job_env = env.getEnvironment().findAll { k, v -> v }
-
- // Gerrit parameters
- String gerritCredentials = job_env.get('CREDENTIALS_ID', 'gerrit')
- String gerritRef = job_env.get('GERRIT_REFSPEC')
- String gerritProject = job_env.get('GERRIT_PROJECT')
- String gerritName = job_env.get('GERRIT_NAME')
- String gerritScheme = job_env.get('GERRIT_SCHEME')
- String gerritHost = job_env.get('GERRIT_HOST')
- String gerritPort = job_env.get('GERRIT_PORT')
- String gerritChangeNumber = job_env.get('GERRIT_CHANGE_NUMBER')
- String gerritPatchSetNumber = job_env.get('GERRIT_PATCHSET_NUMBER')
- String gerritBranch = job_env.get('GERRIT_BRANCH')
- Boolean gateMode = job_env.get('GERRIT_CI_MERGE_TRIGGER', false).toBoolean()
-
- // Common and manual build parameters
- LinkedHashMap projectsMap = [:]
- String distribRevision = 'nightly'
- //checking if the branch is from release
- if (gerritBranch.startsWith('release')) {
- distribRevision = gerritBranch.tokenize('/')[-1]
- // Check if we are going to test bleeding-edge release, which doesn't have binary release yet
- // After 2018q4 releases, need to also check 'static' repo, for example ubuntu.
- binTest = common.checkRemoteBinary(['mcp_version': distribRevision])
- if (!binTest.linux_system_repo_url || !binTest.linux_system_repo_ubuntu_url) {
- common.errorMsg("Binary release: ${distribRevision} not exist or not full. Fallback to 'proposed'! ")
- distribRevision = 'proposed'
- }
- }
- ArrayList testModels = job_env.get('TEST_MODELS', 'mcp-virtual-lab,infra').split(',')
-
- stage('Gerrit prepare') {
- // check if change aren't already merged
- def gerritChange = gerrit.getGerritChange(gerritName, gerritHost, gerritChangeNumber, gerritCredentials)
- if (gerritChange.status == "MERGED") {
- common.successMsg('Patch set is alredy merged, no need to test it')
- currentBuild.result = 'SUCCESS'
- return
- }
- buildTestParams << job_env.findAll { k, v -> k ==~ /GERRIT_.+/ }
- baseGerritConfig = [
- 'gerritName' : gerritName,
- 'gerritHost' : gerritHost,
- 'gerritPort' : gerritPort,
- 'gerritChangeNumber' : gerritChangeNumber,
- 'credentialsId' : gerritCredentials,
- 'gerritPatchSetNumber': gerritPatchSetNumber,
- ]
- LinkedHashMap gerritDependingProjects = gerrit.getDependentPatches(baseGerritConfig)
- setupDependingVars(gerritDependingProjects)
- ArrayList descriptionMsgs = [
- "Running with next parameters:",
- "Ref for ${gerritProject} => ${gerritRef}",
- "Branch for ${gerritProject} => ${gerritBranch}"
- ]
- descriptionMsgs.add("Distrib revision => ${distribRevision}")
- for (String project in gerritDependingProjects.keySet()) {
- descriptionMsgs.add("---")
- descriptionMsgs.add("Depending patch to ${project} found:")
- descriptionMsgs.add("Ref for ${project} => ${gerritDependingProjects[project]['ref']}")
- descriptionMsgs.add("Branch for ${project} => ${gerritDependingProjects[project]['branch']}")
- }
- currentBuild.description = descriptionMsgs.join('<br/>')
- gerrit.gerritPatchsetCheckout([
- credentialsId: gerritCredentials
- ])
- }
-
- stage("Run tests") {
- def documentationOnly = sh(script: "git diff-tree --no-commit-id --name-only -r HEAD | grep -v .releasenotes", returnStatus: true) == 1
- if (documentationOnly) {
- common.infoMsg("Tests skipped, documenation only changed!")
- currentBuild.result = 'SUCCESS'
- return
- }
-
- def branches = [:]
- branches.failFast = false
- String branchJobName = ''
-
- if (gerritProject == reclassSystemRepo && gerritBranch == 'master') {
- sh("git diff-tree --no-commit-id --diff-filter=d --name-only -r HEAD | grep .yml | xargs -I {} python -c \"import yaml; yaml.load(open('{}', 'r'))\" \\;")
- def defaultSystemURL = "${gerritScheme}://${gerritName}@${gerritHost}:${gerritPort}/${gerritProject}"
- for (int i = 0; i < testModels.size(); i++) {
- def cluster = testModels[i]
- def clusterGitUrl = defaultSystemURL.substring(0, defaultSystemURL.lastIndexOf("/") + 1) + cluster
- branchJobName = "test-salt-model-${cluster}"
- def jobParams = [
- [$class: 'StringParameterValue', name: 'DEFAULT_GIT_URL', value: clusterGitUrl],
- [$class: 'StringParameterValue', name: 'DEFAULT_GIT_REF', value: "HEAD"],
- [$class: 'StringParameterValue', name: 'SYSTEM_GIT_URL', value: defaultSystemURL],
- [$class: 'StringParameterValue', name: 'SYSTEM_GIT_REF', value: gerritRef],
- ]
- branches[branchJobName] = runTests(branchJobName, jobParams)
- }
- }
- if (gerritProject == reclassSystemRepo || gerritProject == cookiecutterTemplatesRepo) {
- branchJobName = 'test-mk-cookiecutter-templates'
- branches[branchJobName] = runTests(branchJobName, yamlJobParameters(buildTestParams))
- }
-
- if (!gateMode) {
- // testing backward compatibility
- if (gerritBranch == 'master' && gerritProject == reclassSystemRepo) {
- def backwardCompatibilityRefsToTest = ['proposed', 'release/2018.11.0', 'release/2019.2.0']
- for (String oldRef in backwardCompatibilityRefsToTest) {
- LinkedHashMap buildTestParamsOld = buildTestParams.clone()
- buildTestParamsOld['COOKIECUTTER_TEMPLATE_REF'] = ''
- buildTestParamsOld['COOKIECUTTER_TEMPLATE_BRANCH'] = oldRef
- String threadName = "${branchJobName}-${oldRef}"
- // disable votes for release/2018.11.0 branch
- overrideVote = oldRef == 'release/2018.11.0' ? false : null
- branches[threadName] = runTests(branchJobName, yamlJobParameters(buildTestParamsOld), threadName, overrideVote)
- }
- }
- if (gerritProject == cookiecutterTemplatesRepo) {
- branchJobName = 'test-drivetrain'
- branches[branchJobName] = runTests(branchJobName, yamlJobParameters(buildTestParams))
- branchJobName = 'oscore-test-cookiecutter-models'
- branches[branchJobName] = runTests(branchJobName, yamlJobParameters(buildTestParams))
- }
- }
-
- branches.keySet().each { key ->
- if (branches[key] instanceof Closure) {
- jobResultComments[key] = ['url': job_env.get('BUILD_URL'), 'status': 'WAITING']
- }
- }
- setGerritReviewComment()
- parallel branches
- }
- }
-}
diff --git a/upgrade-mcp-release.groovy b/upgrade-mcp-release.groovy
index 77c0a26..02e9270 100644
--- a/upgrade-mcp-release.groovy
+++ b/upgrade-mcp-release.groovy
@@ -67,11 +67,12 @@
def validateReclassModel(ArrayList saltMinions, String suffix) {
try {
- for(String minion in saltMinions) {
- common.infoMsg("Reclass model validation for minion ${minion}...")
- def ret = salt.cmdRun(venvPepper, 'I@salt:master', "reclass -n ${minion}", true, null, false)
- def reclassInv = ret.values()[0]
- writeFile file: "inventory-${minion}-${suffix}.out", text: reclassInv.toString()
+ dir(suffix) {
+ for(String minion in saltMinions) {
+ common.infoMsg("Reclass model validation for minion ${minion}...")
+ def ret = salt.cmdRun("${workspace}/${venvPepper}", 'I@salt:master', "reclass -n ${minion}", true, null, false).get('return')[0].values()[0]
+ writeFile file: minion, text: ret.toString()
+ }
}
} catch (Exception e) {
common.errorMsg('Can not validate current Reclass model. Inspect failed minion manually.')
@@ -79,12 +80,17 @@
}
}
-def archiveReclassModelChanges(ArrayList saltMinions, String oldSuffix='before', String newSuffix='after') {
- for(String minion in saltMinions) {
- def fileName = "reclass-model-${minion}-diff.out"
- sh "diff -u inventory-${minion}-${oldSuffix}.out inventory-${minion}-${newSuffix}.out > ${fileName} || true"
- archiveArtifacts artifacts: "${fileName}"
+def archiveReclassModelChanges(ArrayList saltMinions, String oldSuffix, String newSuffix) {
+ def diffDir = 'pillarsDiff'
+ dir(diffDir) {
+ for(String minion in saltMinions) {
+ def fileName = "reclass-model-${minion}-diff.out"
+ sh "diff -u ${workspace}/${oldSuffix}/${minion} ${workspace}/${newSuffix}/${minion} > ${fileName} || true"
+ }
}
+ archiveArtifacts artifacts: "${workspace}/${oldSuffix}"
+ archiveArtifacts artifacts: "${workspace}/${newSuffix}"
+ archiveArtifacts artifacts: "${workspace}/${diffDir}"
}
if (common.validInputParam('PIPELINE_TIMEOUT')) {
@@ -96,9 +102,10 @@
}
timeout(time: pipelineTimeout, unit: 'HOURS') {
- node("python") {
+ node("python && docker") {
try {
workspace = common.getWorkspace()
+ deleteDir()
targetMcpVersion = null
if (!common.validInputParam('TARGET_MCP_VERSION') && !common.validInputParam('MCP_VERSION')) {
error('You must specify MCP version in TARGET_MCP_VERSION|MCP_VERSION variable')
@@ -129,6 +136,10 @@
def updatePipelines = ''
def updateLocalRepos = ''
def reclassSystemBranch = ''
+ def reclassSystemBranchDefault = gitTargetMcpVersion
+ if (gitTargetMcpVersion != 'proposed') {
+ reclassSystemBranchDefault = "origin/${gitTargetMcpVersion}"
+ }
def driteTrainParamsYaml = env.getProperty('DRIVE_TRAIN_PARAMS')
if (driteTrainParamsYaml) {
def driteTrainParams = readYaml text: driteTrainParamsYaml
@@ -138,7 +149,7 @@
updateClusterModel = driteTrainParams.get('UPDATE_CLUSTER_MODEL', false).toBoolean()
updatePipelines = driteTrainParams.get('UPDATE_PIPELINES', false).toBoolean()
updateLocalRepos = driteTrainParams.get('UPDATE_LOCAL_REPOS', false).toBoolean()
- reclassSystemBranch = driteTrainParams.get('RECLASS_SYSTEM_BRANCH', gitTargetMcpVersion)
+ reclassSystemBranch = driteTrainParams.get('RECLASS_SYSTEM_BRANCH', reclassSystemBranchDefault)
} else {
// backward compatibility for 2018.11.0
saltMastURL = env.getProperty('SALT_MASTER_URL')
@@ -147,20 +158,23 @@
updateClusterModel = env.getProperty('UPDATE_CLUSTER_MODEL').toBoolean()
updatePipelines = env.getProperty('UPDATE_PIPELINES').toBoolean()
updateLocalRepos = env.getProperty('UPDATE_LOCAL_REPOS').toBoolean()
- reclassSystemBranch = gitTargetMcpVersion
+ reclassSystemBranch = reclassSystemBranchDefault
}
python.setupPepperVirtualenv(venvPepper, saltMastURL, saltMastCreds)
+ def pillarsBeforeSuffix = 'pillarsBefore'
+ def pillarsAfterSuffix = 'pillarsAfter'
def inventoryBeforeFilename = "reclass-inventory-before.out"
def inventoryAfterFilename = "reclass-inventory-after.out"
def minions = salt.getMinions(venvPepper, '*')
+ def cluster_name = salt.getPillar(venvPepper, 'I@salt:master', "_param:cluster_name").get("return")[0].values()[0]
stage("Update Reclass and Salt-Formulas ") {
- validateReclassModel(minions, 'before')
+ validateReclassModel(minions, pillarsBeforeSuffix)
+ archiveReclassInventory(inventoryBeforeFilename)
- def cluster_name = salt.getPillar(venvPepper, 'I@salt:master', "_param:cluster_name").get("return")[0].values()[0]
try {
salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/ && git diff-index --quiet HEAD --")
}
@@ -170,7 +184,7 @@
if (updateClusterModel) {
common.infoMsg('Perform: UPDATE_CLUSTER_MODEL')
def dateTime = common.getDatetime()
- salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/ && git submodule update")
+ salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/ && git submodule foreach git fetch")
salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && " +
"grep -r --exclude-dir=aptly -l 'mcp_version: .*' * | xargs --no-run-if-empty sed -i 's|mcp_version: .*|mcp_version: \"$targetMcpVersion\"|g'")
// Do the same, for deprecated variable-duplicate
@@ -211,9 +225,112 @@
"git add -u && git commit --allow-empty -m 'Cluster model update to the release $targetMcpVersion on $dateTime'")
}
+ salt.runSaltProcessStep(venvPepper, 'I@salt:master', 'saltutil.refresh_pillar')
+ try {
+ salt.enforceState(venvPepper, 'I@salt:master', 'linux.system.repo')
+ } catch (Exception e) {
+ common.errorMsg("Something wrong with model after UPDATE_CLUSTER_MODEL step. Please check model.")
+ throw e
+ }
+
+ common.infoMsg('Running a check for compatibility with new Reclass/Salt-Formulas packages')
+ def saltModelDir = 'salt-model'
+ def nodesArtifact = 'pillarsFromValidation.tar.gz'
+ def reclassModel = 'reclassModel.tar.gz'
+ def pillarsAfterValidation = 'pillarsFromValidation'
+ try {
+ def repos = salt.getPillar(venvPepper, 'I@salt:master', "linux:system:repo").get("return")[0].values()[0]
+ def cfgInfo = salt.getPillar(venvPepper, 'I@salt:master', "reclass:storage:node:infra_cfg01_node").get("return")[0].values()[0]
+ def docker_image_for_test = salt.getPillar(venvPepper, 'I@salt:master', "_param:docker_image_cvp_sanity_checks").get("return")[0].values()[0]
+ def saltModelTesting = new com.mirantis.mk.SaltModelTesting()
+ def config = [
+ 'dockerHostname': "cfg01",
+ 'distribRevision': "${targetMcpVersion}",
+ 'baseRepoPreConfig': true,
+ 'extraRepoMergeStrategy': 'override',
+ 'dockerContainerName': 'new-reclass-package-check',
+ 'dockerMaxCpus': 1,
+ 'image': docker_image_for_test,
+ 'dockerExtraOpts': [
+ "-v ${env.WORKSPACE}/${saltModelDir}:/srv/salt/reclass",
+ "--entrypoint ''",
+ ],
+ 'extraRepos': ['repo': repos, 'aprConfD': "APT::Get::AllowUnauthenticated 'true';" ],
+ 'envOpts': [ "CLUSTER_NAME=${cluster_name}", "NODES_ARTIFACT_NAME=${nodesArtifact}" ]
+ ]
+ def tarName = '/tmp/currentModel.tar.gz'
+ salt.cmdRun(venvPepper, 'I@salt:master', "tar -cf ${tarName} --mode='a+rwX' --directory=/srv/salt/reclass classes")
+ if (cfgInfo == '') {
+ // case for old setups when cfg01 node model was static
+ def node_name = salt.getPillar(venvPepper, 'I@salt:master', "linux:system:name").get("return")[0].values()[0]
+ def node_domain = salt.getPillar(venvPepper, 'I@salt:master', "linux:system:domain").get("return")[0].values()[0]
+ salt.cmdRun(venvPepper, 'I@salt:master', "tar -rf ${tarName} --mode='a+rwX' --directory=/srv/salt/reclass nodes/${node_name}.${node_domain}.yml")
+ config['envOpts'].add("CFG_NODE_NAME=${node_name}.${node_domain}")
+ }
+ def modelHash = salt.cmdRun(venvPepper, 'I@salt:master', "cat ${tarName} | gzip -9 -c | base64", false, null, false).get('return')[0].values()[0]
+ writeFile file: 'modelHash', text: modelHash
+ sh "cat modelHash | base64 -d | gzip -d > ${reclassModel}"
+ sh "mkdir ${saltModelDir} && tar -xf ${reclassModel} -C ${saltModelDir}"
+
+ config['runCommands'] = [
+ '001_Install_Salt_Reclass_Packages': { sh('apt-get install -y reclass salt-formula-*') },
+ '002_Get_new_nodes': {
+ try {
+ sh('''#!/bin/bash
+ new_generated_dir=/srv/salt/_new_nodes
+ new_pillar_dir=/srv/salt/_new_pillar
+ reclass_classes=/srv/salt/reclass/classes/
+ mkdir -p ${new_generated_dir} ${new_pillar_dir}
+ nodegenerator -b ${reclass_classes} -o ${new_generated_dir} ${CLUSTER_NAME}
+ for node in $(ls ${new_generated_dir}); do
+ nodeName=$(basename -s .yml ${node})
+ reclass -n ${nodeName} -c ${reclass_classes} -u ${new_generated_dir} > ${new_pillar_dir}/${nodeName}
+ done
+ if [[ -n "${CFG_NODE_NAME}" ]]; then
+ reclass -n ${CFG_NODE_NAME} -c ${reclass_classes} -u /srv/salt/reclass/nodes > ${new_pillar_dir}/${CFG_NODE_NAME}
+ fi
+ tar -czf /tmp/${NODES_ARTIFACT_NAME} -C ${new_pillar_dir}/ .
+ ''')
+ } catch (Exception e) {
+ print "Test new nodegenerator tool is failed: ${e}"
+ throw e
+ }
+ },
+ ]
+ config['runFinally'] = [ '001_Archive_nodegenerator_artefact': {
+ sh(script: "mv /tmp/${nodesArtifact} ${env.WORKSPACE}/${nodesArtifact}")
+ archiveArtifacts artifacts: nodesArtifact
+ }]
+ saltModelTesting.setupDockerAndTest(config)
+ def pillarsValidationDiff = "${pillarsAfterValidation}/diffFromOriginal"
+ sh "mkdir -p ${pillarsValidationDiff} && tar -xf ${nodesArtifact} --dir ${pillarsAfterValidation}/"
+ def changesFound = false
+ for(String minion in minions) {
+ try {
+ sh (script:"diff -u -w -I '^Salt command execution success' -I '^ node: ' -I '^ uri: ' -I '^ timestamp: ' ${pillarsBeforeSuffix}/${minion} ${pillarsAfterValidation}/${minion} > ${pillarsValidationDiff}/${minion}", returnStdout: true)
+ } catch(Exception e) {
+ changesFound = true
+ archiveArtifacts artifacts: "${pillarsValidationDiff}/${minion}"
+ def buildUrl = env.BUILD_URL ? env.BUILD_URL : "${env.JENKINS_URL}/job/${env.JOB_NAME}/${env.BUILD_NUMBER}"
+ common.errorMsg("Found diff changes for ${minion} minion: ${buildUrl}/artifact/${pillarsValidationDiff}/${minion}/*view*/ ")
+ }
+ }
+ if (changesFound) {
+ common.warningMsg('Found diff changes between current pillar data and updated. Inspect logs above.')
+ input message: 'Continue anyway?'
+ } else {
+ common.infoMsg('Diff between current pillar data and updated one - not found.')
+ }
+ } catch (Exception updateErr) {
+ common.warningMsg(updateErr)
+ common.warningMsg('Failed to validate update Salt Formulas repos/packages.')
+ input message: 'Continue anyway?'
+ } finally {
+ sh "rm -rf ${saltModelDir} ${nodesArtifact} ${pillarsAfterValidation} ${reclassModel}"
+ }
+
try {
common.infoMsg('Perform: UPDATE Salt Formulas')
- salt.enforceState(venvPepper, 'I@salt:master', 'linux.system.repo')
def saltEnv = salt.getPillar(venvPepper, 'I@salt:master', "_param:salt_master_base_environment").get("return")[0].values()[0]
salt.runSaltProcessStep(venvPepper, 'I@salt:master', 'state.sls_id', ["salt_master_${saltEnv}_pkg_formulas",'salt.master.env'])
} catch (Exception updateErr) {
@@ -222,8 +339,6 @@
input message: 'Continue anyway?'
}
- archiveReclassInventory(inventoryBeforeFilename)
-
try {
common.infoMsg('Perform: UPDATE Reclass package')
salt.runSaltProcessStep(venvPepper, 'I@salt:master', 'pkg.install', ["reclass"])
@@ -256,12 +371,11 @@
sh "diff -u $inventoryBeforeFilename $inventoryAfterFilename > reclass-inventory-diff.out || true"
archiveArtifacts artifacts: "reclass-inventory-diff.out"
- validateReclassModel(minions, 'after')
- archiveReclassModelChanges(minions)
+ validateReclassModel(minions, pillarsAfterSuffix)
+ archiveReclassModelChanges(minions, pillarsBeforeSuffix, pillarsAfterSuffix)
}
if (updateLocalRepos) {
- def cluster_name = salt.getPillar(venvPepper, 'I@salt:master', "_param:cluster_name").get("return")[0].values()[0]
stage("Update local repos") {
common.infoMsg("Updating local repositories")
diff --git a/validate-cloud.groovy b/validate-cloud.groovy
index fa9a7a6..930a27d 100644
--- a/validate-cloud.groovy
+++ b/validate-cloud.groovy
@@ -1,30 +1,20 @@
/**
*
- * Launch validation of the cloud
+ * Launch validation of the cloud with Rally
*
* Expected parameters:
+ *
+ * JOB_TIMEOUT Job timeout in hours
* SALT_MASTER_URL URL of Salt master
* SALT_MASTER_CREDENTIALS Credentials to the Salt API
+ * VALIDATE_PARAMS Validate job YAML params (see below)
*
- * TEST_IMAGE Docker image link
- * TARGET_NODE Salt target for tempest node
- * TEMPEST_TEST_SET If not false, run tests matched to pattern only
- * TEMPEST_CONFIG_REPO Git repository with configuration files for Tempest
- * TEMPEST_CONFIG_BRANCH Git branch which will be used during the checkout
- * TEMPEST_REPO Git repository with Tempest
- * TEMPEST_VERSION Version of Tempest (tag, branch or commit)
- * RUN_TEMPEST_TESTS If not false, run Tempest tests
- * RUN_RALLY_TESTS If not false, run Rally tests
- * K8S_RALLY If not false, run Kubernetes Rally tests
- * STACKLIGHT_RALLY If not false, run additional Stacklight tests
- * RUN_K8S_TESTS If not false, run Kubernetes e2e/conformance tests
- * RUN_SPT_TESTS If not false, run SPT tests
- * SPT_SSH_USER The name of the user which should be used for ssh to nodes
- * SPT_IMAGE The name of the image for SPT tests
- * SPT_IMAGE_USER The name of the user for SPT image
- * SPT_FLAVOR The name of the flavor for SPT image
+ * Rally - map with parameters for starting Rally tests
+ *
* AVAILABILITY_ZONE The name of availability zone
* FLOATING_NETWORK The name of the external(floating) network
+ * K8S_RALLY Use Kubernetes Rally plugin for testing K8S cluster
+ * STACKLIGHT_RALLY Use Stacklight Rally plugin for testing Stacklight
* RALLY_IMAGE The name of the image for Rally tests
* RALLY_FLAVOR The name of the flavor for Rally image
* RALLY_PLUGINS_REPO Git repository with Rally plugins
@@ -34,132 +24,363 @@
* RALLY_SCENARIOS Path to file or directory with rally scenarios
* RALLY_SL_SCENARIOS Path to file or directory with stacklight rally scenarios
* RALLY_TASK_ARGS_FILE Path to file with rally tests arguments
- * REPORT_DIR Path for reports outside docker image
- * TEST_K8S_API_SERVER Kubernetes API address
- * TEST_K8S_CONFORMANCE_IMAGE Path to docker image with conformance e2e tests
- * TEST_K8S_NODE Kubernetes node to run tests from
- * GENERATE_REPORT If not false, run report generation command
- * ACCUMULATE_RESULTS If true, results from the previous build will be used
- * JOB_TIMEOUT Job timeout in hours
+ * RALLY_DB_CONN_STRING Rally-compliant DB connection string for long-term storing
+ * results to external DB
+ * RALLY_TAGS List of tags for marking Rally tasks. Can be used when
+ * generating Rally trends based on particular group of tasks
+ * RALLY_TRENDS If enabled, generate Rally trends report. Requires external DB
+ * connection string to be set. If RALLY_TAGS was set, trends will
+ * be generated based on finished tasks with these tags, otherwise
+ * on all the finished tasks available in DB
* SKIP_LIST List of the Rally scenarios which should be skipped
*
+ * PARALLEL_PERFORMANCE If enabled, run Rally tests separately in parallel for each subdirectory found
+ * inside RALLY_SCENARIOS and RALLY_SL_SCENARIOS (if STACKLIGHT_RALLY is enabled)
*/
common = new com.mirantis.mk.Common()
-test = new com.mirantis.mk.Test()
validate = new com.mirantis.mcp.Validate()
-def python = new com.mirantis.mk.Python()
+salt = new com.mirantis.mk.Salt()
+salt_testing = new com.mirantis.mk.SaltModelTesting()
-def pepperEnv = "pepperEnv"
-def artifacts_dir = 'validation_artifacts/'
-if (env.JOB_TIMEOUT == ''){
- job_timeout = 12
-} else {
- job_timeout = env.JOB_TIMEOUT.toInteger()
+def VALIDATE_PARAMS = readYaml(text: env.getProperty('VALIDATE_PARAMS')) ?: [:]
+if (! VALIDATE_PARAMS) {
+ throw new Exception("VALIDATE_PARAMS yaml is empty.")
}
-timeout(time: job_timeout, unit: 'HOURS') {
- node() {
- try{
- stage('Setup virtualenv for Pepper') {
- python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
- }
+def TEST_IMAGE = env.getProperty('TEST_IMAGE') ?: 'xrally-openstack:1.4.0'
+def JOB_TIMEOUT = env.getProperty('JOB_TIMEOUT').toInteger() ?: 12
+def SLAVE_NODE = env.getProperty('SLAVE_NODE') ?: 'docker'
+def rally = VALIDATE_PARAMS.get('rally') ?: [:]
+def scenariosRepo = rally.get('RALLY_CONFIG_REPO') ?: 'https://review.gerrithub.io/Mirantis/scale-scenarios'
+def scenariosBranch = rally.get('RALLY_CONFIG_BRANCH') ?: 'master'
+def pluginsRepo = rally.get('RALLY_PLUGINS_REPO') ?: 'https://github.com/Mirantis/rally-plugins'
+def pluginsBranch = rally.get('RALLY_PLUGINS_BRANCH') ?: 'master'
+def tags = rally.get('RALLY_TAGS') ?: []
- stage('Configure') {
- validate.installDocker(pepperEnv, TARGET_NODE)
- if (ACCUMULATE_RESULTS.toBoolean() == false) {
- sh "rm -r ${artifacts_dir}"
+// container working dir vars
+def rallyWorkdir = '/home/rally'
+def rallyPluginsDir = "${rallyWorkdir}/rally-plugins"
+def rallyScenariosDir = "${rallyWorkdir}/rally-scenarios"
+def rallyResultsDir = "${rallyWorkdir}/test_results"
+def rallySecrets = "${rallyWorkdir}/secrets"
+
+// env vars
+def env_vars = []
+def platform = [
+ type: 'unknown',
+ stacklight: [enabled: false, grafanaPass: ''],
+]
+def cmp_count
+
+// test results vars
+def testResult
+def tasksParallel = [:]
+def parallelResults = [:]
+def configRun = [:]
+
+timeout(time: JOB_TIMEOUT, unit: 'HOURS') {
+ node (SLAVE_NODE) {
+
+ // local dir vars
+ def workDir = "${env.WORKSPACE}/rally"
+ def pluginsDir = "${workDir}/rally-plugins"
+ def scenariosDir = "${workDir}/rally-scenarios"
+ def secrets = "${workDir}/secrets"
+ def artifacts = "${workDir}/validation_artifacts"
+
+ stage('Configure env') {
+
+ def master = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+
+ // create local directories
+ sh "rm -rf ${workDir} || true"
+ sh "mkdir -p ${artifacts} ${secrets}"
+ writeFile file: "${workDir}/entrypoint.sh", text: '''#!/bin/bash
+set -xe
+exec "$@"
+'''
+ sh "chmod 755 ${workDir}/entrypoint.sh"
+
+ // clone repo with Rally plugins and checkout refs/branch
+ checkout([
+ $class : 'GitSCM',
+ branches : [[name: 'FETCH_HEAD']],
+ extensions : [[$class: 'RelativeTargetDirectory', relativeTargetDir: pluginsDir]],
+ userRemoteConfigs: [[url: pluginsRepo, refspec: pluginsBranch]],
+ ])
+
+ // clone scenarios repo and switch branch / fetch refspecs
+ checkout([
+ $class : 'GitSCM',
+ branches : [[name: 'FETCH_HEAD']],
+ extensions : [[$class: 'RelativeTargetDirectory', relativeTargetDir: scenariosDir]],
+ userRemoteConfigs: [[url: scenariosRepo, refspec: scenariosBranch]],
+ ])
+
+ // get number of computes in the cluster
+ platform['cluster_name'] = salt.getPillar(
+ master, 'I@salt:master', '_param:cluster_name'
+ )['return'][0].values()[0]
+ def rcs_str_node = salt.getPillar(
+ master, 'I@salt:master', 'reclass:storage:node'
+ )['return'][0].values()[0]
+
+ // set up Openstack env variables
+ if (rally.get('K8S_RALLY').toBoolean() == false) {
+
+ platform['type'] = 'openstack'
+ platform['cmp_count'] = rcs_str_node.openstack_compute_rack01['repeat']['count']
+ def rally_variables = [
+ "floating_network=${rally.FLOATING_NETWORK}",
+ "rally_image=${rally.RALLY_IMAGE}",
+ "rally_flavor=${rally.RALLY_FLAVOR}",
+ "availability_zone=${rally.AVAILABILITY_ZONE}",
+ ]
+
+ env_vars = validate._get_keystone_creds_v3(master)
+ if (!env_vars) {
+ env_vars = validate._get_keystone_creds_v2(master)
}
- sh "mkdir -p ${artifacts_dir}"
+ env_vars = env_vars + rally_variables
+
+ } else {
+ // set up Kubernetes env variables and get required secrets
+ platform['type'] = 'k8s'
+ platform['cmp_count'] = rcs_str_node.kubernetes_compute_rack01['repeat']['count']
+
+ def kubernetes = salt.getPillar(
+ master, 'I@kubernetes:master and *01*', 'kubernetes:master'
+ )['return'][0].values()[0]
+
+ env_vars = [
+ "KUBERNETES_HOST=http://${kubernetes.apiserver.vip_address}" +
+ ":${kubernetes.apiserver.insecure_port}",
+ "KUBERNETES_CERT_AUTH=${rallySecrets}/k8s-ca.crt",
+ "KUBERNETES_CLIENT_KEY=${rallySecrets}/k8s-client.key",
+ "KUBERNETES_CLIENT_CERT=${rallySecrets}/k8s-client.crt",
+ ]
+
+ // get K8S certificates to manage cluster
+ def k8s_ca = salt.getFileContent(
+ master, 'I@kubernetes:master and *01*', '/etc/kubernetes/ssl/ca-kubernetes.crt'
+ )
+ def k8s_client_key = salt.getFileContent(
+ master, 'I@kubernetes:master and *01*', '/etc/kubernetes/ssl/kubelet-client.key'
+ )
+ def k8s_client_crt = salt.getFileContent(
+ master, 'I@kubernetes:master and *01*', '/etc/kubernetes/ssl/kubelet-client.crt'
+ )
+ writeFile file: "${secrets}/k8s-ca.crt", text: k8s_ca
+ writeFile file: "${secrets}/k8s-client.key", text: k8s_client_key
+ writeFile file: "${secrets}/k8s-client.crt", text: k8s_client_crt
+
}
- stage('Run Tempest tests') {
- if (RUN_TEMPEST_TESTS.toBoolean() == true) {
- validate.runTempestTests(pepperEnv, TARGET_NODE, TEST_IMAGE, artifacts_dir, TEMPEST_CONFIG_REPO, TEMPEST_CONFIG_BRANCH, TEMPEST_REPO, TEMPEST_VERSION, TEMPEST_TEST_SET)
- } else {
- common.infoMsg("Skipping Tempest tests")
+ // Stacklight enabled: mark it on the platform map and fetch the Grafana
+ // server password from pillar for the SL scenarios.
+ if (rally.STACKLIGHT_RALLY.toBoolean() == true) {
+ platform['stacklight']['enabled'] = true
+
+ def grafana = salt.getPillar(
+ master, 'I@grafana:client', 'grafana:client:server'
+ )['return'][0].values()[0]
+
+ platform['stacklight']['grafanaPass'] = grafana['password']
+ }
+
+ // Sequential mode: one container runs all scenarios.
+ if (! rally.PARALLEL_PERFORMANCE.toBoolean()) {
+
+ // Define map with docker commands
+ def commands = validate.runRallyTests(
+ platform, rally.RALLY_SCENARIOS,
+ rally.RALLY_SL_SCENARIOS, rally.RALLY_TASK_ARGS_FILE,
+ rally.RALLY_DB_CONN_STRING, tags,
+ rally.RALLY_TRENDS.toBoolean(), rally.SKIP_LIST
+ )
+ // Wrap each shell command string into a closure for setupDockerAndTest.
+ def commands_list = commands.collectEntries{ [ (it.key) : { sh("${it.value}") } ] }
+
+ // Assigned without `def` on purpose: configRun stays in the script
+ // binding so the later 'Run Rally tests' stage can reach it.
+ configRun = [
+ 'image': TEST_IMAGE,
+ 'baseRepoPreConfig': false,
+ 'dockerMaxCpus': 2,
+ 'dockerHostname': 'localhost',
+ 'dockerExtraOpts': [
+ "--network=host",
+ "--entrypoint=/entrypoint.sh",
+ "-w ${rallyWorkdir}",
+ "-v ${workDir}/entrypoint.sh:/entrypoint.sh",
+ "-v ${pluginsDir}/:${rallyPluginsDir}",
+ "-v ${scenariosDir}/:${rallyScenariosDir}",
+ "-v ${artifacts}/:${rallyResultsDir}",
+ "-v ${secrets}/:${rallySecrets}",
+ ],
+ 'envOpts' : env_vars,
+ 'runCommands' : commands_list,
+ ]
+ common.infoMsg('Docker config:')
+ println configRun
+ common.infoMsg('Docker commands list:')
+ println commands
+
+ } else {
+
+ // Perform parallel testing of the components with Rally:
+ // each first-level sub-directory of the scenario trees becomes one
+ // independently-run component.
+ def components = [
+ Common: [],
+ Stacklight: [],
+ ]
+
+ // get list of directories inside scenarios path
+ def scenPath = "${scenariosDir}/${rally.RALLY_SCENARIOS}"
+ def mainComponents = sh(
+ script: "find ${scenPath} -maxdepth 1 -mindepth 1 -type d -exec basename {} \\;",
+ returnStdout: true,
+ ).trim()
+ if (! mainComponents) {
+ error(
+ "No directories found inside RALLY_SCENARIOS ${rally.RALLY_SCENARIOS}\n" +
+ "Either set PARALLEL_PERFORMANCE=false or populate ${rally.RALLY_SCENARIOS} " +
+ "with component directories which include corresponding scenarios"
+ )
}
- }
+ components['Common'].addAll(mainComponents.split('\n'))
+ common.infoMsg( "Adding for parallel execution sub dirs found in " +
+ "RALLY_SCENARIOS (${rally.RALLY_SCENARIOS}):"
+ )
+ print mainComponents
- stage('Run Rally tests') {
- if (RUN_RALLY_TESTS.toBoolean() == true) {
- def report_dir = env.REPORT_DIR ?: '/root/qa_results'
- def platform = ["type":"unknown", "stacklight_enabled":false]
- def rally_variables = []
- if (K8S_RALLY.toBoolean() == false) {
- platform['type'] = 'openstack'
- rally_variables = ["floating_network=${FLOATING_NETWORK}",
- "rally_image=${RALLY_IMAGE}",
- "rally_flavor=${RALLY_FLAVOR}",
- "availability_zone=${AVAILABILITY_ZONE}"]
- } else {
- platform['type'] = 'k8s'
+ // Same discovery for the Stacklight scenario tree when enabled.
+ if (rally.STACKLIGHT_RALLY.toBoolean() == true) {
+ def slScenPath = "${scenariosDir}/${rally.RALLY_SL_SCENARIOS}"
+ def slComponents = sh(
+ script: "find ${slScenPath} -maxdepth 1 -mindepth 1 -type d -exec basename {} \\;",
+ returnStdout: true,
+ ).trim()
+ if (! slComponents) {
+ // NOTE(review): this message names RALLY_SCENARIOS but the check is
+ // against RALLY_SL_SCENARIOS — the text should say RALLY_SL_SCENARIOS.
+ error(
+ "No directories found inside RALLY_SCENARIOS ${rally.RALLY_SL_SCENARIOS}\n" +
+ "Either set PARALLEL_PERFORMANCE=false or populate ${rally.RALLY_SL_SCENARIOS} " +
+ "with component directories which include corresponding scenarios"
+ )
}
- if (STACKLIGHT_RALLY.toBoolean() == true) {
- platform['stacklight_enabled'] = true
+ components['Stacklight'].addAll(slComponents.split('\n'))
+ common.infoMsg( "Adding for parallel execution sub dirs found in " +
+ "RALLY_SL_SCENARIOS (${rally.RALLY_SL_SCENARIOS}):"
+ )
+ print slComponents
+ }
+
+ // build up a map with tasks for parallel execution
+ def allComponents = components.values().flatten()
+ for (int i=0; i < allComponents.size(); i++) {
+ // randomize run so we don't bump each other at the startup
+ // also we need to let first thread create rally deployment
+ // so all the rest rally threads can use it after
+ def sleepSeconds = 15 * i
+
+ def task = allComponents[i]
+ def task_name = 'rally_' + task
+ // Find which group (Common/Stacklight) this component belongs to.
+ def curComponent = components.find { task in it.value }.key
+ // inherit platform common data
+ // NOTE(review): Groovy map assignment copies the reference, not the map —
+ // the switch below mutates the shared `platform` object on every
+ // iteration; confirm a real copy is not needed here.
+ def curPlatform = platform
+
+ // setup scenarios and stacklight switch per component
+ def commonScens = "${rally.RALLY_SCENARIOS}/${task}"
+ def stacklightScens = "${rally.RALLY_SL_SCENARIOS}/${task}"
+
+ switch (curComponent) {
+ case 'Common':
+ stacklightScens = ''
+ curPlatform['stacklight']['enabled'] = false
+ break
+ case 'Stacklight':
+ commonScens = ''
+ curPlatform['stacklight']['enabled'] = true
+ break
}
- validate.runRallyTests(pepperEnv, TARGET_NODE, TEST_IMAGE, platform, artifacts_dir, RALLY_CONFIG_REPO, RALLY_CONFIG_BRANCH, RALLY_PLUGINS_REPO, RALLY_PLUGINS_BRANCH, RALLY_SCENARIOS, RALLY_SL_SCENARIOS, RALLY_TASK_ARGS_FILE, rally_variables, report_dir, SKIP_LIST)
- } else {
- common.infoMsg("Skipping Rally tests")
+
+ // Commands are computed eagerly here (per iteration), before the
+ // parallel closures below are scheduled.
+ def curCommands = validate.runRallyTests(
+ curPlatform, commonScens,
+ stacklightScens, rally.RALLY_TASK_ARGS_FILE,
+ rally.RALLY_DB_CONN_STRING, tags,
+ rally.RALLY_TRENDS.toBoolean(), rally.SKIP_LIST
+ )
+
+ // copy required files for the current task
+ def taskWorkDir = "${env.WORKSPACE}/rally_" + task
+ def taskPluginsDir = "${taskWorkDir}/rally-plugins"
+ def taskScenariosDir = "${taskWorkDir}/rally-scenarios"
+ def taskArtifacts = "${taskWorkDir}/validation_artifacts"
+ def taskSecrets = "${taskWorkDir}/secrets"
+ sh "rm -rf ${taskWorkDir} || true"
+ sh "cp -ra ${workDir} ${taskWorkDir}"
+
+ def curCommandsList = curCommands.collectEntries{ [ (it.key) : { sh("${it.value}") } ] }
+ def curConfigRun = [
+ 'image': TEST_IMAGE,
+ 'baseRepoPreConfig': false,
+ 'dockerMaxCpus': 2,
+ 'dockerHostname': 'localhost',
+ 'dockerExtraOpts': [
+ "--network=host",
+ "--entrypoint=/entrypoint.sh",
+ "-w ${rallyWorkdir}",
+ "-v ${taskWorkDir}/entrypoint.sh:/entrypoint.sh",
+ "-v ${taskPluginsDir}/:${rallyPluginsDir}",
+ "-v ${taskScenariosDir}/:${rallyScenariosDir}",
+ "-v ${taskArtifacts}/:${rallyResultsDir}",
+ "-v ${taskSecrets}/:${rallySecrets}",
+ ],
+ 'envOpts' : env_vars,
+ 'runCommands' : curCommandsList,
+ ]
+
+ // Register the closure; it only runs when `parallel tasksParallel`
+ // is invoked in the 'Run Rally tests' stage.
+ tasksParallel['rally_' + task] = {
+ sleep sleepSeconds
+ common.infoMsg("Docker config for task $task")
+ println curConfigRun
+ common.infoMsg("Docker commands list for task $task")
+ println curCommands
+ parallelResults[task_name] = salt_testing.setupDockerAndTest(curConfigRun)
+ }
}
}
+ }
- stage('Run SPT tests') {
- if (RUN_SPT_TESTS.toBoolean() == true) {
- def spt_variables = ["spt_ssh_user=${SPT_SSH_USER}",
- "spt_floating_network=${FLOATING_NETWORK}",
- "spt_image=${SPT_IMAGE}",
- "spt_user=${SPT_IMAGE_USER}",
- "spt_flavor=${SPT_FLAVOR}",
- "spt_availability_zone=${AVAILABILITY_ZONE}"]
- validate.runSptTests(pepperEnv, TARGET_NODE, TEST_IMAGE, artifacts_dir, spt_variables)
- } else {
- common.infoMsg("Skipping SPT tests")
+ stage('Run Rally tests') {
+
+ // Per-task docker exit status: 'OK' or 'FAILED'.
+ def dockerStatuses = [:]
+
+ // start tests in Docker
+ if (! rally.PARALLEL_PERFORMANCE.toBoolean()) {
+ testResult = salt_testing.setupDockerAndTest(configRun)
+ dockerStatuses['rally'] = (testResult) ? 'OK' : 'FAILED'
+ } else {
+ common.infoMsg('Jobs to run in threads: ' + tasksParallel.keySet().join(' '))
+ parallel tasksParallel
+ parallelResults.each { task ->
+ dockerStatuses[task.key] = (task.value) ? 'OK' : 'FAILED'
}
}
-
- stage('Run K8S bootstrap tests') {
- if (RUN_K8S_TESTS.toBoolean() == true) {
- def image = 'tomkukral/k8s-scripts'
- def output_file = 'k8s-bootstrap-tests.txt'
- def outfile = "/tmp/" + image.replaceAll('/', '-') + '.output'
- test.runConformanceTests(pepperEnv, TEST_K8S_NODE, TEST_K8S_API_SERVER, image)
-
- def file_content = validate.getFileContent(pepperEnv, TEST_K8S_NODE, outfile)
- writeFile file: "${artifacts_dir}${output_file}", text: file_content
- } else {
- common.infoMsg("Skipping k8s bootstrap tests")
+ // safely archiving all possible results
+ // (archiving is best-effort: a task may have produced nothing)
+ dockerStatuses.each { task ->
+ print "Collecting results for ${task.key} (docker status = '${task.value}')"
+ try {
+ archiveArtifacts artifacts: "${task.key}/validation_artifacts/*"
+ } catch (Throwable e) {
+ print 'failed to get artifacts'
}
}
+ // setting final job status:
+ // all tasks failed -> FAILURE; some failed -> UNSTABLE; else unchanged.
+ def failed = dockerStatuses.findAll { it.value == 'FAILED' }
+ if (failed.size() == dockerStatuses.size()) {
+ currentBuild.result = 'FAILURE'
+ } else if (dockerStatuses.find { it.value != 'OK' }) {
+ currentBuild.result = 'UNSTABLE'
+ }
+ }
- stage('Run K8S conformance e2e tests') {
- if (RUN_K8S_TESTS.toBoolean() == true) {
- def image = TEST_K8S_CONFORMANCE_IMAGE
- def output_file = 'report-k8s-e2e-tests.txt'
- def outfile = "/tmp/" + image.replaceAll('/', '-') + '.output'
- test.runConformanceTests(pepperEnv, TEST_K8S_NODE, TEST_K8S_API_SERVER, image)
-
- def file_content = validate.getFileContent(pepperEnv, TEST_K8S_NODE, outfile)
- writeFile file: "${artifacts_dir}${output_file}", text: file_content
- } else {
- common.infoMsg("Skipping k8s conformance e2e tests")
- }
- }
- stage('Generate report') {
- if (GENERATE_REPORT.toBoolean() == true) {
- common.infoMsg("Generating html test report ...")
- validate.generateTestReport(pepperEnv, TARGET_NODE, TEST_IMAGE, artifacts_dir)
- } else {
- common.infoMsg("Skipping report generation")
- }
- }
- stage('Collect results') {
- archiveArtifacts artifacts: "${artifacts_dir}/*"
- }
- } catch (Throwable e) {
- // If there was an error or exception thrown, the build failed
- currentBuild.result = "FAILURE"
- currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
- throw e
+ stage('Clean env') {
+ // remove secrets (the k8s certs written earlier) from the workspace;
+ // NOTE(review): the escaped \" around {} become literal quote characters
+ // in the rm argument — verify the secrets dirs are actually removed.
+ sh 'find ./ -type d -name secrets -exec rm -rf \\\"{}\\\" \\; || true'
+ }
}
}