Merge "Refactoring for using minion names instead of pillars and some standart functions instead of cmds Related-Prod: #PROD-30065 (PROD:30065) Related-Prod: #PROD-29949 (PROD:29949)"
diff --git a/ceph-remove-node.groovy b/ceph-remove-node.groovy
index e616a28..0fba6a0 100644
--- a/ceph-remove-node.groovy
+++ b/ceph-remove-node.groovy
@@ -90,6 +90,10 @@
stage('Remove Ceph RGW') {
salt.enforceState(pepperEnv, 'I@ceph:radosgw', ['keepalived', 'haproxy'], true)
}
+
+ stage('Purge Ceph RGW pkgs') {
+ salt.runSaltProcessStep(pepperEnv, HOST, 'pkg.purge', 'ceph-common,libcephfs2,python-cephfs,radosgw,python-rados,python-rbd,python-rgw')
+ }
}
if (HOST_TYPE.toLowerCase() != 'osd') {
@@ -222,7 +226,7 @@
// purge Ceph pkgs
stage('Purge Ceph OSD pkgs') {
- runCephCommand(pepperEnv, HOST, 'apt purge ceph-base ceph-common ceph-fuse ceph-mds ceph-osd python-cephfs librados2 python-rados -y')
+ salt.runSaltProcessStep(pepperEnv, HOST, 'pkg.purge', 'ceph-base,ceph-common,ceph-fuse,ceph-mds,ceph-osd,python-cephfs,librados2,python-rados,python-rbd,python-rgw')
}
stage('Remove OSD host from crushmap') {
@@ -294,6 +298,10 @@
salt.enforceState(pepperEnv, tgt, 'ceph.common', true)
}
}
+
+ stage('Purge Ceph MON pkgs') {
+ salt.runSaltProcessStep(pepperEnv, HOST, 'pkg.purge', 'ceph-base,ceph-common,ceph-mgr,ceph-mon,libcephfs2,python-cephfs,python-rbd,python-rgw')
+ }
}
if (HOST_TYPE.toLowerCase() == 'osd' && GENERATE_CRUSHMAP.toBoolean() == true) {
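Note: the purge stages above switch from shelling out to apt (via runCephCommand/cmdRun) to Salt's pkg.purge execution module, so removal is handled by the package provider on the targeted minion. A minimal sketch of the call shape, assuming the com.mirantis.mk.Salt library and a pepperEnv session initialized earlier in the job:

    // Hedged sketch: the comma-separated string becomes the argument of
    // pkg.purge on the targeted minion, roughly equivalent to:
    //   salt '<HOST>' pkg.purge ceph-common,libcephfs2,...
    def salt = new com.mirantis.mk.Salt()
    salt.runSaltProcessStep(pepperEnv, HOST, 'pkg.purge',
        'ceph-common,libcephfs2,python-cephfs,radosgw,python-rados,python-rbd,python-rgw')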
diff --git a/ceph-upgrade.groovy b/ceph-upgrade.groovy
index 86a1f0f..cc8a84d 100644
--- a/ceph-upgrade.groovy
+++ b/ceph-upgrade.groovy
@@ -16,6 +16,9 @@
* STAGE_UPGRADE_OSD Set to True if Ceph osd nodes upgrade is desired
* STAGE_UPGRADE_RGW Set to True if Ceph rgw nodes upgrade is desired
* STAGE_UPGRADE_CLIENT Set to True if Ceph client nodes upgrade is desired (includes for example ctl/cmp nodes)
+ * STAGE_FINALIZE Set to True if configs recommended for TARGET_RELEASE should be applied after the upgrade is done
+ * BACKUP_ENABLED Select to copy the disks of Ceph VMs before the upgrade and back up Ceph directories on OSD nodes
+ * BACKUP_DIR Select the target directory to back up to when BACKUP_ENABLED is set
*
*/
@@ -71,12 +74,12 @@
waitForHealthy(master)
try {
- salt.cmdRun(master, "${minionProvider}", "[ ! -f /root/${minion_name}.${domain}.qcow2.bak ] && virsh destroy ${minion_name}.${domain}")
+ salt.cmdRun(master, "${minionProvider}", "[ ! -f ${BACKUP_DIR}/${minion_name}.${domain}.qcow2.bak ] && virsh destroy ${minion_name}.${domain}")
} catch (Exception e) {
common.warningMsg('Backup already exists')
}
try {
- salt.cmdRun(master, "${minionProvider}", "[ ! -f /root/${minion_name}.${domain}.qcow2.bak ] && cp /var/lib/libvirt/images/${minion_name}.${domain}/system.qcow2 /root/${minion_name}.${domain}.qcow2.bak")
+ salt.cmdRun(master, "${minionProvider}", "[ ! -f ${BACKUP_DIR}/${minion_name}.${domain}.qcow2.bak ] && cp /var/lib/libvirt/images/${minion_name}.${domain}/system.qcow2 ${BACKUP_DIR}/${minion_name}.${domain}.qcow2.bak")
} catch (Exception e) {
common.warningMsg('Backup already exists')
}
@@ -129,10 +132,12 @@
stage("Verify services for ${minion}") {
sleep(10)
- runCephCommand(master, ADMIN_HOST, "ceph -s")
+ runCephCommand(master, "${minion}", "systemctl status ceph-${target}.target")
+ waitForHealthy(master)
}
stage('Ask for manual confirmation') {
+ runCephCommand(master, ADMIN_HOST, "ceph -s")
input message: "From the verification command above, please check Ceph ${target} joined the cluster correctly. If so, Do you want to continue to upgrade next node?"
}
}
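Note: the backup guard above now keys off a marker file under the configurable BACKUP_DIR instead of the hard-coded /root. A minimal sketch of how a job could default the new parameter when left empty (the defaulting itself is an assumption, not part of this change):

    // Hedged sketch, assuming the same env-with-fallback pattern used by
    // other parameters in these pipelines; '/root' mirrors the old value.
    def BACKUP_DIR = (env.BACKUP_DIR) ?: '/root'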
diff --git a/cvp-func.groovy b/cvp-func.groovy
index 0c657a5..80160ab 100644
--- a/cvp-func.groovy
+++ b/cvp-func.groovy
@@ -31,7 +31,15 @@
try{
stage('Initialization') {
sh "rm -rf ${artifacts_dir}"
+ if (!TARGET_NODE) {
+ // This pillar-based target will resolve to cid01
+ TARGET_NODE = "I@gerrit:client"
+ }
saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+ os_version=salt.getPillar(saltMaster, 'I@salt:master', '_param:openstack_version')['return'][0].values()[0]
+ if (!os_version) {
+ throw new Exception("Openstack is not found on this env. Exiting")
+ }
salt.cmdRun(saltMaster, TARGET_NODE, "rm -rf ${remote_artifacts_dir}")
salt.cmdRun(saltMaster, TARGET_NODE, "mkdir -p ${remote_artifacts_dir}")
keystone_creds = validate._get_keystone_creds_v3(saltMaster)
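Note: the new guard reads _param:openstack_version from the master's pillar and fails fast when it is empty. The chained indexing unpacks the salt-api response envelope; a sketch of that shape (minion id and value are hypothetical):

    // salt.getPillar returns roughly:
    //   [return: [['cfg01.cluster.local': 'queens']]]
    // so ['return'][0].values()[0] yields 'queens', or '' when unset.
    def resp = salt.getPillar(saltMaster, 'I@salt:master', '_param:openstack_version')
    def os_version = resp['return'][0].values()[0]
    if (!os_version) {
        throw new Exception("OpenStack is not found on this environment. Exiting")
    }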
diff --git a/cvp-ha.groovy b/cvp-ha.groovy
index b33cda6..e933984 100644
--- a/cvp-ha.groovy
+++ b/cvp-ha.groovy
@@ -37,7 +37,15 @@
try {
stage('Initialization') {
sh "rm -rf ${artifacts_dir}"
+ if (!TEMPEST_TARGET_NODE) {
+ // This pillar-based target will resolve to cid01
+ TEMPEST_TARGET_NODE = "I@gerrit:client"
+ }
saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+ os_version=salt.getPillar(saltMaster, 'I@salt:master', '_param:openstack_version')['return'][0].values()[0]
+ if (!os_version) {
+ throw new Exception("Openstack is not found on this env. Exiting")
+ }
salt.cmdRun(saltMaster, TEMPEST_TARGET_NODE, "rm -rf ${remote_artifacts_dir}")
salt.cmdRun(saltMaster, TEMPEST_TARGET_NODE, "mkdir -p ${remote_artifacts_dir}")
keystone_creds = validate._get_keystone_creds_v3(saltMaster)
diff --git a/cvp-perf.groovy b/cvp-perf.groovy
index 74c9a63..ebb7987 100644
--- a/cvp-perf.groovy
+++ b/cvp-perf.groovy
@@ -27,20 +27,29 @@
try{
stage('Initialization') {
sh "rm -rf ${artifacts_dir}"
+ if (!TARGET_NODE) {
+ // This pillar-based target will resolve to cid01
+ TARGET_NODE = "I@gerrit:client"
+ }
saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+ os_version=salt.getPillar(saltMaster, 'I@salt:master', '_param:openstack_version')['return'][0].values()[0]
+ if (!os_version) {
+ throw new Exception("Openstack is not found on this env. Exiting")
+ }
+ container_name = "${env.JOB_NAME}"
salt.cmdRun(saltMaster, TARGET_NODE, "rm -rf ${remote_artifacts_dir}")
salt.cmdRun(saltMaster, TARGET_NODE, "mkdir -p ${remote_artifacts_dir}")
keystone_creds = validate._get_keystone_creds_v3(saltMaster)
if (!keystone_creds) {
keystone_creds = validate._get_keystone_creds_v2(saltMaster)
}
- validate.runContainer(saltMaster, TARGET_NODE, TEST_IMAGE, 'cvp', keystone_creds)
- validate.configureContainer(saltMaster, TARGET_NODE, PROXY, TOOLS_REPO, "")
+ validate.runContainer(saltMaster, TARGET_NODE, TEST_IMAGE, container_name, keystone_creds)
+ validate.configureContainer(saltMaster, TARGET_NODE, PROXY, TOOLS_REPO, "", "internalURL", "", "", [], container_name)
}
stage('Run Rally tests') {
sh "mkdir -p ${artifacts_dir}"
- validate.runCVPrally(saltMaster, TARGET_NODE, RALLY_SCENARIO_FILE, remote_artifacts_dir)
+ validate.runCVPrally(saltMaster, TARGET_NODE, RALLY_SCENARIO_FILE, remote_artifacts_dir, "docker-rally", container_name)
}
stage('Collect results') {
@@ -55,7 +64,8 @@
throw e
} finally {
if (DEBUG_MODE == 'false') {
- validate.runCleanup(saltMaster, TARGET_NODE)
+ validate.openstack_cleanup(saltMaster, TARGET_NODE, container_name)
+ validate.runCleanup(saltMaster, TARGET_NODE, container_name)
salt.cmdRun(saltMaster, TARGET_NODE, "rm -rf ${remote_artifacts_dir}")
}
}
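Note: the fixed container name 'cvp' is replaced by the job name, so concurrent CVP jobs on one target node do not collide on a shared container; openstack_cleanup and runCleanup then tear things down by that same name. Sketch (assumption: env.JOB_NAME contains no '/', since folder-style job names are not valid docker container names):

    def container_name = "${env.JOB_NAME}"
    // all later docker/rally/cleanup calls address this container by name
    validate.runContainer(saltMaster, TARGET_NODE, TEST_IMAGE, container_name, keystone_creds)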
diff --git a/cvp-runner.groovy b/cvp-runner.groovy
index 1b1d5e0..bd2862a 100644
--- a/cvp-runner.groovy
+++ b/cvp-runner.groovy
@@ -23,7 +23,6 @@
/*
YAML example
=====
-
# commands is a map of commands which looks like step_name: shell_command
commands:
001_prepare: rm /var/lib/g.txt
@@ -35,19 +34,19 @@
- SALT_USERNAME=admin
- SALT_PASSWORD=password
- drivetrain_version=testing
-
*/
node (SLAVE_NODE) {
def artifacts_dir = 'validation_artifacts'
+ def test_suite_name = "${env.JOB_NAME}"
+ def xml_file = "${test_suite_name}_report.xml"
+
def configRun = [:]
try {
withEnv(env_vars) {
stage('Initialization') {
def container_workdir = '/var/lib'
- def test_suite_name = "${env.JOB_NAME}"
def workdir = "${container_workdir}/${test_suite_name}"
- def xml_file = "${test_suite_name}_report.xml"
def tests_set = (env.getProperty('tests_set')) ?: ''
def script = "pytest --junitxml ${container_workdir}/${artifacts_dir}/${xml_file} --tb=short -vv ${tests_set}"
@@ -62,17 +61,20 @@
def env_vars_list = [
"SALT_USERNAME=${creds.username}",
"SALT_PASSWORD=${creds.password}",
- "SALT_URL=${SALT_MASTER_URL}"
+ "SALT_URL=${SALT_MASTER_URL}",
+ "REQUESTS_CA_BUNDLE=/etc/ssl/certs/ca-certificates.crt"
] + env_vars
// Generating final config
configRun = [
'image': IMAGE,
+ 'dockerPull': false,
'baseRepoPreConfig': false,
'dockerMaxCpus': 2,
'dockerExtraOpts' : [
"--network=host",
"-v /root/qa_results/:/root/qa_results/",
+ "-v /etc/ssl/certs/:/etc/ssl/certs/:ro",
"-v ${env.WORKSPACE}/${artifacts_dir}/:${container_workdir}/${artifacts_dir}/",
],
'envOpts' : env_vars_list,
@@ -93,28 +95,28 @@
style: 'line',
title: 'SPT Glance results',
xmlSeries: [[
- file: "${env.JOB_NAME}_report.xml",
+ file: "${artifacts_dir}/${xml_file}",
nodeType: 'NODESET',
url: '',
- xpath: '/testsuite/testcase[@name="test_speed_glance"]/properties/property']]
+ xpath: '/testsuite/testcase[@classname="tests.test_glance"]/properties/property']]
plot csvFileName: 'plot-hw2hw.csv',
group: 'SPT',
style: 'line',
title: 'SPT HW2HW results',
xmlSeries: [[
- file: "${env.JOB_NAME}_report.xml",
+ file: "${artifacts_dir}/${xml_file}",
nodeType: 'NODESET',
url: '',
- xpath: '/testsuite/testcase[@classname="cvp_spt.tests.test_hw2hw"]/properties/property']]
+ xpath: '/testsuite/testcase[@classname="tests.test_hw2hw"]/properties/property']]
plot csvFileName: 'plot-vm2vm.csv',
group: 'SPT',
style: 'line',
title: 'SPT VM2VM results',
xmlSeries: [[
- file: "${env.JOB_NAME}_report.xml",
+ file: "${artifacts_dir}/${xml_file}",
nodeType: 'NODESET',
url: '',
- xpath: '/testsuite/testcase[@classname="cvp_spt.tests.test_vm2vm"]/properties/property']]
+ xpath: '/testsuite/testcase[@classname="tests.test_vm2vm"]/properties/property']]
}
}
}
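Note: the plot series now read the report from the archived artifacts directory and select test cases by classname rather than test name. A sketch of the JUnit XML shape the XPath assumes (produced by pytest --junitxml; property and value names are hypothetical):

    // <testsuite>
    //   <testcase classname="tests.test_glance" name="test_speed_glance">
    //     <properties><property name="speed" value="..."/></properties>
    //   </testcase>
    // </testsuite>
    // '/testsuite/testcase[@classname="tests.test_glance"]/properties/property'
    // then selects every recorded property of that test for the plot.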
diff --git a/cvp-shaker.groovy b/cvp-shaker.groovy
index 08f9315..1f04bc1 100644
--- a/cvp-shaker.groovy
+++ b/cvp-shaker.groovy
@@ -154,9 +154,11 @@
}
stage('Run Shaker tests') {
- if (! salt_testing.setupDockerAndTest(configRun)) {
- common.warningMsg('Docker contrainer failed to run Shaker')
- currentBuild.result = 'FAILURE'
+ timeout(time: 10, unit: 'HOURS') {
+ if (! salt_testing.setupDockerAndTest(configRun)) {
+ common.warningMsg('Docker container failed to run Shaker')
+ currentBuild.result = 'FAILURE'
+ }
}
}
diff --git a/cvp-tempest.groovy b/cvp-tempest.groovy
index c6fca0a..d8087b3 100644
--- a/cvp-tempest.groovy
+++ b/cvp-tempest.groovy
@@ -19,6 +19,8 @@
* TEMPEST_ENDPOINT_TYPE Type of OS endpoint to use during test run (not in use right now)
* concurrency Number of threads to use for Tempest test run
* remote_artifacts_dir Folder to use for artifacts on remote node
+ * runtest_tempest_cfg_dir Folder to use to generate and store tempest.conf
+ * runtest_tempest_cfg_name Tempest config name
* report_prefix Some prefix to put to report name
*
*/
@@ -33,103 +35,115 @@
if (extraYamlContext) {
common.mergeEnv(env, extraYamlContext) }
def SALT_MASTER_CREDENTIALS=(env.SALT_MASTER_CREDENTIALS) ?: 'salt'
-def VERBOSE = (env.VERBOSE) ?: true
+def VERBOSE = (env.VERBOSE) ? env.VERBOSE.toBoolean() : true
def DEBUG_MODE = (env.DEBUG_MODE) ?: false
-def STOP_ON_ERROR = (env.STOP_ON_ERROR) ?: false
+def STOP_ON_ERROR = (env.STOP_ON_ERROR) ? env.STOP_ON_ERROR.toBoolean() : false
def GENERATE_CONFIG = (env.GENERATE_CONFIG) ?: true
+// do not change unless you know what you're doing
def remote_artifacts_dir = (env.remote_artifacts_dir) ?: '/root/test/'
def report_prefix = (env.report_prefix) ?: ''
def args = ''
+def mounts = [:]
node() {
- try{
- stage('Initialization') {
- deleteDir()
- saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
- cluster_name=salt.getPillar(saltMaster, 'I@salt:master', '_param:cluster_name')['return'][0].values()[0]
- os_version=salt.getPillar(saltMaster, 'I@salt:master', '_param:openstack_version')['return'][0].values()[0]
- if (os_version == '') {
- throw new Exception("Openstack is not found on this env. Exiting")
- }
- TEST_IMAGE = (env.TEST_IMAGE) ?: "docker-prod-virtual.docker.mirantis.net/mirantis/cicd/ci-tempest:${os_version}"
- runtest_node = salt.runSaltProcessStep(saltMaster, 'I@runtest:*', 'test.ping')['return'][0]
- if (runtest_node.values()[0]) {
- // Let's use Service node that was defined in reclass. If several nodes are defined
- // we will use the first from salt output
- common.infoMsg("Service node ${runtest_node.keySet()[0]} is defined in reclass")
- SERVICE_NODE = runtest_node.keySet()[0]
- }
- else {
- common.infoMsg("Service node is not defined in reclass")
- SERVICE_NODE = (env.SERVICE_NODE) ?: 'I@salt:master'
- common.infoMsg("${SERVICE_NODE} will be used as Service node")
- def classes_to_add = ["cluster.${cluster_name}.infra.runtest"]
- fullnodename = salt.getMinions(saltMaster, SERVICE_NODE).get(0)
- common.infoMsg("Full service node name ${fullnodename}")
+ stage('Initialization') {
+ deleteDir()
+ saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+ container_name = "${env.JOB_NAME}"
+ cluster_name=salt.getPillar(saltMaster, 'I@salt:master', '_param:cluster_name')['return'][0].values()[0]
+ os_version=salt.getPillar(saltMaster, 'I@salt:master', '_param:openstack_version')['return'][0].values()[0]
+ if (!os_version) {
+ throw new Exception("Openstack is not found on this env. Exiting")
+ }
+ TEST_IMAGE = (env.TEST_IMAGE) ?: "docker-prod-virtual.docker.mirantis.net/mirantis/cicd/ci-tempest:${os_version}"
+ runtest_node = salt.runSaltProcessStep(saltMaster, 'I@runtest:*', 'test.ping')['return'][0]
+ if (runtest_node.values()[0]) {
+ // Let's use the Service node defined in reclass. If several nodes are defined,
+ // we will use the first one from the salt output
+ common.infoMsg("Service node ${runtest_node.keySet()[0]} is defined in reclass")
+ SERVICE_NODE = runtest_node.keySet()[0]
+ }
+ else {
+ throw new Exception("Runtest config is not found in reclass. Please create runtest.yml and include it " +
+ "into reclass. Check documentation for more details")
+ }
+ common.infoMsg('Refreshing pillars on service node')
+ salt.runSaltProcessStep(saltMaster, SERVICE_NODE, 'saltutil.refresh_pillar', [], null, VERBOSE)
+ // default node is cid01 (preferably) or cfg01
+ default_node=salt.getPillar(saltMaster, 'I@salt:master', '_param:cicd_control_node01_hostname')['return'][0].values()[0] ?: 'cfg01'
+ // fetch tempest_test_target from runtest.yaml, otherwise fall back to default_node
+ tempest_node=salt.getPillar(saltMaster, SERVICE_NODE, '_param:tempest_test_target')['return'][0].values()[0] ?: default_node+'*'
+ // TARGET_NODE will always override any settings above
+ TARGET_NODE = (env.TARGET_NODE) ?: tempest_node
+ // default is /root/test/
+ runtest_tempest_cfg_dir = (env.runtest_tempest_cfg_dir) ?: salt.getPillar(saltMaster, SERVICE_NODE, '_param:runtest_tempest_cfg_dir')['return'][0].values()[0]
+ // default is tempest_generated.conf
+ runtest_tempest_cfg_name = (env.runtest_tempest_cfg_name) ?: salt.getPillar(saltMaster, SERVICE_NODE, '_param:runtest_tempest_cfg_name')['return'][0].values()[0]
+ common.infoMsg("runtest_tempest_cfg is ${runtest_tempest_cfg_dir}/${runtest_tempest_cfg_name}")
+ }
+ stage('Preparing resources') {
+ if ( PREPARE_RESOURCES.toBoolean() ) {
+ common.infoMsg('Running salt.minion state on service node')
+ salt.enforceState(saltMaster, SERVICE_NODE, ['salt.minion'], VERBOSE, STOP_ON_ERROR, null, false, 300, 2, true, [], 60)
+ common.infoMsg('Running keystone.client on service node')
+ salt.enforceState(saltMaster, SERVICE_NODE, 'keystone.client', VERBOSE, STOP_ON_ERROR)
+ common.infoMsg('Running glance.client on service node')
+ salt.enforceState(saltMaster, SERVICE_NODE, 'glance.client', VERBOSE, STOP_ON_ERROR)
+ common.infoMsg('Running nova.client on service node')
+ salt.enforceState(saltMaster, SERVICE_NODE, 'nova.client', VERBOSE, STOP_ON_ERROR)
+ }
+ else {
+ common.infoMsg('Skipping resources preparation')
+ }
+ }
+ stage('Generate config') {
+ if ( GENERATE_CONFIG.toBoolean() ) {
+ salt.runSaltProcessStep(saltMaster, SERVICE_NODE, 'file.remove', ["${runtest_tempest_cfg_dir}"])
+ salt.runSaltProcessStep(saltMaster, SERVICE_NODE, 'file.mkdir', ["${runtest_tempest_cfg_dir}"])
+ fullnodename = salt.getMinions(saltMaster, SERVICE_NODE).get(0)
+ if (TARGET_NODE != tempest_node) {
+ common.infoMsg("TARGET_NODE is defined in Jenkins")
+ def params_to_update = ['tempest_test_target': "${TARGET_NODE}"]
+ common.infoMsg("Overriding default ${tempest_node} value of tempest_test_target parameter")
result = salt.runSaltCommand(saltMaster, 'local', ['expression': SERVICE_NODE, 'type': 'compound'], 'reclass.node_update',
- null, null, ['name': fullnodename, 'classes': classes_to_add])
+ null, null, ['name': fullnodename, 'parameters': ['tempest_test_target': "${TARGET_NODE}"]])
salt.checkResult(result)
}
- common.infoMsg('Refreshing pillars on service node')
- salt.runSaltProcessStep(saltMaster, SERVICE_NODE, 'saltutil.refresh_pillar', [], null, VERBOSE)
- tempest_node=salt.getPillar(saltMaster, SERVICE_NODE, '_param:tempest_test_target')['return'][0].values()[0] ?: 'I@gerrit:client'
- }
- stage('Preparing resources') {
- if ( PREPARE_RESOURCES.toBoolean() ) {
- common.infoMsg('Running salt.minion state on service node')
- salt.enforceState(saltMaster, SERVICE_NODE, ['salt.minion'], VERBOSE, STOP_ON_ERROR, null, false, 300, 2, true, [], 60)
- common.infoMsg('Running keystone.client on service node')
- salt.enforceState(saltMaster, SERVICE_NODE, 'keystone.client', VERBOSE, STOP_ON_ERROR)
- common.infoMsg('Running glance.client on service node')
- salt.enforceState(saltMaster, SERVICE_NODE, 'glance.client', VERBOSE, STOP_ON_ERROR)
- common.infoMsg('Running nova.client on service node')
- salt.enforceState(saltMaster, SERVICE_NODE, 'nova.client', VERBOSE, STOP_ON_ERROR)
- }
- else {
- common.infoMsg('Skipping resources preparation')
- }
- }
- stage('Generate config') {
- if ( GENERATE_CONFIG ) {
- salt.runSaltProcessStep(saltMaster, SERVICE_NODE, 'file.remove', ["${remote_artifacts_dir}"])
- salt.runSaltProcessStep(saltMaster, SERVICE_NODE, 'file.mkdir', ["${remote_artifacts_dir}"])
- fullnodename = salt.getMinions(saltMaster, SERVICE_NODE).get(0)
- TARGET_NODE = (env.TARGET_NODE) ?: tempest_node
- if (TARGET_NODE != tempest_node) {
- common.infoMsg("TARGET_NODE is defined in Jenkins")
- def params_to_update = ['tempest_test_target': "${TARGET_NODE}"]
- common.infoMsg("Overriding default ${tempest_node} value of tempest_test_target parameter")
- result = salt.runSaltCommand(saltMaster, 'local', ['expression': SERVICE_NODE, 'type': 'compound'], 'reclass.node_update',
- null, null, ['name': fullnodename, 'parameters': ['tempest_test_target': "${TARGET_NODE}"]])
- salt.checkResult(result)
- }
- common.infoMsg("TARGET_NODE is ${TARGET_NODE}")
- salt.runSaltProcessStep(saltMaster, TARGET_NODE, 'file.remove', ["${remote_artifacts_dir}"])
- salt.runSaltProcessStep(saltMaster, TARGET_NODE, 'file.mkdir', ["${remote_artifacts_dir}"])
+ common.infoMsg("TARGET_NODE is ${TARGET_NODE}")
+ salt.runSaltProcessStep(saltMaster, TARGET_NODE, 'file.remove', ["${remote_artifacts_dir}"])
+ salt.runSaltProcessStep(saltMaster, TARGET_NODE, 'file.mkdir', ["${remote_artifacts_dir}"])
+ // runtest state hangs if tempest_test_target is cfg01*
+ // let's run runtest.generate_tempest_config only for this case
+ if (TARGET_NODE == 'cfg01*') {
+ common.warningMsg("It is not recommended to run Tempest container on cfg node, but.. proceeding")
+ salt.enforceState(saltMaster, SERVICE_NODE, 'runtest.generate_tempest_config', VERBOSE, STOP_ON_ERROR)
+ } else {
salt.enforceState(saltMaster, SERVICE_NODE, 'runtest', VERBOSE, STOP_ON_ERROR)
- // we need to refresh pillars on target node after runtest state
- salt.runSaltProcessStep(saltMaster, TARGET_NODE, 'saltutil.refresh_pillar', [], null, VERBOSE)
- if (TARGET_NODE != tempest_node) {
- common.infoMsg("Reverting tempest_test_target parameter")
- result = salt.runSaltCommand(saltMaster, 'local', ['expression': SERVICE_NODE, 'type': 'compound'], 'reclass.node_update',
- null, null, ['name': fullnodename, 'parameters': ['tempest_test_target': "${tempest_node}"]])
- }
- SKIP_LIST_PATH = (env.SKIP_LIST_PATH) ?: salt.getPillar(saltMaster, SERVICE_NODE, '_param:tempest_skip_list_path')['return'][0].values()[0]
- runtest_tempest_cfg_dir = salt.getPillar(saltMaster, SERVICE_NODE, '_param:runtest_tempest_cfg_dir')['return'][0].values()[0] ?: '/root/test/'
- if (SKIP_LIST_PATH) {
- salt.cmdRun(saltMaster, SERVICE_NODE, "salt-cp ${TARGET_NODE} ${SKIP_LIST_PATH} ${runtest_tempest_cfg_dir}/skip.list")
- args += ' --blacklist-file /root/tempest/skip.list '
- }
}
- else {
- common.infoMsg('Skipping Tempest config generation')
- salt.cmdRun(saltMaster, TARGET_NODE, "rm -rf ${remote_artifacts_dir}/reports")
+ // we need to refresh pillars on target node after runtest state
+ salt.runSaltProcessStep(saltMaster, TARGET_NODE, 'saltutil.refresh_pillar', [], null, VERBOSE)
+ if (TARGET_NODE != tempest_node) {
+ common.infoMsg("Reverting tempest_test_target parameter")
+ result = salt.runSaltCommand(saltMaster, 'local', ['expression': SERVICE_NODE, 'type': 'compound'], 'reclass.node_update',
+ null, null, ['name': fullnodename, 'parameters': ['tempest_test_target': "${tempest_node}"]])
+ }
+ SKIP_LIST_PATH = (env.SKIP_LIST_PATH) ?: salt.getPillar(saltMaster, SERVICE_NODE, '_param:tempest_skip_list_path')['return'][0].values()[0]
+ if (SKIP_LIST_PATH) {
+ mounts = ["${runtest_tempest_cfg_dir}/skip.list": "/root/tempest/skip.list"]
+ salt.cmdRun(saltMaster, SERVICE_NODE, "salt-cp ${TARGET_NODE} ${SKIP_LIST_PATH} ${runtest_tempest_cfg_dir}/skip.list")
+ args += ' --blacklist-file /root/tempest/skip.list '
}
}
+ else {
+ common.infoMsg('Skipping Tempest config generation')
+ salt.cmdRun(saltMaster, TARGET_NODE, "rm -rf ${remote_artifacts_dir}/reports")
+ }
+ }
+ try{
stage('Run Tempest tests') {
- // parameters: master, target, dockerImageLink, name, env_var, entrypoint, tempestConfLocalPath
- validate.runContainer(saltMaster, TARGET_NODE, TEST_IMAGE, 'cvp', [], true,
- '/root/test/tempest_generated.conf')
+ mounts = mounts + ["${runtest_tempest_cfg_dir}/${runtest_tempest_cfg_name}": "/etc/tempest/tempest.conf"]
+ validate.runContainer(master: saltMaster, target: TARGET_NODE, dockerImageLink: TEST_IMAGE,
+ mounts: mounts, name: container_name)
report_prefix += 'tempest_'
if (env.concurrency) {
args += ' -w ' + env.concurrency
@@ -141,10 +155,10 @@
else {
if (TEMPEST_TEST_PATTERN != 'set=full') {
args += " -r ${TEMPEST_TEST_PATTERN} "
- report_prefix += 'full'
+ report_prefix += 'custom'
}
}
- salt.cmdRun(saltMaster, TARGET_NODE, "docker exec -e ARGS=\'${args}\' cvp /bin/bash -c 'run-tempest'")
+ salt.cmdRun(saltMaster, TARGET_NODE, "docker exec -e ARGS=\'${args}\' ${container_name} /bin/bash -c 'run-tempest'")
}
stage('Collect results') {
report_prefix += "_report_${env.BUILD_NUMBER}"
@@ -156,13 +170,9 @@
archiveArtifacts artifacts: "${report_prefix}.*"
junit "${report_prefix}.xml"
}
- } catch (Throwable e) {
- // If there was an error or exception thrown, the build failed
- currentBuild.result = "FAILURE"
- throw e
} finally {
- if (DEBUG_MODE == 'false') {
- validate.runCleanup(saltMaster, TARGET_NODE)
+ if ( ! DEBUG_MODE.toBoolean() ) {
+ validate.runCleanup(saltMaster, TARGET_NODE, container_name)
}
}
}
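Note: runContainer is now invoked with named arguments and a mounts map that carries the generated tempest.conf (and, when present, the skip list) into the container. Sketch of the assumed contract, with keys as host paths and values as container paths:

    def mounts = [
        "${runtest_tempest_cfg_dir}/skip.list": '/root/tempest/skip.list',
        "${runtest_tempest_cfg_dir}/${runtest_tempest_cfg_name}": '/etc/tempest/tempest.conf',
    ]
    validate.runContainer(master: saltMaster, target: TARGET_NODE,
        dockerImageLink: TEST_IMAGE, mounts: mounts, name: container_name)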
diff --git a/docker-mirror-images.groovy b/docker-mirror-images.groovy
index dca2462..7ec1092 100644
--- a/docker-mirror-images.groovy
+++ b/docker-mirror-images.groovy
@@ -71,7 +71,7 @@
try {
allowedGroups = ['release-engineering']
releaseTags = ['proposed', 'release', '2018', '2019', '2020']
- tags = [env.SOURCE_IMAGE_TAG, env.IMAGE_TAG]
+ tags = [env.IMAGE_TAG]
tagInRelease = tags.any { tag -> releaseTags.any { tag.contains(it) } }
if (tagInRelease) {
if (!jenkinsUtils.currentUserInGroups(allowedGroups)) {
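Note: dropping env.SOURCE_IMAGE_TAG means only the destination tag now gates on the release list. A worked example of the nested check (tag values are hypothetical):

    def releaseTags = ['proposed', 'release', '2018', '2019', '2020']
    // '2019.2.6' contains '2019', so pushing it requires release-engineering
    assert ['2019.2.6'].any { tag -> releaseTags.any { tag.contains(it) } }
    assert !['testing'].any { tag -> releaseTags.any { tag.contains(it) } }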
diff --git a/gating-pipeline.groovy b/gating-pipeline.groovy
index e7887f9..3313d48 100644
--- a/gating-pipeline.groovy
+++ b/gating-pipeline.groovy
@@ -31,7 +31,6 @@
giveVerify = true
}
-
timeout(time: 12, unit: 'HOURS') {
node(slaveNode) {
try {
@@ -42,59 +41,60 @@
def gerritChange = gerrit.getGerritChange(GERRIT_NAME, GERRIT_HOST, GERRIT_CHANGE_NUMBER, CREDENTIALS_ID, true)
def doSubmit = false
def skipProjectsVerify = ['mk/docker-jnlp-slave']
+
stage("test") {
- if (gerritChange.status != "MERGED" && !env.SKIP_TEST.toBoolean()) {
- // test max CodeReview
- if (gerrit.patchsetHasApproval(gerritChange.currentPatchSet, "Code-Review", "+")) {
- doSubmit = true
- def gerritProjectArray = GERRIT_PROJECT.tokenize("/")
- def gerritProject = gerritProjectArray[gerritProjectArray.size() - 1]
- if (gerritProject in skipProjectsVerify) {
- common.successMsg("Project ${gerritProject} doesn't require verify, skipping...")
- giveVerify = true
+ //notification about Start job
+ ssh.agentSh(String.format("ssh -p %s %s@%s gerrit review %s,%s -m \"'Build Started %s'\"", defGerritPort, GERRIT_NAME, GERRIT_HOST, GERRIT_CHANGE_NUMBER, GERRIT_PATCHSET_NUMBER, BUILD_URL))
+ //check Code-Review
+ if (!gerrit.patchsetHasApproval(gerritChange.currentPatchSet, "Code-Review", "+")) {
+ throw new Exception('Change doesn\'t have Code-Review+1, rejecting gate')
+ }
+ //check Verify
+ if (!gerrit.patchsetHasApproval(gerritChange.currentPatchSet, "Verified", "+")) {
+ throw new Exception('Change doesn\'t have initial Verified+1, rejecting gate')
+ } else if (gerritChange.status != "MERGED" && !env.SKIP_TEST.toBoolean()) {
+ //Verify-label off
+ ssh.agentSh(String.format("ssh -p %s %s@%s gerrit review %s,%s --verified 0", defGerritPort, GERRIT_NAME, GERRIT_HOST, GERRIT_CHANGE_NUMBER, GERRIT_PATCHSET_NUMBER))
+ //Do stage (test)
+ doSubmit = true
+ def gerritProjectArray = GERRIT_PROJECT.tokenize("/")
+ def gerritProject = gerritProjectArray[gerritProjectArray.size() - 1]
+ if (gerritProject in skipProjectsVerify) {
+ common.successMsg("Project ${gerritProject} doesn't require verify, skipping...")
+ giveVerify = true
+ } else {
+ def jobsNamespace = JOBS_NAMESPACE
+ def plural_namespaces = ['salt-formulas', 'salt-models']
+ // remove the plural 's' at the end of the job namespace
+ if (JOBS_NAMESPACE in plural_namespaces) {
+ jobsNamespace = JOBS_NAMESPACE.substring(0, JOBS_NAMESPACE.length() - 1)
+ }
+ // salt-formulas tests have -latest at the end of the name
+ if (JOBS_NAMESPACE.equals("salt-formulas")) {
+ gerritProject = gerritProject + "-latest"
+ }
+ def testJob = String.format("test-%s-%s", jobsNamespace, gerritProject)
+ if (env.GERRIT_PROJECT == 'mk/cookiecutter-templates' || env.GERRIT_PROJECT == 'salt-models/reclass-system') {
+ callJobWithExtraVars('test-salt-model-ci-wrapper')
} else {
- def jobsNamespace = JOBS_NAMESPACE
- def plural_namespaces = ['salt-formulas', 'salt-models']
- // remove plural s on the end of job namespace
- if (JOBS_NAMESPACE in plural_namespaces) {
- jobsNamespace = JOBS_NAMESPACE.substring(0, JOBS_NAMESPACE.length() - 1)
- }
- // salt-formulas tests have -latest on end of the name
- if (JOBS_NAMESPACE.equals("salt-formulas")) {
- gerritProject = gerritProject + "-latest"
- }
- def testJob = String.format("test-%s-%s", jobsNamespace, gerritProject)
- if (env.GERRIT_PROJECT == 'mk/cookiecutter-templates' || env.GERRIT_PROJECT == 'salt-models/reclass-system') {
- callJobWithExtraVars('test-salt-model-ci-wrapper')
+ if (isJobExists(testJob)) {
+ common.infoMsg("Test job ${testJob} found, running")
+ def patchsetVerified = gerrit.patchsetHasApproval(gerritChange.currentPatchSet, "Verified", "+")
+ build job: testJob, parameters: [
+ [$class: 'StringParameterValue', name: 'DEFAULT_GIT_URL', value: "${GERRIT_SCHEME}://${GERRIT_NAME}@${GERRIT_HOST}:${GERRIT_PORT}/${GERRIT_PROJECT}"],
+ [$class: 'StringParameterValue', name: 'DEFAULT_GIT_REF', value: GERRIT_REFSPEC]
+ ]
+ giveVerify = true
} else {
- if (isJobExists(testJob)) {
- common.infoMsg("Test job ${testJob} found, running")
- def patchsetVerified = gerrit.patchsetHasApproval(gerritChange.currentPatchSet, "Verified", "+")
- if (JOBS_NAMESPACE.equals("salt-formulas")) {
- build job: testJob, parameters: [
- [$class: 'StringParameterValue', name: 'DEFAULT_GIT_URL', value: "${GERRIT_SCHEME}://${GERRIT_NAME}@${GERRIT_HOST}:${GERRIT_PORT}/${GERRIT_PROJECT}"],
- [$class: 'StringParameterValue', name: 'DEFAULT_GIT_REF', value: GERRIT_REFSPEC],
- [$class: 'StringParameterValue', name: 'GATING_GERRIT_BRANCH', value: GERRIT_BRANCH]
- ]
- } else {
- build job: testJob, parameters: [
- [$class: 'StringParameterValue', name: 'DEFAULT_GIT_URL', value: "${GERRIT_SCHEME}://${GERRIT_NAME}@${GERRIT_HOST}:${GERRIT_PORT}/${GERRIT_PROJECT}"],
- [$class: 'StringParameterValue', name: 'DEFAULT_GIT_REF', value: GERRIT_REFSPEC]
- ]
- }
- giveVerify = true
- } else {
- common.infoMsg("Test job ${testJob} not found")
- }
+ common.infoMsg("Test job ${testJob} not found")
}
}
- } else {
- common.errorMsg("Change don't have a CodeReview, skipping gate")
}
} else {
- common.infoMsg("Test job skipped")
+ common.infoMsg('Test job skipped')
}
}
+
stage("submit review") {
if (gerritChange.status == "MERGED") {
common.successMsg("Change ${GERRIT_CHANGE_NUMBER} is already merged, no need to gate them")
diff --git a/generate-cookiecutter-products.groovy b/generate-cookiecutter-products.groovy
index c572041..29f03fe 100644
--- a/generate-cookiecutter-products.groovy
+++ b/generate-cookiecutter-products.groovy
@@ -23,6 +23,50 @@
distribRevision = 'proposed'
gitGuessedVersion = false
+def GenerateModelToxDocker(Map params) {
+ def ccRoot = params['ccRoot']
+ def context = params['context']
+ def outDir = params['outDir']
+ def envOpts = params['envOpts']
+ def tempContextFile = new File(ccRoot, 'tempContext.yaml_' + UUID.randomUUID().toString()).toString()
+ writeFile file: tempContextFile, text: context
+ // Get Jenkins user UID and GID
+ def jenkinsUID = sh(script: 'id -u', returnStdout: true).trim()
+ def jenkinsGID = sh(script: 'id -g', returnStdout: true).trim()
+ /*
+ by default, the process in the image runs as the root user,
+ while the gpg key for the model and all files are managed by the jenkins user.
+ To make them compatible, install requirements as root, but generate the model as the jenkins user.
+ For the build, use the upstream Ubuntu Bionic image.
+ */
+ def configRun = ['distribRevision': 'nightly',
+ 'envOpts' : envOpts + ["CONFIG_FILE=$tempContextFile",
+ "OUTPUT_DIR=${outDir}"
+ ],
+ 'image': 'docker-prod-local.artifactory.mirantis.com/mirantis/cicd/jnlp-slave',
+ 'runCommands' : [
+ '001_prepare_generate_auto_reqs': {
+ sh('''
+ pip install tox
+ ''')
+ },
+ // user & group can be different on host and in docker
+ '002_set_jenkins_id': {
+ sh("""
+ usermod -u ${jenkinsUID} jenkins
+ groupmod -g ${jenkinsGID} jenkins
+ """)
+ },
+ '003_run_generate_auto': {
+ print('[Cookiecutter build] Result:\n' +
+ sh(returnStdout: true, script: 'cd ' + ccRoot + '; su jenkins -c "tox -ve generate_auto" '))
+ }
+ ]
+ ]
+
+ saltModelTesting.setupDockerAndTest(configRun)
+}
+
def globalVariatorsUpdate() {
def templateContext = readYaml text: env.COOKIECUTTER_TEMPLATE_CONTEXT
def context = templateContext['default_context']
@@ -31,34 +75,19 @@
// because each of them might be a 'refs/' variable, we need to add a tricky trigger for using the
// 'release/XXX' logic. This is a total guess - so even if those fail, the correct variable must
// be passed in the end!
- [context.get('cookiecutter_template_branch'), context.get('shared_reclass_branch'), context.get('mcp_common_scripts_branch')].any { branch ->
+ [ context.get('cookiecutter_template_branch'), context.get('shared_reclass_branch'), context.get('mcp_common_scripts_branch') ].any { branch ->
if (branch.toString().startsWith('release/')) {
gitGuessedVersion = branch
return true
}
}
- // Use mcpVersion git tag if not specified branch for cookiecutter-templates
- if (!context.get('cookiecutter_template_branch')) {
- context['cookiecutter_template_branch'] = gitGuessedVersion ?: context['mcp_version']
- }
- // Don't have n/t/s for cookiecutter-templates repo, therefore use master
- if (["nightly", "testing", "stable"].contains(context['cookiecutter_template_branch'])) {
- context['cookiecutter_template_branch'] = 'master'
- }
- if (!context.get('shared_reclass_branch')) {
- context['shared_reclass_branch'] = gitGuessedVersion ?: context['mcp_version']
- }
- // Don't have nightly/testing for reclass-system repo, therefore use master
- if (["nightly", "testing", "stable"].contains(context['shared_reclass_branch'])) {
- context['shared_reclass_branch'] = 'master'
- }
- if (!context.get('mcp_common_scripts_branch')) {
- // Pin exactly to CC branch, since it might use 'release/XXX' format
- context['mcp_common_scripts_branch'] = gitGuessedVersion ?: context['mcp_version']
- }
- // Don't have n/t/s for mcp-common-scripts repo, therefore use master
- if (["nightly", "testing", "stable"].contains(context['mcp_common_scripts_branch'])) {
- context['mcp_common_scripts_branch'] = 'master'
+
+ [ 'cookiecutter_template_branch', 'shared_reclass_branch', 'mcp_common_scripts_branch' ].each { repoName ->
+ if (context['mcp_version'] in [ "nightly", "testing", "stable" ] && ! context.get(repoName)) {
+ context[repoName] = 'master'
+ } else if (! context.get(repoName)) {
+ context[repoName] = gitGuessedVersion ?: "release/${context['mcp_version']}".toString()
+ }
}
//
distribRevision = context['mcp_version']
@@ -86,6 +115,13 @@
updateSaltFormulasDuringTest = false
}
+ if (gitGuessedVersion == 'release/proposed/2019.2.0') {
+ // CFG node in 2019.2.X update has to be bootstrapped with update/proposed repository for salt formulas
+ context['cloudinit_master_config'] = context.get('cloudinit_master_config', false) ?: [:]
+ context['cloudinit_master_config']['MCP_SALT_REPO_UPDATES'] = context['cloudinit_master_config'].get('MCP_SALT_REPO_UPDATES', false) ?:
+ 'deb [arch=amd64] http://mirror.mirantis.com/update/proposed/salt-formulas/xenial xenial main'
+ }
+
common.infoMsg("Using context:\n" + context)
print prettyPrint(toJson(context))
return context
@@ -141,17 +177,22 @@
stage('Generate model') {
// GNUPGHOME environment variable is required for all gpg commands
// and for python.generateModel execution
- withEnv(["GNUPGHOME=${env.WORKSPACE}/gpghome"]) {
+ def envOpts = ["GNUPGHOME=${env.WORKSPACE}/gpghome"]
+ withEnv(envOpts) {
if (context['secrets_encryption_enabled'] == 'True') {
sh "mkdir gpghome; chmod 700 gpghome"
def secretKeyID = RequesterEmail ?: "salt@${context['cluster_domain']}".toString()
if (!context.get('secrets_encryption_private_key')) {
def batchData = """
+ %echo Generating a basic OpenPGP key for Salt-Master
+ %no-protection
Key-Type: 1
Key-Length: 4096
Expire-Date: 0
Name-Real: ${context['salt_master_hostname']}.${context['cluster_domain']}
Name-Email: ${secretKeyID}
+ %commit
+ %echo done
""".stripIndent()
writeFile file: 'gpg-batch.txt', text: batchData
sh "gpg --gen-key --batch < gpg-batch.txt"
@@ -159,7 +200,7 @@
} else {
writeFile file: 'gpgkey.asc', text: context['secrets_encryption_private_key']
sh "gpg --import gpgkey.asc"
- secretKeyID = sh(returnStdout: true, script: 'gpg --list-secret-keys --with-colons | awk -F: -e "/^sec/{print \\$5; exit}"').trim()
+ secretKeyID = sh(returnStdout: true, script: 'gpg --list-secret-keys --with-colons | grep -E "^sec" | awk -F: \'{print \$5}\'').trim()
}
context['secrets_encryption_key_id'] = secretKeyID
}
@@ -179,7 +220,10 @@
// still expect only lower lvl of project, aka model/classes/cluster/XXX/. So,lets dump result into
// temp dir, and then copy it over initial structure.
reclassTempRootDir = sh(script: "mktemp -d -p ${env.WORKSPACE}", returnStdout: true).trim()
- python.generateModel(common2.dumpYAML(['default_context': context]), 'default_context', context['salt_master_hostname'], cutterEnv, reclassTempRootDir, templateEnv, false)
+ GenerateModelToxDocker(['context': common2.dumpYAML(['default_context': context]),
+ 'ccRoot' : templateEnv,
+ 'outDir' : reclassTempRootDir,
+ 'envOpts': envOpts])
dir(modelEnv) {
common.warningMsg('Forming reclass-root structure...')
sh("cp -ra ${reclassTempRootDir}/reclass/* .")
@@ -264,6 +308,11 @@
def smc = [:]
smc['SALT_MASTER_MINION_ID'] = "${context['salt_master_hostname']}.${context['cluster_domain']}"
smc['SALT_MASTER_DEPLOY_IP'] = context['salt_master_management_address']
+ if (context.get('cloudinit_master_config', false)) {
+ context['cloudinit_master_config'].each { k, v ->
+ smc[k] = v
+ }
+ }
if (outdateGeneration) {
smc['DEPLOY_NETWORK_GW'] = context['deploy_network_gateway']
smc['DEPLOY_NETWORK_NETMASK'] = context['deploy_network_netmask']
@@ -298,7 +347,7 @@
}
for (i in common.entries(smc)) {
- sh "sed -i 's,${i[0]}=.*,${i[0]}=${i[1]},' user_data"
+ sh "sed -i 's,export ${i[0]}=.*,export ${i[0]}=\${${i[0]}:-\"${i[1]}\"},' user_data"
}
// calculate netmask
@@ -367,10 +416,31 @@
archiveArtifacts artifacts: "${context['cluster_name']}.tar.gz"
if (RequesterEmail != '' && !RequesterEmail.contains('example')) {
- emailext(to: RequesterEmail,
- attachmentsPattern: "output-${context['cluster_name']}/*",
- body: "Mirantis Jenkins\n\nRequested reclass model ${context['cluster_name']} has been created and attached to this email.\nEnjoy!\n\nMirantis",
- subject: "Your Salt model ${context['cluster_name']}")
+ def mailSubject = "Your Salt model ${context['cluster_name']}"
+ if (context.get('send_method') == 'gcs') {
+ def gcs = new com.mirantis.mk.GoogleCloudStorage()
+ def uploadIsos = [ "output-${context['cluster_name']}/${context['salt_master_hostname']}.${context['cluster_domain']}-config.iso" ]
+ if (context['local_repositories'] == 'True') {
+ uploadIsos << "output-${context['cluster_name']}/${aptlyServerHostname}.${context['cluster_domain']}-config.iso"
+ }
+ // generate a random hash to have a unique and unpredictable link to the file
+ def randHash = common.generateRandomHashString(64)
+ def config = [
+ 'creds': context['gcs_creds'],
+ 'project': context['gcs_project'],
+ 'dest': "gs://${context['gcs_bucket']}/${randHash}",
+ 'sources': uploadIsos
+ ]
+ def fileURLs = gcs.uploadArtifactToGoogleStorageBucket(config).join(' ').replace('gs://', 'https://storage.googleapis.com/')
+ emailext(to: RequesterEmail,
+ body: "Mirantis Jenkins\n\nRequested reclass model ${context['cluster_name']} has been created and available to download via next URL: ${fileURLs} within 7 days.\nEnjoy!\n\nMirantis",
+ subject: mailSubject)
+ } else {
+ emailext(to: RequesterEmail,
+ attachmentsPattern: "output-${context['cluster_name']}/*",
+ body: "Mirantis Jenkins\n\nRequested reclass model ${context['cluster_name']} has been created and attached to this email.\nEnjoy!\n\nMirantis",
+ subject: mailSubject)
+ }
}
dir("output-${context['cluster_name']}") {
deleteDir()
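Note: %no-protection and %commit are documented GnuPG batch-mode directives: the former generates the key without a passphrase (required for unattended runs), the latter actually performs the generation. An equivalent way to read back the key id afterwards, matching the grep|awk pipeline above:

    // field 5 of the colon-separated 'sec' line is the key id
    def secretKeyID = sh(returnStdout: true,
        script: 'gpg --list-secret-keys --with-colons | awk -F: \'/^sec/{print $5; exit}\'').trim()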
diff --git a/git-mirror-pipeline.groovy b/git-mirror-pipeline.groovy
index fa49bbc..6f14866 100644
--- a/git-mirror-pipeline.groovy
+++ b/git-mirror-pipeline.groovy
@@ -5,6 +5,22 @@
timeout(time: 12, unit: 'HOURS') {
node() {
try {
+ def sourceCreds = env.SOURCE_CREDENTIALS
+ if (sourceCreds && common.getCredentialsById(sourceCreds, 'password')) {
+ withCredentials([
+ [$class : 'UsernamePasswordMultiBinding',
+ credentialsId : sourceCreds,
+ passwordVariable: 'GIT_PASS',
+ usernameVariable: 'GIT_USER']
+ ]) {
+ sh """
+ set +x
+ git config --global credential.${SOURCE_URL}.username \${GIT_USER}
+ echo "echo \${GIT_PASS}" > askpass.sh && chmod +x askpass.sh
+ """
+ env.GIT_ASKPASS = "${env.WORKSPACE}/askpass.sh"
+ }
+ }
if (BRANCHES == '*' || BRANCHES.contains('*')) {
branches = git.getBranchesForGitRepo(SOURCE_URL, BRANCHES)
} else {
@@ -18,7 +34,8 @@
dir('source') {
checkout changelog: true, poll: true,
scm: [$class : 'GitSCM', branches: pollBranches, doGenerateSubmoduleConfigurations: false,
- extensions: [[$class: 'CleanCheckout']], submoduleCfg: [], userRemoteConfigs: [[credentialsId: CREDENTIALS_ID, url: SOURCE_URL]]]
+ extensions: [[$class: 'CleanCheckout']], submoduleCfg: [],
+ userRemoteConfigs: [[credentialsId: sourceCreds, url: SOURCE_URL]]]
git.mirrorGit(SOURCE_URL, TARGET_URL, CREDENTIALS_ID, branches, true)
}
} catch (Throwable e) {
@@ -26,6 +43,9 @@
currentBuild.result = 'FAILURE'
currentBuild.description = currentBuild.description ? e.message + '' + currentBuild.description : e.message
throw e
+ } finally {
+ sh "git config --global --unset credential.${SOURCE_URL}.username || true"
+ deleteDir()
}
}
}
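Note: the credential wiring above relies on git's GIT_ASKPASS hook: git runs the executable from that variable whenever it needs a password and reads the password from its stdout, while the username comes from per-URL git config. A sketch of the moving parts (paths as created by the pipeline):

    // askpass.sh, generated into the workspace, just prints the password:
    //   echo ${GIT_PASS}
    // the username for ${SOURCE_URL} comes from:
    //   git config --global credential.${SOURCE_URL}.username ${GIT_USER}
    env.GIT_ASKPASS = "${env.WORKSPACE}/askpass.sh"
    // the finally block unsets the git config and wipes the workspace afterwards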
diff --git a/openstack-control-upgrade.groovy b/openstack-control-upgrade.groovy
index f2dd78c..5929390 100644
--- a/openstack-control-upgrade.groovy
+++ b/openstack-control-upgrade.groovy
@@ -32,6 +32,8 @@
* No service downtime
* No workload downtime''',
'Launched actions': '''
+ * Refresh pillars on the target nodes.
+ * Apply the 'linux.system.repo' state on the target nodes.
* Verify API, perform basic CRUD operations for services.
* Verify that compute/neutron agents on hosts are up.
* Run some service built in checkers like keystone-manage doctor or nova-status upgrade.''',
@@ -153,6 +155,8 @@
for (target in upgradeTargets){
common.stageWrapper(upgradeStageMap, "Pre upgrade", target, interactive) {
openstack.runOpenStackUpgradePhase(env, target, 'pre')
+ salt.runSaltProcessStep(env, target, 'saltutil.refresh_pillar', [], null, true)
+ salt.enforceState(env, target, 'linux.system.repo')
openstack.runOpenStackUpgradePhase(env, target, 'verify')
}
}
@@ -173,6 +177,9 @@
if (OS_DIST_UPGRADE.toBoolean() == true || OS_UPGRADE.toBoolean() == true) {
debian.osUpgradeNode(env, target, upgrade_mode, false)
}
+ // Workaround for PROD-31413, install python-tornado from latest release if available and
+ // restart minion to apply new code.
+ salt.upgradePackageAndRestartSaltMinion(env, target, 'python-tornado')
}
common.stageWrapper(upgradeStageMap, "Upgrade OpenStack", target, interactive) {
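Note: the pre-upgrade phase now refreshes pillars and re-applies linux.system.repo so updated repositories land before verification, and the python-tornado bump plus salt-minion restart works around PROD-31413. A recap sketch of the per-target sequence, using the library calls as shown above:

    salt.runSaltProcessStep(env, target, 'saltutil.refresh_pillar', [], null, true)
    salt.enforceState(env, target, 'linux.system.repo')   // re-render apt repos
    // later, before the OpenStack upgrade itself:
    salt.upgradePackageAndRestartSaltMinion(env, target, 'python-tornado')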
diff --git a/openstack-data-upgrade.groovy b/openstack-data-upgrade.groovy
index 7458a27..e768564 100644
--- a/openstack-data-upgrade.groovy
+++ b/openstack-data-upgrade.groovy
@@ -31,6 +31,8 @@
* No service downtime
* No workload downtime''',
'Launched actions': '''
+ * Refresh pillars on the target nodes.
+ * Apply the 'linux.system.repo' state on the target nodes.
* Verify API, perform basic CRUD operations for services.
* Verify that compute/neutron agents on hosts are up.
* Run some service built in checkers like keystone-manage doctor or nova-status upgrade.''',
@@ -138,6 +140,8 @@
for (target in targetNodes){
common.stageWrapper(upgradeStageMap, "Pre upgrade", target, interactive) {
openstack.runOpenStackUpgradePhase(env, target, 'pre')
+ salt.runSaltProcessStep(env, target, 'saltutil.refresh_pillar', [], null, true)
+ salt.enforceState(env, target, 'linux.system.repo')
openstack.runOpenStackUpgradePhase(env, target, 'verify')
}
@@ -158,6 +162,9 @@
if (OS_DIST_UPGRADE.toBoolean() == true || OS_UPGRADE.toBoolean() == true) {
debian.osUpgradeNode(env, target, upgrade_mode, false)
}
+ // Workaround for PROD-31413, install python-tornado from latest release if available and
+ // restart minion to apply new code.
+ salt.upgradePackageAndRestartSaltMinion(env, target, 'python-tornado')
}
common.stageWrapper(upgradeStageMap, "Upgrade OpenStack", target, interactive) {
diff --git a/openstack-galera-upgrade.groovy b/openstack-galera-upgrade.groovy
new file mode 100644
index 0000000..f124051
--- /dev/null
+++ b/openstack-galera-upgrade.groovy
@@ -0,0 +1,206 @@
+/**
+ * Upgrade MySQL and Galera packages on dbs nodes.
+ * Update packages on given nodes
+ *
+ * Expected parameters:
+ * SALT_MASTER_CREDENTIALS Credentials to the Salt API.
+ * SALT_MASTER_URL Full Salt API address [http://10.10.10.15:6969].
+ * SHUTDOWN_CLUSTER Shut down all MySQL instances on target nodes at the same time.
+ * OS_DIST_UPGRADE Upgrade system packages including kernel (apt-get dist-upgrade).
+ * OS_UPGRADE Upgrade all installed applications (apt-get upgrade)
+ * TARGET_SERVERS Comma separated list of salt compound definitions to upgrade.
+ * INTERACTIVE Ask interactive questions during pipeline run (bool).
+ *
+**/
+
+def common = new com.mirantis.mk.Common()
+def salt = new com.mirantis.mk.Salt()
+def python = new com.mirantis.mk.Python()
+def debian = new com.mirantis.mk.Debian()
+def openstack = new com.mirantis.mk.Openstack()
+def galera = new com.mirantis.mk.Galera()
+def shutdownCluster = SHUTDOWN_CLUSTER.toBoolean()
+def interactive = INTERACTIVE.toBoolean()
+def LinkedHashMap upgradeStageMap = [:]
+
+upgradeStageMap.put('Pre upgrade',
+ [
+ 'Description': 'Only non destructive actions will be applied during this phase. Basic service verification will be performed.',
+ 'Status': 'NOT_LAUNCHED',
+ 'Expected behaviors': '''
+ * No service downtime
+ * No workload downtime''',
+ 'Launched actions': '''
+ * Verify API, perform basic CRUD operations for services.
+ * Verify MySQL is running and Galera cluster is operational.''',
+ 'State result': 'Basic checks around wsrep Galera status are passed.'
+ ])
+
+upgradeStageMap.put('Stop MySQL service',
+ [
+ 'Description': 'All MySQL services will be stopped on all TARGET_SERVERS nodes.',
+ 'Status': 'NOT_LAUNCHED',
+ 'Expected behaviors': '''
+ * MySQL services are stopped.
+ * OpenStack APIs are not accessible from this point.
+ * No workload downtime''',
+ 'Launched actions': '''
+ * Stop MySQL services''',
+ 'State result': 'MySQL service is stopped',
+ ])
+
+upgradeStageMap.put('Upgrade OS',
+ [
+ 'Description': 'Optional step. OS packages will be upgraded during this phase; depending on the job parameters, dist-upgrade might be called and the node rebooted.',
+ 'Status': 'NOT_LAUNCHED',
+ 'Expected behaviors': '''
+ * No workload downtime
+ * The nodes might be rebooted''',
+ 'Launched actions': '''
+ * Install new version of system packages
+ * If doing dist-upgrade new kernel might be installed and node rebooted
+ * System packages are updated
+ * Node might be rebooted
+'''
+ ])
+
+upgradeStageMap.put('Upgrade MySQL server',
+ [
+ 'Description': 'MySQL and Galera packages will be upgraded during this stage. No workload downtime is expected.',
+ 'Status': 'NOT_LAUNCHED',
+ 'Expected behaviors': '''
+ * OpenStack services lose connection to the MySQL server
+ * No workload downtime''',
+ 'Launched actions': '''
+ * Install new version of MySQL and Galera packages
+ * Render version of configs''',
+ 'State result': '''
+ * MySQL packages are upgraded''',
+ ])
+
+upgradeStageMap.put('Start MySQL service',
+ [
+ 'Description': 'All MySQL services will be running on all TARGET_SERVERS nodes.',
+ 'Status': 'NOT_LAUNCHED',
+ 'Expected behaviors': '''
+ * MySQL service is running.
+ * OpenStack APIs are accessible from this point.
+ * No workload downtime''',
+ 'Launched actions': '''
+ * Start MySQL service''',
+ 'State result': 'MySQL service is running',
+ ])
+
+def env = "env"
+timeout(time: 12, unit: 'HOURS') {
+ node() {
+
+ stage('Setup virtualenv for Pepper') {
+ python.setupPepperVirtualenv(env, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+ }
+
+ def upgradeTargets = salt.getMinionsSorted(env, TARGET_SERVERS)
+
+ if (upgradeTargets.isEmpty()) {
+ error("No servers for upgrade matched by ${TARGET_SERVERS}")
+ }
+
+ def targetSecMapping = [:]
+ def secNoList = []
+ def out
+ def stopTargets = upgradeTargets.reverse()
+ common.printStageMap(upgradeStageMap)
+
+ if (interactive){
+ input message: common.getColorizedString(
+ "Above you can find detailed info this pipeline will execute.\nThe info provides brief description of each stage, actions that will be performed and service/workload impact during each stage.\nPlease read it carefully.", "yellow")
+ }
+
+ for (target in upgradeTargets) {
+ salt.runSaltProcessStep(env, target, 'saltutil.refresh_pillar', [], null, true)
+ salt.enforceState(env, target, ['linux.system.repo'])
+ common.stageWrapper(upgradeStageMap, "Pre upgrade", target, interactive) {
+ openstack.runOpenStackUpgradePhase(env, target, 'pre')
+ openstack.runOpenStackUpgradePhase(env, target, 'verify')
+ }
+ }
+
+ if (shutdownCluster){
+ for (target in stopTargets) {
+ common.stageWrapper(upgradeStageMap, "Stop MySQL service", target, interactive) {
+ openstack.runOpenStackUpgradePhase(env, target, 'service_stopped')
+ }
+ }
+ }
+
+ for (target in upgradeTargets) {
+ out = salt.cmdRun(env, target, 'cat /var/lib/mysql/grastate.dat | grep "seqno" | cut -d ":" -f2', true, null, false).get('return')[0].values()[0].replaceAll('Salt command execution success', '').trim()
+ common.infoMsg("Get seqno: ${out} for node ${target}")
+ if (!out.isNumber()){
+ out = -2
+ }
+ targetSecMapping[out.toInteger()] = target
+ secNoList.add(out.toInteger())
+ }
+
+ def masterNode = targetSecMapping[secNoList.max()]
+ common.infoMsg("Master node is: ${masterNode}")
+
+ // Make sure we start upgrade always from master node
+ upgradeTargets.remove(masterNode)
+ upgradeTargets = [masterNode] + upgradeTargets
+ common.infoMsg("Upgrade targets are: ${upgradeTargets}")
+
+ for (target in upgradeTargets) {
+
+ common.stageWrapper(upgradeStageMap, "Stop MySQL service", target, interactive) {
+ openstack.runOpenStackUpgradePhase(env, target, 'service_stopped')
+ }
+
+ common.stageWrapper(upgradeStageMap, "Upgrade OS", target, interactive) {
+ if (OS_DIST_UPGRADE.toBoolean() == true){
+ upgrade_mode = 'dist-upgrade'
+ } else if (OS_UPGRADE.toBoolean() == true){
+ upgrade_mode = 'upgrade'
+ }
+ if (OS_DIST_UPGRADE.toBoolean() == true || OS_UPGRADE.toBoolean() == true) {
+ debian.osUpgradeNode(env, target, upgrade_mode, false)
+ }
+ }
+
+ common.stageWrapper(upgradeStageMap, "Upgrade MySQL server", target, interactive) {
+ openstack.runOpenStackUpgradePhase(env, target, 'pkgs_latest')
+ openstack.runOpenStackUpgradePhase(env, target, 'render_config')
+ }
+
+ if (shutdownCluster && target == masterNode){
+ //Start first node.
+ common.stageWrapper(upgradeStageMap, "Start MySQL service", target, interactive) {
+ galera.startFirstNode(env, target)
+ }
+ }
+
+ common.stageWrapper(upgradeStageMap, "Start MySQL service", target, interactive) {
+ openstack.runOpenStackUpgradePhase(env, target, 'service_running')
+ openstack.runOpenStackUpgradePhase(env, target, 'verify')
+ }
+ }
+
+ // restart first node by applying state.
+
+ if (shutdownCluster) {
+ openstack.runOpenStackUpgradePhase(env, masterNode, 'render_config')
+ salt.cmdRun(env, masterNode, "service mysql reload")
+ openstack.runOpenStackUpgradePhase(env, masterNode, 'verify')
+ }
+
+ for (target in upgradeTargets) {
+ ensureClusterState = galera.getWsrepParameters(env, target, 'wsrep_evs_state')
+ if (ensureClusterState['wsrep_evs_state'] == 'OPERATIONAL') {
+ common.infoMsg('Node is in OPERATIONAL state.')
+ } else {
+ throw new Exception("Node is NOT in OPERATIONAL state.")
+ }
+ }
+ }
+}
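Note: the pipeline picks the node with the highest Galera seqno from grastate.dat as the "master" and upgrades (and, when the cluster was shut down, bootstraps) it first, so the most advanced replica seeds the cluster. A worked example of the selection (node names are hypothetical; -1 in grastate.dat means an unclean shutdown, and unparsable values map to -2 above):

    def targetSecMapping = [40: 'dbs01.local', 42: 'dbs02.local', 41: 'dbs03.local']
    def secNoList = targetSecMapping.keySet() as List
    def masterNode = targetSecMapping[secNoList.max()]
    assert masterNode == 'dbs02.local'   // highest seqno bootstraps first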
diff --git a/openstack-rabbitmq-upgrade.groovy b/openstack-rabbitmq-upgrade.groovy
index aabdafc..bc252da 100644
--- a/openstack-rabbitmq-upgrade.groovy
+++ b/openstack-rabbitmq-upgrade.groovy
@@ -29,6 +29,8 @@
* No service downtime
* No workload downtime''',
'Launched actions': '''
+ * Refresh pillars on the target nodes.
+ * Apply the 'linux.system.repo' state on the target nodes.
* Verify API, perform basic CRUD operations for services.
* Verify rabbitmq is running and operational.''',
'State result': 'Basic checks around services API are passed.'
@@ -114,6 +116,8 @@
for (target in upgradeTargets){
common.stageWrapper(upgradeStageMap, "Pre upgrade", target, interactive) {
openstack.runOpenStackUpgradePhase(env, target, 'pre')
+ salt.runSaltProcessStep(env, target, 'saltutil.refresh_pillar', [], null, true)
+ salt.enforceState(env, target, 'linux.system.repo')
openstack.runOpenStackUpgradePhase(env, target, 'verify')
}
}
diff --git a/restore-cassandra.groovy b/restore-cassandra.groovy
index b585e7e..fb1259f 100644
--- a/restore-cassandra.groovy
+++ b/restore-cassandra.groovy
@@ -7,11 +7,20 @@
*
**/
-def common = new com.mirantis.mk.Common()
-def salt = new com.mirantis.mk.Salt()
-def python = new com.mirantis.mk.Python()
+common = new com.mirantis.mk.Common()
+salt = new com.mirantis.mk.Salt()
+python = new com.mirantis.mk.Python()
def pepperEnv = "pepperEnv"
+
+def getValueForPillarKey(pepperEnv, target, pillarKey) {
+ def out = salt.getReturnValues(salt.getPillar(pepperEnv, target, pillarKey))
+ if (out == '') {
+ throw new Exception("Cannot get value for ${pillarKey} key on ${target} target")
+ }
+ return out.toString()
+}
+
timeout(time: 12, unit: 'HOURS') {
node() {
@@ -28,54 +37,71 @@
}
}
- stage('Backup') {
- salt.cmdRun(pepperEnv, 'I@cassandra:backup:client', 'bash /usr/local/bin/cassandra-backup-runner-call.sh')
- }
-
stage('Restore') {
+ // stop neutron-server to prevent CRUD api calls to contrail-api service
+ try {
+ salt.runSaltProcessStep(pepperEnv, 'I@neutron:server', 'service.stop', ['neutron-server'], null, true)
+ } catch (Exception er) {
+ common.warningMsg('neutron-server service already stopped')
+ }
// get opencontrail version
- def _pillar = salt.getPillar(pepperEnv, "I@opencontrail:control", '_param:opencontrail_version')
- def contrailVersion = _pillar['return'][0].values()[0]
- common.infoMsg("Contrail version is ${contrailVersion}")
- if (contrailVersion >= 4) {
- common.infoMsg("There will be steps for OC4.0 restore")
+ def contrailVersion = getValueForPillarKey(pepperEnv, "I@opencontrail:control:role:primary", "_param:opencontrail_version")
+ def configDbIp = getValueForPillarKey(pepperEnv, "I@opencontrail:control:role:primary", "opencontrail:database:bind:host")
+ def configDbPort = getValueForPillarKey(pepperEnv, "I@opencontrail:control:role:primary", "opencontrail:database:bind:port_configdb")
+ common.infoMsg("OpenContrail version is ${contrailVersion}")
+ if (contrailVersion.startsWith('4')) {
+ controllerImage = getValueForPillarKey(pepperEnv, "I@opencontrail:control:role:primary",
+ "docker:client:compose:opencontrail:service:controller:container_name")
+ common.infoMsg("Applying db restore procedure for OpenContrail 4.X version")
try {
- salt.cmdRun(pepperEnv, 'I@opencontrail:control', 'doctrail controller systemctl stop contrail-database' )
+ common.infoMsg("Stop contrail control plane containers")
+ salt.cmdRun(pepperEnv, 'I@opencontrail:control or I@opencontrail:collector', 'cd /etc/docker/compose/opencontrail/; docker-compose down')
} catch (Exception err) {
- common.warningMsg('contrail-database already stopped? ' + err.getMessage())
+ common.errorMsg('An error occurred during contrail containers shutdown: ' + err.getMessage())
+ throw err
}
try {
- salt.cmdRun(pepperEnv, 'I@opencontrail:control', 'doctrail controller bash -c "for f in $(ls /var/lib/cassandra/); do rm -r /var/lib/cassandra/$f; done"')
+ common.infoMsg("Cleanup cassandra data")
+ salt.cmdRun(pepperEnv, 'I@opencontrail:control', 'for f in $(ls /var/lib/configdb/); do rm -r /var/lib/configdb/$f; done')
} catch (Exception err) {
- common.warningMsg('cassandra data already removed? ' + err.getMessage())
+ common.errorMsg('Cannot clean up cassandra data on control nodes: ' + err.getMessage())
+ throw err
}
try {
- salt.cmdRun(pepperEnv, 'I@cassandra:backup:client', 'doctrail controller systemctl start contrail-database' )
+ common.infoMsg("Start cassandra db on I@cassandra:backup:client node")
+ salt.cmdRun(pepperEnv, 'I@cassandra:backup:client', 'cd /etc/docker/compose/opencontrail/; docker-compose up -d')
} catch (Exception err) {
- common.warningMsg('contrail-database already started? ' + err.getMessage())
+ common.errorMsg('An error occurred during cassandra db startup on the I@cassandra:backup:client node: ' + err.getMessage())
+ throw err
}
- // remove restore-already-happenned file if any is present
+ // wait for cassandra to be online
+ common.retry(6, 20){
+ common.infoMsg("Trying to connect to casandra db on I@cassandra:backup:client node ...")
+ salt.cmdRun(pepperEnv, 'I@cassandra:backup:client', "nc -v -z -w2 ${configDbIp} ${configDbPort}")
+ }
+ // remove restore-already-happened file if any is present
try {
- salt.cmdRun(pepperEnv, 'I@cassandra:backup:client', 'rm /var/backups/cassandra/dbrestored')
+ salt.cmdRun(pepperEnv, 'I@cassandra:backup:client', 'rm /var/backups/cassandra/dbrestored')
} catch (Exception err) {
common.warningMsg('/var/backups/cassandra/dbrestored not present? ' + err.getMessage())
}
- // perform actual backup
salt.enforceState(pepperEnv, 'I@cassandra:backup:client', "cassandra")
- salt.runSaltProcessStep(pepperEnv, 'I@cassandra:backup:client', 'system.reboot', null, [], true, 5)
- sleep(5)
- salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control and not I@cassandra:backup:client', 'system.reboot', null, [], true, 5)
- // the lovely wait-60-seconds mantra before restarting supervisor-database service
- sleep(60)
- salt.cmdRun(pepperEnv, 'I@opencontrail:control', "doctrail controller systemctl restart contrail-database")
+ try {
+ salt.cmdRun(pepperEnv, 'I@opencontrail:control and not I@cassandra:backup:client', 'cd /etc/docker/compose/opencontrail/; docker-compose up -d')
+ } catch (Exception err) {
+ common.errorMsg('An error occurred during cassandra db startup on I@opencontrail:control and not I@cassandra:backup:client nodes: ' + err.getMessage())
+ throw err
+ }
// another mantra, wait till all services are up
sleep(60)
- } else {
try {
- salt.runSaltProcessStep(pepperEnv, 'I@neutron:server', 'service.stop', ['neutron-server'], null, true)
- } catch (Exception er) {
- common.warningMsg('neutron-server service already stopped')
+ common.infoMsg("Start analytics containers node")
+ salt.cmdRun(pepperEnv, 'I@opencontrail:collector', 'cd /etc/docker/compose/opencontrail/; docker-compose up -d')
+ } catch (Exception err) {
+ common.errorMsg('An error occurred during analytics containers startup: ' + err.getMessage())
+ throw err
}
+ } else {
try {
salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.stop', ['supervisor-config'], null, true)
} catch (Exception er) {
@@ -104,8 +130,7 @@
common.warningMsg('Directory already empty')
}
- _pillar = salt.getPillar(pepperEnv, "I@cassandra:backup:client", 'cassandra:backup:backup_dir')
- def backupDir = _pillar['return'][0].values()[0] ?: '/var/backups/cassandra'
+ def backupDir = getValueForPillarKey(pepperEnv, "I@cassandra:backup:client", "cassandra:backup:backup_dir")
common.infoMsg("Backup directory is ${backupDir}")
salt.runSaltProcessStep(pepperEnv, 'I@cassandra:backup:client', 'file.remove', ["${backupDir}/dbrestored"], null, true)
@@ -127,7 +152,6 @@
sleep(5)
salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.restart', ['supervisor-database'], null, true)
- salt.runSaltProcessStep(pepperEnv, 'I@neutron:server', 'service.start', ['neutron-server'], null, true)
// wait until contrail-status is up
salt.commandStatus(pepperEnv, 'I@opencontrail:control', "contrail-status | grep -v == | grep -v \'disabled on boot\' | grep -v nodemgr | grep -v active | grep -v backup", null, false)
@@ -135,11 +159,12 @@
salt.cmdRun(pepperEnv, 'I@opencontrail:control', "nodetool status")
salt.cmdRun(pepperEnv, 'I@opencontrail:control', "contrail-status")
}
+
+ salt.runSaltProcessStep(pepperEnv, 'I@neutron:server', 'service.start', ['neutron-server'], null, true)
}
stage('Opencontrail controllers health check') {
- common.retry(3, 20){
- salt.cmdRun(pepperEnv, 'I@opencontrail:control', "doctrail controller contrail-status")
+ common.retry(9, 20){
salt.enforceState(pepperEnv, 'I@opencontrail:control or I@opencontrail:collector', 'opencontrail.upgrade.verify', true, true)
}
}
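The OpenContrail 4.x branch above replaces the old reboot-and-sleep choreography with an explicit readiness probe: `common.retry(6, 20)` keeps re-running an `nc` TCP check until Cassandra answers. A minimal standalone sketch of that wait pattern, assuming `common.retry(attempts, waitSeconds) { ... }` re-raises the closure's last exception once attempts are exhausted, and that `configDbIp`/`configDbPort` were resolved earlier in the pipeline (`waitForPort` itself is a hypothetical helper name):

```groovy
// Sketch: block until a TCP port on a remote minion accepts connections.
def waitForPort(pepperEnv, salt, common, String target, String ip, String port,
                int attempts = 6, int waitSeconds = 20) {
    common.retry(attempts, waitSeconds) {
        common.infoMsg("Probing ${ip}:${port} from ${target} ...")
        // nc exits non-zero while the port is closed; cmdRun then throws,
        // which is what triggers the next retry attempt.
        salt.cmdRun(pepperEnv, target, "nc -v -z -w2 ${ip} ${port}")
    }
}
```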
diff --git a/stacklight-upgrade.groovy b/stacklight-upgrade.groovy
index 85b93e9..7554530 100644
--- a/stacklight-upgrade.groovy
+++ b/stacklight-upgrade.groovy
@@ -49,27 +49,40 @@
def verify_es_is_green(master) {
common.infoMsg('Verify that the Elasticsearch cluster status is green')
try {
- def retries_wait = 20
- def retries = 15
+ def retries_wait = 120
+ def retries = 60
+
def elasticsearch_vip
- def pillar = salt.getPillar(master, "I@elasticsearch:client", 'elasticsearch:client:server:host')
- if(!pillar['return'].isEmpty()) {
- elasticsearch_vip = pillar['return'][0].values()[0]
+ def pillar = salt.getReturnValues(salt.getPillar(master, "I@elasticsearch:client", 'elasticsearch:client:server:host'))
+ if(pillar) {
+ elasticsearch_vip = pillar
} else {
errorOccured = true
common.errorMsg('[ERROR] Elasticsearch VIP address could not be retrieved')
}
- pillar = salt.getPillar(master, "I@elasticsearch:client", 'elasticsearch:client:server:port')
+
+ pillar = salt.getReturnValues(salt.getPillar(master, "I@elasticsearch:client", 'elasticsearch:client:server:port'))
def elasticsearch_port
- if(!pillar['return'].isEmpty()) {
- elasticsearch_port = pillar['return'][0].values()[0]
+ if(pillar) {
+ elasticsearch_port = pillar
} else {
errorOccured = true
common.errorMsg('[ERROR] Elasticsearch VIP port could not be retrieved')
}
+
+ pillar = salt.getReturnValues(salt.getPillar(master, "I@elasticsearch:client", 'elasticsearch:client:server:scheme'))
+ def elasticsearch_scheme
+ if(pillar) {
+ elasticsearch_scheme = pillar
+ common.infoMsg("[INFO] Using elasticsearch scheme: ${elasticsearch_scheme}")
+ } else {
+ common.infoMsg('[INFO] No pillar with Elasticsearch server scheme, using scheme: http')
+ elasticsearch_scheme = "http"
+ }
+
common.retry(retries,retries_wait) {
common.infoMsg('Waiting for Elasticsearch to become green..')
- salt.cmdRun(master, "I@elasticsearch:client", "curl -sf ${elasticsearch_vip}:${elasticsearch_port}/_cat/health | awk '{print \$4}' | grep green")
+ salt.cmdRun(master, "I@elasticsearch:client", "curl -sfk ${elasticsearch_scheme}://${elasticsearch_vip}:${elasticsearch_port}/_cat/health | awk '{print \$4}' | grep green")
}
} catch (Exception er) {
errorOccured = true
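The new scheme lookup above is the third pillar read in a row, but the only one with a fallback: a missing `elasticsearch:client:server:scheme` key degrades to plain `http` instead of flagging an error. A sketch of that read-with-default idiom, assuming (as the code above does) that `salt.getReturnValues(salt.getPillar(...))` yields an empty, falsy value for an absent key; `pillarOrDefault` is a hypothetical helper:

```groovy
// Hypothetical helper: read a pillar key, falling back to a default when unset.
def pillarOrDefault(master, salt, common, String target, String key, String dflt) {
    def value = salt.getReturnValues(salt.getPillar(master, target, key))
    if (value) {
        return value
    }
    common.infoMsg("No pillar ${key} on ${target}, using default: ${dflt}")
    return dflt
}

// Usage mirroring the scheme lookup above:
// def scheme = pillarOrDefault(master, salt, common, 'I@elasticsearch:client',
//                              'elasticsearch:client:server:scheme', 'http')
```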
@@ -204,8 +217,11 @@
common.infoMsg('Start the monitoring services')
salt.enforceState([saltId: pepperEnv, target: 'I@docker:swarm:role:master and I@prometheus:server', state: 'docker'])
salt.runSaltProcessStep(pepperEnv, '*', 'saltutil.sync_all', [], null, true)
+ common.infoMsg("Waiting grafana service to start")
+ sleep(120)
+
common.infoMsg('Refresh the Grafana dashboards')
- salt.enforceState([saltId: pepperEnv, target: 'I@grafana:client', state: 'grafana.client'])
+ salt.enforceState([saltId: pepperEnv, target: 'I@grafana:client', state: 'grafana.client', retries: 10, retries_wait: 30])
} catch (Exception er) {
errorOccured = true
common.errorMsg("[ERROR] Upgrade of docker components failed. Please fix it manually.")
diff --git a/test-model-generator.groovy b/test-model-generator.groovy
index 02e1789..8c08493 100644
--- a/test-model-generator.groovy
+++ b/test-model-generator.groovy
@@ -27,6 +27,9 @@
def dockerRegistry = env.DOCKER_REGISTRY ?: 'docker-prod-local.docker.mirantis.net'
def dockerReviewRegistry = env.DOCKER_REVIEW_REGISTRY ?: 'docker-dev-local.docker.mirantis.net'
def cvpImageName = env.CVP_DOCKER_IMG ? "${dockerRegistry}/${env.CVP_DOCKER_IMG}:${version}" : "${dockerRegistry}/mirantis/cvp/cvp-trymcp-tests:${version}"
+if (env.CVP_DEV_TAG && env.CVP_DOCKER_IMG) {
+ cvpImageName = "${dockerReviewRegistry}/${env.CVP_DOCKER_IMG}:${env.CVP_DEV_TAG}"
+}
def checkouted = false
def testReportHTMLFile = 'reports/report.html'
@@ -44,6 +47,15 @@
sh "mkdir -p reports ${apiProject} ${uiProject}"
def testImage = docker.image(cvpImageName)
def testImageOptions = "-u root:root --network=host -v ${env.WORKSPACE}/reports:/var/lib/qa_reports --entrypoint=''"
+ withCredentials([
+ [$class : 'UsernamePasswordMultiBinding',
+ credentialsId : 'scale-ci',
+ passwordVariable: 'JENKINS_PASSWORD',
+ usernameVariable: 'JENKINS_USER']
+ ]) {
+ env.JENKINS_USER = JENKINS_USER
+ env.JENKINS_PASSWORD = JENKINS_PASSWORD
+ }
try {
stage("checkout") {
if (event) {
@@ -137,7 +149,7 @@
dir(apiProject) {
python.runVirtualenvCommand("${env.WORKSPACE}/venv",
- "export IMAGE=${apiImage.id}; ./bootstrap_env.sh up")
+ "export IMAGE=${apiImage.id}; export DOCKER_COMPOSE=docker-compose-test.yml; ./bootstrap_env.sh up")
common.retry(5, 20) {
sh 'curl -v http://127.0.0.1:8001/api/v1 > /dev/null'
}
@@ -158,7 +170,7 @@
export TEST_PASSWORD=default
export TEST_MODELD_URL=127.0.0.1
export TEST_MODELD_PORT=3000
- export TEST_TIMEOUT=30
+ export TEST_TIMEOUT=15
cd /var/lib/trymcp-tests
pytest ${component}
"""
diff --git a/update-glusterfs-clients.groovy b/update-glusterfs-clients.groovy
new file mode 100644
index 0000000..02e889a
--- /dev/null
+++ b/update-glusterfs-clients.groovy
@@ -0,0 +1,119 @@
+/**
+ * Update glusterfs-client packages on given nodes
+ *
+ * Expected parameters:
+ * DRIVE_TRAIN_PARAMS Yaml, DriveTrain related params:
+ * SALT_MASTER_CREDENTIALS Credentials to the Salt API
+ * SALT_MASTER_URL Full Salt API address [https://10.10.10.1:8000]
+ * IGNORE_SERVER_STATUS Does not validate server availability/status before update
+ * IGNORE_SERVER_VERSION Does not validate that all servers have been updated
+ * TARGET_SERVERS Salt compound target to match nodes to be updated [*, G@osfamily:debian]
+ */
+
+// Convert parameters from yaml to env variables
+params = readYaml text: env.DRIVE_TRAIN_PARAMS
+for (key in params.keySet()) {
+ value = params[key]
+ env.setProperty(key, value)
+}
+
+@NonCPS
+def getNextNode() {
+ for (n in hudson.model.Hudson.instance.slaves) {
+ node_name = n.getNodeName()
+ if (node_name != env.SLAVE_NAME) {
+ return node_name
+ }
+ }
+}
+
+def update() {
+ def pEnv = "pepperEnv"
+ def salt = new com.mirantis.mk.Salt()
+ def common = new com.mirantis.mk.Common()
+ def python = new com.mirantis.mk.Python()
+ def pkg_name = 'glusterfs-client'
+
+ /**
+ * - choose only those hosts where an update is available. Exclude the minion the job is running on
+ * - validate that all glusterfs servers are in a normal working state. Can be skipped with option
+ * - validate that glusterfs on all servers has been updated, otherwise stop update. Can be skipped with option
+ * - run update state on one client at a time
+ */
+
+ try {
+
+ stage('Setup virtualenv for Pepper') {
+ python.setupPepperVirtualenv(pEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+ }
+
+ stage('List target servers') {
+ all_minions = salt.getMinions(pEnv, TARGET_SERVERS)
+
+ if (all_minions.isEmpty()) {
+ throw new Exception("No minion was targeted")
+ }
+
+ minions = []
+ for (minion in all_minions) {
+ latest_version = salt.getReturnValues(salt.runSaltProcessStep(pEnv, minion, 'pkg.latest_version', [pkg_name, 'show_installed=True'])).split('\n')[0]
+ current_version = salt.getReturnValues(salt.runSaltProcessStep(pEnv, minion, 'pkg.version', [pkg_name])).split('\n')[0]
+ slave_container_id = salt.getReturnValues(salt.cmdRun(pEnv, minion, "which docker >/dev/null && docker ps --filter name=jenkins_${env.NODE_NAME} --filter status=running -q", false)).split('\n')[0]
+ if (latest_version != current_version) {
+ if (!slave_container_id.isEmpty() && !minion.startsWith('cfg')) {
+ env.SLAVE_NAME = env.NODE_NAME
+ env.SLAVE_MINION = minion
+ } else {
+ minions.add(minion)
+ }
+ } else {
+ common.infoMsg("${pkg_name} has been already upgraded or newer version is not available on ${minion}. Skip upgrade")
+ }
+ }
+ }
+ if (!minions.isEmpty()) {
+ if (!IGNORE_SERVER_STATUS.toBoolean()){
+ stage('Validate servers availability') {
+ salt.commandStatus(pEnv, 'I@glusterfs:server', "gluster pool list | fgrep localhost", 'Connected', true, true, null, true, 1)
+ common.successMsg("All glusterfs servers are available")
+ }
+ } else {
+ common.warningMsg("Check of glusterfs servers availability has been disabled")
+ }
+ if (!IGNORE_SERVER_VERSION.toBoolean()){
+ stage('Check that all glusterfs servers have been updated') {
+ latest_version = salt.getReturnValues(salt.runSaltProcessStep(pEnv, minions[0], 'pkg.latest_version', [pkg_name, 'show_installed=True'])).split('\n')[0].split('-')[0]
+ salt.commandStatus(pEnv, 'I@glusterfs:server', "glusterfsd --version | head -n1 | awk '{print \$2}' | egrep '^${latest_version}' || echo none", latest_version, true, true, null, true, 1)
+ common.successMsg('All glusterfs servers have been updated to desired version')
+ }
+ } else {
+ common.warningMsg("Check of glusterfs servers' version has been disabled")
+ }
+ // Actual update
+ for (tgt in minions) {
+ stage("Update glusterfs on ${tgt}") {
+ salt.runSaltProcessStep(pEnv, tgt, 'state.apply', ['glusterfs.update.client'])
+ }
+ }
+ } else if (env.SLAVE_MINION == null) {
+ common.warningMsg("No hosts to update glusterfs on")
+ }
+ } catch (Throwable e) {
+ // If there was an error or exception thrown, the build failed
+ currentBuild.result = "FAILURE"
+ currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+ salt.runSaltProcessStep(pEnv, TARGET_SERVERS, 'state.apply', ['glusterfs'])
+ throw e
+ }
+}
+timeout(time: 12, unit: 'HOURS') {
+ node() {
+ update()
+ }
+ // Perform an update from another slave to finish update on previous slave host
+ if (env.SLAVE_NAME != null && !env.SLAVE_NAME.isEmpty()) {
+ node(getNextNode()) {
+ update()
+ }
+ }
+}
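All of the new update-glusterfs jobs take a single `DRIVE_TRAIN_PARAMS` text parameter holding flat YAML and promote each key to an environment variable via `readYaml`/`env.setProperty`. A hypothetical caller could therefore trigger this job like so (the credentials ID and values are illustrative, not taken from the source):

```groovy
// Illustrative trigger; every knob travels inside the DRIVE_TRAIN_PARAMS YAML.
build(job: 'update-glusterfs-clients', parameters: [
    text(name: 'DRIVE_TRAIN_PARAMS', value: '''
SALT_MASTER_URL: https://10.10.10.1:8000
SALT_MASTER_CREDENTIALS: salt-api-credentials
IGNORE_SERVER_STATUS: false
IGNORE_SERVER_VERSION: false
TARGET_SERVERS: 'I@glusterfs:client'
'''.trim())
])
```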
diff --git a/update-glusterfs-cluster-op-version.groovy b/update-glusterfs-cluster-op-version.groovy
new file mode 100644
index 0000000..9623481
--- /dev/null
+++ b/update-glusterfs-cluster-op-version.groovy
@@ -0,0 +1,110 @@
+/**
+ * Update GlusterFS cluster.op-version
+ *
+ * Expected parameters:
+ * DRIVE_TRAIN_PARAMS Yaml, DriveTrain related params:
+ * SALT_MASTER_CREDENTIALS Credentials to the Salt API
+ * SALT_MASTER_URL Full Salt API address [https://10.10.10.1:8000]
+ * IGNORE_CLIENT_VERSION Does not validate that all clients have been updated
+ * IGNORE_SERVER_VERSION Does not validate that all servers have been updated
+ * CLUSTER_OP_VERSION GlusterFS cluster.op-version option to set. Default is to be set to current cluster.max-op-version if available.
+ */
+
+def pEnv = "pepperEnv"
+def salt = new com.mirantis.mk.Salt()
+def common = new com.mirantis.mk.Common()
+def python = new com.mirantis.mk.Python()
+
+// Convert parameters from yaml to env variables
+params = readYaml text: env.DRIVE_TRAIN_PARAMS
+for (key in params.keySet()) {
+ value = params[key]
+ env.setProperty(key, value)
+}
+
+/**
+ * - ensure that cluster.op-version can be updated
+ * - check that all servers have been updated to a version no less than CLUSTER_OP_VERSION or cluster.max-op-version
+ * - check that all clients have been updated to a version no less than CLUSTER_OP_VERSION or cluster.max-op-version
+ * - set cluster.op-version
+ */
+
+/**
+ * Convert glusterfs' cluster.op-version to regular version string
+ *
+ * @param version string representing cluster.op-version, e.g. 50400
+ * @return string version number, e.g. 5.4.0
+ */
+def convertVersion(version) {
+ new_version = version[0]
+ for (i=1;i<version.length();i++) {
+ if (i%2 == 0) {
+ new_version += version[i]
+ } else if (version[i] == '0') {
+ new_version += '.'
+ } else {
+ new_version += '.' + version[i]
+ }
+ }
+ return new_version
+}
+
+timeout(time: 12, unit: 'HOURS') {
+ node() {
+ try {
+
+ stage('Setup virtualenv for Pepper') {
+ python.setupPepperVirtualenv(pEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+ }
+ stage('Get current cluster.op-version') {
+ volume = salt.getReturnValues(salt.cmdRun(pEnv, 'I@glusterfs:server:role:primary', "gluster volume list")).split('\n')[0]
+ currentOpVersion = salt.getReturnValues(salt.cmdRun(pEnv, 'I@glusterfs:server:role:primary', "gluster volume get ${volume} cluster.op-version | grep cluster.op-version | awk '{print \$2}'")).split('\n')[0]
+ }
+ if (CLUSTER_OP_VERSION.isEmpty()) {
+ stage('Get cluster.max-op-version') {
+ CLUSTER_OP_VERSION = salt.getReturnValues(salt.cmdRun(pEnv, 'I@glusterfs:server:role:primary', "gluster volume get all cluster.max-op-version 2>/dev/null | grep cluster.max-op-version | awk '{print \$2}'")).split('\n')[0]
+ }
+ }
+ if (CLUSTER_OP_VERSION.isEmpty() || CLUSTER_OP_VERSION.length() != 5) {
+ msg = 'No cluster.op-version specified to set'
+ common.errorMsg(msg)
+ currentBuild.result = "FAILURE"
+ currentBuild.description = msg
+ } else if (currentOpVersion == CLUSTER_OP_VERSION) {
+ common.warningMsg("cluster.op-version is already set to ${currentOpVersion}")
+ } else {
+ version = convertVersion(CLUSTER_OP_VERSION)
+ if (!IGNORE_SERVER_VERSION.toBoolean()){
+ stage('Check that all servers have been updated') {
+ salt.commandStatus(pEnv, 'I@glusterfs:server', "dpkg --compare-versions \$(glusterfsd --version | head -n1| awk '{print \$2}') gt ${version} && echo good", 'good', true, true, null, true, 1)
+ common.successMsg('All servers have been updated to desired version')
+ }
+ } else {
+ common.warningMsg("Check of servers' version has been disabled")
+ }
+ if (!IGNORE_CLIENT_VERSION.toBoolean()){
+ stage('Check that all clients have been updated') {
+ salt.commandStatus(pEnv, 'I@glusterfs:client', "dpkg --compare-versions \$(glusterfsd --version | head -n1| awk '{print \$2}') gt ${version} && echo good", 'good', true, true, null, true, 1)
+ common.successMsg('All clients have been updated to desired version')
+ }
+ } else {
+ common.warningMsg("Check of clients' version has been disabled")
+ }
+ stage("Update cluster.op-version") {
+ salt.cmdRun(pEnv, 'I@glusterfs:server:role:primary', "gluster volume set all cluster.op-version ${CLUSTER_OP_VERSION}")
+ }
+ stage("Validate cluster.op-version") {
+ newOpVersion = salt.getReturnValues(salt.cmdRun(pEnv, 'I@glusterfs:server:role:primary', "gluster volume get ${volume} cluster.op-version | grep cluster.op-version | awk '{print \$2}'")).split('\n')[0]
+ if (newOpVersion != CLUSTER_OP_VERSION) {
+ throw new Exception("cluster.op-version was not set to ${CLUSTER_OP_VERSION}")
+ }
+ }
+ }
+ } catch (Throwable e) {
+ // If there was an error or exception thrown, the build failed
+ currentBuild.result = "FAILURE"
+ currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+ throw e
+ }
+ }
+}
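A worked example may clarify `convertVersion` above: GlusterFS packs `cluster.op-version` into digits where every odd position is a separator slot, `0` meaning a bare dot and any other digit meaning a dot plus that digit. Runnable as a sanity check if pasted next to the function:

```groovy
// 50400 -> glusterfs 5.4.0 (both separator slots are '0', i.e. plain dots)
assert convertVersion('50400') == '5.4.0'
// 31302 -> glusterfs 3.13.2 (the non-zero separator '1' contributes '.1',
// so the '3', '1', '3' digits collapse into the '3.13' component)
assert convertVersion('31302') == '3.13.2'
```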
diff --git a/update-glusterfs-servers.groovy b/update-glusterfs-servers.groovy
new file mode 100644
index 0000000..23b280d
--- /dev/null
+++ b/update-glusterfs-servers.groovy
@@ -0,0 +1,92 @@
+/**
+ * Update glusterfs-server packages on given server nodes
+ *
+ * Expected parameters:
+ * DRIVE_TRAIN_PARAMS Yaml, DriveTrain related params:
+ * SALT_MASTER_CREDENTIALS Credentials to the Salt API
+ * SALT_MASTER_URL Full Salt API address [https://10.10.10.1:8000]
+ * IGNORE_SERVER_STATUS Does not validate server availability/status before update
+ * IGNORE_NON_REPLICATED_VOLUMES Update GlusterFS even if there are non-replicated volume(s)
+ * TARGET_SERVERS Salt compound target to match nodes to be updated [*, G@osfamily:debian]
+ */
+
+def pEnv = "pepperEnv"
+def salt = new com.mirantis.mk.Salt()
+def common = new com.mirantis.mk.Common()
+def python = new com.mirantis.mk.Python()
+def pkg_name = 'glusterfs-server'
+
+// Convert parameters from yaml to env variables
+params = readYaml text: env.DRIVE_TRAIN_PARAMS
+for (key in params.keySet()) {
+ value = params[key]
+ env.setProperty(key, value)
+}
+
+/**
+ * - choose only those hosts where an update is available
+ * - validate that all servers are in a normal working state. Can be skipped with option
+ * - validate all volumes are replicated. If there is a non-replicated volume, stop the update. Can be skipped with option
+ * - run update state on one server at a time
+ */
+
+timeout(time: 12, unit: 'HOURS') {
+ node() {
+ try {
+
+ stage('Setup virtualenv for Pepper') {
+ python.setupPepperVirtualenv(pEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+ }
+
+ stage('List target servers') {
+ all_minions = salt.getMinions(pEnv, TARGET_SERVERS)
+
+ if (all_minions.isEmpty()) {
+ throw new Exception("No minion was targeted")
+ }
+ minions = []
+ for (minion in all_minions) {
+ latest_version = salt.getReturnValues(salt.runSaltProcessStep(pEnv, minion, 'pkg.latest_version', [pkg_name, 'show_installed=True'])).split('\n')[0]
+ current_version = salt.getReturnValues(salt.runSaltProcessStep(pEnv, minion, 'pkg.version', [pkg_name])).split('\n')[0]
+ if (latest_version != current_version) {
+ minions.add(minion)
+ } else {
+ common.infoMsg("${pkg_name} has been already upgraded or newer version is not available on ${minion}. Skip upgrade")
+ }
+ }
+ }
+ if (!minions.isEmpty()) {
+ if (!IGNORE_SERVER_STATUS.toBoolean()){
+ stage('Validate servers availability') {
+ salt.commandStatus(pEnv, TARGET_SERVERS, "gluster pool list | fgrep localhost", 'Connected', true, true, null, true, 1)
+ common.successMsg("All servers are available")
+ }
+ } else {
+ common.warningMsg("Check of servers availability has been disabled")
+ }
+ if (!IGNORE_NON_REPLICATED_VOLUMES.toBoolean()){
+ stage('Check that all volumes are replicated') {
+ salt.commandStatus(pEnv, TARGET_SERVERS, "gluster volume info | fgrep 'Type:' | fgrep -v Replicate", null, false, true, null, true, 1)
+ common.successMsg("All volumes are replicated")
+ }
+ } else {
+ common.warningMsg("Check of volumes' replication has been disabled. Be aware, you may lost data during update!")
+ }
+ // Actual update
+ for (tgt in minions) {
+ stage("Update glusterfs on ${tgt}") {
+ salt.runSaltProcessStep(pEnv, tgt, 'state.apply', ['glusterfs.update.server'])
+ }
+ }
+ } else {
+ common.warningMsg("No hosts to update glusterfs on")
+ }
+ } catch (Throwable e) {
+ // If there was an error or exception thrown, the build failed
+ currentBuild.result = "FAILURE"
+ currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+ salt.runSaltProcessStep(pEnv, TARGET_SERVERS, 'state.apply', ['glusterfs'])
+ throw e
+ }
+ }
+}
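The server and client pipelines gate each minion on the same comparison: update only when `pkg.latest_version` (queried with `show_installed=True`) differs from `pkg.version`. Extracted into a hypothetical predicate, under the assumption (as in the pipelines) that both calls return the version string as their first output line:

```groovy
// True when a different (newer) package version is available on the minion.
def needsUpdate(pEnv, salt, String minion, String pkgName) {
    def latest = salt.getReturnValues(salt.runSaltProcessStep(
        pEnv, minion, 'pkg.latest_version', [pkgName, 'show_installed=True'])).split('\n')[0]
    def current = salt.getReturnValues(salt.runSaltProcessStep(
        pEnv, minion, 'pkg.version', [pkgName])).split('\n')[0]
    return latest != current
}
```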
diff --git a/update-glusterfs.groovy b/update-glusterfs.groovy
new file mode 100644
index 0000000..67d3341
--- /dev/null
+++ b/update-glusterfs.groovy
@@ -0,0 +1,81 @@
+/**
+ * Complete update glusterfs pipeline
+ *
+ * Expected parameters:
+ * DRIVE_TRAIN_PARAMS Yaml, DriveTrain related params:
+ * SALT_MASTER_CREDENTIALS Credentials to the Salt API
+ * SALT_MASTER_URL Full Salt API address [https://10.10.10.1:8000]
+ */
+
+// Convert parameters from yaml to env variables
+params = readYaml text: env.DRIVE_TRAIN_PARAMS
+for (key in params.keySet()) {
+ value = params[key]
+ env.setProperty(key, value)
+}
+
+def waitGerrit(salt_target, wait_timeout) {
+ def salt = new com.mirantis.mk.Salt()
+ def common = new com.mirantis.mk.Common()
+ def python = new com.mirantis.mk.Python()
+ def pEnv = "pepperEnv"
+ python.setupPepperVirtualenv(pEnv, env.SALT_MASTER_URL, env.SALT_MASTER_CREDENTIALS)
+
+ salt.fullRefresh(pEnv, salt_target)
+
+ def gerrit_master_url = salt.getPillar(pEnv, salt_target, '_param:gerrit_master_url')
+
+ if(!gerrit_master_url['return'].isEmpty()) {
+ gerrit_master_url = gerrit_master_url['return'][0].values()[0]
+ } else {
+ gerrit_master_url = ''
+ }
+
+ if (gerrit_master_url != '') {
+ common.infoMsg('Gerrit master url "' + gerrit_master_url + '" retrieved at _param:gerrit_master_url')
+ } else {
+ common.infoMsg('Gerrit master url could not be retrieved at _param:gerrit_master_url. Falling back to gerrit pillar')
+
+ def gerrit_host
+ def gerrit_http_port
+ def gerrit_http_scheme
+ def gerrit_http_prefix
+
+ def host_pillar = salt.getPillar(pEnv, salt_target, 'gerrit:client:server:host')
+ gerrit_host = salt.getReturnValues(host_pillar)
+
+ def port_pillar = salt.getPillar(pEnv, salt_target, 'gerrit:client:server:http_port')
+ gerrit_http_port = salt.getReturnValues(port_pillar)
+
+ def scheme_pillar = salt.getPillar(pEnv, salt_target, 'gerrit:client:server:protocol')
+ gerrit_http_scheme = salt.getReturnValues(scheme_pillar)
+
+ def prefix_pillar = salt.getPillar(pEnv, salt_target, 'gerrit:client:server:url_prefix')
+ gerrit_http_prefix = salt.getReturnValues(prefix_pillar)
+
+ gerrit_master_url = gerrit_http_scheme + '://' + gerrit_host + ':' + gerrit_http_port + gerrit_http_prefix
+
+ }
+
+ timeout(wait_timeout) {
+ common.infoMsg('Waiting for Gerrit to come up..')
+ def check_gerrit_cmd = 'while true; do curl -sI -m 3 -o /dev/null -w' + " '" + '%{http_code}' + "' " + gerrit_master_url + '/ | grep 200 && break || sleep 1; done'
+ salt.cmdRun(pEnv, salt_target, 'timeout ' + (wait_timeout*60+3) + ' /bin/sh -c -- ' + '"' + check_gerrit_cmd + '"')
+ }
+}
+
+node() {
+ stage('Update glusterfs servers') {
+ build(job: 'update-glusterfs-servers')
+ }
+ sleep 180
+ stage('Update glusterfs clients') {
+ build(job: 'update-glusterfs-clients')
+ }
+}
+node() {
+ waitGerrit('I@gerrit:client', 300)
+ stage('Update glusterfs cluster.op-version') {
+ build(job: 'update-glusterfs-cluster-op-version')
+ }
+}
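`waitGerrit` above double-bounds its busy-wait: the Jenkins `timeout(wait_timeout)` caps the step in minutes, while the remote `timeout` of `wait_timeout*60+3` seconds kills the shell loop a few seconds later, so the loop cannot linger on the minion after the build gives up. For an illustrative URL, the command shipped to the target resolves to roughly:

```groovy
// Sketch of the remote command waitGerrit assembles (values illustrative).
def gerritMasterUrl = 'http://10.0.0.5:8080'
def waitTimeout = 300  // minutes, as passed by the caller below
def checkGerritCmd = "while true; do curl -sI -m 3 -o /dev/null -w '%{http_code}' " +
    gerritMasterUrl + '/ | grep 200 && break || sleep 1; done'
// -> timeout 18003 /bin/sh -c -- "while true; do curl ... done"
println('timeout ' + (waitTimeout * 60 + 3) + ' /bin/sh -c -- "' + checkGerritCmd + '"')
```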
diff --git a/upgrade-mcp-release.groovy b/upgrade-mcp-release.groovy
index d1614eb..3a55011 100644
--- a/upgrade-mcp-release.groovy
+++ b/upgrade-mcp-release.groovy
@@ -22,14 +22,25 @@
venvPepper = "venvPepper"
workspace = ""
-def triggerMirrorJob(jobName) {
+def triggerMirrorJob(String jobName, String reclassSystemBranch) {
params = jenkinsUtils.getJobParameters(jobName)
- build job: jobName, parameters: [
- [$class: 'StringParameterValue', name: 'BRANCHES', value: params.get("BRANCHES")],
- [$class: 'StringParameterValue', name: 'CREDENTIALS_ID', value: params.get("CREDENTIALS_ID")],
- [$class: 'StringParameterValue', name: 'SOURCE_URL', value: params.get("SOURCE_URL")],
- [$class: 'StringParameterValue', name: 'TARGET_URL', value: params.get("TARGET_URL")]
- ]
+ try {
+ build job: jobName, parameters: [
+ [$class: 'StringParameterValue', name: 'BRANCHES', value: params.get('BRANCHES')],
+ [$class: 'StringParameterValue', name: 'CREDENTIALS_ID', value: params.get('CREDENTIALS_ID')],
+ [$class: 'StringParameterValue', name: 'SOURCE_URL', value: params.get('SOURCE_URL')],
+ [$class: 'StringParameterValue', name: 'TARGET_URL', value: params.get('TARGET_URL')]
+ ]
+ } catch (Exception updateErr) {
+ common.warningMsg(updateErr)
+ common.warningMsg('Attempt to update git repo in failsafe manner')
+ build job: jobName, parameters: [
+ [$class: 'StringParameterValue', name: 'BRANCHES', value: reclassSystemBranch.replace('origin/', '')],
+ [$class: 'StringParameterValue', name: 'CREDENTIALS_ID', value: params.get('CREDENTIALS_ID')],
+ [$class: 'StringParameterValue', name: 'SOURCE_URL', value: params.get('SOURCE_URL')],
+ [$class: 'StringParameterValue', name: 'TARGET_URL', value: params.get('TARGET_URL')]
+ ]
+ }
}
def updateSaltStack(target, pkgs) {
@@ -58,64 +69,179 @@
}
}
+def wa29352(ArrayList saltMinions, String cname) {
+ // WA for PROD-29352. The issue is caused by patch https://gerrit.mcp.mirantis.com/#/c/37932/12/openssh/client/root.yml
+ // The default soft-param has been removed, which makes it impossible to render some old envs.
+ // As a fix, copy the already generated key from backups into secrets.yml under the correct key name.
+ def wa29352ClassName = 'cluster.' + cname + '.infra.secrets_root_wa29352'
+ def wa29352File = "/srv/salt/reclass/classes/cluster/${cname}/infra/secrets_root_wa29352.yml"
+ def wa29352SecretsFile = "/srv/salt/reclass/classes/cluster/${cname}/infra/secrets.yml"
+ def _tempFile = '/tmp/wa29352_' + UUID.randomUUID().toString().take(8)
+ try {
+ salt.cmdRun(venvPepper, 'I@salt:master', "grep -qiv root_private_key ${wa29352SecretsFile}", true, null, false)
+ salt.cmdRun(venvPepper, 'I@salt:master', "test ! -f ${wa29352File}", true, null, false)
+ }
+ catch (Exception ex) {
+ common.infoMsg('Work-around for PROD-29352 already applied, nothing to do')
+ return
+ }
+ def rKeysDict = [
+ 'parameters': [
+ '_param': [
+ 'root_private_key': salt.getPillar(venvPepper, 'I@salt:master', '_param:root_private_key').get('return')[0].values()[0].trim(),
+ 'root_public_key' : '',
+ ]
+ ]
+ ]
+ // save root key, and generate a public one from it
+ writeFile file: _tempFile, text: rKeysDict['parameters']['_param']['root_private_key'].toString().trim()
+ sh('chmod 0600 ' + _tempFile)
+ rKeysDict['parameters']['_param']['root_public_key'] = sh(script: "ssh-keygen -q -y -f ${_tempFile}", returnStdout: true).trim()
+ sh('rm -fv ' + _tempFile)
+ writeYaml file: _tempFile, data: rKeysDict
+ def yamlData = sh(script: "cat ${_tempFile} | base64", returnStdout: true).trim()
+ salt.cmdRun(venvPepper, 'I@salt:master', "echo '${yamlData}' | base64 -d > ${wa29352File}", false, null, false)
+ common.infoMsg("Add $wa29352ClassName class into secrets.yml")
+
+ // Add 'classes:' directive
+ salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cname && " +
+ "grep -q 'classes:' infra/secrets.yml || sed -i '1iclasses:' infra/secrets.yml")
+
+ salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cname && " +
+ "grep -q '${wa29352ClassName}' infra/secrets.yml || sed -i '/classes:/ a - $wa29352ClassName' infra/secrets.yml")
+ salt.fullRefresh(venvPepper, '*')
+ sh('rm -fv ' + _tempFile)
+}
+
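The work-around above ships a generated YAML file to the Salt master by base64-encoding it locally and decoding it in the remote shell, which sidesteps the quoting problems that multi-line YAML would cause inside `cmdRun`. The transfer idiom in isolation (the payload and destination path are illustrative placeholders):

```groovy
// Sketch: move a locally generated YAML file onto a minion via base64.
def payload = [parameters: ['_param': [root_public_key: '<generated key>']]]  // illustrative
def tmpFile = '/tmp/wa_' + UUID.randomUUID().toString().take(8)
writeYaml file: tmpFile, data: payload
def encoded = sh(script: "base64 < ${tmpFile}", returnStdout: true).trim()
// base64 keeps the remote command a single safe token regardless of content.
salt.cmdRun(venvPepper, 'I@salt:master',
    "echo '${encoded}' | base64 -d > /srv/salt/reclass/classes/cluster/<cname>/infra/secrets_wa.yml",
    false, null, false)
sh("rm -f ${tmpFile}")
```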
+def wa29155(ArrayList saltMinions, String cname) {
+ // WA for PROD-29155. The issue is caused by patch https://gerrit.mcp.mirantis.com/#/c/37932/
+ // Check for the existence of cmp nodes and try to render them. If that fails, apply the ssh-key work-around.
+ def ret = ''
+ def patched = false
+ def wa29155ClassName = 'cluster.' + cname + '.infra.secrets_nova_wa29155'
+ def wa29155File = "/srv/salt/reclass/classes/cluster/${cname}/infra/secrets_nova_wa29155.yml"
+
+ try {
+ salt.cmdRun(venvPepper, 'I@salt:master', "test ! -f ${wa29155File}", true, null, false)
+ }
+ catch (Exception ex) {
+ common.infoMsg('Work-around for PROD-29155 already applied, nothing to do')
+ return
+ }
+ salt.fullRefresh(venvPepper, 'cfg*')
+ salt.fullRefresh(venvPepper, 'cmp*')
+ for (String minion in saltMinions) {
+ if (!minion.startsWith('cmp')) {
+ continue
+ }
+ // First attempt, second will be performed in next validateReclassModel() stages
+ try {
+ salt.cmdRun(venvPepper, 'I@salt:master', "reclass -n ${minion}", true, null, false).get('return')[0].values()[0].replaceAll('Salt command execution success', '').trim()
+ } catch (Exception e) {
+ common.errorMsg(e.toString())
+ if (patched) {
+ error("Node: ${minion} failed to render after reclass-system upgrade!WA29155 probably didn't help.")
+ }
+ // check that it failed exactly in our case, via the key-length check.
+ def missed_key = salt.getPillar(venvPepper, minion, '_param:nova_compute_ssh_private').get("return")[0].values()[0]
+ if (missed_key != '') {
+ error("Node: ${minion} failed to render after reclass-system upgrade!")
+ }
+ common.warningMsg('Perform: Attempt to apply WA for PROD-29155\n' +
+ 'See https://gerrit.mcp.mirantis.com/#/c/37932/ for more info')
+ common.warningMsg('WA-PROD-29155 Generating new ssh key at master node')
+ def _tempFile = "/tmp/nova_wa29155_" + UUID.randomUUID().toString().take(8)
+ common.infoMsg('Perform: generating a NEW ssh private key for nova-compute')
+ salt.cmdRun(venvPepper, 'I@salt:master', "ssh-keygen -f ${_tempFile} -N '' -q")
+ def _pub_k = salt.runSaltProcessStep(venvPepper, 'I@salt:master', 'cmd.run', "cat ${_tempFile}.pub").get('return')[0].values()[0].trim()
+ def _priv_k = salt.runSaltProcessStep(venvPepper, 'I@salt:master', 'cmd.run', "cat ${_tempFile}").get('return')[0].values()[0].trim()
+ salt.cmdRun(venvPepper, 'I@salt:master', "rm -fv ${_tempFile}", false, null, false)
+ def novaKeysDict = [
+ "parameters": [
+ "_param": [
+ "nova_compute_ssh_private": _priv_k,
+ "nova_compute_ssh_public" : _pub_k
+ ]
+ ]
+ ]
+ writeYaml file: _tempFile, data: novaKeysDict
+ def yamlData = sh(script: "cat ${_tempFile} | base64", returnStdout: true).trim()
+ salt.cmdRun(venvPepper, 'I@salt:master', "echo '${yamlData}' | base64 -d > ${wa29155File}", false, null, false)
+ common.infoMsg("Add $wa29155ClassName class into secrets.yml")
+
+ // Add 'classes:' directive
+ salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cname && " +
+ "grep -q 'classes:' infra/secrets.yml || sed -i '1iclasses:' infra/secrets.yml")
+
+ salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cname && " +
+ "grep -q '${wa29155ClassName}' infra/secrets.yml || sed -i '/classes:/ a - $wa29155ClassName' infra/secrets.yml")
+ salt.fullRefresh(venvPepper, 'cfg*')
+ salt.fullRefresh(venvPepper, 'cmp*')
+ patched = true
+ }
+ }
+ if (patched) {
+ salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cname && git status && " +
+ "git add ${wa29155File} && git add -u && git commit --allow-empty -m 'Cluster model updated with WA for PROD-29155. Issue cause due patch https://gerrit.mcp.mirantis.com/#/c/37932/ at ${common.getDatetime()}' ")
+ common.infoMsg('Work-around for PROD-29155 successfully applied')
+ }
+
+}
+
def archiveReclassInventory(filename) {
- def ret = salt.cmdRun(venvPepper, 'I@salt:master', "reclass -i", true, null, false)
- def reclassInv = ret.values()[0]
- writeFile file: filename, text: reclassInv.toString()
- archiveArtifacts artifacts: "$filename"
+ def _tmp_file = '/tmp/' + filename + UUID.randomUUID().toString().take(8)
+ // Jenkins may fail on heap overflow. Compress the data with gzip as a work-around
+ def ret = salt.cmdRun(venvPepper, 'I@salt:master', 'reclass -i 2>/dev/null | gzip -9 -c | base64', true, null, false).get('return')[0].values()[0].replaceAll('Salt command execution success', '').trim()
+ def _tmp = sh(script: "echo '$ret' > ${_tmp_file}", returnStdout: false)
+ sh(script: "cat ${_tmp_file} | base64 -d | gzip -d > $filename", returnStdout: false)
+ archiveArtifacts artifacts: filename
+ sh(script: "rm -v ${_tmp_file}|| true")
}
def validateReclassModel(ArrayList saltMinions, String suffix) {
try {
- dir(suffix) {
- for(String minion in saltMinions) {
- common.infoMsg("Reclass model validation for minion ${minion}...")
- def ret = salt.cmdRun("${workspace}/${venvPepper}", 'I@salt:master', "reclass -n ${minion}", true, null, false).get('return')[0].values()[0]
- writeFile file: minion, text: ret.toString()
- }
+ for (String minion in saltMinions) {
+ common.infoMsg("Reclass model validation for minion ${minion}...")
+ def reclassInv = salt.cmdRun(venvPepper, 'I@salt:master', "reclass -n ${minion}", true, null, false).get('return')[0].values()[0].replaceAll('Salt command execution success', '').trim()
+ writeFile file: "inventory-${minion}-${suffix}.out", text: reclassInv.toString()
}
} catch (Exception e) {
common.errorMsg('Can not validate current Reclass model. Inspect failed minion manually.')
- error(e)
+ error(e.toString())
}
}
-def archiveReclassModelChanges(ArrayList saltMinions, String oldSuffix, String newSuffix) {
- def diffDir = 'pillarsDiff'
- dir(diffDir) {
- for(String minion in saltMinions) {
- def fileName = "reclass-model-${minion}-diff.out"
- sh "diff -u ${workspace}/${oldSuffix}/${minion} ${workspace}/${newSuffix}/${minion} > ${fileName} || true"
- }
+def archiveReclassModelChanges(ArrayList saltMinions, String oldSuffix = 'before', String newSuffix = 'after') {
+ for (String minion in saltMinions) {
+ def fileName = "reclass-model-${minion}-diff.out"
+ sh "diff -u inventory-${minion}-${oldSuffix}.out inventory-${minion}-${newSuffix}.out > ${fileName} || true"
+ archiveArtifacts artifacts: "${fileName}"
}
- archiveArtifacts artifacts: "${oldSuffix}/*"
- archiveArtifacts artifacts: "${newSuffix}/*"
- archiveArtifacts artifacts: "${diffDir}/*"
}
if (common.validInputParam('PIPELINE_TIMEOUT')) {
try {
pipelineTimeout = env.PIPELINE_TIMEOUT.toInteger()
- } catch(Exception e) {
+ } catch (Exception e) {
common.warningMsg("Provided PIPELINE_TIMEOUT parameter has invalid value: ${env.PIPELINE_TIMEOUT} - should be interger")
}
}
timeout(time: pipelineTimeout, unit: 'HOURS') {
- node("python && docker") {
+ node("python") {
try {
+ def inventoryBeforeFilename = "reclass-inventory-before.out"
+ def inventoryAfterFilename = "reclass-inventory-after.out"
workspace = common.getWorkspace()
- deleteDir()
targetMcpVersion = null
if (!common.validInputParam('TARGET_MCP_VERSION') && !common.validInputParam('MCP_VERSION')) {
error('You must specify MCP version in TARGET_MCP_VERSION|MCP_VERSION variable')
}
// bw comp. for 2018.X => 2018.11 release
- if (common.validInputParam('MCP_VERSION')){
+ if (common.validInputParam('MCP_VERSION')) {
targetMcpVersion = env.MCP_VERSION
common.warningMsg("targetMcpVersion has been changed to:${targetMcpVersion}, which was taken from deprecated pipeline viriable:MCP_VERSION")
- }
- else {
+ } else {
targetMcpVersion = env.TARGET_MCP_VERSION
}
// end bw comp. for 2018.X => 2018.11 release
@@ -160,26 +286,27 @@
updateLocalRepos = env.getProperty('UPDATE_LOCAL_REPOS').toBoolean()
reclassSystemBranch = reclassSystemBranchDefault
}
-
python.setupPepperVirtualenv(venvPepper, saltMastURL, saltMastCreds)
-
- def pillarsBeforeSuffix = 'pillarsBefore'
- def pillarsAfterSuffix = 'pillarsAfter'
- def inventoryBeforeFilename = "reclass-inventory-before.out"
- def inventoryAfterFilename = "reclass-inventory-after.out"
-
def minions = salt.getMinions(venvPepper, '*')
def cluster_name = salt.getPillar(venvPepper, 'I@salt:master', "_param:cluster_name").get("return")[0].values()[0]
+ if (cluster_name == '' || cluster_name == 'null' || cluster_name == null) {
+ error('Pillar data is broken for Salt master node! Please check it manually and re-run pipeline.')
+ }
- stage("Update Reclass and Salt-Formulas ") {
- validateReclassModel(minions, pillarsBeforeSuffix)
+ stage('Update Reclass and Salt-Formulas') {
+ common.infoMsg('Perform: Full salt sync')
+ salt.fullRefresh(venvPepper, '*')
+ common.infoMsg('Perform: Validate reclass metadata before processing')
+ validateReclassModel(minions, 'before')
+
+ common.infoMsg('Perform: archiveReclassInventory before upgrade')
archiveReclassInventory(inventoryBeforeFilename)
try {
- salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/ && git diff-index --quiet HEAD --")
+ salt.cmdRun(venvPepper, 'I@salt:master', 'cd /srv/salt/reclass/ && git status && git diff-index --quiet HEAD --')
}
catch (Exception ex) {
- error("You have uncommited changes in your Reclass cluster model repository. Please commit or reset them and rerun the pipeline.")
+ error('You have uncommitted changes in your Reclass cluster model repository. Please commit or reset them and rerun the pipeline.')
}
if (updateClusterModel) {
common.infoMsg('Perform: UPDATE_CLUSTER_MODEL')
@@ -202,7 +329,28 @@
salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && " +
"grep -r --exclude-dir=aptly -l 'system.linux.system.repo.mcp.updates' * | xargs --no-run-if-empty sed -i 's/system.linux.system.repo.mcp.updates/system.linux.system.repo.mcp.apt_mirantis.update/g'")
salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && " +
- "grep -r --exclude-dir=aptly -l 'system.linux.system.repo.mcp.extra' * | xargs --no-run-if-empty sed -i 's/system.linux.system.repo.mcp.extra/system.linux.system.repo.mcp.apt_mirantis.extra/g'")
+ "grep -r --exclude-dir=aptly -l 'system.linux.system.repo.mcp.extra' * | xargs --no-run-if-empty sed -i 's/system.linux.system.repo.mcp.extra/system.linux.system.repo.mcp.apt_mirantis.extra/g'")
+
+ // Switch Jenkins/Gerrit to use LDAP SSL/TLS
+ def gerritldapURI = salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && " +
+ "grep -r --exclude-dir=aptly 'gerrit_ldap_server: .*' * | grep -Po 'gerrit_ldap_server: \\K.*' | tr -d '\"'", true, null, false).get('return')[0].values()[0].replaceAll('Salt command execution success', '').trim()
+ if (gerritldapURI.startsWith('ldap://')) {
+ salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && " +
+ "grep -r --exclude-dir=aptly -l 'gerrit_ldap_server: .*' * | xargs --no-run-if-empty sed -i 's|ldap://|ldaps://|g'")
+ } else if (! gerritldapURI.startsWith('ldaps://')) {
+ salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && " +
+ "grep -r --exclude-dir=aptly -l 'gerrit_ldap_server: .*' * | xargs --no-run-if-empty sed -i 's|gerrit_ldap_server: .*|gerrit_ldap_server: \"ldaps://${gerritldapURI}\"|g'")
+ }
+ def jenkinsldapURI = salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && " +
+ "grep -r --exclude-dir=aptly 'jenkins_security_ldap_server: .*' * | grep -Po 'jenkins_security_ldap_server: \\K.*' | tr -d '\"'", true, null, false).get('return')[0].values()[0].replaceAll('Salt command execution success', '').trim()
+ if (jenkinsldapURI.startsWith('ldap://')) {
+ salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && " +
+ "grep -r --exclude-dir=aptly -l 'jenkins_security_ldap_server: .*' * | xargs --no-run-if-empty sed -i 's|ldap://|ldaps://|g'")
+ } else if (! jenkinsldapURI.startsWith('ldaps://')) {
+ salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && " +
+ "grep -r --exclude-dir=aptly -l 'jenkins_security_ldap_server: .*' * | xargs --no-run-if-empty sed -i 's|jenkins_security_ldap_server: .*|jenkins_security_ldap_server: \"ldaps://${jenkinsldapURI}\"|g'")
+ }
+
salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/system && git checkout ${reclassSystemBranch}")
// Add kubernetes-extra repo
if (salt.testTarget(venvPepper, "I@kubernetes:master")) {
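The LDAP switch above encodes a three-way decision with grep/sed over the model files: an `ldap://` URI gets its scheme upgraded in place, a value already on `ldaps://` is left untouched, and anything else (a bare host) is wrapped into an `ldaps://` URL. The same decision restated as plain Groovy, purely for clarity:

```groovy
// Pure-Groovy restatement of the ldap -> ldaps normalization above.
def toLdaps(String uri) {
    if (uri.startsWith('ldaps://')) {
        return uri                                       // already secure
    } else if (uri.startsWith('ldap://')) {
        return uri.replaceFirst('ldap://', 'ldaps://')   // upgrade scheme
    }
    return "ldaps://${uri}"                              // bare host: wrap it
}
assert toLdaps('ldap://ldap.example.com')  == 'ldaps://ldap.example.com'
assert toLdaps('ldaps://ldap.example.com') == 'ldaps://ldap.example.com'
assert toLdaps('ldap.example.com')         == 'ldaps://ldap.example.com'
```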
@@ -217,7 +365,7 @@
}
// Add all update repositories
def repoIncludeBase = '- system.linux.system.repo.mcp.apt_mirantis.'
- def updateRepoList = [ 'cassandra', 'ceph', 'contrail', 'docker', 'elastic', 'extra', 'openstack', 'percona', 'salt-formulas', 'saltstack', 'ubuntu' ]
+ def updateRepoList = ['cassandra', 'ceph', 'contrail', 'docker', 'elastic', 'extra', 'openstack', 'percona', 'salt-formulas', 'saltstack', 'ubuntu']
updateRepoList.each { repo ->
def repoNameUpdateInclude = "${repoIncludeBase}update.${repo}"
def filesWithInclude = salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && grep -Plr '\\${repoIncludeBase}${repo}\$' . || true", false).get('return')[0].values()[0].trim().tokenize('\n')
@@ -226,7 +374,7 @@
if (updateRepoIncludeExist == 'not_found') {
// Include needs to be added
salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && " +
- "sed -i 's/\\( *\\)${repoIncludeBase}${repo}\$/&\\n\\1${repoNameUpdateInclude}/g' ${file}")
+ "sed -i 's/\\( *\\)${repoIncludeBase}${repo}\$/&\\n\\1${repoNameUpdateInclude}/g' ${file}")
common.infoMsg("Update repo for ${repo} is added to ${file}")
}
}
@@ -247,121 +395,22 @@
salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && git status && " +
"git add -u && git commit --allow-empty -m 'Cluster model update to the release $targetMcpVersion on $dateTime'")
}
-
- salt.runSaltProcessStep(venvPepper, 'I@salt:master', 'saltutil.refresh_pillar')
- try {
- salt.enforceState(venvPepper, 'I@salt:master', 'linux.system.repo')
- } catch (Exception e) {
- common.errorMsg("Something wrong with model after UPDATE_CLUSTER_MODEL step. Please check model.")
- throw e
- }
-
- common.infoMsg('Running a check for compatibility with new Reclass/Salt-Formulas packages')
- def saltModelDir = 'salt-model'
- def nodesArtifact = 'pillarsFromValidation.tar.gz'
- def reclassModel = 'reclassModel.tar.gz'
- def pillarsAfterValidation = 'pillarsFromValidation'
- try {
- def repos = salt.getPillar(venvPepper, 'I@salt:master', "linux:system:repo").get("return")[0].values()[0]
- def cfgInfo = salt.getPillar(venvPepper, 'I@salt:master', "reclass:storage:node:infra_cfg01_node").get("return")[0].values()[0]
- def docker_image_for_test = salt.getPillar(venvPepper, 'I@salt:master', "_param:docker_image_cvp_sanity_checks").get("return")[0].values()[0]
- def saltModelTesting = new com.mirantis.mk.SaltModelTesting()
- def config = [
- 'dockerHostname': "cfg01",
- 'distribRevision': "${targetMcpVersion}",
- 'baseRepoPreConfig': true,
- 'extraRepoMergeStrategy': 'override',
- 'dockerContainerName': 'new-reclass-package-check',
- 'dockerMaxCpus': 1,
- 'image': docker_image_for_test,
- 'dockerExtraOpts': [
- "-v ${env.WORKSPACE}/${saltModelDir}:/srv/salt/reclass",
- "--entrypoint ''",
- ],
- 'extraRepos': ['repo': repos, 'aprConfD': "APT::Get::AllowUnauthenticated 'true';" ],
- 'envOpts': [ "CLUSTER_NAME=${cluster_name}", "NODES_ARTIFACT_NAME=${nodesArtifact}" ]
- ]
- def tarName = '/tmp/currentModel.tar.gz'
- salt.cmdRun(venvPepper, 'I@salt:master', "tar -cf ${tarName} --mode='a+rwX' --directory=/srv/salt/reclass classes")
- if (cfgInfo == '') {
- // case for old setups when cfg01 node model was static
- def node_name = salt.getPillar(venvPepper, 'I@salt:master', "linux:system:name").get("return")[0].values()[0]
- def node_domain = salt.getPillar(venvPepper, 'I@salt:master', "linux:system:domain").get("return")[0].values()[0]
- salt.cmdRun(venvPepper, 'I@salt:master', "tar -rf ${tarName} --mode='a+rwX' --directory=/srv/salt/reclass nodes/${node_name}.${node_domain}.yml")
- config['envOpts'].add("CFG_NODE_NAME=${node_name}.${node_domain}")
- }
- def modelHash = salt.cmdRun(venvPepper, 'I@salt:master', "cat ${tarName} | gzip -9 -c | base64", false, null, false).get('return')[0].values()[0]
- writeFile file: 'modelHash', text: modelHash
- sh "cat modelHash | base64 -d | gzip -d > ${reclassModel}"
- sh "mkdir ${saltModelDir} && tar -xf ${reclassModel} -C ${saltModelDir}"
-
- config['runCommands'] = [
- '001_Install_Salt_Reclass_Packages': { sh('apt-get install -y reclass salt-formula-*') },
- '002_Get_new_nodes': {
- try {
- sh('''#!/bin/bash
- new_generated_dir=/srv/salt/_new_nodes
- new_pillar_dir=/srv/salt/_new_pillar
- reclass_classes=/srv/salt/reclass/classes/
- mkdir -p ${new_generated_dir} ${new_pillar_dir}
- nodegenerator -b ${reclass_classes} -o ${new_generated_dir} ${CLUSTER_NAME}
- for node in $(ls ${new_generated_dir}); do
- nodeName=$(basename -s .yml ${node})
- reclass -n ${nodeName} -c ${reclass_classes} -u ${new_generated_dir} > ${new_pillar_dir}/${nodeName}
- done
- if [[ -n "${CFG_NODE_NAME}" ]]; then
- reclass -n ${CFG_NODE_NAME} -c ${reclass_classes} -u /srv/salt/reclass/nodes > ${new_pillar_dir}/${CFG_NODE_NAME}
- fi
- tar -czf /tmp/${NODES_ARTIFACT_NAME} -C ${new_pillar_dir}/ .
- ''')
- } catch (Exception e) {
- print "Test new nodegenerator tool is failed: ${e}"
- throw e
- }
- },
- ]
- config['runFinally'] = [ '001_Archive_nodegenerator_artefact': {
- sh(script: "mv /tmp/${nodesArtifact} ${env.WORKSPACE}/${nodesArtifact}")
- archiveArtifacts artifacts: nodesArtifact
- }]
- saltModelTesting.setupDockerAndTest(config)
- def pillarsValidationDiff = "${pillarsAfterValidation}/diffFromOriginal"
- sh "mkdir -p ${pillarsValidationDiff} && tar -xf ${nodesArtifact} --dir ${pillarsAfterValidation}/"
- def changesFound = false
- for(String minion in minions) {
- try {
- sh (script:"diff -u -w -I '^Salt command execution success' -I '^ node: ' -I '^ uri: ' -I '^ timestamp: ' ${pillarsBeforeSuffix}/${minion} ${pillarsAfterValidation}/${minion} > ${pillarsValidationDiff}/${minion}", returnStdout: true)
- } catch(Exception e) {
- changesFound = true
- archiveArtifacts artifacts: "${pillarsValidationDiff}/${minion}"
- def buildUrl = env.BUILD_URL ? env.BUILD_URL : "${env.JENKINS_URL}/job/${env.JOB_NAME}/${env.BUILD_NUMBER}"
- common.errorMsg("Found diff changes for ${minion} minion: ${buildUrl}/artifact/${pillarsValidationDiff}/${minion}/*view*/ ")
- }
- }
- if (changesFound) {
- common.warningMsg('Found diff changes between current pillar data and updated. Inspect logs above.')
- input message: 'Continue anyway?'
- } else {
- common.infoMsg('Diff between current pillar data and updated one - not found.')
- }
- } catch (Exception updateErr) {
- common.warningMsg(updateErr)
- common.warningMsg('Failed to validate update Salt Formulas repos/packages.')
- input message: 'Continue anyway?'
- } finally {
- sh "rm -rf ${saltModelDir} ${nodesArtifact} ${pillarsAfterValidation} ${reclassModel}"
- }
-
try {
common.infoMsg('Perform: UPDATE Salt Formulas')
+ salt.fullRefresh(venvPepper, '*')
+ salt.enforceState(venvPepper, 'I@salt:master', 'linux.system.repo')
def saltEnv = salt.getPillar(venvPepper, 'I@salt:master', "_param:salt_master_base_environment").get("return")[0].values()[0]
- salt.runSaltProcessStep(venvPepper, 'I@salt:master', 'state.sls_id', ["salt_master_${saltEnv}_pkg_formulas",'salt.master.env'])
+ salt.runSaltProcessStep(venvPepper, 'I@salt:master', 'state.sls_id', ["salt_master_${saltEnv}_pkg_formulas", 'salt.master.env'])
+ salt.fullRefresh(venvPepper, '*')
} catch (Exception updateErr) {
common.warningMsg(updateErr)
common.warningMsg('Failed to update Salt Formulas repos/packages. Check current available documentation on https://docs.mirantis.com/mcp/latest/, how to update packages.')
input message: 'Continue anyway?'
}
+ wa29352(minions, cluster_name)
+ wa29155(minions, cluster_name)
+
try {
common.infoMsg('Perform: UPDATE Reclass package')
salt.runSaltProcessStep(venvPepper, 'I@salt:master', 'pkg.install', ["reclass"])
@@ -381,21 +430,23 @@
}
salt.fullRefresh(venvPepper, '*')
-
try {
salt.cmdRun(venvPepper, 'I@salt:master', "reclass-salt --top")
}
catch (Exception ex) {
- error("Reclass fails rendering. Pay attention to your cluster model.")
+
+ error('Reclass fails rendering. Pay attention to your cluster model. ' +
+ 'ErrorMessage: ' + ex.toString())
}
+ common.infoMsg('Perform: archiveReclassInventory AFTER upgrade')
archiveReclassInventory(inventoryAfterFilename)
sh "diff -u $inventoryBeforeFilename $inventoryAfterFilename > reclass-inventory-diff.out || true"
archiveArtifacts artifacts: "reclass-inventory-diff.out"
- validateReclassModel(minions, pillarsAfterSuffix)
- archiveReclassModelChanges(minions, pillarsBeforeSuffix, pillarsAfterSuffix)
+ validateReclassModel(minions, 'after')
+ archiveReclassModelChanges(minions)
}
if (updateLocalRepos) {
@@ -437,7 +488,7 @@
}
}
- stage("Update Drivetrain") {
+ stage('Update Drivetrain') {
if (upgradeSaltStack) {
updateSaltStack("I@salt:master", '["salt-master", "salt-common", "salt-api", "salt-minion"]')
@@ -446,28 +497,28 @@
}
if (updatePipelines) {
- triggerMirrorJob("git-mirror-downstream-mk-pipelines")
- triggerMirrorJob("git-mirror-downstream-pipeline-library")
+ common.infoMsg('Perform: UPDATE git repos')
+ triggerMirrorJob('git-mirror-downstream-mk-pipelines', reclassSystemBranch)
+ triggerMirrorJob('git-mirror-downstream-pipeline-library', reclassSystemBranch)
}
// update minions certs
- // call for `salt.minion.ca` state on related nodes to make sure
- // mine was updated with required data after salt-minion/salt-master restart salt:minion:ca
- salt.enforceState(venvPepper, "I@salt:minion:ca", 'salt.minion.ca', true)
salt.enforceState(venvPepper, "I@salt:minion", 'salt.minion.cert', true)
- // run `salt.minion` to refresh all minion configs (for example _keystone.conf)
- salt.enforceState([saltId: venvPepper, target: "I@salt:minion ${extra_tgt}", state: ['salt.minion'], read_timeout: 60, retries: 2])
- // updating users and keys
+
+ // Retry needed only for rare race-condition in user appearance
+ common.infoMsg('Perform: updating users and keys')
salt.enforceState(venvPepper, "I@linux:system", 'linux.system.user', true)
+ common.infoMsg('Perform: updating openssh')
salt.enforceState(venvPepper, "I@linux:system", 'openssh', true)
- salt.enforceState(venvPepper, "I@jenkins:client", 'jenkins.client', true)
+ // Apply changes for HaProxy on CI/CD nodes
+ salt.enforceState(venvPepper, 'I@keepalived:cluster:instance:cicd_control_vip and I@haproxy:proxy', 'haproxy.proxy', true)
- salt.cmdRun(venvPepper, "I@salt:master", "salt -C 'I@jenkins:client and I@docker:client' state.sls docker.client --async")
+ salt.cmdRun(venvPepper, "I@salt:master", "salt -C 'I@jenkins:client and I@docker:client and not I@salt:master' state.sls docker.client --async")
sleep(180)
- common.infoMsg("Checking if Docker containers are up")
+ common.infoMsg('Perform: Checking if Docker containers are up')
try {
common.retry(10, 30) {
@@ -477,6 +528,8 @@
catch (Exception ex) {
error("Docker containers for CI/CD services are having troubles with starting.")
}
+
+ salt.enforceState(venvPepper, 'I@jenkins:client and not I@salt:master', 'jenkins.client', true)
}
}
catch (Throwable e) {
@@ -485,4 +538,4 @@
throw e
}
}
-}
+}
\ No newline at end of file
diff --git a/validate-cloud.groovy b/validate-cloud.groovy
index 930a27d..962c4ed 100644
--- a/validate-cloud.groovy
+++ b/validate-cloud.groovy
@@ -36,6 +36,9 @@
*
* PARALLEL_PERFORMANCE If enabled, run Rally tests separately in parallel for each sub directory found
* inside RALLY_SCENARIOS and RALLY_SL_SCENARIOS (if STACKLIGHT_RALLY is enabled)
+ * GENERATE_REPORT Set this to false if you are running longevity tests on a cicd node with less than
+ * 21GB memory. Rally consumes lots of memory when generating reports sourcing weeks'
+ * worth of data (BUG PROD-30433)
*/
common = new com.mirantis.mk.Common()
@@ -56,6 +59,7 @@
def pluginsRepo = rally.get('RALLY_PLUGINS_REPO') ?: 'https://github.com/Mirantis/rally-plugins'
def pluginsBranch = rally.get('RALLY_PLUGINS_BRANCH') ?: 'master'
def tags = rally.get('RALLY_TAGS') ?: []
+def generateReport = rally.get('GENERATE_REPORT', true).toBoolean()
// container working dir vars
def rallyWorkdir = '/home/rally'
@@ -194,7 +198,7 @@
platform, rally.RALLY_SCENARIOS,
rally.RALLY_SL_SCENARIOS, rally.RALLY_TASK_ARGS_FILE,
rally.RALLY_DB_CONN_STRING, tags,
- rally.RALLY_TRENDS.toBoolean(), rally.SKIP_LIST
+ rally.RALLY_TRENDS.toBoolean(), rally.SKIP_LIST, generateReport
)
def commands_list = commands.collectEntries{ [ (it.key) : { sh("${it.value}") } ] }
@@ -301,7 +305,8 @@
curPlatform, commonScens,
stacklightScens, rally.RALLY_TASK_ARGS_FILE,
rally.RALLY_DB_CONN_STRING, tags,
- rally.RALLY_TRENDS.toBoolean(), rally.SKIP_LIST
+ rally.RALLY_TRENDS.toBoolean(), rally.SKIP_LIST,
+ generateReport
)
// copy required files for the current task