Merge "update pipelines for refactored grains" into release/proposed/2019.2.0
diff --git a/ceph-add-osd-upmap.groovy b/ceph-add-osd-upmap.groovy
index c193d39..3c195ea 100644
--- a/ceph-add-osd-upmap.groovy
+++ b/ceph-add-osd-upmap.groovy
@@ -1,11 +1,11 @@
/**
*
- * Add Ceph node to existing cluster using upmap mechanism
+ * Add Ceph OSD node to existing cluster using upmap mechanism
*
* Required parameters:
* SALT_MASTER_URL URL of Salt master
* SALT_MASTER_CREDENTIALS Credentials to the Salt API
- * HOST Host (minion id) to be added
+ * HOST OSD Host (minion id) to be added
*
*/
@@ -39,58 +39,99 @@
timeout(time: 12, unit: 'HOURS') {
node("python") {
- // create connection to salt master
- python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+ try {
+ // create connection to salt master
+ python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
- stage("verify client versions") {
- def admin = salt.getMinions(pepperEnv, "I@ceph:mon and I@ceph:common:keyring:admin")[0]
- def versions = salt.cmdRun(pepperEnv, admin, "ceph features", checkResponse = true, batch = null, output = false).values()[0]
-
- if (versions[0][admin].contains('jewel')) {
- throw new Exception("Update all clients to luminous before using this pipeline")
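+ // Guard: this pipeline only knows how to add OSD hosts; refuse anything else early.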
+ if (!HOST.toLowerCase().contains("osd")) {
+ common.errorMsg("This pipeline can only be used to add new OSD nodes to an existing Ceph cluster.")
+ throw new InterruptedException()
}
- }
- stage("enable luminous compat") {
- runCephCommand('ceph osd set-require-min-compat-client luminous')['return'][0].values()[0]
- }
-
- stage("enable upmap balancer") {
- runCephCommand('ceph balancer on')['return'][0].values()[0]
- runCephCommand('ceph balancer mode upmap')['return'][0].values()[0]
- }
-
-
- stage("set norebalance") {
- runCephCommand('ceph osd set norebalance')['return'][0].values()[0]
- }
-
- stage('Install Ceph OSD') {
- orchestrate.installCephOsd(pepperEnv, HOST)
- }
-
- def mapping = []
-
- stage("update mappings") {
- def pgmap
- for (int x = 1; x <= 3; x++) {
- pgmap = getpgmap()
- if (pgmap == '') {
- return 1
- } else {
- pgmap = new groovy.json.JsonSlurperClassic().parseText(pgmap)
- generatemapping(pepperEnv, pgmap, mapping)
- mapping.each(this.&runCephCommand)
+ stage ("verify client versions")
+ {
+ // exclude the mon* nodes, which match I@docker:swarm and I@prometheus:server
+ def nodes = salt.getMinions(pepperEnv, "I@ceph:common and not ( I@docker:swarm and I@prometheus:server )")
+ for (node in nodes) {
+ def versions = salt.cmdRun(pepperEnv, node, "ceph features --format json", checkResponse=true, batch=null, output=false).values()[0]
+ versions = new groovy.json.JsonSlurperClassic().parseText(versions[0][node])
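+ // On luminous, 'ceph features' reports each daemon class as
+ // {"group": {"features": "0x...", "release": "luminous", "num": N}},
+ // so the lowest client release is read from ['client']['group']['release'].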
+ if (versions['client']['group']['release'] != 'luminous') {
+ throw new Exception("client installed on " + node + " is not luminous. Update all clients to luminous before using this pipeline")
+ }
}
}
- }
- stage("unset norebalance") {
+ stage("enable luminous compat") {
+ runCephCommand('ceph osd set-require-min-compat-client luminous')['return'][0].values()[0]
+ }
+
+ stage("enable upmap balancer") {
+ runCephCommand('ceph balancer on')['return'][0].values()[0]
+ runCephCommand('ceph balancer mode upmap')['return'][0].values()[0]
+ }
+
+ stage("set norebalance") {
+ runCephCommand('ceph osd set norebalance')['return'][0].values()[0]
+ }
+
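+ // Apply the base infra states to the new host before handing it the OSD role.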
+ stage('Install infra') {
+ orchestrate.installFoundationInfraOnTarget(pepperEnv, HOST)
+ }
+
+ stage('Install Ceph OSD') {
+ orchestrate.installCephOsd(pepperEnv, HOST)
+ }
+
+ stage("Update/Install monitoring") {
+ def prometheusNodes = salt.getMinions(pepperEnv, 'I@prometheus:server')
+ if (!prometheusNodes.isEmpty()) {
+ // Collect grains and update the mine so the monitoring states see the new node
+ salt.enforceState(pepperEnv, HOST, 'salt.minion.grains')
+ salt.runSaltProcessStep(pepperEnv, HOST, 'saltutil.refresh_modules')
+ salt.runSaltProcessStep(pepperEnv, HOST, 'mine.update')
+ sleep(5)
+ salt.enforceState(pepperEnv, HOST, ['fluentd', 'telegraf', 'prometheus'])
+ salt.enforceState(pepperEnv, 'I@prometheus:server', 'prometheus')
+ } else {
+ common.infoMsg('No Prometheus nodes in cluster. Nothing to do')
+ }
+ }
+
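+ // linux.network.host manages /etc/hosts, so apply it cluster-wide to make the
+ // new node resolvable from every minion.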
+ stage("Update host files") {
+ salt.enforceState(pepperEnv, '*', 'linux.network.host')
+ }
+
+ def mapping = []
+
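+ // generatemapping produces 'ceph osd pg-upmap-items' commands that pin each PG
+ // to the OSDs currently holding it, so the new OSDs take no data immediately;
+ // the upmap balancer then migrates PGs gradually once norebalance is lifted.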
+ stage("update mappings") {
+ def pgmap
+ for (int x = 1; x <= 3; x++) {
+ pgmap = getpgmap()
+ if (pgmap == '') {
+ return 1
+ } else {
+ pgmap = new groovy.json.JsonSlurperClassic().parseText(pgmap)
+ generatemapping(pepperEnv, pgmap, mapping)
+ mapping.each(this.&runCephCommand)
+ sleep(30)
+ }
+ }
+ }
+
+ stage("unset norebalance") {
+ runCephCommand('ceph osd unset norebalance')['return'][0].values()[0]
+ }
+
+ stage("wait for healthy cluster") {
+ ceph.waitForHealthy(pepperEnv, "I@ceph:mon and I@ceph:common:keyring:admin", flags)
+ }
+ }
+ catch (Throwable e) {
+ // An error or exception was thrown; make sure norebalance gets unset before rethrowing.
runCephCommand('ceph osd unset norebalance')['return'][0].values()[0]
- }
-
- stage("wait for healthy cluster") {
- ceph.waitForHealthy(pepperEnv, "I@ceph:mon and I@ceph:common:keyring:admin", flags)
+ throw e
}
}
}
diff --git a/cvp-runner.groovy b/cvp-runner.groovy
index f0e17b4..99f5e3c 100644
--- a/cvp-runner.groovy
+++ b/cvp-runner.groovy
@@ -100,7 +100,7 @@
file: "${artifacts_dir}/${xml_file}",
nodeType: 'NODESET',
url: '',
- xpath: '/testsuite/testcase[@classname="tests.test_glance"]/properties/property']]
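+ // The generated JUnit report now wraps <testsuite> in a root <testsuites>
+ // element, so each XPath in this file gains one level.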
+ xpath: '/testsuites/testsuite/testcase[@classname="tests.test_glance"]/properties/property']]
plot csvFileName: 'plot-hw2hw.csv',
group: 'SPT',
style: 'line',
@@ -109,7 +109,7 @@
file: "${artifacts_dir}/${xml_file}",
nodeType: 'NODESET',
url: '',
- xpath: '/testsuite/testcase[@classname="tests.test_hw2hw"]/properties/property']]
+ xpath: '/testsuites/testsuite/testcase[@classname="tests.test_hw2hw"]/properties/property']]
plot csvFileName: 'plot-vm2vm.csv',
group: 'SPT',
style: 'line',
@@ -118,7 +118,7 @@
file: "${artifacts_dir}/${xml_file}",
nodeType: 'NODESET',
url: '',
- xpath: '/testsuite/testcase[@classname="tests.test_vm2vm"]/properties/property']]
+ xpath: '/testsuites/testsuite/testcase[@classname="tests.test_vm2vm"]/properties/property']]
}
try {
sh """
diff --git a/stacklight-upgrade.groovy b/stacklight-upgrade.groovy
index 9b2d760..bb5f1e0 100644
--- a/stacklight-upgrade.groovy
+++ b/stacklight-upgrade.groovy
@@ -18,17 +18,19 @@
command = 'cmd.run'
pepperEnv = "pepperEnv"
errorOccured = false
+def packageUpgradeMode = ''
+def forceUpgradeComponents = false
-def upgrade(master, target, service, pckg, state) {
+def upgrade(master, target, service, pkg, state) {
stage("Upgrade ${service}") {
salt.runSaltProcessStep(master, "${target}", 'saltutil.refresh_pillar', [], null, true)
salt.enforceState([saltId: master, target: "${target}", state: 'linux.system.repo', output: true, failOnError: true])
common.infoMsg("Upgrade ${service} package(s)")
try {
- salt.runSaltProcessStep(master, "${target}", command, ["apt-get install -y -o Dpkg::Options::=\"--force-confold\" ${pckg}"], null, true)
+ salt.runSaltProcessStep(master, "${target}", command, ["apt-get install -y -o Dpkg::Options::=\"--force-confold\" ${pkg}"], null, true)
} catch (Exception er) {
errorOccured = true
- common.errorMsg("[ERROR] ${pckg} package(s) was not upgraded.")
+ common.errorMsg("[ERROR] ${pkg} package(s) were not upgraded.")
throw er
}
common.infoMsg("Run ${state} state on ${target} nodes")
@@ -169,6 +171,18 @@
timeout(time: 12, unit: 'HOURS') {
node("python") {
+ if ((env.getProperty('OS_DIST_UPGRADE') ?: false).toBoolean()) {
+ packageUpgradeMode = 'dist-upgrade'
+ forceUpgradeComponents = true
+ } else if ((env.getProperty('OS_UPGRADE') ?: false).toBoolean()) {
+ packageUpgradeMode = 'upgrade'
+ forceUpgradeComponents = true
+ }
+
+ if (forceUpgradeComponents) {
+ common.infoMsg('Forcing upgrade of all Stacklight components because OS_DIST_UPGRADE or OS_UPGRADE is selected')
+ }
+
stage('Setup virtualenv for Pepper') {
python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
}
@@ -186,26 +200,7 @@
}
}
- if (STAGE_UPGRADE_SYSTEM_PART.toBoolean() == true && !errorOccured) {
- upgrade(pepperEnv, "I@telegraf:agent or I@telegraf:remote_agent", "telegraf", "telegraf", "telegraf")
- upgrade(pepperEnv, "I@fluentd:agent", "td-agent", "td-agent td-agent-additional-plugins", "fluentd")
- if (salt.testTarget(pepperEnv, "I@prometheus:relay")) {
- upgrade(pepperEnv, "I@prometheus:relay", "prometheus prometheus-relay", "prometheus-bin prometheus-relay", "prometheus")
- salt.runSaltProcessStep(pepperEnv, "I@prometheus:relay", "service.restart", "prometheus", null, true)
- }
- if (salt.testTarget(pepperEnv, "I@prometheus:exporters:libvirt")) {
- upgrade(pepperEnv, "I@prometheus:exporters:libvirt", "libvirt-exporter", "libvirt-exporter", "prometheus")
- }
- if (salt.testTarget(pepperEnv, "I@prometheus:exporters:jmx")) {
- upgrade(pepperEnv, "I@prometheus:exporters:jmx", "jmx-exporter", "jmx-exporter", "prometheus")
- }
- }
-
- if (STAGE_UPGRADE_ES_KIBANA.toBoolean() == true && !errorOccured) {
- upgrade_es_kibana(pepperEnv)
- }
-
- if (STAGE_UPGRADE_DOCKER_COMPONENTS.toBoolean() == true && !errorOccured) {
+ if (forceUpgradeComponents || (STAGE_UPGRADE_DOCKER_COMPONENTS.toBoolean() == true && !errorOccured)) {
stage('Upgrade docker components') {
try {
common.infoMsg('Disable and remove the previous versions of monitoring services')
@@ -229,6 +224,57 @@
}
}
}
+
+ if (forceUpgradeComponents || (STAGE_UPGRADE_SYSTEM_PART.toBoolean() == true && !errorOccured)) {
+ upgrade(pepperEnv, "I@telegraf:agent or I@telegraf:remote_agent", "telegraf", "telegraf", "telegraf")
+ upgrade(pepperEnv, "I@fluentd:agent", "td-agent", "td-agent td-agent-additional-plugins", "fluentd")
+ if (salt.testTarget(pepperEnv, "I@prometheus:relay")) {
+ upgrade(pepperEnv, "I@prometheus:relay", "prometheus prometheus-relay", "prometheus-bin prometheus-relay", "prometheus")
+ salt.runSaltProcessStep(pepperEnv, "I@prometheus:relay", "service.restart", "prometheus", null, true)
+ }
+ if (salt.testTarget(pepperEnv, "I@prometheus:exporters:libvirt")) {
+ upgrade(pepperEnv, "I@prometheus:exporters:libvirt", "libvirt-exporter", "libvirt-exporter", "prometheus")
+ }
+ if (salt.testTarget(pepperEnv, "I@prometheus:exporters:jmx")) {
+ upgrade(pepperEnv, "I@prometheus:exporters:jmx", "jmx-exporter", "jmx-exporter", "prometheus")
+ }
+ }
+
+ if (forceUpgradeComponents || (STAGE_UPGRADE_ES_KIBANA.toBoolean() == true && !errorOccured)) {
+ upgrade_es_kibana(pepperEnv)
+ }
+
+ stage('Upgrade OS') {
+ if (packageUpgradeMode) {
+ def stacklightNodes = salt.getMinions(pepperEnv, 'I@elasticsearch:server or I@prometheus:server')
+ def stacklightNodesWithDocker = salt.getMinions(pepperEnv, 'I@docker:swarm and I@prometheus:server')
+ def elasticSearchNodes = salt.getMinions(pepperEnv, 'I@elasticsearch:server')
+ def debian = new com.mirantis.mk.Debian()
+ for (stacklightNode in stacklightNodes) {
+ salt.runSaltProcessStep(pepperEnv, stacklightNode, 'saltutil.refresh_pillar', [], null, true)
+ salt.enforceState([saltId: pepperEnv, target: stacklightNode, state: 'linux.system.repo', output: true, failOnError: true])
+ debian.osUpgradeNode(pepperEnv, stacklightNode, packageUpgradeMode, false, 60)
+ salt.checkTargetMinionsReady(['saltId': pepperEnv, 'target': stacklightNode, wait: 60, timeout: 10])
+ if (packageUpgradeMode == 'dist-upgrade' && stacklightNode in stacklightNodesWithDocker) {
+ common.infoMsg('Perform: Checking if Docker containers are up after reboot')
+ try {
+ common.retry(10, 30) {
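+ // Succeeds only when every service's REPLICAS column reads N/N (single-digit
+ // counts): grep -v prints services that are not fully up, and '!' inverts the
+ // result, so the retry keeps polling until the list is empty.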
+ salt.cmdRun(pepperEnv, stacklightNode, "! docker service ls | tail -n +2 | grep -v -E '\\s([0-9])/\\1\\s'")
+ }
+ }
+ catch (Exception ex) {
+ error("Docker containers for Stacklight services are having troubles with starting.")
+ }
+ }
+ if (stacklightNode in elasticSearchNodes) {
+ verify_es_is_green(pepperEnv)
+ }
+ }
+ } else {
+ common.infoMsg('Upgrade OS skipped...')
+ }
+ }
+
stage('Post upgrade steps') {
common.infoMsg('Apply workaround for PROD-33878')
salt.runSaltProcessStep(pepperEnv, "I@fluentd:agent and I@rabbitmq:server", "service.restart", "td-agent", null, true)
diff --git a/upgrade-mcp-release.groovy b/upgrade-mcp-release.groovy
index 9a87c94..6ed9829 100644
--- a/upgrade-mcp-release.groovy
+++ b/upgrade-mcp-release.groovy
@@ -30,6 +30,14 @@
def packageUpgradeMode = ''
batchSize = ''
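+// Refresh pillar and grains, then sync all modules, one minion at a time with
+// per-call timeouts, instead of one bulk salt.fullRefresh across every minion.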
+def fullRefreshOneByOne(venvPepper, minions) {
+ for (minion in minions) {
+ salt.runSaltProcessStep(venvPepper, minion, 'saltutil.refresh_pillar', [], null, true, 60)
+ salt.runSaltProcessStep(venvPepper, minion, 'saltutil.refresh_grains', [], null, true, 60)
+ salt.runSaltProcessStep(venvPepper, minion, 'saltutil.sync_all', [], null, true, 180)
+ }
+}
+
def triggerMirrorJob(String jobName, String reclassSystemBranch) {
params = jenkinsUtils.getJobParameters(jobName)
try {
@@ -125,7 +133,6 @@
salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cname && " +
"grep -q '${wa29352ClassName}' infra/secrets.yml || sed -i '/classes:/ a - $wa29352ClassName' infra/secrets.yml")
- salt.fullRefresh(venvPepper, '*')
sh('rm -fv ' + _tempFile)
salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cname && git status && " +
"git add ${wa29352File} && git add -u && git commit --allow-empty -m 'Cluster model updated with WA for PROD-29352. Issue cause due patch https://gerrit.mcp.mirantis.com/#/c/37932/ at ${common.getDatetime()}' ")
@@ -195,7 +202,7 @@
salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cname && " +
"grep -q '${wa29155ClassName}' infra/secrets.yml || sed -i '/classes:/ a - $wa29155ClassName' infra/secrets.yml")
salt.fullRefresh(venvPepper, 'I@salt:master')
- salt.fullRefresh(venvPepper, saltMinions)
+ salt.runSaltProcessStep(venvPepper, saltMinions, 'saltutil.refresh_pillar', [], null, true, 60)
patched = true
}
}
@@ -228,6 +235,14 @@
}
}
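+// Issue 34406: the release update requires a generated Sphinx proxy password in
+// the model; fail early with a docs pointer if the pillar is missing.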
+def check_34406(String cluster_name) {
+ def sphinxpasswordPillar = salt.getPillar(venvPepper, 'I@salt:master', '_param:sphinx_proxy_password_generated').get("return")[0].values()[0]
+ if (sphinxpasswordPillar == '' || sphinxpasswordPillar == 'null' || sphinxpasswordPillar == null) {
+ error('Sphinx password is not defined.\n' +
+ 'See https://docs.mirantis.com/mcp/q4-18/mcp-release-notes/mu/mu-9/mu-9-addressed/mu-9-dtrain/mu-9-dt-manual.html#i-34406 for more info')
+ }
+}
+
def wa32182(String cluster_name) {
if (salt.testTarget(venvPepper, 'I@opencontrail:control or I@opencontrail:collector')) {
def clusterModelPath = "/srv/salt/reclass/classes/cluster/${cluster_name}"
@@ -633,10 +648,14 @@
batchSize = (workerThreads * 2 / 3).toString().tokenize('.')[0]
}
def computeMinions = salt.getMinions(venvPepper, 'I@nova:compute')
+ def allMinions = salt.getMinions(venvPepper, '*')
stage('Update Reclass and Salt-Formulas') {
common.infoMsg('Perform: Full salt sync')
- salt.fullRefresh(venvPepper, '*')
+ fullRefreshOneByOne(venvPepper, allMinions)
+
+ check_34406(cluster_name)
+
common.infoMsg('Perform: Validate reclass metadata before processing')
validateReclassModel(minions, 'before')
@@ -754,11 +773,11 @@
}
try {
common.infoMsg('Perform: UPDATE Salt Formulas')
- salt.fullRefresh(venvPepper, '*')
+ fullRefreshOneByOne(venvPepper, allMinions)
salt.enforceState(venvPepper, 'I@salt:master', 'linux.system.repo', true, true, null, false, 60, 2)
def saltEnv = salt.getPillar(venvPepper, 'I@salt:master', "_param:salt_master_base_environment").get("return")[0].values()[0]
salt.runSaltProcessStep(venvPepper, 'I@salt:master', 'state.sls_id', ["salt_master_${saltEnv}_pkg_formulas", 'salt.master.env'])
- salt.fullRefresh(venvPepper, '*')
+ fullRefreshOneByOne(venvPepper, allMinions)
} catch (Exception updateErr) {
common.warningMsg(updateErr)
common.warningMsg('Failed to update Salt Formulas repos/packages. See the current documentation at https://docs.mirantis.com/mcp/latest/ for how to update packages.')
@@ -790,7 +809,7 @@
error('Reclass fails rendering. Pay attention to your cluster model.')
}
- salt.fullRefresh(venvPepper, '*')
+ fullRefreshOneByOne(venvPepper, allMinions)
try {
salt.cmdRun(venvPepper, 'I@salt:master', "reclass-salt --top")
}
@@ -888,6 +907,7 @@
def wrongPluginJarName = "${gerritGlusterPath}/plugins/project-download-commands.jar"
salt.cmdRun(venvPepper, 'I@gerrit:client', "test -f ${wrongPluginJarName} && rm ${wrongPluginJarName} || true")
+ salt.enforceStateWithTest(venvPepper, 'I@jenkins:client:security and not I@salt:master', 'jenkins.client.security', "", true, true, null, true, 60, 2)
salt.enforceStateWithTest(venvPepper, 'I@jenkins:client and I@docker:client:images and not I@salt:master', 'docker.client.images', "", true, true, null, true, 60, 2)
salt.cmdRun(venvPepper, "I@salt:master", "salt -C 'I@jenkins:client and I@docker:client and not I@salt:master' state.sls docker.client --async")
}
@@ -910,7 +930,7 @@
// Apply changes for HaProxy on CI/CD nodes
salt.enforceState(venvPepper, 'I@keepalived:cluster:instance:cicd_control_vip and I@haproxy:proxy', 'haproxy.proxy', true)
-
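+ // Upgrade python-jenkins and restart salt-minion to load the new module version
+ // before jenkins.client is enforced.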
+ salt.upgradePackageAndRestartSaltMinion(venvPepper, 'I@jenkins:client and not I@salt:master', 'python-jenkins')
salt.enforceState(venvPepper, 'I@jenkins:client and not I@salt:master', 'jenkins.client', true, true, null, false, 60, 2)
// update Nginx proxy settings for Jenkins/Gerrit if needed