Use Pepper instead of sending HTTP requests from the Jenkins master
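
Pipelines used to open a salt-api connection directly from the Jenkins
master via salt.connection() and pass the resulting object around. They
now set up a Pepper virtualenv on the slave and pass its path to the
salt.* helpers, so the HTTP requests to salt-api are issued by Pepper
from the build workspace instead of the master JVM. A minimal sketch of
the new calling pattern, assuming the setupPepperVirtualenv(path, url,
credentialsId) signature used in the hunks below:

    def salt = new com.mirantis.mk.Salt()
    def python = new com.mirantis.mk.Python()
    def pepperEnv = "pepperEnv"

    node("python") {
        // Create the Pepper virtualenv once per run...
        python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
        // ...then pass its path wherever the connection object used to go.
        salt.cmdRun(pepperEnv, 'I@salt:master', 'uptime')
    }
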
Change-Id: I2ddfe19ae9c70731989da7ba07fa7720da797721
diff --git a/ceph-enforce-weights.groovy b/ceph-enforce-weights.groovy
index 4e06322..45ec06b 100644
--- a/ceph-enforce-weights.groovy
+++ b/ceph-enforce-weights.groovy
@@ -12,9 +12,9 @@
common = new com.mirantis.mk.Common()
salt = new com.mirantis.mk.Salt()
+def python = new com.mirantis.mk.Python()
-// configure global variables
-def saltMaster
+def pepperEnv = "pepperEnv"
def runCephCommand(master, cmd) {
return salt.cmdRun(master, ADMIN_HOST, cmd)
@@ -25,11 +25,10 @@
node("python") {
stage('Load cluster information') {
- // create connection to salt master
- saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+ python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
// get list of disk from grains
- grains = salt.getGrain(saltMaster, 'I@ceph:osd')['return'][0]
+ grains = salt.getGrain(pepperEnv, 'I@ceph:osd')['return'][0]
common.prettyPrint(grains)
}
@@ -52,7 +51,7 @@
print(disk.value)
print(disk.key)
def cmd = "ceph osd crush set ${osd_id} ${disk.value.weight} host=${hostname}"
- print(runCephCommand(saltMaster, cmd))
+ print(runCephCommand(pepperEnv, cmd))
}
}
diff --git a/ceph-remove-osd.groovy b/ceph-remove-osd.groovy
index ac102eb..63f5713 100644
--- a/ceph-remove-osd.groovy
+++ b/ceph-remove-osd.groovy
@@ -15,9 +15,9 @@
common = new com.mirantis.mk.Common()
salt = new com.mirantis.mk.Salt()
+def python = new com.mirantis.mk.Python()
-// configure global variables
-def saltMaster
+def pepperEnv = "pepperEnv"
def flags = CLUSTER_FLAGS.tokenize(',')
def osds = OSD.tokenize(',')
@@ -28,19 +28,19 @@
node("python") {
// create connection to salt master
- saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+ python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
if (flags.size() > 0) {
stage('Set cluster flags') {
for (flag in flags) {
- runCephCommand(saltMaster, 'ceph osd set ' + flag)
+ runCephCommand(pepperEnv, 'ceph osd set ' + flag)
}
}
}
// get list of disk at the osd
- def pillar_disks = salt.getPillar(saltMaster, HOST, 'ceph:osd:disk')['return'][0].values()[0]
- def hostname_id = salt.getPillar(saltMaster, HOST, 'ceph:osd:host_id')['return'][0].values()[0]
+ def pillar_disks = salt.getPillar(pepperEnv, HOST, 'ceph:osd:disk')['return'][0].values()[0]
+ def hostname_id = salt.getPillar(pepperEnv, HOST, 'ceph:osd:host_id')['return'][0].values()[0]
def osd_ids = []
print("host_id is ${hostname_id}")
@@ -60,14 +60,14 @@
// `ceph osd out <id> <id>`
stage('Set OSDs out') {
- runCephCommand(saltMaster, 'ceph osd out ' + osd_ids.join(' '))
+ runCephCommand(pepperEnv, 'ceph osd out ' + osd_ids.join(' '))
}
// wait for healthy cluster
if (common.validInputParam('WAIT_FOR_HEALTHY') && WAIT_FOR_HEALTHY.toBoolean()) {
stage('Waiting for healthy cluster') {
while (true) {
- def health = runCephCommand(saltMaster, 'ceph health')['return'][0].values()[0]
+ def health = runCephCommand(pepperEnv, 'ceph health')['return'][0].values()[0]
if (health.contains('HEALTH_OK')) {
common.infoMsg('Cluster is healthy')
break;
@@ -80,28 +80,28 @@
// stop osd daemons
stage('Stop OSD daemons') {
for (i in osd_ids) {
- salt.runSaltProcessStep(saltMaster, HOST, 'service.stop', ['ceph-osd@' + i.replaceAll('osd.', '')], null, true)
+ salt.runSaltProcessStep(pepperEnv, HOST, 'service.stop', ['ceph-osd@' + i.replaceAll('osd.', '')], null, true)
}
}
// `ceph osd crush remove osd.2`
stage('Remove OSDs from CRUSH') {
for (i in osd_ids) {
- runCephCommand(saltMaster, 'ceph osd crush remove ' + i)
+ runCephCommand(pepperEnv, 'ceph osd crush remove ' + i)
}
}
// remove keyring `ceph auth del osd.3`
stage('Remove OSD keyrings from auth') {
for (i in osd_ids) {
- runCephCommand(saltMaster, 'ceph auth del ' + i)
+ runCephCommand(pepperEnv, 'ceph auth del ' + i)
}
}
// remove osd `ceph osd rm osd.3`
stage('Remove OSDs') {
for (i in osd_ids) {
- runCephCommand(saltMaster, 'ceph osd rm ' + i)
+ runCephCommand(pepperEnv, 'ceph osd rm ' + i)
}
}
@@ -110,7 +110,7 @@
stage('Unset cluster flags') {
for (flag in flags) {
common.infoMsg('Removing flag ' + flag)
- runCephCommand(saltMaster, 'ceph osd unset ' + flag)
+ runCephCommand(pepperEnv, 'ceph osd unset ' + flag)
}
}
}
diff --git a/change-config.groovy b/change-config.groovy
index 16cd629..2f67b15 100644
--- a/change-config.groovy
+++ b/change-config.groovy
@@ -15,8 +15,9 @@
def common = new com.mirantis.mk.Common()
def salt = new com.mirantis.mk.Salt()
+def python = new com.mirantis.mk.Python()
-def saltMaster
+def pepperEnv = "pepperEnv"
def targetTestSubset
def targetLiveSubset
def targetLiveAll
@@ -34,20 +35,20 @@
states = null
}
- stage('Connect to Salt master') {
- saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+ stage('Setup virtualenv for Pepper') {
+ python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
}
if (common.validInputParam("PULL_MODEL") && PULL_MODEL.toBoolean() == true) {
stage('Update the reclass cluster model') {
def saltMasterTarget = ['expression': 'I@salt:master', 'type': 'compound']
- result = salt.runSaltCommand(saltMaster, 'local', saltMasterTarget, 'state.apply', null, "reclass.storage.data")
+ result = salt.runSaltCommand(pepperEnv, 'local', saltMasterTarget, 'state.apply', null, "reclass.storage.data")
salt.checkResult(result)
}
}
stage('List target servers') {
- minions = salt.getMinions(saltMaster, TARGET_SERVERS)
+ minions = salt.getMinions(pepperEnv, TARGET_SERVERS)
if (minions.isEmpty()) {
throw new Exception("No minion was targeted")
}
@@ -68,7 +69,7 @@
def kwargs = [
'test': true
]
- result = salt.runSaltCommand(saltMaster, 'local', targetTestSubset, 'state.apply', null, states, kwargs)
+ result = salt.runSaltCommand(pepperEnv, 'local', targetTestSubset, 'state.apply', null, states, kwargs)
salt.checkResult(result)
}
@@ -79,7 +80,7 @@
}
stage('Apply config changes on sample') {
- result = salt.runSaltCommand(saltMaster, 'local', targetLiveSubset, 'state.apply', null, states)
+ result = salt.runSaltCommand(pepperEnv, 'local', targetLiveSubset, 'state.apply', null, states)
salt.checkResult(result)
}
@@ -90,7 +91,7 @@
}
stage('Apply config changes on all nodes') {
- result = salt.runSaltCommand(saltMaster, 'local', targetLiveAll, 'state.apply', null, states)
+ result = salt.runSaltCommand(pepperEnv, 'local', targetLiveAll, 'state.apply', null, states)
salt.checkResult(result)
}
diff --git a/cicd-lab-pipeline.groovy b/cicd-lab-pipeline.groovy
index 782a051..d865b25 100644
--- a/cicd-lab-pipeline.groovy
+++ b/cicd-lab-pipeline.groovy
@@ -31,13 +31,15 @@
openstack = new com.mirantis.mk.Openstack()
salt = new com.mirantis.mk.Salt()
orchestrate = new com.mirantis.mk.Orchestrate()
+def python = new com.mirantis.mk.Python()
+
+def pepperEnv = "pepperEnv"
_MAX_PERMITTED_STACKS = 2
node {
try {
// connection objects
def openstackCloud
- def saltMaster
// value defaults
def openstackVersion = OPENSTACK_API_CLIENT ? OPENSTACK_API_CLIENT : 'liberty'
@@ -115,7 +117,7 @@
saltMasterHost = openstack.getHeatStackOutputParam(openstackCloud, HEAT_STACK_NAME, 'salt_master_ip', openstackEnv)
currentBuild.description = "${HEAT_STACK_NAME}: ${saltMasterHost}"
saltMasterUrl = "http://${saltMasterHost}:${saltMasterPort}"
- saltMaster = salt.connection(saltMasterUrl, SALT_MASTER_CREDENTIALS)
+ python.setupPepperVirtualenv(pepperEnv, saltMasterUrl, SALT_MASTER_CREDENTIALS)
}
//
@@ -128,42 +130,42 @@
// sync_all
// linux,openssh,salt.minion.ntp
- orchestrate.installFoundationInfra(saltMaster)
- orchestrate.validateFoundationInfra(saltMaster)
+ orchestrate.installFoundationInfra(pepperEnv)
+ orchestrate.validateFoundationInfra(pepperEnv)
}
stage("Deploy GlusterFS") {
- salt.enforceState(saltMaster, 'I@glusterfs:server', 'glusterfs.server.service', true)
+ salt.enforceState(pepperEnv, 'I@glusterfs:server', 'glusterfs.server.service', true)
retry(2) {
- salt.enforceState(saltMaster, 'ci01*', 'glusterfs.server.setup', true)
+ salt.enforceState(pepperEnv, 'ci01*', 'glusterfs.server.setup', true)
}
sleep(5)
- salt.enforceState(saltMaster, 'I@glusterfs:client', 'glusterfs.client', true)
+ salt.enforceState(pepperEnv, 'I@glusterfs:client', 'glusterfs.client', true)
timeout(5) {
println "Waiting for GlusterFS volumes to get mounted.."
- salt.cmdRun(saltMaster, 'I@glusterfs:client', 'while true; do systemctl -a|grep "GlusterFS File System"|grep -v mounted >/dev/null || break; done')
+ salt.cmdRun(pepperEnv, 'I@glusterfs:client', 'while true; do systemctl -a|grep "GlusterFS File System"|grep -v mounted >/dev/null || break; done')
}
- print common.prettyPrint(salt.cmdRun(saltMaster, 'I@glusterfs:client', 'mount|grep fuse.glusterfs || echo "Command failed"'))
+ print common.prettyPrint(salt.cmdRun(pepperEnv, 'I@glusterfs:client', 'mount|grep fuse.glusterfs || echo "Command failed"'))
}
stage("Deploy GlusterFS") {
- salt.enforceState(saltMaster, 'I@haproxy:proxy', 'haproxy,keepalived')
+ salt.enforceState(pepperEnv, 'I@haproxy:proxy', 'haproxy,keepalived')
}
stage("Setup Docker Swarm") {
- salt.enforceState(saltMaster, 'I@docker:host', 'docker.host', true)
- salt.enforceState(saltMaster, 'I@docker:swarm:role:master', 'docker.swarm', true)
- salt.enforceState(saltMaster, 'I@docker:swarm:role:master', 'salt', true)
- salt.runSaltProcessStep(saltMaster, 'I@docker:swarm:role:master', 'mine.flush')
- salt.runSaltProcessStep(saltMaster, 'I@docker:swarm:role:master', 'mine.update')
- salt.enforceState(saltMaster, 'I@docker:swarm', 'docker.swarm', true)
- print common.prettyPrint(salt.cmdRun(saltMaster, 'I@docker:swarm:role:master', 'docker node ls'))
+ salt.enforceState(pepperEnv, 'I@docker:host', 'docker.host', true)
+ salt.enforceState(pepperEnv, 'I@docker:swarm:role:master', 'docker.swarm', true)
+ salt.enforceState(pepperEnv, 'I@docker:swarm:role:master', 'salt', true)
+ salt.runSaltProcessStep(pepperEnv, 'I@docker:swarm:role:master', 'mine.flush')
+ salt.runSaltProcessStep(pepperEnv, 'I@docker:swarm:role:master', 'mine.update')
+ salt.enforceState(pepperEnv, 'I@docker:swarm', 'docker.swarm', true)
+ print common.prettyPrint(salt.cmdRun(pepperEnv, 'I@docker:swarm:role:master', 'docker node ls'))
}
stage("Configure OSS services") {
- salt.enforceState(saltMaster, 'I@devops_portal:config', 'devops_portal.config')
- salt.enforceState(saltMaster, 'I@rundeck:server', 'rundeck.server')
+ salt.enforceState(pepperEnv, 'I@devops_portal:config', 'devops_portal.config')
+ salt.enforceState(pepperEnv, 'I@rundeck:server', 'rundeck.server')
}
stage("Deploy Docker services") {
@@ -171,19 +173,19 @@
// services are deployed
// XXX: for some weird unknown reason, refresh_pillar is
// required to execute here
- salt.runSaltProcessStep(saltMaster, 'I@aptly:publisher', 'saltutil.refresh_pillar', [], null, true)
- salt.enforceState(saltMaster, 'I@aptly:publisher', 'aptly.publisher', true)
+ salt.runSaltProcessStep(pepperEnv, 'I@aptly:publisher', 'saltutil.refresh_pillar', [], null, true)
+ salt.enforceState(pepperEnv, 'I@aptly:publisher', 'aptly.publisher', true)
retry(3) {
sleep(5)
- salt.enforceState(saltMaster, 'I@docker:swarm:role:master', 'docker.client')
+ salt.enforceState(pepperEnv, 'I@docker:swarm:role:master', 'docker.client')
}
// XXX: Workaround to have `/var/lib/jenkins` on all
// nodes where are jenkins_slave services are created.
- salt.runSaltProcessStep(saltMaster, 'I@docker:swarm', 'cmd.run', ['mkdir -p /var/lib/jenkins'])
+ salt.runSaltProcessStep(pepperEnv, 'I@docker:swarm', 'cmd.run', ['mkdir -p /var/lib/jenkins'])
}
stage("Configure CI/CD services") {
- salt.syncAll(saltMaster, '*')
+ salt.syncAll(pepperEnv, '*')
// Aptly
timeout(10) {
@@ -192,68 +194,68 @@
// XXX: retry to workaround magical VALUE_TRIMMED
// response from salt master + to give slow cloud some
// more time to settle down
- salt.cmdRun(saltMaster, 'I@aptly:server', 'while true; do curl -sf http://172.16.10.254:8084/api/version >/dev/null && break; done')
+ salt.cmdRun(pepperEnv, 'I@aptly:server', 'while true; do curl -sf http://172.16.10.254:8084/api/version >/dev/null && break; done')
}
}
- salt.enforceState(saltMaster, 'I@aptly:server', 'aptly', true)
+ salt.enforceState(pepperEnv, 'I@aptly:server', 'aptly', true)
// OpenLDAP
timeout(10) {
println "Waiting for OpenLDAP to come up.."
- salt.cmdRun(saltMaster, 'I@openldap:client', 'while true; do curl -sf ldap://172.16.10.254 >/dev/null && break; done')
+ salt.cmdRun(pepperEnv, 'I@openldap:client', 'while true; do curl -sf ldap://172.16.10.254 >/dev/null && break; done')
}
- salt.enforceState(saltMaster, 'I@openldap:client', 'openldap', true)
+ salt.enforceState(pepperEnv, 'I@openldap:client', 'openldap', true)
// Gerrit
timeout(10) {
println "Waiting for Gerrit to come up.."
- salt.cmdRun(saltMaster, 'I@gerrit:client', 'while true; do curl -sf 172.16.10.254:8080 >/dev/null && break; done')
+ salt.cmdRun(pepperEnv, 'I@gerrit:client', 'while true; do curl -sf 172.16.10.254:8080 >/dev/null && break; done')
}
- salt.enforceState(saltMaster, 'I@gerrit:client', 'gerrit', true)
+ salt.enforceState(pepperEnv, 'I@gerrit:client', 'gerrit', true)
// Jenkins
timeout(10) {
println "Waiting for Jenkins to come up.."
- salt.cmdRun(saltMaster, 'I@jenkins:client', 'while true; do curl -sf 172.16.10.254:8081 >/dev/null && break; done')
+ salt.cmdRun(pepperEnv, 'I@jenkins:client', 'while true; do curl -sf 172.16.10.254:8081 >/dev/null && break; done')
}
retry(2) {
// XXX: needs retry as first run installs python-jenkins
// thus make jenkins modules available for second run
- salt.enforceState(saltMaster, 'I@jenkins:client', 'jenkins', true)
+ salt.enforceState(pepperEnv, 'I@jenkins:client', 'jenkins', true)
}
// Postgres client - initialize OSS services databases
timeout(300){
println "Waiting for postgresql database to come up.."
- salt.cmdRun(saltMaster, 'I@postgresql:client', 'while true; do if docker service logs postgresql_postgresql-db | grep "ready to accept"; then break; else sleep 5; fi; done')
+ salt.cmdRun(pepperEnv, 'I@postgresql:client', 'while true; do if docker service logs postgresql_postgresql-db | grep "ready to accept"; then break; else sleep 5; fi; done')
}
// XXX: first run usually fails on some inserts, but we need to create databases at first
- salt.enforceState(saltMaster, 'I@postgresql:client', 'postgresql.client', true, false)
+ salt.enforceState(pepperEnv, 'I@postgresql:client', 'postgresql.client', true, false)
// Setup postgres database with integration between
// Pushkin notification service and Security Monkey security audit service
timeout(10) {
println "Waiting for Pushkin to come up.."
- salt.cmdRun(saltMaster, 'I@postgresql:client', 'while true; do curl -sf 172.16.10.254:8887/apps >/dev/null && break; done')
+ salt.cmdRun(pepperEnv, 'I@postgresql:client', 'while true; do curl -sf 172.16.10.254:8887/apps >/dev/null && break; done')
}
- salt.enforceState(saltMaster, 'I@postgresql:client', 'postgresql.client', true)
+ salt.enforceState(pepperEnv, 'I@postgresql:client', 'postgresql.client', true)
// Rundeck
timeout(10) {
println "Waiting for Rundeck to come up.."
- salt.cmdRun(saltMaster, 'I@rundeck:client', 'while true; do curl -sf 172.16.10.254:4440 >/dev/null && break; done')
+ salt.cmdRun(pepperEnv, 'I@rundeck:client', 'while true; do curl -sf 172.16.10.254:4440 >/dev/null && break; done')
}
- salt.enforceState(saltMaster, 'I@rundeck:client', 'rundeck.client', true)
+ salt.enforceState(pepperEnv, 'I@rundeck:client', 'rundeck.client', true)
// Elasticsearch
timeout(10) {
println 'Waiting for Elasticsearch to come up..'
- salt.cmdRun(saltMaster, 'I@elasticsearch:client', 'while true; do curl -sf 172.16.10.254:9200 >/dev/null && break; done')
+ salt.cmdRun(pepperEnv, 'I@elasticsearch:client', 'while true; do curl -sf 172.16.10.254:9200 >/dev/null && break; done')
}
retry(3){
sleep(10)
// XXX: first run sometimes fails on update indexes, so we need to wait
- salt.enforceState(saltMaster, 'I@elasticsearch:client', 'elasticsearch.client', true)
+ salt.enforceState(pepperEnv, 'I@elasticsearch:client', 'elasticsearch.client', true)
}
}
@@ -263,7 +265,7 @@
//
def adminUser
def authorizedKeysFile
- def adminUserCmdOut = salt.cmdRun(saltMaster, 'I@salt:master', "[ ! -d /home/ubuntu ] || echo 'ubuntu user exists'")
+ def adminUserCmdOut = salt.cmdRun(pepperEnv, 'I@salt:master', "[ ! -d /home/ubuntu ] || echo 'ubuntu user exists'")
if (adminUserCmdOut =~ /ubuntu user exists/) {
adminUser = "ubuntu"
authorizedKeysFile = "/home/ubuntu/.ssh/authorized_keys"
@@ -274,7 +276,7 @@
if (sshPubKey) {
println "Deploying provided ssh key at ${authorizedKeysFile}"
- salt.cmdRun(saltMaster, '*', "echo '${sshPubKey}' | tee -a ${authorizedKeysFile}")
+ salt.cmdRun(pepperEnv, '*', "echo '${sshPubKey}' | tee -a ${authorizedKeysFile}")
}
//
@@ -284,14 +286,14 @@
try {
// Run sphinx state to install sphinx-build needed in
// upcomming orchestrate
- salt.enforceState(saltMaster, 'I@sphinx:server', 'sphinx')
+ salt.enforceState(pepperEnv, 'I@sphinx:server', 'sphinx')
} catch (Throwable e) {
true
}
retry(3) {
// TODO: fix salt.orchestrateSystem
- // print salt.orchestrateSystem(saltMaster, ['expression': '*', 'type': 'compound'], 'sphinx.orch.generate_doc')
- def out = salt.cmdRun(saltMaster, 'I@salt:master', 'salt-run state.orchestrate sphinx.orch.generate_doc || echo "Command execution failed"')
+ // print salt.orchestrateSystem(pepperEnv, ['expression': '*', 'type': 'compound'], 'sphinx.orch.generate_doc')
+ def out = salt.cmdRun(pepperEnv, 'I@salt:master', 'salt-run state.orchestrate sphinx.orch.generate_doc || echo "Command execution failed"')
print common.prettyPrint(out)
if (out =~ /Command execution failed/) {
throw new Exception("Command execution failed")
@@ -302,9 +304,9 @@
// errors are just ignored here
true
}
- salt.enforceState(saltMaster, 'I@nginx:server', 'nginx')
+ salt.enforceState(pepperEnv, 'I@nginx:server', 'nginx')
- def failedSvc = salt.cmdRun(saltMaster, '*', """systemctl --failed | grep -E 'loaded[ \t]+failed' && echo 'Command execution failed' || true""")
+ def failedSvc = salt.cmdRun(pepperEnv, '*', """systemctl --failed | grep -E 'loaded[ \t]+failed' && echo 'Command execution failed' || true""")
if (failedSvc =~ /Command execution failed/) {
common.errorMsg("Some services are not running. Environment may not be fully functional!")
}
diff --git a/cloud-deploy-pipeline.groovy b/cloud-deploy-pipeline.groovy
index 84e96b0..29d3007 100644
--- a/cloud-deploy-pipeline.groovy
+++ b/cloud-deploy-pipeline.groovy
@@ -284,7 +284,7 @@
}
stage('Connect Ceph') {
- orchestrate.connectCeph(saltMaster)
+ orchestrate.connectCeph(venvPepper)
}
}
diff --git a/deploy-k8s-deployments.groovy b/deploy-k8s-deployments.groovy
index 5a0bf9d..bcfaa0e 100644
--- a/deploy-k8s-deployments.groovy
+++ b/deploy-k8s-deployments.groovy
@@ -1,17 +1,17 @@
def common = new com.mirantis.mk.Common()
def salt = new com.mirantis.mk.Salt()
+def python = new com.mirantis.mk.Python()
+def pepperEnv = "pepperEnv"
targetExpression = TARGET_MINIONS ? TARGET_MINIONS : "E@kvm01.*"
node() {
- def saltMaster
- stage('Connect to Salt master') {
- saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
- }
+ python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+
common.infoMsg("Enforcing kubernetes state..")
stage("Update k8s control") {
salt.enforceState(
- saltMaster,
+ pepperEnv,
targetExpression,
'kubernetes.control',
true
@@ -28,13 +28,13 @@
if (extraCommand) {
salt.cmdRun(
- saltMaster,
+ pepperEnv,
targetExpression,
extraCommand
)
}
out = salt.cmdRun(
- saltMaster,
+ pepperEnv,
targetExpression,
'/bin/bash -c \'find /srv/kubernetes/ -type d | grep -v jobs | while read i; do ls $i/*.yml &>/dev/null && (set -x; hyperkube kubectl apply -f $i || echo Command failed; set +x); done; jobs=$(hyperkube kubectl get jobs -o name); find /srv/kubernetes/jobs -type f -name "*.yml" | while read i; do name=$(grep "name:" $i | head -1 | awk "{print $NF}"); echo $jobs|grep $name >/dev/null || (set -x; hyperkube kubectl apply -f $i || echo Command failed; set +x);done\''
)
diff --git a/docker-cleanup-pipeline.groovy b/docker-cleanup-pipeline.groovy
index 1d7b3b4..abc21d1 100644
--- a/docker-cleanup-pipeline.groovy
+++ b/docker-cleanup-pipeline.groovy
@@ -9,14 +9,17 @@
common = new com.mirantis.mk.Common()
salt = new com.mirantis.mk.Salt()
jenkinsUtils = new com.mirantis.mk.JenkinsUtils()
+def python = new com.mirantis.mk.Python()
+
+def pepperEnv = "pepperEnv"
node{
def saltMaster;
- stage("Connect to MCP salt master"){
- saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+ stage('Setup virtualenv for Pepper') {
+ python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
}
stage("Clean old containers"){
- salt.cmdRun(saltMaster, 'I@jenkins:slave', """
+ salt.cmdRun(pepperEnv, 'I@jenkins:slave', """
docker ps --format='{{.ID}}' | xargs -n 1 -r docker inspect \\
-f '{{.ID}} {{.State.Running}} {{.State.StartedAt}}' \\
| awk '\$2 == "true" && \$3 <= "'\$(date -d '${TEST_DATE_STRING}' -Ins --utc \\
@@ -25,6 +28,6 @@
""", false)
}
stage("Run docker system prune"){
- salt.cmdRun(saltMaster, 'I@jenkins:slave', "docker system prune -f")
+ salt.cmdRun(pepperEnv, 'I@jenkins:slave', "docker system prune -f")
}
}
diff --git a/ironic-node-provision-pipeline.groovy b/ironic-node-provision-pipeline.groovy
index 1c96eaa..402254a 100644
--- a/ironic-node-provision-pipeline.groovy
+++ b/ironic-node-provision-pipeline.groovy
@@ -38,20 +38,20 @@
orchestrate = new com.mirantis.mk.Orchestrate()
salt = new com.mirantis.mk.Salt()
test = new com.mirantis.mk.Test()
+def python = new com.mirantis.mk.Python()
-// Define global variables
-def saltMaster
+def pepperEnv = "pepperEnv"
def venv
def outputs = [:]
def ipRegex = "\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}"
-def waitIronicDeployment(master, node_names, target, auth_profile, deploy_timeout=60) {
+def waitIronicDeployment(pepperEnv, node_names, target, auth_profile, deploy_timeout=60) {
def failed_nodes = []
timeout (time: deploy_timeout.toInteger(), unit: 'MINUTES'){
while (node_names.size() != 0) {
common.infoMsg("Waiting for nodes: " + node_names.join(", ") + " to be deployed.")
- res = salt.runSaltProcessStep(master, target, 'ironicng.list_nodes', ["profile=${auth_profile}"], null, false)
+ res = salt.runSaltProcessStep(pepperEnv, target, 'ironicng.list_nodes', ["profile=${auth_profile}"], null, false)
for (n in res['return'][0].values()[0]['nodes']){
if (n['name'] in node_names) {
if (n['provision_state'] == 'active'){
@@ -151,8 +151,8 @@
outputs.put('salt_api', SALT_MASTER_URL)
- // Connect to Salt master
- saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+ python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+
def nodes_to_deploy=[]
@@ -160,10 +160,10 @@
stage('Trigger deployment on nodes') {
if (IRONIC_DEPLOY_PARTITION_PROFILE == '' && IRONIC_DEPLOY_PROFILE == '' && IRONIC_DEPLOY_NODES == 'all'){
common.infoMsg("Trigger ironic.deploy")
- salt.enforceState(saltMaster, RUN_TARGET, ['ironic.deploy'], true)
+ salt.enforceState(pepperEnv, RUN_TARGET, ['ironic.deploy'], true)
} else {
if (IRONIC_DEPLOY_NODES == 'all'){
- res = salt.runSaltProcessStep(saltMaster, RUN_TARGET, 'ironicng.list_nodes', ["profile=${IRONIC_AUTHORIZATION_PROFILE}"], null, true)
+ res = salt.runSaltProcessStep(pepperEnv, RUN_TARGET, 'ironicng.list_nodes', ["profile=${IRONIC_AUTHORIZATION_PROFILE}"], null, true)
// We trigger deployment on single salt minion
for (n in res['return'][0].values()[0]['nodes']){
nodes_to_deploy.add(n['name'])
@@ -180,13 +180,13 @@
for (n in nodes_to_deploy){
common.infoMsg("Trigger deployment of ${n}")
- salt.runSaltProcessStep(saltMaster, RUN_TARGET, 'ironicng.deploy_node', ["${n}"] + cmd_params, null, true)
+ salt.runSaltProcessStep(pepperEnv, RUN_TARGET, 'ironicng.deploy_node', ["${n}"] + cmd_params, null, true)
}
}
}
stage('Waiting for deployment is done.') {
- def failed_nodes = waitIronicDeployment(saltMaster, nodes_to_deploy, RUN_TARGET, IRONIC_AUTHORIZATION_PROFILE, IRONIC_DEPLOY_TIMEOUT)
+ def failed_nodes = waitIronicDeployment(pepperEnv, nodes_to_deploy, RUN_TARGET, IRONIC_AUTHORIZATION_PROFILE, IRONIC_DEPLOY_TIMEOUT)
if (failed_nodes){
common.errorMsg("Some nodes: " + failed_nodes.join(", ") + " are failed to deploy")
currentBuild.result = 'FAILURE'
diff --git a/mk-k8s-simple-deploy-pipeline.groovy b/mk-k8s-simple-deploy-pipeline.groovy
index b0aca53..aa81f5b 100644
--- a/mk-k8s-simple-deploy-pipeline.groovy
+++ b/mk-k8s-simple-deploy-pipeline.groovy
@@ -29,13 +29,15 @@
salt = new com.mirantis.mk.Salt()
orchestrate = new com.mirantis.mk.Orchestrate()
test = new com.mirantis.mk.Test()
+def python = new com.mirantis.mk.Python()
+
+def pepperEnv = "pepperEnv"
artifacts_dir = "_artifacts"
node {
// connection objects
def openstackCloud
- def saltMaster
// value defaults
def openstackVersion = OPENSTACK_API_CLIENT ? OPENSTACK_API_CLIENT : 'liberty'
@@ -70,34 +72,34 @@
stage("Connect to Salt master") {
saltMasterHost = openstack.getHeatStackOutputParam(openstackCloud, HEAT_STACK_NAME, 'salt_master_ip', openstackEnv)
saltMasterUrl = "http://${saltMasterHost}:8088"
- saltMaster = salt.connection(saltMasterUrl, SALT_MASTER_CREDENTIALS)
+ python.setupPepperVirtualenv(pepperEnv, saltMasterUrl, SALT_MASTER_CREDENTIALS)
}
stage("Install core infra") {
- orchestrate.installFoundationInfra(saltMaster)
- orchestrate.validateFoundationInfra(saltMaster)
+ orchestrate.installFoundationInfra(pepperEnv)
+ orchestrate.validateFoundationInfra(pepperEnv)
}
stage("Install Kubernetes infra") {
- orchestrate.installOpenstackMcpInfra(saltMaster)
+ orchestrate.installOpenstackMcpInfra(pepperEnv)
}
stage("Install Kubernetes control") {
- orchestrate.installOpenstackMcpControl(saltMaster)
+ orchestrate.installOpenstackMcpControl(pepperEnv)
}
if (RUN_TESTS == "1") {
sleep(30)
stage('Run k8s bootstrap tests') {
- test.runConformanceTests(saltMaster, 'ctl01*', K8S_API_SERVER, 'tomkukral/k8s-scripts')
+ test.runConformanceTests(pepperEnv, 'ctl01*', K8S_API_SERVER, 'tomkukral/k8s-scripts')
}
stage("Run k8s conformance e2e tests") {
- test.runConformanceTests(saltMaster, 'ctl01*', K8S_API_SERVER, CONFORMANCE_IMAGE)
+ test.runConformanceTests(pepperEnv, 'ctl01*', K8S_API_SERVER, CONFORMANCE_IMAGE)
}
stage("Copy k8s e2e test output to config node ") {
- test.copyTestsOutput(saltMaster,CONFORMANCE_IMAGE)
+ test.copyTestsOutput(pepperEnv, CONFORMANCE_IMAGE)
}
stage("Copy k8s e2e test output to host ") {
@@ -105,7 +107,7 @@
mkdir ${env.WORKSPACE}/${artifacts_dir}
'''
try {
- test.catTestsOutput(saltMaster,CONFORMANCE_IMAGE) >> ${env.WORKSPACE}/${artifacts_dir}/$CONFORMANCE_IMAGE
+ test.catTestsOutput(pepperEnv, CONFORMANCE_IMAGE) >> ${env.WORKSPACE}/${artifacts_dir}/$CONFORMANCE_IMAGE
} catch (InterruptedException x) {
echo "The job was aborted"
} finally {
diff --git a/opencontrail-upgrade.groovy b/opencontrail-upgrade.groovy
index 00b0e7f..7e8412b 100644
--- a/opencontrail-upgrade.groovy
+++ b/opencontrail-upgrade.groovy
@@ -17,8 +17,9 @@
def common = new com.mirantis.mk.Common()
def salt = new com.mirantis.mk.Salt()
+def python = new com.mirantis.mk.Python()
-def saltMaster
+def pepperEnv = "pepperEnv"
def targetLiveSubset
def targetLiveAll
def minions
@@ -34,40 +35,40 @@
def CMP_PKGS = 'contrail-lib contrail-nodemgr contrail-utils contrail-vrouter-agent contrail-vrouter-utils python-contrail python-contrail-vrouter-api python-opencontrail-vrouter-netns contrail-vrouter-dkms'
def KERNEL_MODULE_RELOAD = 'service supervisor-vrouter stop;ifdown vhost0;rmmod vrouter;modprobe vrouter;ifup vhost0;service supervisor-vrouter start;'
-def void runCommonCommands(target, command, args, check, salt, saltMaster, common) {
+def void runCommonCommands(target, command, args, check, salt, pepperEnv, common) {
- out = salt.runSaltCommand(saltMaster, 'local', ['expression': target, 'type': 'compound'], command, null, args, null)
+ out = salt.runSaltCommand(pepperEnv, 'local', ['expression': target, 'type': 'compound'], command, null, args, null)
salt.printSaltCommandResult(out)
// wait until $check is in correct state
if ( check == "nodetool status" ) {
- salt.commandStatus(saltMaster, target, check, 'Status=Up')
+ salt.commandStatus(pepperEnv, target, check, 'Status=Up')
} else if ( check == "contrail-status" ) {
- salt.commandStatus(saltMaster, target, "${check} | grep -v == | grep -v \'disabled on boot\' | grep -v nodemgr | grep -v active | grep -v backup", null, false)
+ salt.commandStatus(pepperEnv, target, "${check} | grep -v == | grep -v \'disabled on boot\' | grep -v nodemgr | grep -v active | grep -v backup", null, false)
}
- //out = salt.runSaltCommand(saltMaster, 'local', ['expression': target, 'type': 'compound'], command, null, check, null)
+ //out = salt.runSaltCommand(pepperEnv, 'local', ['expression': target, 'type': 'compound'], command, null, check, null)
//salt.printSaltCommandResult(out)
//input message: "Please check the output of \'${check}\' and continue if it is correct."
}
node() {
- stage('Connect to Salt API') {
- saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+ stage('Setup virtualenv for Pepper') {
+ python.setupPepperVirtualenv(venvPepper, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
}
if (STAGE_CONTROLLERS_UPGRADE.toBoolean() == true && !errorOccured) {
stage('Opencontrail controllers upgrade') {
- oc_component_repo = salt.runSaltProcessStep(saltMaster, 'I@opencontrail:control and *01*', 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
+ oc_component_repo = salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control and *01*', 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
oc_component_repo = oc_component_repo['return'][0].values()[0]
try {
- salt.runSaltProcessStep(saltMaster, 'I@opencontrail:control', 'cmd.shell', ["rm ${oc_component_repo}"], null, true)
- salt.runSaltProcessStep(saltMaster, 'I@opencontrail:control', 'saltutil.refresh_pillar', [], null, true)
- salt.enforceState(saltMaster, 'I@opencontrail:control', 'linux.system.repo')
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'cmd.shell', ["rm ${oc_component_repo}"], null, true)
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'saltutil.refresh_pillar', [], null, true)
+ salt.enforceState(pepperEnv, 'I@opencontrail:control', 'linux.system.repo')
} catch (Exception er) {
errorOccured = true
common.errorMsg("Opencontrail component on I@opencontrail:control probably failed to be replaced. Please check it in ${oc_component_repo} before continuing.")
@@ -75,14 +76,14 @@
}
try {
- salt.cmdRun(saltMaster, 'I@opencontrail:control', "su root -c '/usr/local/bin/zookeeper-backup-runner.sh'")
+ salt.cmdRun(pepperEnv, 'I@opencontrail:control', "su root -c '/usr/local/bin/zookeeper-backup-runner.sh'")
} catch (Exception er) {
common.errorMsg('Zookeeper failed to backup. Please fix it before continuing.')
return
}
try {
- salt.cmdRun(saltMaster, 'I@cassandra:backup:client', "su root -c '/usr/local/bin/cassandra-backup-runner-call.sh'")
+ salt.cmdRun(pepperEnv, 'I@cassandra:backup:client', "su root -c '/usr/local/bin/cassandra-backup-runner-call.sh'")
} catch (Exception er) {
common.errorMsg('Cassandra failed to backup. Please fix it before continuing.')
return
@@ -92,29 +93,29 @@
check = 'nodetool status'
// ntw01
- runCommonCommands('I@opencontrail:control and *01*', command, args, check, salt, saltMaster, common)
+ runCommonCommands('I@opencontrail:control and *01*', command, args, check, salt, pepperEnv, common)
// ntw02
- runCommonCommands('I@opencontrail:control and *02*', command, args, check, salt, saltMaster, common)
+ runCommonCommands('I@opencontrail:control and *02*', command, args, check, salt, pepperEnv, common)
// ntw03
- runCommonCommands('I@opencontrail:control and *03*', command, args, check, salt, saltMaster, common)
+ runCommonCommands('I@opencontrail:control and *03*', command, args, check, salt, pepperEnv, common)
args = "apt install -o Dpkg::Options::=\"--force-confold\" ${CONTROL_PKGS} -y --force-yes;"
check = 'contrail-status'
// ntw01
- runCommonCommands('I@opencontrail:control and *01*', command, args, check, salt, saltMaster, common)
+ runCommonCommands('I@opencontrail:control and *01*', command, args, check, salt, pepperEnv, common)
// ntw02
- runCommonCommands('I@opencontrail:control and *02*', command, args, check, salt, saltMaster, common)
+ runCommonCommands('I@opencontrail:control and *02*', command, args, check, salt, pepperEnv, common)
// ntw03
- runCommonCommands('I@opencontrail:control and *03*', command, args, check, salt, saltMaster, common)
+ runCommonCommands('I@opencontrail:control and *03*', command, args, check, salt, pepperEnv, common)
try {
- salt.enforceState(saltMaster, 'I@opencontrail:control', 'opencontrail')
+ salt.enforceState(pepperEnv, 'I@opencontrail:control', 'opencontrail')
} catch (Exception er) {
common.errorMsg('Opencontrail state was executed on I@opencontrail:control and failed please fix it manually.')
}
- out = salt.runSaltCommand(saltMaster, 'local', ['expression': 'I@opencontrail:control', 'type': 'compound'], command, null, check, null)
+ out = salt.runSaltCommand(pepperEnv, 'local', ['expression': 'I@opencontrail:control', 'type': 'compound'], command, null, check, null)
salt.printSaltCommandResult(out)
common.warningMsg('Please check \'show bgp summary\' on your bgp router if all bgp peers are in healthy state.')
@@ -129,14 +130,14 @@
stage('Opencontrail analytics upgrade') {
- oc_component_repo = salt.runSaltProcessStep(saltMaster, 'I@opencontrail:collector and *01*', 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
+ oc_component_repo = salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector and *01*', 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
oc_component_repo = oc_component_repo['return'][0].values()[0]
try {
- salt.runSaltProcessStep(saltMaster, 'I@opencontrail:collector', 'cmd.shell', ["rm ${oc_component_repo}"], null, true)
- salt.runSaltProcessStep(saltMaster, 'I@opencontrail:collector', 'saltutil.refresh_pillar', [], null, true)
- salt.enforceState(saltMaster, 'I@opencontrail:collector', 'linux.system.repo')
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'cmd.shell', ["rm ${oc_component_repo}"], null, true)
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'saltutil.refresh_pillar', [], null, true)
+ salt.enforceState(pepperEnv, 'I@opencontrail:collector', 'linux.system.repo')
} catch (Exception er) {
errorOccured = true
common.errorMsg("Opencontrail component on I@opencontrail:collector probably failed to be replaced. Please check it in ${oc_component_repo} before continuing.")
@@ -147,29 +148,29 @@
check = 'nodetool status'
// nal01
- runCommonCommands('I@opencontrail:collector and *01*', command, args, check, salt, saltMaster, common)
+ runCommonCommands('I@opencontrail:collector and *01*', command, args, check, salt, pepperEnv, common)
// nal02
- runCommonCommands('I@opencontrail:collector and *02*', command, args, check, salt, saltMaster, common)
+ runCommonCommands('I@opencontrail:collector and *02*', command, args, check, salt, pepperEnv, common)
// nal03
- runCommonCommands('I@opencontrail:collector and *03*', command, args, check, salt, saltMaster, common)
+ runCommonCommands('I@opencontrail:collector and *03*', command, args, check, salt, pepperEnv, common)
args = "apt install -o Dpkg::Options::=\"--force-confold\" ${ANALYTIC_PKGS} -y --force-yes;"
check = 'contrail-status'
// nal01
- runCommonCommands('I@opencontrail:collector and *01*', command, args, check, salt, saltMaster, common)
+ runCommonCommands('I@opencontrail:collector and *01*', command, args, check, salt, pepperEnv, common)
// nal02
- runCommonCommands('I@opencontrail:collector and *02*', command, args, check, salt, saltMaster, common)
+ runCommonCommands('I@opencontrail:collector and *02*', command, args, check, salt, pepperEnv, common)
// nal03
- runCommonCommands('I@opencontrail:collector and *03*', command, args, check, salt, saltMaster, common)
+ runCommonCommands('I@opencontrail:collector and *03*', command, args, check, salt, pepperEnv, common)
try {
- salt.enforceState(saltMaster, 'I@opencontrail:collector', 'opencontrail')
+ salt.enforceState(pepperEnv, 'I@opencontrail:collector', 'opencontrail')
} catch (Exception er) {
common.errorMsg('Opencontrail state was executed on I@opencontrail:collector and failed please fix it manually.')
}
- out = salt.runSaltCommand(saltMaster, 'local', ['expression': 'I@opencontrail:collector', 'type': 'compound'], command, null, check, null)
+ out = salt.runSaltCommand(pepperEnv, 'local', ['expression': 'I@opencontrail:collector', 'type': 'compound'], command, null, check, null)
salt.printSaltCommandResult(out)
}
}
@@ -179,7 +180,7 @@
try {
stage('List targeted compute servers') {
- minions = salt.getMinions(saltMaster, COMPUTE_TARGET_SERVERS)
+ minions = salt.getMinions(pepperEnv, COMPUTE_TARGET_SERVERS)
if (minions.isEmpty()) {
throw new Exception("No minion was targeted")
@@ -199,13 +200,13 @@
stage("Opencontrail compute upgrade on sample nodes") {
- oc_component_repo = salt.runSaltProcessStep(saltMaster, targetLiveSubset, 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
+ oc_component_repo = salt.runSaltProcessStep(pepperEnv, targetLiveSubset, 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
oc_component_repo = oc_component_repo['return'][0].values()[0]
try {
- salt.runSaltProcessStep(saltMaster, targetLiveSubset, 'cmd.shell', ["rm ${oc_component_repo}"], null, true)
- salt.runSaltProcessStep(saltMaster, targetLiveSubset, 'saltutil.refresh_pillar', [], null, true)
- salt.enforceState(saltMaster, targetLiveSubset, 'linux.system.repo')
+ salt.runSaltProcessStep(pepperEnv, targetLiveSubset, 'cmd.shell', ["rm ${oc_component_repo}"], null, true)
+ salt.runSaltProcessStep(pepperEnv, targetLiveSubset, 'saltutil.refresh_pillar', [], null, true)
+ salt.enforceState(pepperEnv, targetLiveSubset, 'linux.system.repo')
} catch (Exception er) {
errorOccured = true
common.errorMsg("Opencontrail component on ${targetLiveSubset} probably failed to be replaced. Please check it in ${oc_component_repo} before continuing.")
@@ -215,21 +216,21 @@
args = "export DEBIAN_FRONTEND=noninteractive; apt install -o Dpkg::Options::=\"--force-confold\" -o Dpkg::Options::=\"--force-confdef\" ${CMP_PKGS} -y;"
check = 'contrail-status'
- out = salt.runSaltCommand(saltMaster, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, args, null)
+ out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, args, null)
salt.printSaltCommandResult(out)
try {
- salt.enforceState(saltMaster, targetLiveSubset, 'opencontrail')
+ salt.enforceState(pepperEnv, targetLiveSubset, 'opencontrail')
} catch (Exception er) {
common.errorMsg("Opencontrail state was executed on ${targetLiveSubset} and failed please fix it manually.")
}
- salt.runSaltProcessStep(saltMaster, targetLiveSubset, 'cmd.shell', ["${KERNEL_MODULE_RELOAD}"], null, true)
+ salt.runSaltProcessStep(pepperEnv, targetLiveSubset, 'cmd.shell', ["${KERNEL_MODULE_RELOAD}"], null, true)
//sleep(10)
- salt.commandStatus(saltMaster, targetLiveSubset, "${check} | grep -v == | grep -v active", null, false)
+ salt.commandStatus(pepperEnv, targetLiveSubset, "${check} | grep -v == | grep -v active", null, false)
- out = salt.runSaltCommand(saltMaster, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, check, null)
+ out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, check, null)
salt.printSaltCommandResult(out)
}
@@ -238,13 +239,13 @@
}
stage("Opencontrail compute upgrade on all targeted nodes") {
- oc_component_repo = salt.runSaltProcessStep(saltMaster, targetLiveAll, 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
+ oc_component_repo = salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
oc_component_repo = oc_component_repo['return'][0].values()[0]
try {
- salt.runSaltProcessStep(saltMaster, targetLiveAll, 'cmd.shell', ["rm ${oc_component_repo}"], null, true)
- salt.runSaltProcessStep(saltMaster, targetLiveAll, 'saltutil.refresh_pillar', [], null, true)
- salt.enforceState(saltMaster, targetLiveAll, 'linux.system.repo')
+ salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'cmd.shell', ["rm ${oc_component_repo}"], null, true)
+ salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'saltutil.refresh_pillar', [], null, true)
+ salt.enforceState(pepperEnv, targetLiveAll, 'linux.system.repo')
} catch (Exception er) {
common.errorMsg("Opencontrail component on ${targetLiveAll} probably failed to be replaced. Please check it in ${oc_component_repo} before continuing.")
return
@@ -253,20 +254,20 @@
args = "export DEBIAN_FRONTEND=noninteractive; apt install -o Dpkg::Options::=\"--force-confold\" -o Dpkg::Options::=\"--force-confdef\" ${CMP_PKGS} -y;"
check = 'contrail-status'
- out = salt.runSaltCommand(saltMaster, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, args, null)
+ out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, args, null)
salt.printSaltCommandResult(out)
try {
- salt.enforceState(saltMaster, targetLiveAll, 'opencontrail')
+ salt.enforceState(pepperEnv, targetLiveAll, 'opencontrail')
} catch (Exception er) {
common.errorMsg("Opencontrail state was executed on ${targetLiveAll} and failed please fix it manually.")
}
- salt.runSaltProcessStep(saltMaster, targetLiveAll, 'cmd.shell', ["${KERNEL_MODULE_RELOAD}"], null, true)
+ salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'cmd.shell', ["${KERNEL_MODULE_RELOAD}"], null, true)
//sleep(10)
- salt.commandStatus(saltMaster, targetLiveAll, "${check} | grep -v == | grep -v active", null, false)
+ salt.commandStatus(pepperEnv, targetLiveAll, "${check} | grep -v == | grep -v active", null, false)
- out = salt.runSaltCommand(saltMaster, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, check, null)
+ out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, check, null)
salt.printSaltCommandResult(out)
}
@@ -287,13 +288,13 @@
stage('Opencontrail controllers rollback') {
- oc_component_repo = salt.runSaltProcessStep(saltMaster, 'I@opencontrail:control and *01*', 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
+ oc_component_repo = salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control and *01*', 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
oc_component_repo = oc_component_repo['return'][0].values()[0]
try {
- salt.runSaltProcessStep(saltMaster, 'I@opencontrail:control', 'cmd.shell', ["rm ${oc_component_repo}"], null, true)
- salt.runSaltProcessStep(saltMaster, 'I@opencontrail:control', 'saltutil.refresh_pillar', [], null, true)
- salt.enforceState(saltMaster, 'I@opencontrail:control', 'linux.system.repo')
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'cmd.shell', ["rm ${oc_component_repo}"], null, true)
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'saltutil.refresh_pillar', [], null, true)
+ salt.enforceState(pepperEnv, 'I@opencontrail:control', 'linux.system.repo')
} catch (Exception er) {
errorOccured = true
common.errorMsg("Opencontrail component on I@opencontrail:control probably failed to be replaced. Please check it in ${oc_component_repo} before continuing.")
@@ -304,29 +305,29 @@
check = 'nodetool status'
// ntw01
- runCommonCommands('I@opencontrail:control and *01*', command, args, check, salt, saltMaster, common)
+ runCommonCommands('I@opencontrail:control and *01*', command, args, check, salt, pepperEnv, common)
// ntw02
- runCommonCommands('I@opencontrail:control and *02*', command, args, check, salt, saltMaster, common)
+ runCommonCommands('I@opencontrail:control and *02*', command, args, check, salt, pepperEnv, common)
// ntw03
- runCommonCommands('I@opencontrail:control and *03*', command, args, check, salt, saltMaster, common)
+ runCommonCommands('I@opencontrail:control and *03*', command, args, check, salt, pepperEnv, common)
args = "apt install -o Dpkg::Options::=\"--force-confold\" ${CONTROL_PKGS} -y --force-yes;"
check = 'contrail-status'
// ntw01
- runCommonCommands('I@opencontrail:control and *01*', command, args, check, salt, saltMaster, common)
+ runCommonCommands('I@opencontrail:control and *01*', command, args, check, salt, pepperEnv, common)
// ntw02
- runCommonCommands('I@opencontrail:control and *02*', command, args, check, salt, saltMaster, common)
+ runCommonCommands('I@opencontrail:control and *02*', command, args, check, salt, pepperEnv, common)
// ntw03
- runCommonCommands('I@opencontrail:control and *03*', command, args, check, salt, saltMaster, common)
+ runCommonCommands('I@opencontrail:control and *03*', command, args, check, salt, pepperEnv, common)
try {
- salt.enforceState(saltMaster, 'I@opencontrail:control', 'opencontrail')
+ salt.enforceState(pepperEnv, 'I@opencontrail:control', 'opencontrail')
} catch (Exception er) {
common.errorMsg('Opencontrail state was executed on I@opencontrail:control and failed please fix it manually.')
}
- out = salt.runSaltCommand(saltMaster, 'local', ['expression': 'I@opencontrail:control', 'type': 'compound'], command, null, check, null)
+ out = salt.runSaltCommand(pepperEnv, 'local', ['expression': 'I@opencontrail:control', 'type': 'compound'], command, null, check, null)
salt.printSaltCommandResult(out)
common.warningMsg('Please check \'show bgp summary\' on your bgp router if all bgp peers are in healthy state.')
@@ -341,13 +342,13 @@
stage('Opencontrail analytics rollback') {
- oc_component_repo = salt.runSaltProcessStep(saltMaster, 'I@opencontrail:collector and *01*', 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
+ oc_component_repo = salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector and *01*', 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
oc_component_repo = oc_component_repo['return'][0].values()[0]
try {
- salt.runSaltProcessStep(saltMaster, 'I@opencontrail:collector', 'cmd.shell', ["rm ${oc_component_repo}"], null, true)
- salt.runSaltProcessStep(saltMaster, 'I@opencontrail:collector', 'saltutil.refresh_pillar', [], null, true)
- salt.enforceState(saltMaster, 'I@opencontrail:collector', 'linux.system.repo')
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'cmd.shell', ["rm ${oc_component_repo}"], null, true)
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'saltutil.refresh_pillar', [], null, true)
+ salt.enforceState(pepperEnv, 'I@opencontrail:collector', 'linux.system.repo')
} catch (Exception er) {
errorOccured = true
common.errorMsg("Opencontrail component on I@opencontrail:collector probably failed to be replaced. Please check it in ${oc_component_repo} before continuing.")
@@ -358,29 +359,29 @@
check = 'nodetool status'
// nal01
- runCommonCommands('I@opencontrail:collector and *01*', command, args, check, salt, saltMaster, common)
+ runCommonCommands('I@opencontrail:collector and *01*', command, args, check, salt, pepperEnv, common)
// nal02
- runCommonCommands('I@opencontrail:collector and *02*', command, args, check, salt, saltMaster, common)
+ runCommonCommands('I@opencontrail:collector and *02*', command, args, check, salt, pepperEnv, common)
// nal03
- runCommonCommands('I@opencontrail:collector and *03*', command, args, check, salt, saltMaster, common)
+ runCommonCommands('I@opencontrail:collector and *03*', command, args, check, salt, pepperEnv, common)
args = "apt install -o Dpkg::Options::=\"--force-confold\" ${ANALYTIC_PKGS} -y --force-yes;"
check = 'contrail-status'
// nal01
- runCommonCommands('I@opencontrail:collector and *01*', command, args, check, salt, saltMaster, common)
+ runCommonCommands('I@opencontrail:collector and *01*', command, args, check, salt, pepperEnv, common)
// nal02
- runCommonCommands('I@opencontrail:collector and *02*', command, args, check, salt, saltMaster, common)
+ runCommonCommands('I@opencontrail:collector and *02*', command, args, check, salt, pepperEnv, common)
// nal03
- runCommonCommands('I@opencontrail:collector and *03*', command, args, check, salt, saltMaster, common)
+ runCommonCommands('I@opencontrail:collector and *03*', command, args, check, salt, pepperEnv, common)
try {
- salt.enforceState(saltMaster, 'I@opencontrail:collector', 'opencontrail')
+ salt.enforceState(pepperEnv, 'I@opencontrail:collector', 'opencontrail')
} catch (Exception er) {
common.errorMsg('Opencontrail state was executed on I@opencontrail:collector and failed please fix it manually.')
}
- out = salt.runSaltCommand(saltMaster, 'local', ['expression': 'I@opencontrail:collector', 'type': 'compound'], command, null, check, null)
+ out = salt.runSaltCommand(pepperEnv, 'local', ['expression': 'I@opencontrail:collector', 'type': 'compound'], command, null, check, null)
salt.printSaltCommandResult(out)
}
}
@@ -390,7 +391,7 @@
try {
stage('List targeted compute servers') {
- minions = salt.getMinions(saltMaster, COMPUTE_TARGET_SERVERS)
+ minions = salt.getMinions(pepperEnv, COMPUTE_TARGET_SERVERS)
if (minions.isEmpty()) {
throw new Exception("No minion was targeted")
@@ -410,13 +411,13 @@
stage("Opencontrail compute rollback on sample nodes") {
- oc_component_repo = salt.runSaltProcessStep(saltMaster, targetLiveSubset, 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
+ oc_component_repo = salt.runSaltProcessStep(pepperEnv, targetLiveSubset, 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
oc_component_repo = oc_component_repo['return'][0].values()[0]
try {
- salt.runSaltProcessStep(saltMaster, targetLiveSubset, 'cmd.shell', ["rm ${oc_component_repo}"], null, true)
- salt.runSaltProcessStep(saltMaster, targetLiveSubset, 'saltutil.refresh_pillar', [], null, true)
- salt.enforceState(saltMaster, targetLiveSubset, 'linux.system.repo')
+ salt.runSaltProcessStep(pepperEnv, targetLiveSubset, 'cmd.shell', ["rm ${oc_component_repo}"], null, true)
+ salt.runSaltProcessStep(pepperEnv, targetLiveSubset, 'saltutil.refresh_pillar', [], null, true)
+ salt.enforceState(pepperEnv, targetLiveSubset, 'linux.system.repo')
} catch (Exception er) {
errorOccured = true
common.errorMsg("Opencontrail component on ${targetLiveSubset} probably failed to be replaced. Please check it in ${oc_component_repo} before continuing.")
@@ -426,20 +427,20 @@
args = "export DEBIAN_FRONTEND=noninteractive; apt install --allow-downgrades -o Dpkg::Options::=\"--force-confold\" -o Dpkg::Options::=\"--force-confdef\" ${CMP_PKGS} -y;"
check = 'contrail-status'
- out = salt.runSaltCommand(saltMaster, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, args, null)
+ out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, args, null)
salt.printSaltCommandResult(out)
try {
- salt.enforceState(saltMaster, targetLiveSubset, 'opencontrail')
+ salt.enforceState(pepperEnv, targetLiveSubset, 'opencontrail')
} catch (Exception er) {
common.errorMsg("Opencontrail state was executed on ${targetLiveSubset} and failed please fix it manually.")
}
- salt.runSaltProcessStep(saltMaster, targetLiveSubset, 'cmd.shell', ["${KERNEL_MODULE_RELOAD}"], null, true)
+ salt.runSaltProcessStep(pepperEnv, targetLiveSubset, 'cmd.shell', ["${KERNEL_MODULE_RELOAD}"], null, true)
//sleep(10)
- salt.commandStatus(saltMaster, targetLiveSubset, "${check} | grep -v == | grep -v active", null, false)
+ salt.commandStatus(pepperEnv, targetLiveSubset, "${check} | grep -v == | grep -v active", null, false)
- out = salt.runSaltCommand(saltMaster, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, check, null)
+ out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, check, null)
salt.printSaltCommandResult(out)
}
@@ -449,13 +450,13 @@
stage("Opencontrail compute upgrade on all targeted nodes") {
- oc_component_repo = salt.runSaltProcessStep(saltMaster, targetLiveAll, 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
+ oc_component_repo = salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
oc_component_repo = oc_component_repo['return'][0].values()[0]
try {
- salt.runSaltProcessStep(saltMaster, targetLiveAll, 'cmd.shell', ["rm ${oc_component_repo}"], null, true)
- salt.runSaltProcessStep(saltMaster, targetLiveAll, 'saltutil.refresh_pillar', [], null, true)
- salt.enforceState(saltMaster, targetLiveAll, 'linux.system.repo')
+ salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'cmd.shell', ["rm ${oc_component_repo}"], null, true)
+ salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'saltutil.refresh_pillar', [], null, true)
+ salt.enforceState(pepperEnv, targetLiveAll, 'linux.system.repo')
} catch (Exception er) {
common.errorMsg("Opencontrail component on ${targetLiveAll} probably failed to be replaced. Please check it in ${oc_component_repo} before continuing.")
return
@@ -464,21 +465,21 @@
args = "export DEBIAN_FRONTEND=noninteractive; apt install --allow-downgrades -o Dpkg::Options::=\"--force-confold\" -o Dpkg::Options::=\"--force-confdef\" ${CMP_PKGS} -y;"
check = 'contrail-status'
- out = salt.runSaltCommand(saltMaster, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, args, null)
+ out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, args, null)
salt.printSaltCommandResult(out)
try {
- salt.enforceState(saltMaster, targetLiveAll, 'opencontrail')
+ salt.enforceState(pepperEnv, targetLiveAll, 'opencontrail')
} catch (Exception er) {
common.errorMsg("Opencontrail state was executed on ${targetLiveAll} and failed please fix it manually.")
}
- salt.runSaltProcessStep(saltMaster, targetLiveAll, 'cmd.shell', ["${KERNEL_MODULE_RELOAD}"], null, true)
+ salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'cmd.shell', ["${KERNEL_MODULE_RELOAD}"], null, true)
//sleep(10)
- salt.commandStatus(saltMaster, targetLiveAll, "${check} | grep -v == | grep -v active", null, false)
+ salt.commandStatus(pepperEnv, targetLiveAll, "${check} | grep -v == | grep -v active", null, false)
- out = salt.runSaltCommand(saltMaster, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, check, null)
+ out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, check, null)
salt.printSaltCommandResult(out)
}
diff --git a/openstack-compute-install.groovy b/openstack-compute-install.groovy
index 535cde0..bbf07a4 100644
--- a/openstack-compute-install.groovy
+++ b/openstack-compute-install.groovy
@@ -10,8 +10,9 @@
def common = new com.mirantis.mk.Common()
def salt = new com.mirantis.mk.Salt()
+def python = new com.mirantis.mk.Python()
-def saltMaster
+def pepperEnv = "pepperEnv"
def minions
def result
def command
@@ -21,12 +22,12 @@
node() {
try {
- stage('Connect to Salt master') {
- saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+ stage('Setup virtualenv for Pepper') {
+ python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
}
stage('List target servers') {
- minions = salt.getMinions(saltMaster, TARGET_SERVERS)
+ minions = salt.getMinions(pepperEnv, TARGET_SERVERS)
if (minions.isEmpty()) {
throw new Exception("No minion was targeted")
@@ -38,49 +39,49 @@
}
stage("Setup repositories") {
- salt.enforceState(saltMaster, targetLiveAll, 'linux.system.repo', true)
+ salt.enforceState(pepperEnv, targetLiveAll, 'linux.system.repo', true)
}
stage("Upgrade packages") {
- salt.runSaltProcessStep(saltMaster, targetLiveAll, 'pkg.upgrade', [], null, true)
+ salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'pkg.upgrade', [], null, true)
}
stage("Setup networking") {
// Sync all of the modules from the salt master.
- salt.syncAll(saltMaster, targetLiveAll)
+ salt.syncAll(pepperEnv, targetLiveAll)
// Apply state 'salt' to install python-psutil for network configuration without restarting salt-minion to avoid losing connection.
- salt.runSaltProcessStep(saltMaster, targetLiveAll, 'state.apply', ['salt', 'exclude=[{\'id\': \'salt_minion_service\'}, {\'id\': \'salt_minion_service_restart\'}, {\'id\': \'salt_minion_sync_all\'}]'], null, true)
+ salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'state.apply', ['salt', 'exclude=[{\'id\': \'salt_minion_service\'}, {\'id\': \'salt_minion_service_restart\'}, {\'id\': \'salt_minion_sync_all\'}]'], null, true)
// Restart salt-minion to take effect.
- salt.runSaltProcessStep(saltMaster, targetLiveAll, 'service.restart', ['salt-minion'], null, true, 10)
+ salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'service.restart', ['salt-minion'], null, true, 10)
// Configure networking excluding vhost0 interface.
- salt.runSaltProcessStep(saltMaster, targetLiveAll, 'state.apply', ['linux.network', 'exclude=[{\'id\': \'linux_interface_vhost0\'}]'], null, true)
+ salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'state.apply', ['linux.network', 'exclude=[{\'id\': \'linux_interface_vhost0\'}]'], null, true)
// Kill leftover ifup/ifdown processes that may be stuck from the previous linux.network state.
- salt.runSaltProcessStep(saltMaster, targetLiveAll, 'ps.pkill', ['ifup'], null, false)
- salt.runSaltProcessStep(saltMaster, targetLiveAll, 'ps.pkill', ['ifdown'], null, false)
+ salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'ps.pkill', ['ifup'], null, false)
+ salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'ps.pkill', ['ifdown'], null, false)
// Restart networking to bring UP all interfaces.
- salt.runSaltProcessStep(saltMaster, targetLiveAll, 'service.restart', ['networking'], null, true, 300)
+ salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'service.restart', ['networking'], null, true, 300)
}
stage("Highstate compute") {
// Execute highstate without state opencontrail.client.
- salt.runSaltProcessStep(saltMaster, targetLiveAll, 'state.highstate', ['exclude=opencontrail.client'], null, true)
+ salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'state.highstate', ['exclude=opencontrail.client'], null, true)
// Apply nova state to remove libvirt default bridge virbr0.
- salt.enforceState(saltMaster, targetLiveAll, 'nova', true)
+ salt.enforceState(pepperEnv, targetLiveAll, 'nova', true)
// Execute highstate.
- salt.enforceHighstate(saltMaster, targetLiveAll, true)
+ salt.enforceHighstate(pepperEnv, targetLiveAll, true)
// Restart supervisor-vrouter.
- salt.runSaltProcessStep(saltMaster, targetLiveAll, 'service.restart', ['supervisor-vrouter'], null, true, 300)
+ salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'service.restart', ['supervisor-vrouter'], null, true, 300)
// Apply salt,collectd to update information about current network interfaces.
- salt.enforceState(saltMaster, targetLiveAll, 'salt,collectd', true)
+ salt.enforceState(pepperEnv, targetLiveAll, 'salt,collectd', true)
}
} catch (Throwable e) {
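
Reviewer note: the ordering in the "Setup networking" stage above is deliberate -- apply the `salt` state with the minion-restart ids excluded so the pipeline does not cut off its own salt-minion connection, restart `salt-minion` explicitly afterwards, then reconfigure networking with `vhost0` excluded before bouncing the networking service. Distilled below purely as a reading aid; the calls are exactly the ones in the stage above, only the wrapper name is invented:

```groovy
// Reading aid only: the call sequence from the "Setup networking" stage.
def setupNetworkingSketch(pepperEnv, target) {
    // sync modules first so state.apply sees current formulas
    salt.syncAll(pepperEnv, target)
    // apply 'salt' (installs python-psutil) while excluding the ids that would restart the minion
    salt.runSaltProcessStep(pepperEnv, target, 'state.apply',
        ['salt', 'exclude=[{\'id\': \'salt_minion_service\'}, {\'id\': \'salt_minion_service_restart\'}, {\'id\': \'salt_minion_sync_all\'}]'], null, true)
    // now restart the minion on purpose, with a short timeout
    salt.runSaltProcessStep(pepperEnv, target, 'service.restart', ['salt-minion'], null, true, 10)
    // configure networking, leaving vhost0 untouched
    salt.runSaltProcessStep(pepperEnv, target, 'state.apply',
        ['linux.network', 'exclude=[{\'id\': \'linux_interface_vhost0\'}]'], null, true)
    // best-effort cleanup of stuck ifup/ifdown, failures tolerated (last arg false)
    salt.runSaltProcessStep(pepperEnv, target, 'ps.pkill', ['ifup'], null, false)
    salt.runSaltProcessStep(pepperEnv, target, 'ps.pkill', ['ifdown'], null, false)
    salt.runSaltProcessStep(pepperEnv, target, 'service.restart', ['networking'], null, true, 300)
}
```
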
diff --git a/openstack-compute-upgrade.groovy b/openstack-compute-upgrade.groovy
index 095697d..3812871 100644
--- a/openstack-compute-upgrade.groovy
+++ b/openstack-compute-upgrade.groovy
@@ -12,8 +12,9 @@
def common = new com.mirantis.mk.Common()
def salt = new com.mirantis.mk.Salt()
+def python = new com.mirantis.mk.Python()
-def saltMaster
+def pepperEnv = "pepperEnv"
def targetTestSubset
def targetLiveSubset
def targetLiveAll
@@ -27,12 +28,12 @@
node() {
try {
- stage('Connect to Salt master') {
- saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+ stage('Setup virtualenv for Pepper') {
+ python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
}
stage('List target servers') {
- minions = salt.getMinions(saltMaster, TARGET_SERVERS)
+ minions = salt.getMinions(pepperEnv, TARGET_SERVERS)
if (minions.isEmpty()) {
throw new Exception("No minion was targeted")
@@ -55,14 +56,14 @@
stage("Add new repos on test nodes") {
- salt.enforceState(saltMaster, targetTestSubset, 'linux.system.repo')
+ salt.enforceState(pepperEnv, targetTestSubset, 'linux.system.repo')
}
opencontrail = null
try {
- opencontrail = salt.cmdRun(saltMaster, targetTestSubsetProbe, "salt-call grains.item roles | grep opencontrail.compute")
+ opencontrail = salt.cmdRun(pepperEnv, targetTestSubsetProbe, "salt-call grains.item roles | grep opencontrail.compute")
print(opencontrail)
} catch (Exception er) {
common.infoMsg("opencontrail is not used")
@@ -70,13 +71,13 @@
if(opencontrail != null) {
stage('Remove OC component from repos on test nodes') {
- salt.cmdRun(saltMaster, targetTestSubset, "find /etc/apt/sources.list* -type f -print0 | xargs -0 sed -i -r -e 's/ oc([0-9]*) / /g;s/ oc([0-9]*\$)//g'")
- salt.runSaltProcessStep(saltMaster, targetTestSubset, 'pkg.refresh_db', [], null, true)
+ salt.cmdRun(pepperEnv, targetTestSubset, "find /etc/apt/sources.list* -type f -print0 | xargs -0 sed -i -r -e 's/ oc([0-9]*) / /g;s/ oc([0-9]*\$)//g'")
+ salt.runSaltProcessStep(pepperEnv, targetTestSubset, 'pkg.refresh_db', [], null, true)
}
}
stage("List package upgrades") {
- salt.runSaltProcessStep(saltMaster, targetTestSubset, 'pkg.list_upgrades', [], null, true)
+ salt.runSaltProcessStep(pepperEnv, targetTestSubset, 'pkg.list_upgrades', [], null, true)
}
stage('Confirm upgrade on sample nodes') {
@@ -84,13 +85,13 @@
}
stage("Add new repos on sample nodes") {
- salt.enforceState(saltMaster, targetLiveSubset, 'linux.system.repo')
+ salt.enforceState(pepperEnv, targetLiveSubset, 'linux.system.repo')
}
if(opencontrail != null) {
stage('Remove OC component from repos on sample nodes') {
- salt.cmdRun(saltMaster, targetLiveSubset, "find /etc/apt/sources.list* -type f -print0 | xargs -0 sed -i -r -e 's/ oc([0-9]*) / /g;s/ oc([0-9]*\$)//g'")
- salt.runSaltProcessStep(saltMaster, targetLiveSubset, 'pkg.refresh_db', [], null, true)
+ salt.cmdRun(pepperEnv, targetLiveSubset, "find /etc/apt/sources.list* -type f -print0 | xargs -0 sed -i -r -e 's/ oc([0-9]*) / /g;s/ oc([0-9]*\$)//g'")
+ salt.runSaltProcessStep(pepperEnv, targetLiveSubset, 'pkg.refresh_db', [], null, true)
}
}
@@ -98,7 +99,7 @@
stage('Test upgrade on sample') {
try {
- salt.cmdRun(saltMaster, targetLiveSubset, args)
+ salt.cmdRun(pepperEnv, targetLiveSubset, args)
} catch (Exception er) {
print(er)
}
@@ -112,14 +113,14 @@
args = 'export DEBIAN_FRONTEND=noninteractive; apt-get -y -q --allow-downgrades -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confold\" dist-upgrade;'
stage('Apply package upgrades on sample') {
- out = salt.runSaltCommand(saltMaster, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, args, commandKwargs)
+ out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, args, commandKwargs)
salt.printSaltCommandResult(out)
}
openvswitch = null
try {
- openvswitch = salt.cmdRun(saltMaster, targetLiveSubsetProbe, "salt-call grains.item roles | grep neutron.compute")
+ openvswitch = salt.cmdRun(pepperEnv, targetLiveSubsetProbe, "salt-call grains.item roles | grep neutron.compute")
} catch (Exception er) {
common.infoMsg("openvswitch is not used")
}
@@ -128,21 +129,21 @@
args = "sudo /usr/share/openvswitch/scripts/ovs-ctl start"
stage('Start ovs on sample nodes') {
- out = salt.runSaltCommand(saltMaster, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, args, commandKwargs)
+ out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, args, commandKwargs)
salt.printSaltCommandResult(out)
}
stage("Run salt states on sample nodes") {
- salt.enforceState(saltMaster, targetLiveSubset, ['nova', 'neutron'])
+ salt.enforceState(pepperEnv, targetLiveSubset, ['nova', 'neutron'])
}
} else {
stage("Run salt states on sample nodes") {
- salt.enforceState(saltMaster, targetLiveSubset, ['nova', 'linux.system.repo'])
+ salt.enforceState(pepperEnv, targetLiveSubset, ['nova', 'linux.system.repo'])
}
}
stage("Run Highstate on sample nodes") {
try {
- salt.enforceHighstate(saltMaster, targetLiveSubset)
+ salt.enforceHighstate(pepperEnv, targetLiveSubset)
} catch (Exception er) {
common.errorMsg("Highstate was executed on ${targetLiveSubset} but something failed. Please check it and fix it accordingly.")
}
@@ -155,20 +156,20 @@
}
stage("Add new repos on all targeted nodes") {
- salt.enforceState(saltMaster, targetLiveAll, 'linux.system.repo')
+ salt.enforceState(pepperEnv, targetLiveAll, 'linux.system.repo')
}
if(opencontrail != null) {
stage('Remove OC component from repos on all targeted nodes') {
- salt.cmdRun(saltMaster, targetLiveAll, "find /etc/apt/sources.list* -type f -print0 | xargs -0 sed -i -r -e 's/ oc([0-9]*) / /g;s/ oc([0-9]*\$)//g'")
- salt.runSaltProcessStep(saltMaster, targetLiveAll, 'pkg.refresh_db', [], null, true)
+ salt.cmdRun(pepperEnv, targetLiveAll, "find /etc/apt/sources.list* -type f -print0 | xargs -0 sed -i -r -e 's/ oc([0-9]*) / /g;s/ oc([0-9]*\$)//g'")
+ salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'pkg.refresh_db', [], null, true)
}
}
args = 'export DEBIAN_FRONTEND=noninteractive; apt-get -y -q --allow-downgrades -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confold\" dist-upgrade;'
stage('Apply package upgrades on all targeted nodes') {
- out = salt.runSaltCommand(saltMaster, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, args, commandKwargs)
+ out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, args, commandKwargs)
salt.printSaltCommandResult(out)
}
@@ -176,21 +177,21 @@
args = "sudo /usr/share/openvswitch/scripts/ovs-ctl start"
stage('Start ovs on all targeted nodes') {
- out = salt.runSaltCommand(saltMaster, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, args, commandKwargs)
+ out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, args, commandKwargs)
salt.printSaltCommandResult(out)
}
stage("Run salt states on all targeted nodes") {
- salt.enforceState(saltMaster, targetLiveAll, ['nova', 'neutron'])
+ salt.enforceState(pepperEnv, targetLiveAll, ['nova', 'neutron'])
}
} else {
stage("Run salt states on all targeted nodes") {
- salt.enforceState(saltMaster, targetLiveAll, ['nova', 'linux.system.repo'])
+ salt.enforceState(pepperEnv, targetLiveAll, ['nova', 'linux.system.repo'])
}
}
stage("Run Highstate on all targeted nodes") {
try {
- salt.enforceHighstate(saltMaster, targetLiveAll)
+ salt.enforceHighstate(pepperEnv, targetLiveAll)
} catch (Exception er) {
common.errorMsg("Highstate was executed ${targetLiveAll} but something failed. Please check it and fix it accordingly.")
}
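
Reviewer note: the non-interactive dist-upgrade one-liner is repeated verbatim for the sample subset and again for the full target set. A hedged consolidation is sketched below; the helper name is invented, but the args string and the calls are the ones this pipeline already uses:

```groovy
// Hypothetical consolidation of the repeated dist-upgrade invocation above.
def aptDistUpgrade(pepperEnv, target, command, commandKwargs) {
    def args = 'export DEBIAN_FRONTEND=noninteractive; ' +
               'apt-get -y -q --allow-downgrades ' +
               '-o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" dist-upgrade;'
    def out = salt.runSaltCommand(pepperEnv, 'local',
        ['expression': target, 'type': 'compound'], command, null, args, commandKwargs)
    salt.printSaltCommandResult(out)
    return out
}

// usage, matching the stages above:
// aptDistUpgrade(pepperEnv, targetLiveSubset, command, commandKwargs)
// aptDistUpgrade(pepperEnv, targetLiveAll, command, commandKwargs)
```
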
diff --git a/openstack-control-upgrade.groovy b/openstack-control-upgrade.groovy
index 9680f24..b2bd621 100644
--- a/openstack-control-upgrade.groovy
+++ b/openstack-control-upgrade.groovy
@@ -12,14 +12,14 @@
def common = new com.mirantis.mk.Common()
def salt = new com.mirantis.mk.Salt()
+def python = new com.mirantis.mk.Python()
-
-def saltMaster
+def pepperEnv = "pepperEnv"
node() {
- stage('Connect to Salt API') {
- saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+ stage('Setup virtualenv for Pepper') {
+ python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
}
if (STAGE_TEST_UPGRADE.toBoolean() == true) {
@@ -27,175 +27,175 @@
try {
- salt.enforceState(saltMaster, 'I@salt:master', 'reclass')
+ salt.enforceState(pepperEnv, 'I@salt:master', 'reclass')
} catch (Exception e) {
common.warningMsg("Some parts of Reclass state failed. The most probable reasons were uncommited changes. We should continue to run")
}
try {
- salt.runSaltProcessStep(saltMaster, '*', 'saltutil.refresh_pillar', [], null, true)
+ salt.runSaltProcessStep(pepperEnv, '*', 'saltutil.refresh_pillar', [], null, true)
} catch (Exception e) {
common.warningMsg("No response from some minions. We should continue to run")
}
try {
- salt.runSaltProcessStep(saltMaster, '*', 'saltutil.sync_all', [], null, true)
+ salt.runSaltProcessStep(pepperEnv, '*', 'saltutil.sync_all', [], null, true)
} catch (Exception e) {
common.warningMsg("No response from some minions. We should continue to run")
}
- def _pillar = salt.getGrain(saltMaster, 'I@salt:master', 'domain')
+ def _pillar = salt.getGrain(pepperEnv, 'I@salt:master', 'domain')
def domain = _pillar['return'][0].values()[0].values()[0]
print(_pillar)
print(domain)
// read backupninja variable
- _pillar = salt.getPillar(saltMaster, 'I@backupninja:client', '_param:backupninja_backup_host')
+ _pillar = salt.getPillar(pepperEnv, 'I@backupninja:client', '_param:backupninja_backup_host')
def backupninja_backup_host = _pillar['return'][0].values()[0]
print(_pillar)
print(backupninja_backup_host)
- _pillar = salt.getGrain(saltMaster, 'I@salt:control', 'id')
+ _pillar = salt.getGrain(pepperEnv, 'I@salt:control', 'id')
def kvm01 = _pillar['return'][0].values()[0].values()[0]
print(_pillar)
print(kvm01)
- _pillar = salt.getPillar(saltMaster, "${kvm01}", 'salt:control:cluster:internal:node:upg01:provider')
+ _pillar = salt.getPillar(pepperEnv, "${kvm01}", 'salt:control:cluster:internal:node:upg01:provider')
def upgNodeProvider = _pillar['return'][0].values()[0]
print(_pillar)
print(upgNodeProvider)
- salt.runSaltProcessStep(saltMaster, "${upgNodeProvider}", 'virt.destroy', ["upg01.${domain}"], null, true)
- salt.runSaltProcessStep(saltMaster, "${upgNodeProvider}", 'virt.undefine', ["upg01.${domain}"], null, true)
+ salt.runSaltProcessStep(pepperEnv, "${upgNodeProvider}", 'virt.destroy', ["upg01.${domain}"], null, true)
+ salt.runSaltProcessStep(pepperEnv, "${upgNodeProvider}", 'virt.undefine', ["upg01.${domain}"], null, true)
try {
- salt.cmdRun(saltMaster, 'I@salt:master', "salt-key -d upg01.${domain} -y")
+ salt.cmdRun(pepperEnv, 'I@salt:master', "salt-key -d upg01.${domain} -y")
} catch (Exception e) {
common.warningMsg("upg01.${domain} does not match any accepted, unaccepted or rejected keys. The key did not exist yet or was already removed. We should continue to run")
}
// salt 'kvm02*' state.sls salt.control
- salt.enforceState(saltMaster, "${upgNodeProvider}", 'salt.control')
+ salt.enforceState(pepperEnv, "${upgNodeProvider}", 'salt.control')
// wait until upg node is registered in salt-key
- salt.minionPresent(saltMaster, 'I@salt:master', 'upg01')
+ salt.minionPresent(pepperEnv, 'I@salt:master', 'upg01')
// salt '*' saltutil.refresh_pillar
- salt.runSaltProcessStep(saltMaster, 'upg*', 'saltutil.refresh_pillar', [], null, true)
+ salt.runSaltProcessStep(pepperEnv, 'upg*', 'saltutil.refresh_pillar', [], null, true)
// salt '*' saltutil.sync_all
- salt.runSaltProcessStep(saltMaster, 'upg*', 'saltutil.sync_all', [], null, true)
+ salt.runSaltProcessStep(pepperEnv, 'upg*', 'saltutil.sync_all', [], null, true)
// salt "upg*" state.sls linux,openssh,salt.minion,ntp,rsyslog
try {
- salt.enforceState(saltMaster, 'upg*', ['linux', 'openssh', 'salt.minion', 'ntp', 'rsyslog'])
+ salt.enforceState(pepperEnv, 'upg*', ['linux', 'openssh', 'salt.minion', 'ntp', 'rsyslog'])
} catch (Exception e) {
common.warningMsg('Received no response because salt-minion was restarted. We should continue to run')
}
- salt.enforceState(saltMaster, 'upg*', ['linux', 'openssh', 'salt.minion', 'ntp', 'rsyslog'])
+ salt.enforceState(pepperEnv, 'upg*', ['linux', 'openssh', 'salt.minion', 'ntp', 'rsyslog'])
// salt "upg*" state.sls rabbitmq
- salt.enforceState(saltMaster, 'upg*', ['rabbitmq', 'memcached'])
+ salt.enforceState(pepperEnv, 'upg*', ['rabbitmq', 'memcached'])
try {
- salt.enforceState(saltMaster, 'I@backupninja:client', ['openssh.client', 'salt.minion'])
+ salt.enforceState(pepperEnv, 'I@backupninja:client', ['openssh.client', 'salt.minion'])
} catch (Exception e) {
common.warningMsg('salt-minion was restarted. We should continue to run')
}
try {
- salt.enforceState(saltMaster, 'I@backupninja:server', ['salt.minion'])
+ salt.enforceState(pepperEnv, 'I@backupninja:server', ['salt.minion'])
} catch (Exception e) {
common.warningMsg('salt-minion was restarted. We should continue to run')
}
// salt '*' state.apply salt.minion.grains
- //salt.enforceState(saltMaster, '*', 'salt.minion.grains')
+ //salt.enforceState(pepperEnv, '*', 'salt.minion.grains')
// salt -C 'I@backupninja:server' state.sls backupninja
- salt.enforceState(saltMaster, 'I@backupninja:server', 'backupninja')
+ salt.enforceState(pepperEnv, 'I@backupninja:server', 'backupninja')
// salt -C 'I@backupninja:client' state.sls backupninja
- salt.enforceState(saltMaster, 'I@backupninja:client', 'backupninja')
- salt.runSaltProcessStep(saltMaster, 'I@backupninja:client', 'ssh.rm_known_host', ["root", "${backupninja_backup_host}"], null, true)
+ salt.enforceState(pepperEnv, 'I@backupninja:client', 'backupninja')
+ salt.runSaltProcessStep(pepperEnv, 'I@backupninja:client', 'ssh.rm_known_host', ["root", "${backupninja_backup_host}"], null, true)
try {
- salt.cmdRun(saltMaster, 'I@backupninja:client', "arp -d ${backupninja_backup_host}")
+ salt.cmdRun(pepperEnv, 'I@backupninja:client', "arp -d ${backupninja_backup_host}")
} catch (Exception e) {
common.warningMsg('The ARP entry does not exist. We should continue to run.')
}
- salt.runSaltProcessStep(saltMaster, 'I@backupninja:client', 'ssh.set_known_host', ["root", "${backupninja_backup_host}"], null, true)
- salt.cmdRun(saltMaster, 'I@backupninja:client', 'backupninja -n --run /etc/backup.d/101.mysql')
- salt.cmdRun(saltMaster, 'I@backupninja:client', 'backupninja -n --run /etc/backup.d/200.backup.rsync > /tmp/backupninjalog')
+ salt.runSaltProcessStep(pepperEnv, 'I@backupninja:client', 'ssh.set_known_host', ["root", "${backupninja_backup_host}"], null, true)
+ salt.cmdRun(pepperEnv, 'I@backupninja:client', 'backupninja -n --run /etc/backup.d/101.mysql')
+ salt.cmdRun(pepperEnv, 'I@backupninja:client', 'backupninja -n --run /etc/backup.d/200.backup.rsync > /tmp/backupninjalog')
- salt.enforceState(saltMaster, 'I@xtrabackup:server', 'xtrabackup')
- salt.enforceState(saltMaster, 'I@xtrabackup:client', 'openssh.client')
- salt.cmdRun(saltMaster, 'I@xtrabackup:client', "su root -c 'salt-call state.sls xtrabackup'")
- salt.cmdRun(saltMaster, 'I@xtrabackup:client', "su root -c '/usr/local/bin/innobackupex-runner.sh'")
+ salt.enforceState(pepperEnv, 'I@xtrabackup:server', 'xtrabackup')
+ salt.enforceState(pepperEnv, 'I@xtrabackup:client', 'openssh.client')
+ salt.cmdRun(pepperEnv, 'I@xtrabackup:client', "su root -c 'salt-call state.sls xtrabackup'")
+ salt.cmdRun(pepperEnv, 'I@xtrabackup:client', "su root -c '/usr/local/bin/innobackupex-runner.sh'")
- def databases = salt.cmdRun(saltMaster, 'I@mysql:client','salt-call mysql.db_list | grep upgrade | awk \'/-/ {print \$2}\'')
+ def databases = salt.cmdRun(pepperEnv, 'I@mysql:client','salt-call mysql.db_list | grep upgrade | awk \'/-/ {print \$2}\'')
if(databases && databases != ""){
def databasesList = databases['return'][0].values()[0].trim().tokenize("\n")
for( i = 0; i < databasesList.size(); i++){
if(databasesList[i].toLowerCase().contains('upgrade')){
- salt.runSaltProcessStep(saltMaster, 'I@mysql:client', 'mysql.db_remove', ["${databasesList[i]}"], null, true)
+ salt.runSaltProcessStep(pepperEnv, 'I@mysql:client', 'mysql.db_remove', ["${databasesList[i]}"], null, true)
common.warningMsg("removing database ${databasesList[i]}")
- salt.runSaltProcessStep(saltMaster, 'I@mysql:client', 'file.remove', ["/root/mysql/flags/${databasesList[i]}-installed"], null, true)
+ salt.runSaltProcessStep(pepperEnv, 'I@mysql:client', 'file.remove', ["/root/mysql/flags/${databasesList[i]}-installed"], null, true)
}
}
- salt.enforceState(saltMaster, 'I@mysql:client', 'mysql.client')
+ salt.enforceState(pepperEnv, 'I@mysql:client', 'mysql.client')
}else{
common.errorMsg("No _upgrade databases were returned")
}
try {
- salt.enforceState(saltMaster, 'upg*', 'keystone.server')
- salt.runSaltProcessStep(saltMaster, 'upg*', 'service.restart', ['apache2'], null, true)
+ salt.enforceState(pepperEnv, 'upg*', 'keystone.server')
+ salt.runSaltProcessStep(pepperEnv, 'upg*', 'service.restart', ['apache2'], null, true)
} catch (Exception e) {
common.warningMsg('Restarting Apache2')
- salt.runSaltProcessStep(saltMaster, 'upg*', 'service.restart', ['apache2'], null, true)
+ salt.runSaltProcessStep(pepperEnv, 'upg*', 'service.restart', ['apache2'], null, true)
}
try {
- salt.enforceState(saltMaster, 'upg*', 'keystone.client')
+ salt.enforceState(pepperEnv, 'upg*', 'keystone.client')
} catch (Exception e) {
common.warningMsg('running keystone.client state again')
- salt.enforceState(saltMaster, 'upg*', 'keystone.client')
+ salt.enforceState(pepperEnv, 'upg*', 'keystone.client')
}
try {
- salt.enforceState(saltMaster, 'upg*', 'glance')
+ salt.enforceState(pepperEnv, 'upg*', 'glance')
} catch (Exception e) {
common.warningMsg('running glance state again')
- salt.enforceState(saltMaster, 'upg*', 'glance')
+ salt.enforceState(pepperEnv, 'upg*', 'glance')
}
- salt.enforceState(saltMaster, 'upg*', 'keystone.server')
+ salt.enforceState(pepperEnv, 'upg*', 'keystone.server')
try {
- salt.enforceState(saltMaster, 'upg*', 'nova')
+ salt.enforceState(pepperEnv, 'upg*', 'nova')
} catch (Exception e) {
common.warningMsg('running nova state again')
- salt.enforceState(saltMaster, 'upg*', 'nova')
+ salt.enforceState(pepperEnv, 'upg*', 'nova')
}
// Run the nova state again, as it sometimes fails to enforce fully on the first pass.
try {
- salt.enforceState(saltMaster, 'upg*', 'nova')
+ salt.enforceState(pepperEnv, 'upg*', 'nova')
} catch (Exception e) {
common.warningMsg('running nova state again')
- salt.enforceState(saltMaster, 'upg*', 'nova')
+ salt.enforceState(pepperEnv, 'upg*', 'nova')
}
try {
- salt.enforceState(saltMaster, 'upg*', 'cinder')
+ salt.enforceState(pepperEnv, 'upg*', 'cinder')
} catch (Exception e) {
common.warningMsg('running cinder state again')
- salt.enforceState(saltMaster, 'upg*', 'cinder')
+ salt.enforceState(pepperEnv, 'upg*', 'cinder')
}
try {
- salt.enforceState(saltMaster, 'upg*', 'neutron')
+ salt.enforceState(pepperEnv, 'upg*', 'neutron')
} catch (Exception e) {
common.warningMsg('running neutron state again')
- salt.enforceState(saltMaster, 'upg*', 'neutron')
+ salt.enforceState(pepperEnv, 'upg*', 'neutron')
}
try {
- salt.enforceState(saltMaster, 'upg*', 'heat')
+ salt.enforceState(pepperEnv, 'upg*', 'heat')
} catch (Exception e) {
common.warningMsg('running heat state again')
- salt.enforceState(saltMaster, 'upg*', 'heat')
+ salt.enforceState(pepperEnv, 'upg*', 'heat')
}
- salt.cmdRun(saltMaster, 'upg01*', '. /root/keystonercv3; openstack service list; openstack image list; openstack flavor list; openstack compute service list; openstack server list; openstack network list; openstack volume list; openstack orchestration service list')
+ salt.cmdRun(pepperEnv, 'upg01*', '. /root/keystonercv3; openstack service list; openstack image list; openstack flavor list; openstack compute service list; openstack server list; openstack network list; openstack volume list; openstack orchestration service list')
if (STAGE_TEST_UPGRADE.toBoolean() == true && STAGE_REAL_UPGRADE.toBoolean() == true) {
stage('Ask for manual confirmation') {
@@ -209,163 +209,163 @@
stage('Real upgrade') {
// # actual upgrade
- _pillar = salt.getGrain(saltMaster, 'I@salt:master', 'domain')
+ _pillar = salt.getGrain(pepperEnv, 'I@salt:master', 'domain')
domain = _pillar['return'][0].values()[0].values()[0]
print(_pillar)
print(domain)
- _pillar = salt.getGrain(saltMaster, 'I@salt:control', 'id')
+ _pillar = salt.getGrain(pepperEnv, 'I@salt:control', 'id')
kvm01 = _pillar['return'][0].values()[0].values()[0]
print(_pillar)
print(kvm01)
def errorOccured = false
- _pillar = salt.getPillar(saltMaster, "${kvm01}", 'salt:control:cluster:internal:node:ctl01:provider')
+ _pillar = salt.getPillar(pepperEnv, "${kvm01}", 'salt:control:cluster:internal:node:ctl01:provider')
def ctl01NodeProvider = _pillar['return'][0].values()[0]
- _pillar = salt.getPillar(saltMaster, "${kvm01}", 'salt:control:cluster:internal:node:ctl02:provider')
+ _pillar = salt.getPillar(pepperEnv, "${kvm01}", 'salt:control:cluster:internal:node:ctl02:provider')
def ctl02NodeProvider = _pillar['return'][0].values()[0]
- _pillar = salt.getPillar(saltMaster, "${kvm01}", 'salt:control:cluster:internal:node:ctl03:provider')
+ _pillar = salt.getPillar(pepperEnv, "${kvm01}", 'salt:control:cluster:internal:node:ctl03:provider')
def ctl03NodeProvider = _pillar['return'][0].values()[0]
- _pillar = salt.getPillar(saltMaster, "${kvm01}", 'salt:control:cluster:internal:node:prx01:provider')
+ _pillar = salt.getPillar(pepperEnv, "${kvm01}", 'salt:control:cluster:internal:node:prx01:provider')
def prx01NodeProvider = _pillar['return'][0].values()[0]
- _pillar = salt.getPillar(saltMaster, "${kvm01}", 'salt:control:cluster:internal:node:prx02:provider')
+ _pillar = salt.getPillar(pepperEnv, "${kvm01}", 'salt:control:cluster:internal:node:prx02:provider')
def prx02NodeProvider = _pillar['return'][0].values()[0]
- salt.runSaltProcessStep(saltMaster, "${prx01NodeProvider}", 'virt.destroy', ["prx01.${domain}"], null, true)
- salt.runSaltProcessStep(saltMaster, "${prx02NodeProvider}", 'virt.destroy', ["prx02.${domain}"], null, true)
- salt.runSaltProcessStep(saltMaster, "${ctl01NodeProvider}", 'virt.destroy', ["ctl01.${domain}"], null, true)
- salt.runSaltProcessStep(saltMaster, "${ctl02NodeProvider}", 'virt.destroy', ["ctl02.${domain}"], null, true)
- salt.runSaltProcessStep(saltMaster, "${ctl03NodeProvider}", 'virt.destroy', ["ctl03.${domain}"], null, true)
+ salt.runSaltProcessStep(pepperEnv, "${prx01NodeProvider}", 'virt.destroy', ["prx01.${domain}"], null, true)
+ salt.runSaltProcessStep(pepperEnv, "${prx02NodeProvider}", 'virt.destroy', ["prx02.${domain}"], null, true)
+ salt.runSaltProcessStep(pepperEnv, "${ctl01NodeProvider}", 'virt.destroy', ["ctl01.${domain}"], null, true)
+ salt.runSaltProcessStep(pepperEnv, "${ctl02NodeProvider}", 'virt.destroy', ["ctl02.${domain}"], null, true)
+ salt.runSaltProcessStep(pepperEnv, "${ctl03NodeProvider}", 'virt.destroy', ["ctl03.${domain}"], null, true)
try {
- salt.cmdRun(saltMaster, "${prx01NodeProvider}", "[ ! -f /root/prx01.${domain}.qcow2.bak ] && cp /var/lib/libvirt/images/prx01.${domain}/system.qcow2 ./prx01.${domain}.qcow2.bak")
+ salt.cmdRun(pepperEnv, "${prx01NodeProvider}", "[ ! -f /root/prx01.${domain}.qcow2.bak ] && cp /var/lib/libvirt/images/prx01.${domain}/system.qcow2 ./prx01.${domain}.qcow2.bak")
} catch (Exception e) {
common.warningMsg('File already exists')
}
try {
- salt.cmdRun(saltMaster, "${prx02NodeProvider}", "[ ! -f /root/prx02.${domain}.qcow2.bak ] && cp /var/lib/libvirt/images/prx02.${domain}/system.qcow2 ./prx02.${domain}.qcow2.bak")
+ salt.cmdRun(pepperEnv, "${prx02NodeProvider}", "[ ! -f /root/prx02.${domain}.qcow2.bak ] && cp /var/lib/libvirt/images/prx02.${domain}/system.qcow2 ./prx02.${domain}.qcow2.bak")
} catch (Exception e) {
common.warningMsg('File already exists')
}
try {
- salt.cmdRun(saltMaster, "${ctl01NodeProvider}", "[ ! -f /root/ctl01.${domain}.qcow2.bak ] && cp /var/lib/libvirt/images/ctl01.${domain}/system.qcow2 ./ctl01.${domain}.qcow2.bak")
+ salt.cmdRun(pepperEnv, "${ctl01NodeProvider}", "[ ! -f /root/ctl01.${domain}.qcow2.bak ] && cp /var/lib/libvirt/images/ctl01.${domain}/system.qcow2 ./ctl01.${domain}.qcow2.bak")
} catch (Exception e) {
common.warningMsg('File already exists')
}
try {
- salt.cmdRun(saltMaster, "${ctl02NodeProvider}", "[ ! -f /root/ctl02.${domain}.qcow2.bak ] && cp /var/lib/libvirt/images/ctl02.${domain}/system.qcow2 ./ctl02.${domain}.qcow2.bak")
+ salt.cmdRun(pepperEnv, "${ctl02NodeProvider}", "[ ! -f /root/ctl02.${domain}.qcow2.bak ] && cp /var/lib/libvirt/images/ctl02.${domain}/system.qcow2 ./ctl02.${domain}.qcow2.bak")
} catch (Exception e) {
common.warningMsg('File already exists')
}
try {
- salt.cmdRun(saltMaster, "${ctl03NodeProvider}", "[ ! -f /root/ctl03.${domain}.qcow2.bak ] && cp /var/lib/libvirt/images/ctl03.${domain}/system.qcow2 ./ctl03.${domain}.qcow2.bak")
+ salt.cmdRun(pepperEnv, "${ctl03NodeProvider}", "[ ! -f /root/ctl03.${domain}.qcow2.bak ] && cp /var/lib/libvirt/images/ctl03.${domain}/system.qcow2 ./ctl03.${domain}.qcow2.bak")
} catch (Exception e) {
common.warningMsg('File already exists')
}
- salt.runSaltProcessStep(saltMaster, "${prx01NodeProvider}", 'virt.undefine', ["prx01.${domain}"], null, true)
- salt.runSaltProcessStep(saltMaster, "${prx02NodeProvider}", 'virt.undefine', ["prx02.${domain}"], null, true)
- salt.runSaltProcessStep(saltMaster, "${ctl01NodeProvider}", 'virt.undefine', ["ctl01.${domain}"], null, true)
- salt.runSaltProcessStep(saltMaster, "${ctl02NodeProvider}", 'virt.undefine', ["ctl02.${domain}"], null, true)
- salt.runSaltProcessStep(saltMaster, "${ctl03NodeProvider}", 'virt.undefine', ["ctl03.${domain}"], null, true)
+ salt.runSaltProcessStep(pepperEnv, "${prx01NodeProvider}", 'virt.undefine', ["prx01.${domain}"], null, true)
+ salt.runSaltProcessStep(pepperEnv, "${prx02NodeProvider}", 'virt.undefine', ["prx02.${domain}"], null, true)
+ salt.runSaltProcessStep(pepperEnv, "${ctl01NodeProvider}", 'virt.undefine', ["ctl01.${domain}"], null, true)
+ salt.runSaltProcessStep(pepperEnv, "${ctl02NodeProvider}", 'virt.undefine', ["ctl02.${domain}"], null, true)
+ salt.runSaltProcessStep(pepperEnv, "${ctl03NodeProvider}", 'virt.undefine', ["ctl03.${domain}"], null, true)
- salt.cmdRun(saltMaster, 'I@xtrabackup:client', "su root -c '/usr/local/bin/innobackupex-runner.sh'")
+ salt.cmdRun(pepperEnv, 'I@xtrabackup:client', "su root -c '/usr/local/bin/innobackupex-runner.sh'")
try {
- salt.cmdRun(saltMaster, 'I@salt:master', "salt-key -d ctl01.${domain},ctl02.${domain},ctl03.${domain},prx01.${domain},prx02.${domain} -y")
+ salt.cmdRun(pepperEnv, 'I@salt:master', "salt-key -d ctl01.${domain},ctl02.${domain},ctl03.${domain},prx01.${domain},prx02.${domain} -y")
} catch (Exception e) {
common.warningMsg('The keys do not match any accepted, unaccepted or rejected keys. They were probably already removed. We should continue to run')
}
// salt 'kvm*' state.sls salt.control
- salt.enforceState(saltMaster, 'I@salt:control', 'salt.control')
+ salt.enforceState(pepperEnv, 'I@salt:control', 'salt.control')
// wait until ctl and prx nodes are registered in salt-key
- salt.minionPresent(saltMaster, 'I@salt:master', 'ctl01')
- salt.minionPresent(saltMaster, 'I@salt:master', 'ctl02')
- salt.minionPresent(saltMaster, 'I@salt:master', 'ctl03')
- salt.minionPresent(saltMaster, 'I@salt:master', 'prx01')
- salt.minionPresent(saltMaster, 'I@salt:master', 'prx02')
+ salt.minionPresent(pepperEnv, 'I@salt:master', 'ctl01')
+ salt.minionPresent(pepperEnv, 'I@salt:master', 'ctl02')
+ salt.minionPresent(pepperEnv, 'I@salt:master', 'ctl03')
+ salt.minionPresent(pepperEnv, 'I@salt:master', 'prx01')
+ salt.minionPresent(pepperEnv, 'I@salt:master', 'prx02')
// salt '*' saltutil.refresh_pillar
- salt.runSaltProcessStep(saltMaster, '*', 'saltutil.refresh_pillar', [], null, true)
+ salt.runSaltProcessStep(pepperEnv, '*', 'saltutil.refresh_pillar', [], null, true)
// salt '*' saltutil.sync_all
- salt.runSaltProcessStep(saltMaster, '*', 'saltutil.sync_all', [], null, true)
+ salt.runSaltProcessStep(pepperEnv, '*', 'saltutil.sync_all', [], null, true)
try {
- salt.enforceState(saltMaster, 'ctl* or prx* or ctl*', ['linux', 'openssh', 'salt.minion', 'ntp', 'rsyslog'])
+ salt.enforceState(pepperEnv, 'ctl* or prx*', ['linux', 'openssh', 'salt.minion', 'ntp', 'rsyslog'])
} catch (Exception e) {
common.warningMsg('Received no response because salt-minion was restarted. We should continue to run')
}
- salt.enforceState(saltMaster, 'ctl* or prx* or ctl*', ['linux', 'openssh', 'salt.minion', 'ntp', 'rsyslog'])
+ salt.enforceState(pepperEnv, 'ctl* or prx*', ['linux', 'openssh', 'salt.minion', 'ntp', 'rsyslog'])
// salt 'ctl*' state.sls keepalived
// salt 'ctl*' state.sls haproxy
- salt.enforceState(saltMaster, 'ctl*', ['keepalived', 'haproxy'])
+ salt.enforceState(pepperEnv, 'ctl*', ['keepalived', 'haproxy'])
// salt 'ctl*' service.restart rsyslog
- salt.runSaltProcessStep(saltMaster, 'ctl*', 'service.restart', ['rsyslog'], null, true)
+ salt.runSaltProcessStep(pepperEnv, 'ctl*', 'service.restart', ['rsyslog'], null, true)
// salt "ctl*" state.sls memcached
// salt "ctl*" state.sls keystone.server
try {
try {
- salt.enforceState(saltMaster, 'ctl*', ['memcached', 'keystone.server'])
- salt.runSaltProcessStep(saltMaster, 'ctl*', 'service.restart', ['apache2'], null, true)
+ salt.enforceState(pepperEnv, 'ctl*', ['memcached', 'keystone.server'])
+ salt.runSaltProcessStep(pepperEnv, 'ctl*', 'service.restart', ['apache2'], null, true)
} catch (Exception e) {
common.warningMsg('Restarting Apache2 and enforcing keystone.server state again')
- salt.runSaltProcessStep(saltMaster, 'ctl*', 'service.restart', ['apache2'], null, true)
- salt.enforceState(saltMaster, 'ctl*', 'keystone.server')
+ salt.runSaltProcessStep(pepperEnv, 'ctl*', 'service.restart', ['apache2'], null, true)
+ salt.enforceState(pepperEnv, 'ctl*', 'keystone.server')
}
// salt 'ctl01*' state.sls keystone.client
try {
- salt.enforceState(saltMaster, 'I@keystone:client and ctl*', 'keystone.client')
+ salt.enforceState(pepperEnv, 'I@keystone:client and ctl*', 'keystone.client')
} catch (Exception e) {
common.warningMsg('running keystone.client state again')
- salt.enforceState(saltMaster, 'I@keystone:client and ctl*', 'keystone.client')
+ salt.enforceState(pepperEnv, 'I@keystone:client and ctl*', 'keystone.client')
}
try {
- salt.enforceState(saltMaster, 'ctl*', 'glance')
+ salt.enforceState(pepperEnv, 'ctl*', 'glance')
} catch (Exception e) {
common.warningMsg('running glance state again')
- salt.enforceState(saltMaster, 'ctl*', 'glance')
+ salt.enforceState(pepperEnv, 'ctl*', 'glance')
} // salt 'ctl*' state.sls glusterfs.client
- salt.enforceState(saltMaster, 'ctl*', 'glusterfs.client')
+ salt.enforceState(pepperEnv, 'ctl*', 'glusterfs.client')
// salt 'ctl*' state.sls keystone.server
- salt.enforceState(saltMaster, 'ctl*', 'keystone.server')
+ salt.enforceState(pepperEnv, 'ctl*', 'keystone.server')
// salt 'ctl*' state.sls nova
try {
- salt.enforceState(saltMaster, 'ctl*', 'nova')
+ salt.enforceState(pepperEnv, 'ctl*', 'nova')
} catch (Exception e) {
common.warningMsg('running nova state again')
- salt.enforceState(saltMaster, 'ctl*', 'nova')
+ salt.enforceState(pepperEnv, 'ctl*', 'nova')
}
// salt 'ctl*' state.sls cinder
try {
- salt.enforceState(saltMaster, 'ctl*', 'cinder')
+ salt.enforceState(pepperEnv, 'ctl*', 'cinder')
} catch (Exception e) {
common.warningMsg('running cinder state again')
- salt.enforceState(saltMaster, 'ctl*', 'cinder')
+ salt.enforceState(pepperEnv, 'ctl*', 'cinder')
}
try {
- salt.enforceState(saltMaster, 'ctl*', 'neutron')
+ salt.enforceState(pepperEnv, 'ctl*', 'neutron')
} catch (Exception e) {
common.warningMsg('running neutron state again')
- salt.enforceState(saltMaster, 'ctl*', 'neutron')
+ salt.enforceState(pepperEnv, 'ctl*', 'neutron')
}
// salt 'ctl*' state.sls heat
try {
- salt.enforceState(saltMaster, 'ctl*', 'heat')
+ salt.enforceState(pepperEnv, 'ctl*', 'heat')
} catch (Exception e) {
common.warningMsg('running heat state again')
- salt.enforceState(saltMaster, 'ctl*', 'heat')
+ salt.enforceState(pepperEnv, 'ctl*', 'heat')
}
} catch (Exception e) {
@@ -374,53 +374,53 @@
// database restore section
try {
- salt.runSaltProcessStep(saltMaster, 'I@galera:slave', 'service.stop', ['mysql'], null, true)
+ salt.runSaltProcessStep(pepperEnv, 'I@galera:slave', 'service.stop', ['mysql'], null, true)
} catch (Exception er) {
common.warningMsg('Mysql service already stopped')
}
try {
- salt.runSaltProcessStep(saltMaster, 'I@galera:master', 'service.stop', ['mysql'], null, true)
+ salt.runSaltProcessStep(pepperEnv, 'I@galera:master', 'service.stop', ['mysql'], null, true)
} catch (Exception er) {
common.warningMsg('Mysql service already stopped')
}
try {
- salt.cmdRun(saltMaster, 'I@galera:slave', "rm /var/lib/mysql/ib_logfile*")
+ salt.cmdRun(pepperEnv, 'I@galera:slave', "rm /var/lib/mysql/ib_logfile*")
} catch (Exception er) {
common.warningMsg('Files are not present')
}
try {
- salt.cmdRun(saltMaster, 'I@galera:master', "mkdir /root/mysql/mysql.bak")
+ salt.cmdRun(pepperEnv, 'I@galera:master', "mkdir /root/mysql/mysql.bak")
} catch (Exception er) {
common.warningMsg('Directory already exists')
}
try {
- salt.cmdRun(saltMaster, 'I@galera:master', "rm -rf /root/mysql/mysql.bak/*")
+ salt.cmdRun(pepperEnv, 'I@galera:master', "rm -rf /root/mysql/mysql.bak/*")
} catch (Exception er) {
common.warningMsg('Directory already empty')
}
try {
- salt.cmdRun(saltMaster, 'I@galera:master', "mv /var/lib/mysql/* /root/mysql/mysql.bak")
+ salt.cmdRun(pepperEnv, 'I@galera:master', "mv /var/lib/mysql/* /root/mysql/mysql.bak")
} catch (Exception er) {
common.warningMsg('Files were already moved')
}
try {
- salt.runSaltProcessStep(saltMaster, 'I@galera:master', 'file.remove', ["/var/lib/mysql/.galera_bootstrap"], null, true)
+ salt.runSaltProcessStep(pepperEnv, 'I@galera:master', 'file.remove', ["/var/lib/mysql/.galera_bootstrap"], null, true)
} catch (Exception er) {
common.warningMsg('File is not present')
}
- salt.cmdRun(saltMaster, 'I@galera:master', "sed -i '/gcomm/c\\wsrep_cluster_address=\"gcomm://\"' /etc/mysql/my.cnf")
- _pillar = salt.getPillar(saltMaster, "I@galera:master", 'xtrabackup:client:backup_dir')
+ salt.cmdRun(pepperEnv, 'I@galera:master', "sed -i '/gcomm/c\\wsrep_cluster_address=\"gcomm://\"' /etc/mysql/my.cnf")
+ _pillar = salt.getPillar(pepperEnv, "I@galera:master", 'xtrabackup:client:backup_dir')
backup_dir = _pillar['return'][0].values()[0]
if(backup_dir == null || backup_dir.isEmpty()) { backup_dir='/var/backups/mysql/xtrabackup' }
print(backup_dir)
- salt.runSaltProcessStep(saltMaster, 'I@galera:master', 'file.remove', ["${backup_dir}/dbrestored"], null, true)
- salt.cmdRun(saltMaster, 'I@xtrabackup:client', "su root -c 'salt-call state.sls xtrabackup'")
- salt.runSaltProcessStep(saltMaster, 'I@galera:master', 'service.start', ['mysql'], null, true)
+ salt.runSaltProcessStep(pepperEnv, 'I@galera:master', 'file.remove', ["${backup_dir}/dbrestored"], null, true)
+ salt.cmdRun(pepperEnv, 'I@xtrabackup:client', "su root -c 'salt-call state.sls xtrabackup'")
+ salt.runSaltProcessStep(pepperEnv, 'I@galera:master', 'service.start', ['mysql'], null, true)
// wait until mysql service on galera master is up
- salt.commandStatus(saltMaster, 'I@galera:master', 'service mysql status', 'running')
+ salt.commandStatus(pepperEnv, 'I@galera:master', 'service mysql status', 'running')
- salt.runSaltProcessStep(saltMaster, 'I@galera:slave', 'service.start', ['mysql'], null, true)
+ salt.runSaltProcessStep(pepperEnv, 'I@galera:slave', 'service.start', ['mysql'], null, true)
//
common.errorMsg("Stage Real control upgrade failed")
@@ -430,7 +430,7 @@
ceph = null
try {
- ceph = salt.cmdRun(saltMaster, 'ctl*', "salt-call grains.item roles | grep ceph.client")
+ ceph = salt.cmdRun(pepperEnv, 'ctl*', "salt-call grains.item roles | grep ceph.client")
} catch (Exception er) {
common.infoMsg("Ceph is not used")
@@ -438,42 +438,42 @@
if(ceph != null) {
try {
- salt.enforceState(saltMaster, 'ctl*', 'ceph.client')
+ salt.enforceState(pepperEnv, 'ctl*', 'ceph.client')
} catch (Exception er) {
common.warningMsg("Ceph client state on controllers failed. Please fix it manually")
}
}
// salt 'cmp*' cmd.run 'service nova-compute restart'
- salt.runSaltProcessStep(saltMaster, 'cmp*', 'service.restart', ['nova-compute'], null, true)
- salt.runSaltProcessStep(saltMaster, 'ctl*', 'service.restart', ['nova-conductor'], null, true)
- salt.runSaltProcessStep(saltMaster, 'ctl*', 'service.restart', ['nova-scheduler'], null, true)
+ salt.runSaltProcessStep(pepperEnv, 'cmp*', 'service.restart', ['nova-compute'], null, true)
+ salt.runSaltProcessStep(pepperEnv, 'ctl*', 'service.restart', ['nova-conductor'], null, true)
+ salt.runSaltProcessStep(pepperEnv, 'ctl*', 'service.restart', ['nova-scheduler'], null, true)
// salt 'prx*' state.sls linux,openssh,salt.minion,ntp,rsyslog
// salt 'ctl*' state.sls keepalived
// salt 'prx*' state.sls keepalived
- salt.enforceState(saltMaster, 'prx*', 'keepalived')
+ salt.enforceState(pepperEnv, 'prx*', 'keepalived')
// salt 'prx*' state.sls horizon
- salt.enforceState(saltMaster, 'prx*', 'horizon')
+ salt.enforceState(pepperEnv, 'prx*', 'horizon')
// salt 'prx*' state.sls nginx
- salt.enforceState(saltMaster, 'prx*', 'nginx')
+ salt.enforceState(pepperEnv, 'prx*', 'nginx')
// salt "prx*" state.sls memcached
- salt.enforceState(saltMaster, 'prx*', 'memcached')
+ salt.enforceState(pepperEnv, 'prx*', 'memcached')
try {
- salt.enforceHighstate(saltMaster, 'ctl*')
+ salt.enforceHighstate(pepperEnv, 'ctl*')
} catch (Exception er) {
common.errorMsg("Highstate was executed on controller nodes but something failed. Please check it and fix it accordingly.")
}
try {
- salt.enforceHighstate(saltMaster, 'prx*')
+ salt.enforceHighstate(pepperEnv, 'prx*')
} catch (Exception er) {
common.errorMsg("Highstate was executed on proxy nodes but something failed. Please check it and fix it accordingly.")
}
- salt.cmdRun(saltMaster, 'ctl01*', '. /root/keystonercv3; openstack service list; openstack image list; openstack flavor list; openstack compute service list; openstack server list; openstack network list; openstack volume list; openstack orchestration service list')
+ salt.cmdRun(pepperEnv, 'ctl01*', '. /root/keystonercv3; openstack service list; openstack image list; openstack flavor list; openstack compute service list; openstack server list; openstack network list; openstack volume list; openstack orchestration service list')
}
}
@@ -491,110 +491,110 @@
input message: "Do you really want to continue with the rollback?"
}
- _pillar = salt.getGrain(saltMaster, 'I@salt:master', 'domain')
+ _pillar = salt.getGrain(pepperEnv, 'I@salt:master', 'domain')
domain = _pillar['return'][0].values()[0].values()[0]
print(_pillar)
print(domain)
- _pillar = salt.getGrain(saltMaster, 'I@salt:control', 'id')
+ _pillar = salt.getGrain(pepperEnv, 'I@salt:control', 'id')
kvm01 = _pillar['return'][0].values()[0].values()[0]
print(_pillar)
print(kvm01)
- _pillar = salt.getPillar(saltMaster, "${kvm01}", 'salt:control:cluster:internal:node:ctl01:provider')
+ _pillar = salt.getPillar(pepperEnv, "${kvm01}", 'salt:control:cluster:internal:node:ctl01:provider')
def ctl01NodeProvider = _pillar['return'][0].values()[0]
- _pillar = salt.getPillar(saltMaster, "${kvm01}", 'salt:control:cluster:internal:node:ctl02:provider')
+ _pillar = salt.getPillar(pepperEnv, "${kvm01}", 'salt:control:cluster:internal:node:ctl02:provider')
def ctl02NodeProvider = _pillar['return'][0].values()[0]
- _pillar = salt.getPillar(saltMaster, "${kvm01}", 'salt:control:cluster:internal:node:ctl03:provider')
+ _pillar = salt.getPillar(pepperEnv, "${kvm01}", 'salt:control:cluster:internal:node:ctl03:provider')
def ctl03NodeProvider = _pillar['return'][0].values()[0]
- _pillar = salt.getPillar(saltMaster, "${kvm01}", 'salt:control:cluster:internal:node:prx01:provider')
+ _pillar = salt.getPillar(pepperEnv, "${kvm01}", 'salt:control:cluster:internal:node:prx01:provider')
def prx01NodeProvider = _pillar['return'][0].values()[0]
- _pillar = salt.getPillar(saltMaster, "${kvm01}", 'salt:control:cluster:internal:node:prx02:provider')
+ _pillar = salt.getPillar(pepperEnv, "${kvm01}", 'salt:control:cluster:internal:node:prx02:provider')
def prx02NodeProvider = _pillar['return'][0].values()[0]
- salt.runSaltProcessStep(saltMaster, "${prx01NodeProvider}", 'virt.destroy', ["prx01.${domain}"], null, true)
- salt.runSaltProcessStep(saltMaster, "${prx02NodeProvider}", 'virt.destroy', ["prx02.${domain}"], null, true)
- salt.runSaltProcessStep(saltMaster, "${ctl01NodeProvider}", 'virt.destroy', ["ctl01.${domain}"], null, true)
- salt.runSaltProcessStep(saltMaster, "${ctl02NodeProvider}", 'virt.destroy', ["ctl02.${domain}"], null, true)
- salt.runSaltProcessStep(saltMaster, "${ctl03NodeProvider}", 'virt.destroy', ["ctl03.${domain}"], null, true)
+ salt.runSaltProcessStep(pepperEnv, "${prx01NodeProvider}", 'virt.destroy', ["prx01.${domain}"], null, true)
+ salt.runSaltProcessStep(pepperEnv, "${prx02NodeProvider}", 'virt.destroy', ["prx02.${domain}"], null, true)
+ salt.runSaltProcessStep(pepperEnv, "${ctl01NodeProvider}", 'virt.destroy', ["ctl01.${domain}"], null, true)
+ salt.runSaltProcessStep(pepperEnv, "${ctl02NodeProvider}", 'virt.destroy', ["ctl02.${domain}"], null, true)
+ salt.runSaltProcessStep(pepperEnv, "${ctl03NodeProvider}", 'virt.destroy', ["ctl03.${domain}"], null, true)
- salt.runSaltProcessStep(saltMaster, "${prx01NodeProvider}", 'file.copy', ["/root/prx01.${domain}.qcow2.bak", "/var/lib/libvirt/images/prx01.${domain}/system.qcow2"], null, true)
- salt.runSaltProcessStep(saltMaster, "${prx02NodeProvider}", 'file.copy', ["/root/prx02.${domain}.qcow2.bak", "/var/lib/libvirt/images/prx02.${domain}/system.qcow2"], null, true)
- salt.runSaltProcessStep(saltMaster, "${ctl01NodeProvider}", 'file.copy', ["/root/ctl01.${domain}.qcow2.bak", "/var/lib/libvirt/images/ctl01.${domain}/system.qcow2"], null, true)
- salt.runSaltProcessStep(saltMaster, "${ctl02NodeProvider}", 'file.copy', ["/root/ctl02.${domain}.qcow2.bak", "/var/lib/libvirt/images/ctl02.${domain}/system.qcow2"], null, true)
- salt.runSaltProcessStep(saltMaster, "${ctl03NodeProvider}", 'file.copy', ["/root/ctl03.${domain}.qcow2.bak", "/var/lib/libvirt/images/ctl03.${domain}/system.qcow2"], null, true)
+ salt.runSaltProcessStep(pepperEnv, "${prx01NodeProvider}", 'file.copy', ["/root/prx01.${domain}.qcow2.bak", "/var/lib/libvirt/images/prx01.${domain}/system.qcow2"], null, true)
+ salt.runSaltProcessStep(pepperEnv, "${prx02NodeProvider}", 'file.copy', ["/root/prx02.${domain}.qcow2.bak", "/var/lib/libvirt/images/prx02.${domain}/system.qcow2"], null, true)
+ salt.runSaltProcessStep(pepperEnv, "${ctl01NodeProvider}", 'file.copy', ["/root/ctl01.${domain}.qcow2.bak", "/var/lib/libvirt/images/ctl01.${domain}/system.qcow2"], null, true)
+ salt.runSaltProcessStep(pepperEnv, "${ctl02NodeProvider}", 'file.copy', ["/root/ctl02.${domain}.qcow2.bak", "/var/lib/libvirt/images/ctl02.${domain}/system.qcow2"], null, true)
+ salt.runSaltProcessStep(pepperEnv, "${ctl03NodeProvider}", 'file.copy', ["/root/ctl03.${domain}.qcow2.bak", "/var/lib/libvirt/images/ctl03.${domain}/system.qcow2"], null, true)
try {
- salt.cmdRun(saltMaster, 'I@salt:master', "salt-key -d ctl01.${domain},ctl02.${domain},ctl03.${domain},prx01.${domain},prx02.${domain} -y")
+ salt.cmdRun(pepperEnv, 'I@salt:master', "salt-key -d ctl01.${domain},ctl02.${domain},ctl03.${domain},prx01.${domain},prx02.${domain} -y")
} catch (Exception e) {
common.warningMsg('The keys do not match any accepted, unaccepted or rejected keys. They were probably already removed. We should continue to run')
}
// database restore section
try {
- salt.runSaltProcessStep(saltMaster, 'I@galera:slave', 'service.stop', ['mysql'], null, true)
+ salt.runSaltProcessStep(pepperEnv, 'I@galera:slave', 'service.stop', ['mysql'], null, true)
} catch (Exception e) {
common.warningMsg('Mysql service already stopped')
}
try {
- salt.runSaltProcessStep(saltMaster, 'I@galera:master', 'service.stop', ['mysql'], null, true)
+ salt.runSaltProcessStep(pepperEnv, 'I@galera:master', 'service.stop', ['mysql'], null, true)
} catch (Exception e) {
common.warningMsg('Mysql service already stopped')
}
try {
- salt.cmdRun(saltMaster, 'I@galera:slave', "rm /var/lib/mysql/ib_logfile*")
+ salt.cmdRun(pepperEnv, 'I@galera:slave', "rm /var/lib/mysql/ib_logfile*")
} catch (Exception e) {
common.warningMsg('Files are not present')
}
try {
- salt.cmdRun(saltMaster, 'I@galera:master', "rm -rf /var/lib/mysql/*")
+ salt.cmdRun(pepperEnv, 'I@galera:master', "rm -rf /var/lib/mysql/*")
} catch (Exception e) {
common.warningMsg('Directory already empty')
}
try {
- salt.runSaltProcessStep(saltMaster, 'I@galera:master', 'file.remove', ["/var/lib/mysql/.galera_bootstrap"], null, true)
+ salt.runSaltProcessStep(pepperEnv, 'I@galera:master', 'file.remove', ["/var/lib/mysql/.galera_bootstrap"], null, true)
} catch (Exception e) {
common.warningMsg('File is not present')
}
- salt.cmdRun(saltMaster, 'I@galera:master', "sed -i '/gcomm/c\\wsrep_cluster_address=\"gcomm://\"' /etc/mysql/my.cnf")
- _pillar = salt.getPillar(saltMaster, "I@galera:master", 'xtrabackup:client:backup_dir')
+ salt.cmdRun(pepperEnv, 'I@galera:master', "sed -i '/gcomm/c\\wsrep_cluster_address=\"gcomm://\"' /etc/mysql/my.cnf")
+ _pillar = salt.getPillar(pepperEnv, "I@galera:master", 'xtrabackup:client:backup_dir')
backup_dir = _pillar['return'][0].values()[0]
if(backup_dir == null || backup_dir.isEmpty()) { backup_dir='/var/backups/mysql/xtrabackup' }
print(backup_dir)
- salt.runSaltProcessStep(saltMaster, 'I@galera:master', 'file.remove', ["${backup_dir}/dbrestored"], null, true)
- salt.cmdRun(saltMaster, 'I@xtrabackup:client', "su root -c 'salt-call state.sls xtrabackup'")
- salt.runSaltProcessStep(saltMaster, 'I@galera:master', 'service.start', ['mysql'], null, true)
+ salt.runSaltProcessStep(pepperEnv, 'I@galera:master', 'file.remove', ["${backup_dir}/dbrestored"], null, true)
+ salt.cmdRun(pepperEnv, 'I@xtrabackup:client', "su root -c 'salt-call state.sls xtrabackup'")
+ salt.runSaltProcessStep(pepperEnv, 'I@galera:master', 'service.start', ['mysql'], null, true)
// wait until mysql service on galera master is up
- salt.commandStatus(saltMaster, 'I@galera:master', 'service mysql status', 'running')
+ salt.commandStatus(pepperEnv, 'I@galera:master', 'service mysql status', 'running')
- salt.runSaltProcessStep(saltMaster, 'I@galera:slave', 'service.start', ['mysql'], null, true)
+ salt.runSaltProcessStep(pepperEnv, 'I@galera:slave', 'service.start', ['mysql'], null, true)
//
- salt.runSaltProcessStep(saltMaster, "${prx01NodeProvider}", 'virt.start', ["prx01.${domain}"], null, true)
- salt.runSaltProcessStep(saltMaster, "${prx02NodeProvider}", 'virt.start', ["prx02.${domain}"], null, true)
- salt.runSaltProcessStep(saltMaster, "${ctl01NodeProvider}", 'virt.start', ["ctl01.${domain}"], null, true)
- salt.runSaltProcessStep(saltMaster, "${ctl02NodeProvider}", 'virt.start', ["ctl02.${domain}"], null, true)
- salt.runSaltProcessStep(saltMaster, "${ctl03NodeProvider}", 'virt.start', ["ctl03.${domain}"], null, true)
+ salt.runSaltProcessStep(pepperEnv, "${prx01NodeProvider}", 'virt.start', ["prx01.${domain}"], null, true)
+ salt.runSaltProcessStep(pepperEnv, "${prx02NodeProvider}", 'virt.start', ["prx02.${domain}"], null, true)
+ salt.runSaltProcessStep(pepperEnv, "${ctl01NodeProvider}", 'virt.start', ["ctl01.${domain}"], null, true)
+ salt.runSaltProcessStep(pepperEnv, "${ctl02NodeProvider}", 'virt.start', ["ctl02.${domain}"], null, true)
+ salt.runSaltProcessStep(pepperEnv, "${ctl03NodeProvider}", 'virt.start', ["ctl03.${domain}"], null, true)
// salt 'cmp*' cmd.run 'service nova-compute restart'
- salt.runSaltProcessStep(saltMaster, 'cmp*', 'service.restart', ['nova-compute'], null, true)
+ salt.runSaltProcessStep(pepperEnv, 'cmp*', 'service.restart', ['nova-compute'], null, true)
// wait until ctl and prx nodes are registered in salt-key
- salt.minionPresent(saltMaster, 'I@salt:master', 'ctl01')
- salt.minionPresent(saltMaster, 'I@salt:master', 'ctl02')
- salt.minionPresent(saltMaster, 'I@salt:master', 'ctl03')
- salt.minionPresent(saltMaster, 'I@salt:master', 'prx01')
- salt.minionPresent(saltMaster, 'I@salt:master', 'prx02')
+ salt.minionPresent(pepperEnv, 'I@salt:master', 'ctl01')
+ salt.minionPresent(pepperEnv, 'I@salt:master', 'ctl02')
+ salt.minionPresent(pepperEnv, 'I@salt:master', 'ctl03')
+ salt.minionPresent(pepperEnv, 'I@salt:master', 'prx01')
+ salt.minionPresent(pepperEnv, 'I@salt:master', 'prx02')
- salt.runSaltProcessStep(saltMaster, 'ctl*', 'service.restart', ['nova-conductor'], null, true)
- salt.runSaltProcessStep(saltMaster, 'ctl*', 'service.restart', ['nova-scheduler'], null, true)
+ salt.runSaltProcessStep(pepperEnv, 'ctl*', 'service.restart', ['nova-conductor'], null, true)
+ salt.runSaltProcessStep(pepperEnv, 'ctl*', 'service.restart', ['nova-scheduler'], null, true)
- salt.cmdRun(saltMaster, 'ctl01*', '. /root/keystonerc; nova service-list; glance image-list; nova flavor-list; nova hypervisor-list; nova list; neutron net-list; cinder list; heat service-list')
+ salt.cmdRun(pepperEnv, 'ctl01*', '. /root/keystonerc; nova service-list; glance image-list; nova flavor-list; nova hypervisor-list; nova list; neutron net-list; cinder list; heat service-list')
}
}
}
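
Reviewer note: openstack-control-upgrade.groovy wraps nearly every `enforceState` call in the same try / warn / retry shape. One way to collapse it is sketched below; the helper is illustrative and not part of com.mirantis.mk:

```groovy
// Illustrative only -- not part of com.mirantis.mk. Collapses the repeated
// try / warn / retry idiom around enforceState in this file: one failure is
// tolerated (salt-minion restarts, transient API errors) and the state is
// simply enforced a second time.
def enforceStateWithRetry(pepperEnv, target, state) {
    try {
        salt.enforceState(pepperEnv, target, state)
    } catch (Exception e) {
        common.warningMsg("State ${state} on ${target} failed once, running it again")
        salt.enforceState(pepperEnv, target, state)
    }
}

// usage, matching the calls above:
// enforceStateWithRetry(pepperEnv, 'upg*', 'nova')
// enforceStateWithRetry(pepperEnv, 'ctl*', 'cinder')
```
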
diff --git a/ovs-gateway-upgrade.groovy b/ovs-gateway-upgrade.groovy
index 9cfa215..394ca98 100644
--- a/ovs-gateway-upgrade.groovy
+++ b/ovs-gateway-upgrade.groovy
@@ -12,8 +12,9 @@
def common = new com.mirantis.mk.Common()
def salt = new com.mirantis.mk.Salt()
+def python = new com.mirantis.mk.Python()
-def saltMaster
+def pepperEnv = "pepperEnv"
def targetTestSubset
def targetLiveSubset
def targetLiveAll
@@ -27,12 +28,12 @@
node() {
try {
- stage('Connect to Salt master') {
- saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+ stage('Setup virtualenv for Pepper') {
+ python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
}
stage('List target servers') {
- minions = salt.getMinions(saltMaster, TARGET_SERVERS)
+ minions = salt.getMinions(pepperEnv, TARGET_SERVERS)
if (minions.isEmpty()) {
throw new Exception("No minion was targeted")
@@ -55,11 +56,11 @@
stage("Add new repos on test nodes") {
- salt.enforceState(saltMaster, targetTestSubset, 'linux.system.repo')
+ salt.enforceState(pepperEnv, targetTestSubset, 'linux.system.repo')
}
stage("List package upgrades") {
- salt.runSaltProcessStep(saltMaster, targetTestSubset, 'pkg.list_upgrades', [], null, true)
+ salt.runSaltProcessStep(pepperEnv, targetTestSubset, 'pkg.list_upgrades', [], null, true)
}
stage('Confirm upgrade on sample nodes') {
@@ -67,14 +68,14 @@
}
stage("Add new repos on sample nodes") {
- salt.enforceState(saltMaster, targetLiveSubset, 'linux.system.repo')
+ salt.enforceState(pepperEnv, targetLiveSubset, 'linux.system.repo')
}
args = "apt-get -y -s -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confold\" dist-upgrade"
stage('Test upgrade on sample') {
try {
- salt.cmdRun(saltMaster, targetLiveSubset, args)
+ salt.cmdRun(pepperEnv, targetLiveSubset, args)
} catch (Exception er) {
print(er)
}
@@ -88,23 +89,23 @@
args = 'export DEBIAN_FRONTEND=noninteractive; apt-get -y -q --allow-downgrades -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confold\" dist-upgrade;'
stage('Apply package upgrades on sample') {
- out = salt.runSaltCommand(saltMaster, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, args, commandKwargs)
+ out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, args, commandKwargs)
salt.printSaltCommandResult(out)
}
args = "sudo /usr/share/openvswitch/scripts/ovs-ctl start"
stage('Start ovs on sample nodes') {
- out = salt.runSaltCommand(saltMaster, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, args, commandKwargs)
+ out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, args, commandKwargs)
salt.printSaltCommandResult(out)
}
stage("Run Neutron state on sample nodes") {
- salt.enforceState(saltMaster, targetLiveSubset, ['neutron'])
+ salt.enforceState(pepperEnv, targetLiveSubset, ['neutron'])
}
stage("Run Highstate on sample nodes") {
try {
- salt.enforceHighstate(saltMaster, targetLiveSubset)
+ salt.enforceHighstate(pepperEnv, targetLiveSubset)
} catch (Exception er) {
common.errorMsg("Highstate was executed on ${targetLiveSubset} but something failed. Please check it and fix it accordingly.")
}
@@ -117,29 +118,29 @@
}
stage("Add new repos on all targeted nodes") {
- salt.enforceState(saltMaster, targetLiveAll, 'linux.system.repo')
+ salt.enforceState(pepperEnv, targetLiveAll, 'linux.system.repo')
}
args = 'export DEBIAN_FRONTEND=noninteractive; apt-get -y -q --allow-downgrades -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confold\" dist-upgrade;'
stage('Apply package upgrades on all targeted nodes') {
- out = salt.runSaltCommand(saltMaster, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, args, commandKwargs)
+ out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, args, commandKwargs)
salt.printSaltCommandResult(out)
}
args = "sudo /usr/share/openvswitch/scripts/ovs-ctl start"
stage('Start ovs on all targeted nodes') {
- out = salt.runSaltCommand(saltMaster, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, args, commandKwargs)
+ out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, args, commandKwargs)
salt.printSaltCommandResult(out)
}
stage("Run Neutron state on all targeted nodes") {
- salt.enforceState(saltMaster, targetLiveAll, ['neutron'])
+ salt.enforceState(pepperEnv, targetLiveAll, ['neutron'])
}
stage("Run Highstate on all targeted nodes") {
try {
- salt.enforceHighstate(saltMaster, targetLiveAll)
+ salt.enforceHighstate(pepperEnv, targetLiveAll)
} catch (Exception er) {
common.errorMsg("Highstate was executed ${targetLiveAll} but something failed. Please check it and fix it accordingly.")
}
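
// The upgrade above is deliberately staged: repos plus a dry run on
// targetTestSubset, a real upgrade and service checks on targetLiveSubset,
// and only then the same steps on targetLiveAll. A sketch of the rollout
// skeleton; the subset computation is hypothetical here, since it happens
// in hunks not shown in this diff:
//
//     def minions = salt.getMinions(pepperEnv, TARGET_SERVERS)
//     def targetLiveAll = minions.join(' or ')
//     def targetLiveSubset = minions.subList(0, 1).join(' or ')  // hypothetical sampling
//     salt.enforceState(pepperEnv, targetLiveSubset, ['neutron'])  // canary first
//     salt.enforceState(pepperEnv, targetLiveAll, ['neutron'])     // then the fleet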
diff --git a/restore-cassandra.groovy b/restore-cassandra.groovy
index 4b554b0..5fff159 100644
--- a/restore-cassandra.groovy
+++ b/restore-cassandra.groovy
@@ -9,14 +9,14 @@
def common = new com.mirantis.mk.Common()
def salt = new com.mirantis.mk.Salt()
+def python = new com.mirantis.mk.Python()
-
-def saltMaster
+def pepperEnv = "pepperEnv"
node() {
- stage('Connect to Salt API') {
- saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+ stage('Setup virtualenv for Pepper') {
+ python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
}
stage('Start restore') {
@@ -27,54 +27,54 @@
}
// Cassandra restore section
try {
- salt.runSaltProcessStep(saltMaster, 'I@opencontrail:control', 'service.stop', ['supervisor-database'], null, true)
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.stop', ['supervisor-database'], null, true)
} catch (Exception er) {
common.warningMsg('Supervisor-database service already stopped')
}
try {
- salt.cmdRun(saltMaster, 'I@opencontrail:control', "mkdir -p /root/cassandra/cassandra.bak")
+ salt.cmdRun(pepperEnv, 'I@opencontrail:control', "mkdir -p /root/cassandra/cassandra.bak")
} catch (Exception er) {
common.warningMsg('Directory already exists')
}
try {
- salt.cmdRun(saltMaster, 'I@opencontrail:control', "mv /var/lib/cassandra/* /root/cassandra/cassandra.bak")
+ salt.cmdRun(pepperEnv, 'I@opencontrail:control', "mv /var/lib/cassandra/* /root/cassandra/cassandra.bak")
} catch (Exception er) {
common.warningMsg('Files were already moved')
}
try {
- salt.cmdRun(saltMaster, 'I@opencontrail:control', "rm -rf /var/lib/cassandra/*")
+ salt.cmdRun(pepperEnv, 'I@opencontrail:control', "rm -rf /var/lib/cassandra/*")
} catch (Exception er) {
common.warningMsg('Directory already empty')
}
- _pillar = salt.getPillar(saltMaster, "I@cassandra:backup:client", 'cassandra:backup:backup_dir')
+ _pillar = salt.getPillar(pepperEnv, "I@cassandra:backup:client", 'cassandra:backup:backup_dir')
backup_dir = _pillar['return'][0].values()[0]
if(backup_dir == null || backup_dir.isEmpty()) { backup_dir='/var/backups/cassandra' }
print(backup_dir)
- salt.runSaltProcessStep(saltMaster, 'I@cassandra:backup:client', 'file.remove', ["${backup_dir}/dbrestored"], null, true)
+ salt.runSaltProcessStep(pepperEnv, 'I@cassandra:backup:client', 'file.remove', ["${backup_dir}/dbrestored"], null, true)
- salt.runSaltProcessStep(saltMaster, 'I@cassandra:backup:client', 'service.start', ['supervisor-database'], null, true)
+ salt.runSaltProcessStep(pepperEnv, 'I@cassandra:backup:client', 'service.start', ['supervisor-database'], null, true)
// wait until supervisor-database service is up
- salt.commandStatus(saltMaster, 'I@cassandra:backup:client', 'service supervisor-database status', 'running')
+ salt.commandStatus(pepperEnv, 'I@cassandra:backup:client', 'service supervisor-database status', 'running')
sleep(5)
// performs restore
- salt.cmdRun(saltMaster, 'I@cassandra:backup:client', "su root -c 'salt-call state.sls cassandra'")
- salt.runSaltProcessStep(saltMaster, 'I@cassandra:backup:client', 'system.reboot', null, null, true, 5)
- salt.runSaltProcessStep(saltMaster, 'I@opencontrail:control and not I@cassandra:backup:client', 'system.reboot', null, null, true, 5)
+ salt.cmdRun(pepperEnv, 'I@cassandra:backup:client', "su root -c 'salt-call state.sls cassandra'")
+ salt.runSaltProcessStep(pepperEnv, 'I@cassandra:backup:client', 'system.reboot', null, null, true, 5)
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control and not I@cassandra:backup:client', 'system.reboot', null, null, true, 5)
// wait until supervisor-database service is up
- salt.commandStatus(saltMaster, 'I@cassandra:backup:client', 'service supervisor-database status', 'running')
- salt.commandStatus(saltMaster, 'I@opencontrail:control and not I@cassandra:backup:client', 'service supervisor-database status', 'running')
+ salt.commandStatus(pepperEnv, 'I@cassandra:backup:client', 'service supervisor-database status', 'running')
+ salt.commandStatus(pepperEnv, 'I@opencontrail:control and not I@cassandra:backup:client', 'service supervisor-database status', 'running')
sleep(5)
- salt.runSaltProcessStep(saltMaster, 'I@opencontrail:control', 'service.restart', ['supervisor-database'], null, true)
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.restart', ['supervisor-database'], null, true)
// wait until contrail-status is up
- salt.commandStatus(saltMaster, 'I@opencontrail:control', "contrail-status | grep -v == | grep -v \'disabled on boot\' | grep -v nodemgr | grep -v active | grep -v backup", null, false)
+ salt.commandStatus(pepperEnv, 'I@opencontrail:control', "contrail-status | grep -v == | grep -v \'disabled on boot\' | grep -v nodemgr | grep -v active | grep -v backup", null, false)
- salt.cmdRun(saltMaster, 'I@opencontrail:control', "nodetool status")
- salt.cmdRun(saltMaster, 'I@opencontrail:control', "contrail-status")
+ salt.cmdRun(pepperEnv, 'I@opencontrail:control', "nodetool status")
+ salt.cmdRun(pepperEnv, 'I@opencontrail:control', "contrail-status")
}
}
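
// The backup directory comes from pillar with a hardcoded fallback — the
// same lookup pattern recurs in the zookeeper and xtrabackup restores below:
//
//     def _pillar = salt.getPillar(pepperEnv, "I@cassandra:backup:client", 'cassandra:backup:backup_dir')
//     def backup_dir = _pillar['return'][0].values()[0]
//     if (backup_dir == null || backup_dir.isEmpty()) { backup_dir = '/var/backups/cassandra' }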
diff --git a/restore-zookeeper.groovy b/restore-zookeeper.groovy
index 3ead8aa..54ae8c6 100644
--- a/restore-zookeeper.groovy
+++ b/restore-zookeeper.groovy
@@ -9,14 +9,14 @@
def common = new com.mirantis.mk.Common()
def salt = new com.mirantis.mk.Salt()
+def python = new com.mirantis.mk.Python()
-
-def saltMaster
+def pepperEnv = "pepperEnv"
node() {
- stage('Connect to Salt API') {
- saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+ stage('Setup virtualenv for Pepper') {
+ python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
}
stage('Start restore') {
@@ -27,63 +27,63 @@
}
// Zookeeper restore section
try {
- salt.runSaltProcessStep(saltMaster, 'I@opencontrail:control', 'service.stop', ['supervisor-config'], null, true)
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.stop', ['supervisor-config'], null, true)
} catch (Exception er) {
common.warningMsg('Supervisor-config service already stopped')
}
try {
- salt.runSaltProcessStep(saltMaster, 'I@opencontrail:control', 'service.stop', ['supervisor-control'], null, true)
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.stop', ['supervisor-control'], null, true)
} catch (Exception er) {
common.warningMsg('Supervisor-control service already stopped')
}
try {
- salt.runSaltProcessStep(saltMaster, 'I@opencontrail:control', 'service.stop', ['zookeeper'], null, true)
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.stop', ['zookeeper'], null, true)
} catch (Exception er) {
common.warningMsg('Zookeeper service already stopped')
}
//sleep(5)
// wait until zookeeper service is down
- salt.commandStatus(saltMaster, 'I@opencontrail:control', 'service zookeeper status', 'stop')
+ salt.commandStatus(pepperEnv, 'I@opencontrail:control', 'service zookeeper status', 'stop')
try {
- salt.cmdRun(saltMaster, 'I@opencontrail:control', "mkdir -p /root/zookeeper/zookeeper.bak")
+ salt.cmdRun(pepperEnv, 'I@opencontrail:control', "mkdir -p /root/zookeeper/zookeeper.bak")
} catch (Exception er) {
common.warningMsg('Directory already exists')
}
try {
- salt.cmdRun(saltMaster, 'I@opencontrail:control', "mv /var/lib/zookeeper/version-2/* /root/zookeeper/zookeeper.bak")
+ salt.cmdRun(pepperEnv, 'I@opencontrail:control', "mv /var/lib/zookeeper/version-2/* /root/zookeeper/zookeeper.bak")
} catch (Exception er) {
common.warningMsg('Files were already moved')
}
try {
- salt.cmdRun(saltMaster, 'I@opencontrail:control', "rm -rf /var/lib/zookeeper/version-2/*")
+ salt.cmdRun(pepperEnv, 'I@opencontrail:control', "rm -rf /var/lib/zookeeper/version-2/*")
} catch (Exception er) {
common.warningMsg('Directory already empty')
}
- _pillar = salt.getPillar(saltMaster, "I@opencontrail:control", 'zookeeper:backup:backup_dir')
+ _pillar = salt.getPillar(pepperEnv, "I@opencontrail:control", 'zookeeper:backup:backup_dir')
backup_dir = _pillar['return'][0].values()[0]
if(backup_dir == null || backup_dir.isEmpty()) { backup_dir='/var/backups/zookeeper' }
print(backup_dir)
- salt.runSaltProcessStep(saltMaster, 'I@opencontrail:control', 'file.remove', ["${backup_dir}/dbrestored"], null, true)
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'file.remove', ["${backup_dir}/dbrestored"], null, true)
// performs restore
- salt.cmdRun(saltMaster, 'I@opencontrail:control', "su root -c 'salt-call state.sls zookeeper'")
+ salt.cmdRun(pepperEnv, 'I@opencontrail:control', "su root -c 'salt-call state.sls zookeeper'")
- salt.runSaltProcessStep(saltMaster, 'I@opencontrail:control', 'service.start', ['zookeeper'], null, true)
- salt.runSaltProcessStep(saltMaster, 'I@opencontrail:control', 'service.start', ['supervisor-config'], null, true)
- salt.runSaltProcessStep(saltMaster, 'I@opencontrail:control', 'service.start', ['supervisor-control'], null, true)
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.start', ['zookeeper'], null, true)
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.start', ['supervisor-config'], null, true)
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.start', ['supervisor-control'], null, true)
// wait until contrail-status is up
- salt.commandStatus(saltMaster, 'I@opencontrail:control', "contrail-status | grep -v == | grep -v \'disabled on boot\' | grep -v nodemgr | grep -v active | grep -v backup", null, false)
+ salt.commandStatus(pepperEnv, 'I@opencontrail:control', "contrail-status | grep -v == | grep -v \'disabled on boot\' | grep -v nodemgr | grep -v active | grep -v backup", null, false)
- salt.cmdRun(saltMaster, 'I@opencontrail:control', "ls /var/lib/zookeeper/version-2")
+ salt.cmdRun(pepperEnv, 'I@opencontrail:control', "ls /var/lib/zookeeper/version-2")
try {
- salt.cmdRun(saltMaster, 'I@opencontrail:control', "echo stat | nc localhost 2181")
+ salt.cmdRun(pepperEnv, 'I@opencontrail:control', "echo stat | nc localhost 2181")
} catch (Exception er) {
common.warningMsg('Check which node is zookeeper leader')
}
- salt.cmdRun(saltMaster, 'I@opencontrail:control', "contrail-status")
+ salt.cmdRun(pepperEnv, 'I@opencontrail:control', "contrail-status")
}
}
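
// salt.commandStatus acts as a poll-until-match gate between restore steps.
// Judging from the calls above, the third argument is the substring expected
// in the command output, and the trailing (null, false) form polls without
// matching output or failing the build — an inference from usage, not a
// documented contract:
//
//     salt.commandStatus(pepperEnv, 'I@opencontrail:control', 'service zookeeper status', 'stop')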
diff --git a/test-run-rally.groovy b/test-run-rally.groovy
index 4cf3bd3..d62d9be 100644
--- a/test-run-rally.groovy
+++ b/test-run-rally.groovy
@@ -17,9 +17,9 @@
common = new com.mirantis.mk.Common()
salt = new com.mirantis.mk.Salt()
test = new com.mirantis.mk.Test()
+def python = new com.mirantis.mk.Python()
-// Define global variables
-def saltMaster
+def pepperEnv = "pepperEnv"
node("python") {
try {
@@ -27,9 +27,8 @@
//
// Prepare connection
//
- stage ('Connect to salt master') {
- // Connect to Salt master
- saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+ stage('Setup virtualenv for Pepper') {
+ python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
}
//
@@ -37,14 +36,14 @@
//
stage('Run OpenStack Rally scenario') {
- test.runRallyScenarios(saltMaster, IMAGE_LINK, TEST_TARGET, RALLY_SCENARIO, "/home/rally/rally_reports/",
+ test.runRallyScenarios(pepperEnv, IMAGE_LINK, TEST_TARGET, RALLY_SCENARIO, "/home/rally/rally_reports/",
DO_CLEANUP_RESOURCES)
}
stage('Copy test reports') {
- test.copyTempestResults(saltMaster, TEST_TARGET)
+ test.copyTempestResults(pepperEnv, TEST_TARGET)
}
stage('Archiving test artifacts') {
- test.archiveRallyArtifacts(saltMaster, TEST_TARGET)
+ test.archiveRallyArtifacts(pepperEnv, TEST_TARGET)
}
} catch (Throwable e) {
currentBuild.result = 'FAILURE'
@@ -52,8 +51,8 @@
} finally {
if (CLEANUP_REPORTS_AND_CONTAINER.toBoolean()) {
stage('Cleanup reports and container') {
- test.removeReports(saltMaster, TEST_TARGET, "rally_reports", 'rally_reports.tar')
- test.removeDockerContainer(saltMaster, TEST_TARGET, IMAGE_LINK)
+ test.removeReports(pepperEnv, TEST_TARGET, "rally_reports", 'rally_reports.tar')
+ test.removeDockerContainer(pepperEnv, TEST_TARGET, IMAGE_LINK)
}
}
}
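
// Cleanup lives in `finally`, so reports and the Rally container are removed
// even when a test stage throws — a condensed sketch of the guard above:
//
//     try {
//         test.runRallyScenarios(pepperEnv, IMAGE_LINK, TEST_TARGET, RALLY_SCENARIO, "/home/rally/rally_reports/", DO_CLEANUP_RESOURCES)
//     } finally {
//         if (CLEANUP_REPORTS_AND_CONTAINER.toBoolean()) {
//             test.removeReports(pepperEnv, TEST_TARGET, "rally_reports", 'rally_reports.tar')
//             test.removeDockerContainer(pepperEnv, TEST_TARGET, IMAGE_LINK)
//         }
//     }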
diff --git a/test-run-tempest.groovy b/test-run-tempest.groovy
index 4785992..9da8b16 100644
--- a/test-run-tempest.groovy
+++ b/test-run-tempest.groovy
@@ -17,19 +17,15 @@
common = new com.mirantis.mk.Common()
salt = new com.mirantis.mk.Salt()
test = new com.mirantis.mk.Test()
+def python = new com.mirantis.mk.Python()
-// Define global variables
-def saltMaster
+def pepperEnv = "pepperEnv"
node("python") {
try {
- //
- // Prepare connection
- //
- stage ('Connect to salt master') {
- // Connect to Salt master
- saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+ stage('Setup virtualenv for Pepper') {
+ python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
}
//
@@ -37,14 +33,14 @@
//
stage('Run OpenStack Tempest tests') {
- test.runTempestTests(saltMaster, IMAGE_LINK, TEST_TARGET, TEST_TEMPEST_PATTERN, "/home/rally/rally_reports/",
+ test.runTempestTests(pepperEnv, IMAGE_LINK, TEST_TARGET, TEST_TEMPEST_PATTERN, "/home/rally/rally_reports/",
DO_CLEANUP_RESOURCES)
}
stage('Copy test reports') {
- test.copyTempestResults(saltMaster, TEST_TARGET)
+ test.copyTempestResults(pepperEnv, TEST_TARGET)
}
stage('Archiving test artifacts') {
- test.archiveRallyArtifacts(saltMaster, TEST_TARGET)
+ test.archiveRallyArtifacts(pepperEnv, TEST_TARGET)
}
} catch (Throwable e) {
currentBuild.result = 'FAILURE'
@@ -52,8 +48,8 @@
} finally {
if (CLEANUP_REPORTS_AND_CONTAINER.toBoolean()) {
stage('Cleanup reports and container') {
- test.removeReports(saltMaster, TEST_TARGET, "rally_reports", 'rally_reports.tar')
- test.removeDockerContainer(saltMaster, TEST_TARGET, IMAGE_LINK)
+ test.removeReports(pepperEnv, TEST_TARGET, "rally_reports", 'rally_reports.tar')
+ test.removeDockerContainer(pepperEnv, TEST_TARGET, IMAGE_LINK)
}
}
}
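
// test-run-rally and test-run-tempest are structurally identical apart from
// the stage that launches the tests; a hypothetical helper (not part of this
// change) could fold the shared report/cleanup plumbing into one place:
//
//     def withReports(venv, target, Closure runTests) {
//         try {
//             runTests()
//             test.copyTempestResults(venv, target)
//             test.archiveRallyArtifacts(venv, target)
//         } finally {
//             if (CLEANUP_REPORTS_AND_CONTAINER.toBoolean()) {
//                 test.removeReports(venv, target, "rally_reports", 'rally_reports.tar')
//                 test.removeDockerContainer(venv, target, IMAGE_LINK)
//             }
//         }
//     }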
diff --git a/test-service.groovy b/test-service.groovy
index 232b386..a03865a 100644
--- a/test-service.groovy
+++ b/test-service.groovy
@@ -20,19 +20,15 @@
git = new com.mirantis.mk.Git()
salt = new com.mirantis.mk.Salt()
test = new com.mirantis.mk.Test()
+def python = new com.mirantis.mk.Python()
-// Define global variables
-def saltMaster
+def pepperEnv = "pepperEnv"
node("python") {
try {
- //
- // Prepare connection
- //
- stage ('Connect to salt master') {
- // Connect to Salt master
- saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+ stage('Setup virtualenv for Pepper') {
+ python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
}
//
@@ -46,11 +42,11 @@
def output_file = image.replaceAll('/', '-') + '.output'
// run image
- test.runConformanceTests(saltMaster, 'ctl01*', TEST_K8S_API_SERVER, image)
+ test.runConformanceTests(pepperEnv, 'ctl01*', TEST_K8S_API_SERVER, image)
// collect output
sh "mkdir -p ${artifacts_dir}"
- file_content = salt.getFileContent(saltMaster, 'ctl01*', '/tmp/' + output_file)
+ file_content = salt.getFileContent(pepperEnv, 'ctl01*', '/tmp/' + output_file)
writeFile file: "${artifacts_dir}${output_file}", text: file_content
sh "cat ${artifacts_dir}${output_file}"
@@ -63,11 +59,11 @@
def output_file = image.replaceAll('/', '-') + '.output'
// run image
- test.runConformanceTests(saltMaster, 'ctl01*', TEST_K8S_API_SERVER, image)
+ test.runConformanceTests(pepperEnv, 'ctl01*', TEST_K8S_API_SERVER, image)
// collect output
sh "mkdir -p ${artifacts_dir}"
- file_content = salt.getFileContent(saltMaster, 'ctl01*', '/tmp/' + output_file)
+ file_content = salt.getFileContent(pepperEnv, 'ctl01*', '/tmp/' + output_file)
writeFile file: "${artifacts_dir}${output_file}", text: file_content
sh "cat ${artifacts_dir}${output_file}"
@@ -78,14 +74,14 @@
if (common.checkContains('TEST_SERVICE', 'openstack')) {
if (common.checkContains('TEST_DOCKER_INSTALL', 'true')) {
- test.install_docker(saltMaster, TEST_TEMPEST_TARGET)
+ test.install_docker(pepperEnv, TEST_TEMPEST_TARGET)
}
stage('Run OpenStack tests') {
- test.runTempestTests(saltMaster, TEST_TEMPEST_IMAGE, TEST_TEMPEST_TARGET, TEST_TEMPEST_PATTERN)
+ test.runTempestTests(pepperEnv, TEST_TEMPEST_IMAGE, TEST_TEMPEST_TARGET, TEST_TEMPEST_PATTERN)
}
- writeFile(file: 'report.xml', text: salt.getFileContent(saltMaster, TEST_TEMPEST_TARGET, '/root/report.xml'))
+ writeFile(file: 'report.xml', text: salt.getFileContent(pepperEnv, TEST_TEMPEST_TARGET, '/root/report.xml'))
junit(keepLongStdio: true, testResults: 'report.xml', healthScaleFactor: Double.parseDouble(TEST_JUNIT_RATIO))
def testResults = test.collectJUnitResults(currentBuild.rawBuild.getAction(hudson.tasks.test.AbstractTestResultAction.class))
if(testResults){
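
// The tempest report is produced on the minion, copied into the workspace
// via salt.getFileContent, and only then handed to the JUnit publisher — a
// sketch of that hand-off as used above:
//
//     writeFile(file: 'report.xml', text: salt.getFileContent(pepperEnv, TEST_TEMPEST_TARGET, '/root/report.xml'))
//     junit(keepLongStdio: true, testResults: 'report.xml', healthScaleFactor: Double.parseDouble(TEST_JUNIT_RATIO))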
diff --git a/update-jenkins-master-jobs.groovy b/update-jenkins-master-jobs.groovy
index 56edb10..65a16ca 100644
--- a/update-jenkins-master-jobs.groovy
+++ b/update-jenkins-master-jobs.groovy
@@ -10,20 +10,21 @@
def common = new com.mirantis.mk.Common()
def salt = new com.mirantis.mk.Salt()
+def python = new com.mirantis.mk.Python()
-def saltMaster
+def pepperEnv = "pepperEnv"
def target = ['expression': TARGET_SERVERS, 'type': 'compound']
def result
node("python") {
try {
- stage('Connect to Salt master') {
- saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+ stage('Setup virtualenv for Pepper') {
+ python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
}
stage('Update Jenkins jobs') {
- result = salt.runSaltCommand(saltMaster, 'local', target, 'state.apply', null, 'jenkins.client')
+ result = salt.runSaltCommand(pepperEnv, 'local', target, 'state.apply', null, 'jenkins.client')
salt.checkResult(result)
}
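
// The update-* pipelines share one idiom: run state.apply for a single state
// through the Salt API against a compound target, then gate the build on the
// result. A sketch, assuming salt.checkResult throws when any state in the
// returned data failed:
//
//     def target = ['expression': TARGET_SERVERS, 'type': 'compound']
//     def result = salt.runSaltCommand(pepperEnv, 'local', target, 'state.apply', null, 'jenkins.client')
//     salt.checkResult(result)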
diff --git a/update-package.groovy b/update-package.groovy
index c946123..552a361 100644
--- a/update-package.groovy
+++ b/update-package.groovy
@@ -14,8 +14,9 @@
def common = new com.mirantis.mk.Common()
def salt = new com.mirantis.mk.Salt()
+def python = new com.mirantis.mk.Python()
-def saltMaster
+def pepperEnv = "pepperEnv"
def targetTestSubset
def targetLiveSubset
def targetLiveAll
@@ -28,12 +29,12 @@
node() {
try {
- stage('Connect to Salt master') {
- saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+ stage('Setup virtualenv for Pepper') {
+ python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
}
stage('List target servers') {
- minions = salt.getMinions(saltMaster, TARGET_SERVERS)
+ minions = salt.getMinions(pepperEnv, TARGET_SERVERS)
if (minions.isEmpty()) {
throw new Exception("No minion was targeted")
@@ -54,7 +55,7 @@
stage("List package upgrades") {
common.infoMsg("Listing all the packages that have a new update available on test nodes: ${targetTestSubset}")
- salt.runSaltProcessStep(saltMaster, targetTestSubset, 'pkg.list_upgrades', [], null, true)
+ salt.runSaltProcessStep(pepperEnv, targetTestSubset, 'pkg.list_upgrades', [], null, true)
if(TARGET_PACKAGES != "" && TARGET_PACKAGES != "*"){
common.infoMsg("Note that only the ${TARGET_PACKAGES} would be installed from the above list of available updates on the ${targetTestSubset}")
}
@@ -88,7 +89,7 @@
}
stage('Apply package upgrades on sample') {
- out = salt.runSaltCommand(saltMaster, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, packages, commandKwargs)
+ out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, packages, commandKwargs)
salt.printSaltCommandResult(out)
}
@@ -99,7 +100,7 @@
}
stage('Apply package upgrades on all nodes') {
- out = salt.runSaltCommand(saltMaster, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, packages, commandKwargs)
+ out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, packages, commandKwargs)
salt.printSaltCommandResult(out)
}
diff --git a/update-reclass-metadata.groovy b/update-reclass-metadata.groovy
index 6fb539a..80a71ec 100644
--- a/update-reclass-metadata.groovy
+++ b/update-reclass-metadata.groovy
@@ -10,21 +10,22 @@
def common = new com.mirantis.mk.Common()
def salt = new com.mirantis.mk.Salt()
+def python = new com.mirantis.mk.Python()
-def saltMaster
+def pepperEnv = "pepperEnv"
def target = ['expression': TARGET_SERVERS, 'type': 'compound']
def result
node("python") {
try {
- stage('Connect to Salt master') {
- saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+ stage('Setup virtualenv for Pepper') {
+ python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
}
stage('Update Reclass model') {
- result = salt.runSaltCommand(saltMaster, 'local', target, 'state.apply', null, 'reclass.storage')
- result = salt.runSaltCommand(saltMaster, 'local', target, 'state.apply', null, 'reclass.storage.node')
+ result = salt.runSaltCommand(pepperEnv, 'local', target, 'state.apply', null, 'reclass.storage')
+ result = salt.runSaltCommand(pepperEnv, 'local', target, 'state.apply', null, 'reclass.storage.node')
salt.checkResult(result)
}
diff --git a/update-salt-master-formulas.groovy b/update-salt-master-formulas.groovy
index f3e7d1c..0cb995c 100644
--- a/update-salt-master-formulas.groovy
+++ b/update-salt-master-formulas.groovy
@@ -10,20 +10,21 @@
def common = new com.mirantis.mk.Common()
def salt = new com.mirantis.mk.Salt()
+def python = new com.mirantis.mk.Python()
-def saltMaster
+def pepperEnv = "pepperEnv"
def target = ['expression': TARGET_SERVERS, 'type': 'compound']
def result
node("python") {
try {
- stage('Connect to Salt master') {
- saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+ stage('Setup virtualenv for Pepper') {
+ python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
}
stage('Update Salt formulas') {
- result = salt.runSaltCommand(saltMaster, 'local', target, 'state.apply', null, 'salt.master.env')
+ result = salt.runSaltCommand(pepperEnv, 'local', target, 'state.apply', null, 'salt.master.env')
salt.checkResult(result)
}
diff --git a/validate-cloud.groovy b/validate-cloud.groovy
index d412877..9e13f7c 100644
--- a/validate-cloud.groovy
+++ b/validate-cloud.groovy
@@ -31,18 +31,19 @@
salt = new com.mirantis.mk.Salt()
test = new com.mirantis.mk.Test()
validate = new com.mirantis.mcp.Validate()
+def python = new com.mirantis.mk.Python()
-def saltMaster
+def pepperEnv = "pepperEnv"
def artifacts_dir = 'validation_artifacts/'
node() {
try{
- stage('Initialization') {
- saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+ stage('Setup virtualenv for Pepper') {
+ python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
}
stage('Configure') {
- validate.installDocker(saltMaster, TARGET_NODE)
+ validate.installDocker(pepperEnv, TARGET_NODE)
if (ACCUMULATE_RESULTS.toBoolean() == false) {
sh "rm -r ${artifacts_dir}"
}
@@ -51,12 +52,12 @@
"-e spt_floating_network=${SPT_FLOATING_NETWORK} " +
"-e spt_image=${SPT_IMAGE} -e spt_user=${SPT_USER} " +
"-e spt_flavor=${SPT_FLAVOR} -e spt_availability_zone=${SPT_AVAILABILITY_ZONE} "
- validate.runContainerConfiguration(saltMaster, TEST_IMAGE, TARGET_NODE, artifacts_dir, spt_variables)
+ validate.runContainerConfiguration(pepperEnv, TEST_IMAGE, TARGET_NODE, artifacts_dir, spt_variables)
}
stage('Run Tempest tests') {
if (RUN_TEMPEST_TESTS.toBoolean() == true) {
- validate.runTempestTests(saltMaster, TARGET_NODE, artifacts_dir, TEMPEST_TEST_SET)
+ validate.runTempestTests(pepperEnv, TARGET_NODE, artifacts_dir, TEMPEST_TEST_SET)
} else {
common.infoMsg("Skipping Tempest tests")
}
@@ -64,7 +65,7 @@
stage('Run Rally tests') {
if (RUN_RALLY_TESTS.toBoolean() == true) {
- validate.runRallyTests(saltMaster, TARGET_NODE, artifacts_dir)
+ validate.runRallyTests(pepperEnv, TARGET_NODE, artifacts_dir)
} else {
common.infoMsg("Skipping Rally tests")
}
@@ -72,7 +73,7 @@
stage('Run SPT tests') {
if (RUN_SPT_TESTS.toBoolean() == true) {
- validate.runSptTests(saltMaster, TARGET_NODE, artifacts_dir)
+ validate.runSptTests(pepperEnv, TARGET_NODE, artifacts_dir)
} else {
common.infoMsg("Skipping SPT tests")
}
@@ -84,9 +85,9 @@
def output_file = 'k8s-bootstrap-tests.txt'
def containerName = 'conformance_tests'
def outfile = "/tmp/" + image.replaceAll('/', '-') + '.output'
- test.runConformanceTests(saltMaster, TEST_K8S_NODE, TEST_K8S_API_SERVER, image)
+ test.runConformanceTests(pepperEnv, TEST_K8S_NODE, TEST_K8S_API_SERVER, image)
- def file_content = validate.getFileContent(saltMaster, TEST_K8S_NODE, outfile)
+ def file_content = validate.getFileContent(pepperEnv, TEST_K8S_NODE, outfile)
writeFile file: "${artifacts_dir}${output_file}", text: file_content
} else {
common.infoMsg("Skipping k8s bootstrap tests")
@@ -99,9 +100,9 @@
def output_file = 'report-k8s-e2e-tests.txt'
def containerName = 'conformance_tests'
def outfile = "/tmp/" + image.replaceAll('/', '-') + '.output'
- test.runConformanceTests(saltMaster, TEST_K8S_NODE, TEST_K8S_API_SERVER, image)
+ test.runConformanceTests(pepperEnv, TEST_K8S_NODE, TEST_K8S_API_SERVER, image)
- def file_content = validate.getFileContent(saltMaster, TEST_K8S_NODE, outfile)
+ def file_content = validate.getFileContent(pepperEnv, TEST_K8S_NODE, outfile)
writeFile file: "${artifacts_dir}${output_file}", text: file_content
} else {
common.infoMsg("Skipping k8s conformance e2e tests")
@@ -110,7 +111,7 @@
stage('Generate report') {
if (GENERATE_REPORT.toBoolean() == true) {
print("Generating html test report ...")
- validate.generateTestReport(saltMaster, TARGET_NODE, artifacts_dir)
+ validate.generateTestReport(pepperEnv, TARGET_NODE, artifacts_dir)
} else {
common.infoMsg("Skipping report generation")
}
@@ -124,6 +125,6 @@
currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
throw e
} finally {
- validate.runCleanup(saltMaster, TARGET_NODE, artifacts_dir)
+ validate.runCleanup(pepperEnv, TARGET_NODE, artifacts_dir)
}
}
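
// Every validation stage is gated by its own boolean job parameter, with
// cleanup unconditional in `finally` — the gating pattern as used above:
//
//     if (RUN_TEMPEST_TESTS.toBoolean()) {
//         validate.runTempestTests(pepperEnv, TARGET_NODE, artifacts_dir, TEMPEST_TEST_SET)
//     } else {
//         common.infoMsg("Skipping Tempest tests")
//     }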
diff --git a/xtrabackup-restore-mysql-db.groovy b/xtrabackup-restore-mysql-db.groovy
index 303c282..ba51972 100644
--- a/xtrabackup-restore-mysql-db.groovy
+++ b/xtrabackup-restore-mysql-db.groovy
@@ -9,14 +9,14 @@
def common = new com.mirantis.mk.Common()
def salt = new com.mirantis.mk.Salt()
+def python = new com.mirantis.mk.Python()
-
-def saltMaster
+def pepperEnv = "pepperEnv"
node() {
- stage('Connect to Salt API') {
- saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+ stage('Setup virtualenv for Pepper') {
+ python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
}
stage('Start restore') {
@@ -27,59 +27,59 @@
}
// database restore section
try {
- salt.runSaltProcessStep(saltMaster, 'I@galera:slave', 'service.stop', ['mysql'], null, true)
+ salt.runSaltProcessStep(pepperEnv, 'I@galera:slave', 'service.stop', ['mysql'], null, true)
} catch (Exception er) {
common.warningMsg('Mysql service already stopped')
}
try {
- salt.runSaltProcessStep(saltMaster, 'I@galera:master', 'service.stop', ['mysql'], null, true)
+ salt.runSaltProcessStep(pepperEnv, 'I@galera:master', 'service.stop', ['mysql'], null, true)
} catch (Exception er) {
common.warningMsg('Mysql service already stopped')
}
try {
- salt.cmdRun(saltMaster, 'I@galera:slave', "rm /var/lib/mysql/ib_logfile*")
+ salt.cmdRun(pepperEnv, 'I@galera:slave', "rm /var/lib/mysql/ib_logfile*")
} catch (Exception er) {
common.warningMsg('Files are not present')
}
try {
- salt.cmdRun(saltMaster, 'I@galera:master', "mkdir -p /root/mysql/mysql.bak")
+ salt.cmdRun(pepperEnv, 'I@galera:master', "mkdir -p /root/mysql/mysql.bak")
} catch (Exception er) {
common.warningMsg('Directory already exists')
}
try {
- salt.cmdRun(saltMaster, 'I@galera:master', "mv /var/lib/mysql/* /root/mysql/mysql.bak")
+ salt.cmdRun(pepperEnv, 'I@galera:master', "mv /var/lib/mysql/* /root/mysql/mysql.bak")
} catch (Exception er) {
common.warningMsg('Files were already moved')
}
try {
- salt.cmdRun(saltMaster, 'I@galera:master', "rm -rf /var/lib/mysql/*")
+ salt.cmdRun(pepperEnv, 'I@galera:master', "rm -rf /var/lib/mysql/*")
} catch (Exception er) {
common.warningMsg('Directory already empty')
}
try {
- salt.runSaltProcessStep(saltMaster, 'I@galera:master', 'file.remove', ["/var/lib/mysql/.galera_bootstrap"], null, true)
+ salt.runSaltProcessStep(pepperEnv, 'I@galera:master', 'file.remove', ["/var/lib/mysql/.galera_bootstrap"], null, true)
} catch (Exception er) {
common.warningMsg('File is not present')
}
- salt.cmdRun(saltMaster, 'I@galera:master', "sed -i '/gcomm/c\\wsrep_cluster_address=\"gcomm://\"' /etc/mysql/my.cnf")
- _pillar = salt.getPillar(saltMaster, "I@galera:master", 'xtrabackup:client:backup_dir')
+ salt.cmdRun(pepperEnv, 'I@galera:master', "sed -i '/gcomm/c\\wsrep_cluster_address=\"gcomm://\"' /etc/mysql/my.cnf")
+ _pillar = salt.getPillar(pepperEnv, "I@galera:master", 'xtrabackup:client:backup_dir')
backup_dir = _pillar['return'][0].values()[0]
if(backup_dir == null || backup_dir.isEmpty()) { backup_dir='/var/backups/mysql/xtrabackup' }
print(backup_dir)
- salt.runSaltProcessStep(saltMaster, 'I@galera:master', 'file.remove', ["${backup_dir}/dbrestored"], null, true)
- salt.cmdRun(saltMaster, 'I@xtrabackup:client', "su root -c 'salt-call state.sls xtrabackup'")
- salt.runSaltProcessStep(saltMaster, 'I@galera:master', 'service.start', ['mysql'], null, true)
+ salt.runSaltProcessStep(pepperEnv, 'I@galera:master', 'file.remove', ["${backup_dir}/dbrestored"], null, true)
+ salt.cmdRun(pepperEnv, 'I@xtrabackup:client', "su root -c 'salt-call state.sls xtrabackup'")
+ salt.runSaltProcessStep(pepperEnv, 'I@galera:master', 'service.start', ['mysql'], null, true)
// wait until mysql service on galera master is up
- salt.commandStatus(saltMaster, 'I@galera:master', 'service mysql status', 'running')
+ salt.commandStatus(pepperEnv, 'I@galera:master', 'service mysql status', 'running')
- salt.runSaltProcessStep(saltMaster, 'I@galera:slave', 'service.start', ['mysql'], null, true)
+ salt.runSaltProcessStep(pepperEnv, 'I@galera:slave', 'service.start', ['mysql'], null, true)
try {
- salt.commandStatus(saltMaster, 'I@galera:slave', 'service mysql status', 'running')
+ salt.commandStatus(pepperEnv, 'I@galera:slave', 'service mysql status', 'running')
} catch (Exception er) {
common.warningMsg('Either there are no galera slaves or something failed when starting mysql on galera slaves')
}
sleep(5)
- salt.cmdRun(saltMaster, 'I@galera:master', "su root -c 'salt-call mysql.status | grep -A1 wsrep_cluster_size'")
+ salt.cmdRun(pepperEnv, 'I@galera:master', "su root -c 'salt-call mysql.status | grep -A1 wsrep_cluster_size'")
}
}
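
// Rewriting wsrep_cluster_address to an empty gcomm:// list is the standard
// Galera bootstrap: the restored master starts a new cluster and the slaves
// rejoin it, which is why the pipeline ends by checking wsrep_cluster_size —
// condensed from the steps above:
//
//     salt.cmdRun(pepperEnv, 'I@galera:master', "sed -i '/gcomm/c\\wsrep_cluster_address=\"gcomm://\"' /etc/mysql/my.cnf")
//     salt.runSaltProcessStep(pepperEnv, 'I@galera:master', 'service.start', ['mysql'], null, true)
//     salt.cmdRun(pepperEnv, 'I@galera:master', "su root -c 'salt-call mysql.status | grep -A1 wsrep_cluster_size'")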