Use Pepper instead of sending HTTP requests from the Jenkins master
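
Previously the pipeline created an HTTP connection to the Salt API
directly from the Jenkins master and passed the connection object to
every salt.* helper:

    // old pattern: connection object held by the Jenkins master
    def saltMaster = salt.connection(saltMasterUrl, SALT_MASTER_CREDENTIALS)
    salt.enforceState(saltMaster, 'I@haproxy:proxy', 'haproxy,keepalived')

Now the pipeline sets up a local Pepper virtualenv once and passes its
path to the same helpers, which are expected to drive the Salt API via
the pepper CLI from that venv. A sketch of the new call pattern
(setupPepperVirtualenv itself lives in the shared pipeline library):

    // new pattern: the venv path string replaces the connection object
    def pepperEnv = "pepperEnv"
    python.setupPepperVirtualenv(pepperEnv, saltMasterUrl, SALT_MASTER_CREDENTIALS)
    salt.enforceState(pepperEnv, 'I@haproxy:proxy', 'haproxy,keepalived')
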
Change-Id: I2ddfe19ae9c70731989da7ba07fa7720da797721
diff --git a/cicd-lab-pipeline.groovy b/cicd-lab-pipeline.groovy
index 782a051..d865b25 100644
--- a/cicd-lab-pipeline.groovy
+++ b/cicd-lab-pipeline.groovy
@@ -31,13 +31,15 @@
openstack = new com.mirantis.mk.Openstack()
salt = new com.mirantis.mk.Salt()
orchestrate = new com.mirantis.mk.Orchestrate()
+def python = new com.mirantis.mk.Python()
+
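+// Path of the Pepper virtualenv; salt.* helpers receive this string in
+// place of the former HTTP connection object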
+def pepperEnv = "pepperEnv"
_MAX_PERMITTED_STACKS = 2
node {
try {
// connection objects
def openstackCloud
- def saltMaster
// value defaults
def openstackVersion = OPENSTACK_API_CLIENT ? OPENSTACK_API_CLIENT : 'liberty'
@@ -115,7 +117,7 @@
saltMasterHost = openstack.getHeatStackOutputParam(openstackCloud, HEAT_STACK_NAME, 'salt_master_ip', openstackEnv)
currentBuild.description = "${HEAT_STACK_NAME}: ${saltMasterHost}"
saltMasterUrl = "http://${saltMasterHost}:${saltMasterPort}"
- saltMaster = salt.connection(saltMasterUrl, SALT_MASTER_CREDENTIALS)
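+ // Prepare a local Pepper virtualenv instead of holding an HTTP
+ // connection object open on the Jenkins master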
+ python.setupPepperVirtualenv(pepperEnv, saltMasterUrl, SALT_MASTER_CREDENTIALS)
}
//
@@ -128,42 +130,42 @@
// sync_all
// linux,openssh,salt.minion.ntp
- orchestrate.installFoundationInfra(saltMaster)
- orchestrate.validateFoundationInfra(saltMaster)
+ orchestrate.installFoundationInfra(pepperEnv)
+ orchestrate.validateFoundationInfra(pepperEnv)
}
stage("Deploy GlusterFS") {
- salt.enforceState(saltMaster, 'I@glusterfs:server', 'glusterfs.server.service', true)
+ salt.enforceState(pepperEnv, 'I@glusterfs:server', 'glusterfs.server.service', true)
retry(2) {
- salt.enforceState(saltMaster, 'ci01*', 'glusterfs.server.setup', true)
+ salt.enforceState(pepperEnv, 'ci01*', 'glusterfs.server.setup', true)
}
sleep(5)
- salt.enforceState(saltMaster, 'I@glusterfs:client', 'glusterfs.client', true)
+ salt.enforceState(pepperEnv, 'I@glusterfs:client', 'glusterfs.client', true)
timeout(5) {
println "Waiting for GlusterFS volumes to get mounted.."
- salt.cmdRun(saltMaster, 'I@glusterfs:client', 'while true; do systemctl -a|grep "GlusterFS File System"|grep -v mounted >/dev/null || break; done')
+ salt.cmdRun(pepperEnv, 'I@glusterfs:client', 'while true; do systemctl -a|grep "GlusterFS File System"|grep -v mounted >/dev/null || break; done')
}
- print common.prettyPrint(salt.cmdRun(saltMaster, 'I@glusterfs:client', 'mount|grep fuse.glusterfs || echo "Command failed"'))
+ print common.prettyPrint(salt.cmdRun(pepperEnv, 'I@glusterfs:client', 'mount|grep fuse.glusterfs || echo "Command failed"'))
}
stage("Deploy GlusterFS") {
- salt.enforceState(saltMaster, 'I@haproxy:proxy', 'haproxy,keepalived')
+ salt.enforceState(pepperEnv, 'I@haproxy:proxy', 'haproxy,keepalived')
}
stage("Setup Docker Swarm") {
- salt.enforceState(saltMaster, 'I@docker:host', 'docker.host', true)
- salt.enforceState(saltMaster, 'I@docker:swarm:role:master', 'docker.swarm', true)
- salt.enforceState(saltMaster, 'I@docker:swarm:role:master', 'salt', true)
- salt.runSaltProcessStep(saltMaster, 'I@docker:swarm:role:master', 'mine.flush')
- salt.runSaltProcessStep(saltMaster, 'I@docker:swarm:role:master', 'mine.update')
- salt.enforceState(saltMaster, 'I@docker:swarm', 'docker.swarm', true)
- print common.prettyPrint(salt.cmdRun(saltMaster, 'I@docker:swarm:role:master', 'docker node ls'))
+ salt.enforceState(pepperEnv, 'I@docker:host', 'docker.host', true)
+ salt.enforceState(pepperEnv, 'I@docker:swarm:role:master', 'docker.swarm', true)
+ salt.enforceState(pepperEnv, 'I@docker:swarm:role:master', 'salt', true)
+ salt.runSaltProcessStep(pepperEnv, 'I@docker:swarm:role:master', 'mine.flush')
+ salt.runSaltProcessStep(pepperEnv, 'I@docker:swarm:role:master', 'mine.update')
+ salt.enforceState(pepperEnv, 'I@docker:swarm', 'docker.swarm', true)
+ print common.prettyPrint(salt.cmdRun(pepperEnv, 'I@docker:swarm:role:master', 'docker node ls'))
}
stage("Configure OSS services") {
- salt.enforceState(saltMaster, 'I@devops_portal:config', 'devops_portal.config')
- salt.enforceState(saltMaster, 'I@rundeck:server', 'rundeck.server')
+ salt.enforceState(pepperEnv, 'I@devops_portal:config', 'devops_portal.config')
+ salt.enforceState(pepperEnv, 'I@rundeck:server', 'rundeck.server')
}
stage("Deploy Docker services") {
@@ -171,19 +173,19 @@
// services are deployed
// XXX: for some unknown reason, refresh_pillar must be
// executed here
- salt.runSaltProcessStep(saltMaster, 'I@aptly:publisher', 'saltutil.refresh_pillar', [], null, true)
- salt.enforceState(saltMaster, 'I@aptly:publisher', 'aptly.publisher', true)
+ salt.runSaltProcessStep(pepperEnv, 'I@aptly:publisher', 'saltutil.refresh_pillar', [], null, true)
+ salt.enforceState(pepperEnv, 'I@aptly:publisher', 'aptly.publisher', true)
retry(3) {
sleep(5)
- salt.enforceState(saltMaster, 'I@docker:swarm:role:master', 'docker.client')
+ salt.enforceState(pepperEnv, 'I@docker:swarm:role:master', 'docker.client')
}
// XXX: Workaround to have `/var/lib/jenkins` on all
// nodes where jenkins_slave services are created.
- salt.runSaltProcessStep(saltMaster, 'I@docker:swarm', 'cmd.run', ['mkdir -p /var/lib/jenkins'])
+ salt.runSaltProcessStep(pepperEnv, 'I@docker:swarm', 'cmd.run', ['mkdir -p /var/lib/jenkins'])
}
stage("Configure CI/CD services") {
- salt.syncAll(saltMaster, '*')
+ salt.syncAll(pepperEnv, '*')
// Aptly
timeout(10) {
@@ -192,68 +194,68 @@
// XXX: retry to workaround magical VALUE_TRIMMED
// response from salt master + to give slow cloud some
// more time to settle down
- salt.cmdRun(saltMaster, 'I@aptly:server', 'while true; do curl -sf http://172.16.10.254:8084/api/version >/dev/null && break; done')
+ salt.cmdRun(pepperEnv, 'I@aptly:server', 'while true; do curl -sf http://172.16.10.254:8084/api/version >/dev/null && break; done')
}
}
- salt.enforceState(saltMaster, 'I@aptly:server', 'aptly', true)
+ salt.enforceState(pepperEnv, 'I@aptly:server', 'aptly', true)
// OpenLDAP
timeout(10) {
println "Waiting for OpenLDAP to come up.."
- salt.cmdRun(saltMaster, 'I@openldap:client', 'while true; do curl -sf ldap://172.16.10.254 >/dev/null && break; done')
+ salt.cmdRun(pepperEnv, 'I@openldap:client', 'while true; do curl -sf ldap://172.16.10.254 >/dev/null && break; done')
}
- salt.enforceState(saltMaster, 'I@openldap:client', 'openldap', true)
+ salt.enforceState(pepperEnv, 'I@openldap:client', 'openldap', true)
// Gerrit
timeout(10) {
println "Waiting for Gerrit to come up.."
- salt.cmdRun(saltMaster, 'I@gerrit:client', 'while true; do curl -sf 172.16.10.254:8080 >/dev/null && break; done')
+ salt.cmdRun(pepperEnv, 'I@gerrit:client', 'while true; do curl -sf 172.16.10.254:8080 >/dev/null && break; done')
}
- salt.enforceState(saltMaster, 'I@gerrit:client', 'gerrit', true)
+ salt.enforceState(pepperEnv, 'I@gerrit:client', 'gerrit', true)
// Jenkins
timeout(10) {
println "Waiting for Jenkins to come up.."
- salt.cmdRun(saltMaster, 'I@jenkins:client', 'while true; do curl -sf 172.16.10.254:8081 >/dev/null && break; done')
+ salt.cmdRun(pepperEnv, 'I@jenkins:client', 'while true; do curl -sf 172.16.10.254:8081 >/dev/null && break; done')
}
retry(2) {
// XXX: needs retry as first run installs python-jenkins
// thus make jenkins modules available for second run
- salt.enforceState(saltMaster, 'I@jenkins:client', 'jenkins', true)
+ salt.enforceState(pepperEnv, 'I@jenkins:client', 'jenkins', true)
}
// Postgres client - initialize OSS services databases
timeout(300) {
println "Waiting for postgresql database to come up.."
- salt.cmdRun(saltMaster, 'I@postgresql:client', 'while true; do if docker service logs postgresql_postgresql-db | grep "ready to accept"; then break; else sleep 5; fi; done')
+ salt.cmdRun(pepperEnv, 'I@postgresql:client', 'while true; do if docker service logs postgresql_postgresql-db | grep "ready to accept"; then break; else sleep 5; fi; done')
}
// XXX: first run usually fails on some inserts, but we need to create databases at first
- salt.enforceState(saltMaster, 'I@postgresql:client', 'postgresql.client', true, false)
+ salt.enforceState(pepperEnv, 'I@postgresql:client', 'postgresql.client', true, false)
// Setup postgres database with integration between
// Pushkin notification service and Security Monkey security audit service
timeout(10) {
println "Waiting for Pushkin to come up.."
- salt.cmdRun(saltMaster, 'I@postgresql:client', 'while true; do curl -sf 172.16.10.254:8887/apps >/dev/null && break; done')
+ salt.cmdRun(pepperEnv, 'I@postgresql:client', 'while true; do curl -sf 172.16.10.254:8887/apps >/dev/null && break; done')
}
- salt.enforceState(saltMaster, 'I@postgresql:client', 'postgresql.client', true)
+ salt.enforceState(pepperEnv, 'I@postgresql:client', 'postgresql.client', true)
// Rundeck
timeout(10) {
println "Waiting for Rundeck to come up.."
- salt.cmdRun(saltMaster, 'I@rundeck:client', 'while true; do curl -sf 172.16.10.254:4440 >/dev/null && break; done')
+ salt.cmdRun(pepperEnv, 'I@rundeck:client', 'while true; do curl -sf 172.16.10.254:4440 >/dev/null && break; done')
}
- salt.enforceState(saltMaster, 'I@rundeck:client', 'rundeck.client', true)
+ salt.enforceState(pepperEnv, 'I@rundeck:client', 'rundeck.client', true)
// Elasticsearch
timeout(10) {
println 'Waiting for Elasticsearch to come up..'
- salt.cmdRun(saltMaster, 'I@elasticsearch:client', 'while true; do curl -sf 172.16.10.254:9200 >/dev/null && break; done')
+ salt.cmdRun(pepperEnv, 'I@elasticsearch:client', 'while true; do curl -sf 172.16.10.254:9200 >/dev/null && break; done')
}
retry(3) {
sleep(10)
// XXX: first run sometimes fails on update indexes, so we need to wait
- salt.enforceState(saltMaster, 'I@elasticsearch:client', 'elasticsearch.client', true)
+ salt.enforceState(pepperEnv, 'I@elasticsearch:client', 'elasticsearch.client', true)
}
}
@@ -263,7 +265,7 @@
//
def adminUser
def authorizedKeysFile
- def adminUserCmdOut = salt.cmdRun(saltMaster, 'I@salt:master', "[ ! -d /home/ubuntu ] || echo 'ubuntu user exists'")
+ def adminUserCmdOut = salt.cmdRun(pepperEnv, 'I@salt:master', "[ ! -d /home/ubuntu ] || echo 'ubuntu user exists'")
if (adminUserCmdOut =~ /ubuntu user exists/) {
adminUser = "ubuntu"
authorizedKeysFile = "/home/ubuntu/.ssh/authorized_keys"
@@ -274,7 +276,7 @@
if (sshPubKey) {
println "Deploying provided ssh key at ${authorizedKeysFile}"
- salt.cmdRun(saltMaster, '*', "echo '${sshPubKey}' | tee -a ${authorizedKeysFile}")
+ salt.cmdRun(pepperEnv, '*', "echo '${sshPubKey}' | tee -a ${authorizedKeysFile}")
}
//
@@ -284,14 +286,14 @@
try {
// Run sphinx state to install sphinx-build needed in
// upcoming orchestration
- salt.enforceState(saltMaster, 'I@sphinx:server', 'sphinx')
+ salt.enforceState(pepperEnv, 'I@sphinx:server', 'sphinx')
} catch (Throwable e) {
true
}
retry(3) {
// TODO: fix salt.orchestrateSystem
- // print salt.orchestrateSystem(saltMaster, ['expression': '*', 'type': 'compound'], 'sphinx.orch.generate_doc')
- def out = salt.cmdRun(saltMaster, 'I@salt:master', 'salt-run state.orchestrate sphinx.orch.generate_doc || echo "Command execution failed"')
+ // print salt.orchestrateSystem(pepperEnv, ['expression': '*', 'type': 'compound'], 'sphinx.orch.generate_doc')
+ def out = salt.cmdRun(pepperEnv, 'I@salt:master', 'salt-run state.orchestrate sphinx.orch.generate_doc || echo "Command execution failed"')
print common.prettyPrint(out)
if (out =~ /Command execution failed/) {
throw new Exception("Command execution failed")
@@ -302,9 +304,9 @@
// errors are just ignored here
true
}
- salt.enforceState(saltMaster, 'I@nginx:server', 'nginx')
+ salt.enforceState(pepperEnv, 'I@nginx:server', 'nginx')
- def failedSvc = salt.cmdRun(saltMaster, '*', """systemctl --failed | grep -E 'loaded[ \t]+failed' && echo 'Command execution failed' || true""")
+ def failedSvc = salt.cmdRun(pepperEnv, '*', """systemctl --failed | grep -E 'loaded[ \t]+failed' && echo 'Command execution failed' || true""")
if (failedSvc =~ /Command execution failed/) {
common.errorMsg("Some services are not running. Environment may not be fully functional!")
}