blob: da3e1779b38059831882c1cc52f2d4e04c685254 [file] [log] [blame]
/**
 *
 * Launch heat stack with CI/CD lab infrastructure
 *
 * Expected parameters:
 *   HEAT_TEMPLATE_URL          URL to git repo with Heat templates
 *   HEAT_TEMPLATE_CREDENTIALS  Credentials to the Heat templates repo
 *   HEAT_TEMPLATE_BRANCH       Heat templates repo branch
 *   HEAT_STACK_NAME            Heat stack name
 *   HEAT_STACK_TEMPLATE        Heat stack HOT template
 *   HEAT_STACK_ENVIRONMENT     Heat stack environment parameters
 *   HEAT_STACK_ZONE            Heat stack availability zone
 *   HEAT_STACK_PUBLIC_NET      Heat stack floating IP pool
 *   HEAT_STACK_DELETE          Delete Heat stack when finished (bool)
 *   HEAT_STACK_CLEANUP_JOB     Name of job for deleting Heat stack
 *   HEAT_STACK_REUSE           Reuse Heat stack (don't create one)
 *
 *   SALT_MASTER_CREDENTIALS    Credentials to the Salt API
 *   SALT_MASTER_PORT           Port of salt-api, defaults to 6969
 *
 *   OPENSTACK_API_URL            OpenStack API address
 *   OPENSTACK_API_CREDENTIALS    Credentials to the OpenStack API
 *   OPENSTACK_API_PROJECT        OpenStack project to connect to
 *   OPENSTACK_API_PROJECT_DOMAIN Domain of the OpenStack project
 *   OPENSTACK_API_PROJECT_ID     ID of the OpenStack project
 *   OPENSTACK_API_USER_DOMAIN    Domain of the OpenStack user
 *   OPENSTACK_API_CLIENT         Versions of OpenStack python clients
 *   OPENSTACK_API_VERSION        Version of the OpenStack API (2/3)
 *
 *   SSH_PUBLIC_KEY             Optional SSH public key to deploy on all nodes
 *
 */
28
// Shared pipeline-library helpers (com.mirantis.mk Jenkins shared library).
// Assigned without 'def' so they are visible throughout the script binding.
common = new com.mirantis.mk.Common()
git = new com.mirantis.mk.Git()
openstack = new com.mirantis.mk.Openstack()
salt = new com.mirantis.mk.Salt()
orchestrate = new com.mirantis.mk.Orchestrate()
def python = new com.mirantis.mk.Python()

// Name of the Pepper (salt-api client) virtualenv created by
// python.setupPepperVirtualenv and passed to every salt.* call below.
def pepperEnv = "pepperEnv"

// Maximum number of concurrent stacks of this job type a single
// (non-jenkins) user is allowed to keep (enforced before stack creation).
_MAX_PERMITTED_STACKS = 2
timeout(time: 12, unit: 'HOURS') {
    node {
        try {
            // connection objects
            def openstackCloud

            // value defaults
            def openstackVersion = OPENSTACK_API_CLIENT ? OPENSTACK_API_CLIENT : 'liberty'
            def openstackEnv = "${env.WORKSPACE}/venv"

            // SSH_PUBLIC_KEY is an optional job parameter; reading a missing
            // parameter throws MissingPropertyException, which we treat as
            // "no key provided".
            try {
                sshPubKey = SSH_PUBLIC_KEY
            } catch (MissingPropertyException e) {
                sshPubKey = false
            }

            if (HEAT_STACK_REUSE.toBoolean() == true && HEAT_STACK_NAME == '') {
                error("If you want to reuse existing stack you need to provide it's name")
            }

            if (HEAT_STACK_REUSE.toBoolean() == false) {
                // Don't allow to set custom heat stack name
                wrap([$class: 'BuildUser']) {
                    if (env.BUILD_USER_ID) {
                        HEAT_STACK_NAME = "${env.BUILD_USER_ID}-${JOB_NAME}-${BUILD_NUMBER}"
                    } else {
                        HEAT_STACK_NAME = "jenkins-${JOB_NAME}-${BUILD_NUMBER}"
                    }
                    currentBuild.description = HEAT_STACK_NAME
                }
            }

            //
            // Bootstrap
            //

            stage ('Download Heat templates') {
                git.checkoutGitRepository('template', HEAT_TEMPLATE_URL, HEAT_TEMPLATE_BRANCH, HEAT_TEMPLATE_CREDENTIALS)
            }

            stage('Install OpenStack CLI') {
                openstack.setupOpenstackVirtualenv(openstackEnv, openstackVersion)
            }

            stage('Connect to OpenStack cloud') {
                openstackCloud = openstack.createOpenstackEnv(openstackEnv,
                    OPENSTACK_API_URL, OPENSTACK_API_CREDENTIALS,
                    OPENSTACK_API_PROJECT, OPENSTACK_API_PROJECT_DOMAIN,
                    OPENSTACK_API_PROJECT_ID, OPENSTACK_API_USER_DOMAIN,
                    OPENSTACK_API_VERSION)
                openstack.getKeystoneToken(openstackCloud, openstackEnv)
                // Quota guard: a real (non-jenkins) user creating a fresh stack
                // may hold at most _MAX_PERMITTED_STACKS stacks of this job type.
                // HEAT_STACK_DELETE is forced to "false" before raising so the
                // finally-block cleanup does not delete an unrelated stack.
                wrap([$class: 'BuildUser']) {
                    if (env.BUILD_USER_ID && !env.BUILD_USER_ID.equals("jenkins") && !HEAT_STACK_REUSE.toBoolean()) {
                        def existingStacks = openstack.getStacksForNameContains(openstackCloud, "${env.BUILD_USER_ID}-${JOB_NAME}", openstackEnv)
                        if (existingStacks.size() >= _MAX_PERMITTED_STACKS) {
                            HEAT_STACK_DELETE = "false"
                            throw new Exception("You cannot create new stack, you already have ${_MAX_PERMITTED_STACKS} stacks of this type (${JOB_NAME}). \nStack names: ${existingStacks}")
                        }
                    }
                }
            }

            if (HEAT_STACK_REUSE.toBoolean() == false) {
                stage('Launch new Heat stack') {
                    envParams = [
                        'cluster_zone': HEAT_STACK_ZONE,
                        'cluster_public_net': HEAT_STACK_PUBLIC_NET
                    ]
                    openstack.createHeatStack(openstackCloud, HEAT_STACK_NAME, HEAT_STACK_TEMPLATE, envParams, HEAT_STACK_ENVIRONMENT, openstackEnv)
                }
            }

            stage('Connect to Salt master') {
                // SALT_MASTER_PORT is optional; fall back to 6969 when the
                // parameter is not defined on the job.
                def saltMasterPort
                try {
                    saltMasterPort = SALT_MASTER_PORT
                } catch (MissingPropertyException e) {
                    saltMasterPort = 6969
                }
                saltMasterHost = openstack.getHeatStackOutputParam(openstackCloud, HEAT_STACK_NAME, 'salt_master_ip', openstackEnv)
                currentBuild.description = "${HEAT_STACK_NAME}: ${saltMasterHost}"
                saltMasterUrl = "http://${saltMasterHost}:${saltMasterPort}"
                python.setupPepperVirtualenv(pepperEnv, saltMasterUrl, SALT_MASTER_CREDENTIALS)
            }

            //
            // Install
            //

            stage('Install core infra') {
                // salt.master, reclass
                // refresh_pillar
                // sync_all
                // linux,openssh,salt.minion.ntp
                orchestrate.installFoundationInfra(pepperEnv)
                orchestrate.validateFoundationInfra(pepperEnv)
            }

            stage("Deploy GlusterFS") {
                salt.enforceState(pepperEnv, 'I@glusterfs:server', 'glusterfs.server.service', true)
                retry(2) {
                    salt.enforceState(pepperEnv, 'ci01*', 'glusterfs.server.setup', true)
                }
                sleep(5)
                salt.enforceState(pepperEnv, 'I@glusterfs:client', 'glusterfs.client', true)

                timeout(5) {
                    println "Waiting for GlusterFS volumes to get mounted.."
                    salt.cmdRun(pepperEnv, 'I@glusterfs:client', 'while true; do systemctl -a|grep "GlusterFS File System"|grep -v mounted >/dev/null || break; done')
                }
                print common.prettyPrint(salt.cmdRun(pepperEnv, 'I@glusterfs:client', 'mount|grep fuse.glusterfs || echo "Command failed"'))
            }

            // Stage renamed: it was previously also labelled "Deploy GlusterFS"
            // (a copy-paste duplicate of the stage above) although it actually
            // deploys HAProxy and keepalived.
            stage("Deploy HAProxy") {
                salt.enforceState(pepperEnv, 'I@haproxy:proxy', 'haproxy,keepalived')
            }

            stage("Setup Docker Swarm") {
                salt.enforceState(pepperEnv, 'I@docker:host', 'docker.host', true)
                salt.enforceState(pepperEnv, 'I@docker:swarm:role:master', 'docker.swarm', true)
                salt.enforceState(pepperEnv, 'I@docker:swarm:role:master', 'salt', true)
                // Refresh the Salt mine so swarm workers can discover the master
                salt.runSaltProcessStep(pepperEnv, 'I@docker:swarm:role:master', 'mine.flush')
                salt.runSaltProcessStep(pepperEnv, 'I@docker:swarm:role:master', 'mine.update')
                salt.enforceState(pepperEnv, 'I@docker:swarm', 'docker.swarm', true)
                print common.prettyPrint(salt.cmdRun(pepperEnv, 'I@docker:swarm:role:master', 'docker node ls'))
            }

            stage("Configure OSS services") {
                salt.enforceState(pepperEnv, 'I@devops_portal:config', 'devops_portal.config')
                salt.enforceState(pepperEnv, 'I@rundeck:server', 'rundeck.server')
            }

            stage("Deploy Docker services") {
                // We need /etc/aptly-publisher.yaml to be present before
                // services are deployed
                // XXX: for some weird unknown reason, refresh_pillar is
                // required to execute here
                salt.runSaltProcessStep(pepperEnv, 'I@aptly:publisher', 'saltutil.refresh_pillar', [], null, true)
                salt.enforceState(pepperEnv, 'I@aptly:publisher', 'aptly.publisher', true)
                retry(3) {
                    sleep(5)
                    salt.enforceState(pepperEnv, 'I@docker:swarm:role:master', 'docker.client')
                }
                // XXX: Workaround to have `/var/lib/jenkins` on all
                // nodes where are jenkins_slave services are created.
                salt.cmdRun(pepperEnv, 'I@docker:swarm', "mkdir -p /var/lib/jenkins")
            }

            stage("Configure CI/CD services") {
                salt.syncAll(pepperEnv, '*')

                // Aptly
                timeout(10) {
                    println "Waiting for Aptly to come up.."
                    retry(2) {
                        // XXX: retry to workaround magical VALUE_TRIMMED
                        // response from salt master + to give slow cloud some
                        // more time to settle down
                        salt.cmdRun(pepperEnv, 'I@aptly:server', 'while true; do curl -sf http://172.16.10.254:8084/api/version >/dev/null && break; done')
                    }
                }
                salt.enforceState(pepperEnv, 'I@aptly:server', 'aptly', true)

                // OpenLDAP
                timeout(10) {
                    println "Waiting for OpenLDAP to come up.."
                    salt.cmdRun(pepperEnv, 'I@openldap:client', 'while true; do curl -sf ldap://172.16.10.254 >/dev/null && break; done')
                }
                salt.enforceState(pepperEnv, 'I@openldap:client', 'openldap', true)

                // Gerrit
                timeout(10) {
                    println "Waiting for Gerrit to come up.."
                    salt.cmdRun(pepperEnv, 'I@gerrit:client', 'while true; do curl -sf 172.16.10.254:8080 >/dev/null && break; done')
                }
                salt.enforceState(pepperEnv, 'I@gerrit:client', 'gerrit', true)

                // Jenkins
                timeout(10) {
                    println "Waiting for Jenkins to come up.."
                    salt.cmdRun(pepperEnv, 'I@jenkins:client', 'while true; do curl -sf 172.16.10.254:8081 >/dev/null && break; done')
                }
                retry(2) {
                    // XXX: needs retry as first run installs python-jenkins
                    // thus make jenkins modules available for second run
                    salt.enforceState(pepperEnv, 'I@jenkins:client', 'jenkins', true)
                }

                // Postgres client - initialize OSS services databases
                timeout(300) {
                    println "Waiting for postgresql database to come up.."
                    salt.cmdRun(pepperEnv, 'I@postgresql:client', 'while true; do if docker service logs postgresql_postgresql-db | grep "ready to accept"; then break; else sleep 5; fi; done')
                }
                // XXX: first run usually fails on some inserts, but we need to create databases at first
                salt.enforceState(pepperEnv, 'I@postgresql:client', 'postgresql.client', true, false)

                // Setup postgres database with integration between
                // Pushkin notification service and Security Monkey security audit service
                timeout(10) {
                    println "Waiting for Pushkin to come up.."
                    salt.cmdRun(pepperEnv, 'I@postgresql:client', 'while true; do curl -sf 172.16.10.254:8887/apps >/dev/null && break; done')
                }
                salt.enforceState(pepperEnv, 'I@postgresql:client', 'postgresql.client', true)

                // Rundeck
                timeout(10) {
                    println "Waiting for Rundeck to come up.."
                    salt.cmdRun(pepperEnv, 'I@rundeck:client', 'while true; do curl -sf 172.16.10.254:4440 >/dev/null && break; done')
                }
                salt.enforceState(pepperEnv, 'I@rundeck:client', 'rundeck.client', true)

                // Elasticsearch
                timeout(10) {
                    println 'Waiting for Elasticsearch to come up..'
                    salt.cmdRun(pepperEnv, 'I@elasticsearch:client', 'while true; do curl -sf 172.16.10.254:9200 >/dev/null && break; done')
                }
                retry(3) {
                    sleep(10)
                    // XXX: first run sometimes fails on update indexes, so we need to wait
                    salt.enforceState(pepperEnv, 'I@elasticsearch:client', 'elasticsearch.client', true)
                }
            }

            stage("Finalize") {
                //
                // Deploy user's ssh key
                //
                def adminUser
                def authorizedKeysFile
                def adminUserCmdOut = salt.cmdRun(pepperEnv, 'I@salt:master', "[ ! -d /home/ubuntu ] || echo 'ubuntu user exists'")
                if (adminUserCmdOut =~ /ubuntu user exists/) {
                    adminUser = "ubuntu"
                    authorizedKeysFile = "/home/ubuntu/.ssh/authorized_keys"
                } else {
                    adminUser = "root"
                    authorizedKeysFile = "/root/.ssh/authorized_keys"
                }

                if (sshPubKey) {
                    println "Deploying provided ssh key at ${authorizedKeysFile}"
                    salt.cmdRun(pepperEnv, '*', "echo '${sshPubKey}' | tee -a ${authorizedKeysFile}")
                }

                //
                // Generate docs
                //
                try {
                    try {
                        // Run sphinx state to install sphinx-build needed in
                        // upcoming orchestrate
                        salt.enforceState(pepperEnv, 'I@sphinx:server', 'sphinx')
                    } catch (Throwable e) {
                        true
                    }
                    retry(3) {
                        // TODO: fix salt.orchestrateSystem
                        // print salt.orchestrateSystem(pepperEnv, ['expression': '*', 'type': 'compound'], 'sphinx.orch.generate_doc')
                        def out = salt.cmdRun(pepperEnv, 'I@salt:master', 'salt-run state.orchestrate sphinx.orch.generate_doc || echo "Command execution failed"')
                        print common.prettyPrint(out)
                        if (out =~ /Command execution failed/) {
                            throw new Exception("Command execution failed")
                        }
                    }
                } catch (Throwable e) {
                    // We don't want sphinx docs to ruin whole build, so possible
                    // errors are just ignored here
                    true
                }
                salt.enforceState(pepperEnv, 'I@nginx:server', 'nginx')

                // Report (but do not fail on) any systemd units that ended up
                // in "failed" state anywhere in the lab.
                def failedSvc = salt.cmdRun(pepperEnv, '*', """systemctl --failed | grep -E 'loaded[ \t]+failed' && echo 'Command execution failed' || true""")
                if (failedSvc =~ /Command execution failed/) {
                    common.errorMsg("Some services are not running. Environment may not be fully functional!")
                }

                common.successMsg("""
============================================================
Your CI/CD lab has been deployed and you can enjoy it:
Use sshuttle to connect to your private subnet:

    sshuttle -r ${adminUser}@${saltMasterHost} 172.16.10.0/24

And visit services running at 172.16.10.254 (vip address):

    9600    HAProxy statistics
    8080    Gerrit
    8081    Jenkins
    8089    LDAP administration
    4440    Rundeck
    8084    DevOps Portal
    8091    Docker swarm visualizer
    8090    Reclass-generated documentation

If you provided SSH_PUBLIC_KEY, you can use it to login,
otherwise you need to get private key connected to this
heat template.

DON'T FORGET TO TERMINATE YOUR STACK WHEN YOU DON'T NEED IT!
============================================================""")
            }
        } catch (Throwable e) {
            // If there was an error or exception thrown, the build failed
            currentBuild.result = "FAILURE"
            currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
            throw e
        } finally {
            // Cleanup
            if (HEAT_STACK_DELETE.toBoolean() == true) {
                stage('Trigger cleanup job') {
                    // NOTE(review): the HEAT_STACK_CLEANUP_JOB parameter is
                    // documented in the file header but the job name is
                    // hard-coded here — confirm which is intended.
                    build(job: 'deploy-stack-cleanup', parameters: [
                        [$class: 'StringParameterValue', name: 'STACK_NAME', value: HEAT_STACK_NAME],
                        [$class: 'StringParameterValue', name: 'OPENSTACK_API_PROJECT', value: OPENSTACK_API_PROJECT],
                    ])
                }
            }
        }
    }
}