blob: 782a05137d3f5094b6d30ca8e2615abdb14f5fff [file] [log] [blame]
/**
 *
 * Launch heat stack with CI/CD lab infrastructure
 *
 * Expected parameters:
 *   HEAT_TEMPLATE_URL              URL to git repo with Heat templates
 *   HEAT_TEMPLATE_CREDENTIALS      Credentials to the Heat templates repo
 *   HEAT_TEMPLATE_BRANCH           Heat templates repo branch
 *   HEAT_STACK_NAME                Heat stack name
 *   HEAT_STACK_TEMPLATE            Heat stack HOT template
 *   HEAT_STACK_ENVIRONMENT         Heat stack environmental parameters
 *   HEAT_STACK_ZONE                Heat stack availability zone
 *   HEAT_STACK_PUBLIC_NET          Heat stack floating IP pool
 *   HEAT_STACK_DELETE              Delete Heat stack when finished (bool)
 *   HEAT_STACK_CLEANUP_JOB         Name of job for deleting Heat stack
 *   HEAT_STACK_REUSE               Reuse Heat stack (don't create one)
 *
 *   SALT_MASTER_CREDENTIALS        Credentials to the Salt API
 *   SALT_MASTER_PORT               Port of salt-api, defaults to 6969
 *
 *   OPENSTACK_API_URL              OpenStack API address
 *   OPENSTACK_API_CREDENTIALS      Credentials to the OpenStack API
 *   OPENSTACK_API_PROJECT          OpenStack project to connect to
 *   OPENSTACK_API_PROJECT_DOMAIN   Domain of the OpenStack project
 *   OPENSTACK_API_PROJECT_ID       ID of the OpenStack project
 *   OPENSTACK_API_USER_DOMAIN      Domain of the OpenStack user
 *   OPENSTACK_API_CLIENT           Versions of OpenStack python clients
 *   OPENSTACK_API_VERSION          Version of the OpenStack API (2/3)
 *
 *   SSH_PUBLIC_KEY                 Optional public key to deploy on all nodes
 *
 */
28
// Instantiate shared pipeline-library helpers (com.mirantis.mk) used
// throughout this script for logging, git checkout, OpenStack CLI calls,
// Salt API calls and high-level deployment orchestration.
common = new com.mirantis.mk.Common()
git = new com.mirantis.mk.Git()
openstack = new com.mirantis.mk.Openstack()
salt = new com.mirantis.mk.Salt()
orchestrate = new com.mirantis.mk.Orchestrate()
// Per-user cap on concurrently existing stacks of this job type; enforced
// in the 'Connect to OpenStack cloud' stage for non-service users.
_MAX_PERMITTED_STACKS = 2
node {
    try {
        // Connection objects, populated in the stages below
        def openstackCloud
        def saltMaster

        // Value defaults
        def openstackVersion = OPENSTACK_API_CLIENT ? OPENSTACK_API_CLIENT : 'liberty'
        def openstackEnv = "${env.WORKSPACE}/venv"

        // SSH_PUBLIC_KEY is an optional job parameter; fall back to false
        // when the job does not define it
        try {
            sshPubKey = SSH_PUBLIC_KEY
        } catch (MissingPropertyException e) {
            sshPubKey = false
        }

        // Reusing an existing stack requires an explicit stack name
        if (HEAT_STACK_REUSE.toBoolean() == true && HEAT_STACK_NAME == '') {
            error("If you want to reuse existing stack you need to provide it's name")
        }

        if (HEAT_STACK_REUSE.toBoolean() == false) {
            // Don't allow to set custom heat stack name: derive it from the
            // triggering user (or 'jenkins' for automated builds) and build id
            wrap([$class: 'BuildUser']) {
                if (env.BUILD_USER_ID) {
                    HEAT_STACK_NAME = "${env.BUILD_USER_ID}-${JOB_NAME}-${BUILD_NUMBER}"
                } else {
                    HEAT_STACK_NAME = "jenkins-${JOB_NAME}-${BUILD_NUMBER}"
                }
                currentBuild.description = HEAT_STACK_NAME
            }
        }

        //
        // Bootstrap
        //

        stage ('Download Heat templates') {
            git.checkoutGitRepository('template', HEAT_TEMPLATE_URL, HEAT_TEMPLATE_BRANCH, HEAT_TEMPLATE_CREDENTIALS)
        }

        stage('Install OpenStack CLI') {
            openstack.setupOpenstackVirtualenv(openstackEnv, openstackVersion)
        }

        stage('Connect to OpenStack cloud') {
            openstackCloud = openstack.createOpenstackEnv(
                OPENSTACK_API_URL, OPENSTACK_API_CREDENTIALS,
                OPENSTACK_API_PROJECT, OPENSTACK_API_PROJECT_DOMAIN,
                OPENSTACK_API_PROJECT_ID, OPENSTACK_API_USER_DOMAIN,
                OPENSTACK_API_VERSION)
            openstack.getKeystoneToken(openstackCloud, openstackEnv)
            // Per-user stack quota: human users (not the 'jenkins' service
            // account) may only have _MAX_PERMITTED_STACKS stacks of this
            // job type at once
            wrap([$class: 'BuildUser']) {
                if (env.BUILD_USER_ID && !env.BUILD_USER_ID.equals("jenkins") && !HEAT_STACK_REUSE.toBoolean()) {
                    def existingStacks = openstack.getStacksForNameContains(openstackCloud, "${env.BUILD_USER_ID}-${JOB_NAME}", openstackEnv)
                    if (existingStacks.size() >= _MAX_PERMITTED_STACKS) {
                        // Keep existing stacks so the user can clean them up
                        HEAT_STACK_DELETE = "false"
                        throw new Exception("You cannot create new stack, you already have ${_MAX_PERMITTED_STACKS} stacks of this type (${JOB_NAME}). \nStack names: ${existingStacks}")
                    }
                }
            }
        }

        if (HEAT_STACK_REUSE.toBoolean() == false) {
            stage('Launch new Heat stack') {
                envParams = [
                    'cluster_zone': HEAT_STACK_ZONE,
                    'cluster_public_net': HEAT_STACK_PUBLIC_NET
                ]
                openstack.createHeatStack(openstackCloud, HEAT_STACK_NAME, HEAT_STACK_TEMPLATE, envParams, HEAT_STACK_ENVIRONMENT, openstackEnv)
            }
        }

        stage('Connect to Salt master') {
            // SALT_MASTER_PORT is optional; default to 6969 when unset
            def saltMasterPort
            try {
                saltMasterPort = SALT_MASTER_PORT
            } catch (MissingPropertyException e) {
                saltMasterPort = 6969
            }
            saltMasterHost = openstack.getHeatStackOutputParam(openstackCloud, HEAT_STACK_NAME, 'salt_master_ip', openstackEnv)
            currentBuild.description = "${HEAT_STACK_NAME}: ${saltMasterHost}"
            saltMasterUrl = "http://${saltMasterHost}:${saltMasterPort}"
            saltMaster = salt.connection(saltMasterUrl, SALT_MASTER_CREDENTIALS)
        }

        //
        // Install
        //

        stage('Install core infra') {
            // salt.master, reclass
            // refresh_pillar
            // sync_all
            // linux,openssh,salt.minion.ntp
            orchestrate.installFoundationInfra(saltMaster)
            orchestrate.validateFoundationInfra(saltMaster)
        }

        stage("Deploy GlusterFS") {
            salt.enforceState(saltMaster, 'I@glusterfs:server', 'glusterfs.server.service', true)
            retry(2) {
                salt.enforceState(saltMaster, 'ci01*', 'glusterfs.server.setup', true)
            }
            sleep(5)
            salt.enforceState(saltMaster, 'I@glusterfs:client', 'glusterfs.client', true)

            // Poll until no GlusterFS unit is left unmounted (timeout in minutes)
            timeout(5) {
                println "Waiting for GlusterFS volumes to get mounted.."
                salt.cmdRun(saltMaster, 'I@glusterfs:client', 'while true; do systemctl -a|grep "GlusterFS File System"|grep -v mounted >/dev/null || break; done')
            }
            print common.prettyPrint(salt.cmdRun(saltMaster, 'I@glusterfs:client', 'mount|grep fuse.glusterfs || echo "Command failed"'))
        }

        // FIX: this stage was mislabeled "Deploy GlusterFS", duplicating the
        // previous stage's name, while it actually deploys HAProxy/keepalived
        stage("Deploy HAProxy") {
            salt.enforceState(saltMaster, 'I@haproxy:proxy', 'haproxy,keepalived')
        }

        stage("Setup Docker Swarm") {
            salt.enforceState(saltMaster, 'I@docker:host', 'docker.host', true)
            salt.enforceState(saltMaster, 'I@docker:swarm:role:master', 'docker.swarm', true)
            salt.enforceState(saltMaster, 'I@docker:swarm:role:master', 'salt', true)
            // Refresh Salt mine so swarm workers can discover the master
            salt.runSaltProcessStep(saltMaster, 'I@docker:swarm:role:master', 'mine.flush')
            salt.runSaltProcessStep(saltMaster, 'I@docker:swarm:role:master', 'mine.update')
            salt.enforceState(saltMaster, 'I@docker:swarm', 'docker.swarm', true)
            print common.prettyPrint(salt.cmdRun(saltMaster, 'I@docker:swarm:role:master', 'docker node ls'))
        }

        stage("Configure OSS services") {
            salt.enforceState(saltMaster, 'I@devops_portal:config', 'devops_portal.config')
            salt.enforceState(saltMaster, 'I@rundeck:server', 'rundeck.server')
        }

        stage("Deploy Docker services") {
            // We need /etc/aptly-publisher.yaml to be present before
            // services are deployed
            // XXX: for some weird unknown reason, refresh_pillar is
            // required to execute here
            salt.runSaltProcessStep(saltMaster, 'I@aptly:publisher', 'saltutil.refresh_pillar', [], null, true)
            salt.enforceState(saltMaster, 'I@aptly:publisher', 'aptly.publisher', true)
            retry(3) {
                sleep(5)
                salt.enforceState(saltMaster, 'I@docker:swarm:role:master', 'docker.client')
            }
            // XXX: Workaround to have `/var/lib/jenkins` on all
            // nodes where are jenkins_slave services are created.
            salt.runSaltProcessStep(saltMaster, 'I@docker:swarm', 'cmd.run', ['mkdir -p /var/lib/jenkins'])
        }

        stage("Configure CI/CD services") {
            salt.syncAll(saltMaster, '*')

            // Aptly
            timeout(10) {
                println "Waiting for Aptly to come up.."
                retry(2) {
                    // XXX: retry to workaround magical VALUE_TRIMMED
                    // response from salt master + to give slow cloud some
                    // more time to settle down
                    salt.cmdRun(saltMaster, 'I@aptly:server', 'while true; do curl -sf http://172.16.10.254:8084/api/version >/dev/null && break; done')
                }
            }
            salt.enforceState(saltMaster, 'I@aptly:server', 'aptly', true)

            // OpenLDAP
            timeout(10) {
                println "Waiting for OpenLDAP to come up.."
                salt.cmdRun(saltMaster, 'I@openldap:client', 'while true; do curl -sf ldap://172.16.10.254 >/dev/null && break; done')
            }
            salt.enforceState(saltMaster, 'I@openldap:client', 'openldap', true)

            // Gerrit
            timeout(10) {
                println "Waiting for Gerrit to come up.."
                salt.cmdRun(saltMaster, 'I@gerrit:client', 'while true; do curl -sf 172.16.10.254:8080 >/dev/null && break; done')
            }
            salt.enforceState(saltMaster, 'I@gerrit:client', 'gerrit', true)

            // Jenkins
            timeout(10) {
                println "Waiting for Jenkins to come up.."
                salt.cmdRun(saltMaster, 'I@jenkins:client', 'while true; do curl -sf 172.16.10.254:8081 >/dev/null && break; done')
            }
            retry(2) {
                // XXX: needs retry as first run installs python-jenkins
                // thus make jenkins modules available for second run
                salt.enforceState(saltMaster, 'I@jenkins:client', 'jenkins', true)
            }

            // Postgres client - initialize OSS services databases
            // NOTE(review): timeout() defaults to minutes, so this allows up
            // to 300 minutes -- confirm whether 300 seconds was intended
            timeout(300) {
                println "Waiting for postgresql database to come up.."
                salt.cmdRun(saltMaster, 'I@postgresql:client', 'while true; do if docker service logs postgresql_postgresql-db | grep "ready to accept"; then break; else sleep 5; fi; done')
            }
            // XXX: first run usually fails on some inserts, but we need to create databases at first
            salt.enforceState(saltMaster, 'I@postgresql:client', 'postgresql.client', true, false)

            // Setup postgres database with integration between
            // Pushkin notification service and Security Monkey security audit service
            timeout(10) {
                println "Waiting for Pushkin to come up.."
                salt.cmdRun(saltMaster, 'I@postgresql:client', 'while true; do curl -sf 172.16.10.254:8887/apps >/dev/null && break; done')
            }
            salt.enforceState(saltMaster, 'I@postgresql:client', 'postgresql.client', true)

            // Rundeck
            timeout(10) {
                println "Waiting for Rundeck to come up.."
                salt.cmdRun(saltMaster, 'I@rundeck:client', 'while true; do curl -sf 172.16.10.254:4440 >/dev/null && break; done')
            }
            salt.enforceState(saltMaster, 'I@rundeck:client', 'rundeck.client', true)

            // Elasticsearch
            timeout(10) {
                println 'Waiting for Elasticsearch to come up..'
                salt.cmdRun(saltMaster, 'I@elasticsearch:client', 'while true; do curl -sf 172.16.10.254:9200 >/dev/null && break; done')
            }
            retry(3) {
                sleep(10)
                // XXX: first run sometimes fails on update indexes, so we need to wait
                salt.enforceState(saltMaster, 'I@elasticsearch:client', 'elasticsearch.client', true)
            }
        }

        stage("Finalize") {
            //
            // Deploy user's ssh key
            //
            // Prefer the 'ubuntu' account when present on the Salt master,
            // otherwise fall back to root
            def adminUser
            def authorizedKeysFile
            def adminUserCmdOut = salt.cmdRun(saltMaster, 'I@salt:master', "[ ! -d /home/ubuntu ] || echo 'ubuntu user exists'")
            if (adminUserCmdOut =~ /ubuntu user exists/) {
                adminUser = "ubuntu"
                authorizedKeysFile = "/home/ubuntu/.ssh/authorized_keys"
            } else {
                adminUser = "root"
                authorizedKeysFile = "/root/.ssh/authorized_keys"
            }

            if (sshPubKey) {
                println "Deploying provided ssh key at ${authorizedKeysFile}"
                salt.cmdRun(saltMaster, '*', "echo '${sshPubKey}' | tee -a ${authorizedKeysFile}")
            }

            //
            // Generate docs
            //
            try {
                try {
                    // Run sphinx state to install sphinx-build needed in
                    // upcomming orchestrate
                    salt.enforceState(saltMaster, 'I@sphinx:server', 'sphinx')
                } catch (Throwable e) {
                    // best-effort: docs tooling install failure is non-fatal
                    true
                }
                retry(3) {
                    // TODO: fix salt.orchestrateSystem
                    // print salt.orchestrateSystem(saltMaster, ['expression': '*', 'type': 'compound'], 'sphinx.orch.generate_doc')
                    def out = salt.cmdRun(saltMaster, 'I@salt:master', 'salt-run state.orchestrate sphinx.orch.generate_doc || echo "Command execution failed"')
                    print common.prettyPrint(out)
                    if (out =~ /Command execution failed/) {
                        throw new Exception("Command execution failed")
                    }
                }
            } catch (Throwable e) {
                // We don't want sphinx docs to ruin whole build, so possible
                // errors are just ignored here
                true
            }
            salt.enforceState(saltMaster, 'I@nginx:server', 'nginx')

            // Report (but don't fail on) systemd units that failed to start
            def failedSvc = salt.cmdRun(saltMaster, '*', """systemctl --failed | grep -E 'loaded[ \t]+failed' && echo 'Command execution failed' || true""")
            if (failedSvc =~ /Command execution failed/) {
                common.errorMsg("Some services are not running. Environment may not be fully functional!")
            }

            common.successMsg("""
============================================================
Your CI/CD lab has been deployed and you can enjoy it:
Use sshuttle to connect to your private subnet:

    sshuttle -r ${adminUser}@${saltMasterHost} 172.16.10.0/24

And visit services running at 172.16.10.254 (vip address):

    9600    HAProxy statistics
    8080    Gerrit
    8081    Jenkins
    8089    LDAP administration
    4440    Rundeck
    8084    DevOps Portal
    8091    Docker swarm visualizer
    8090    Reclass-generated documentation

If you provided SSH_PUBLIC_KEY, you can use it to login,
otherwise you need to get private key connected to this
heat template.

DON'T FORGET TO TERMINATE YOUR STACK WHEN YOU DON'T NEED IT!
============================================================""")
        }
    } catch (Throwable e) {
        // If there was an error or exception thrown, the build failed
        currentBuild.result = "FAILURE"
        currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
        throw e
    } finally {
        // Cleanup: delegate stack deletion to a separate job so it happens
        // even when this build's node is gone
        if (HEAT_STACK_DELETE.toBoolean() == true) {
            stage('Trigger cleanup job') {
                // NOTE(review): the header documents HEAT_STACK_CLEANUP_JOB,
                // but the job name is hard-coded here -- confirm which is intended
                build(job: 'deploy-stack-cleanup', parameters: [
                    [$class: 'StringParameterValue', name: 'STACK_NAME', value: HEAT_STACK_NAME],
                    [$class: 'StringParameterValue', name: 'OPENSTACK_API_PROJECT', value: OPENSTACK_API_PROJECT],
                ])
            }
        }
    }
}