/**
 *
 * Launch heat stack with CI/CD lab infrastructure
 *
 * Expected parameters:
 *   HEAT_TEMPLATE_URL             URL to git repo with Heat templates
 *   HEAT_TEMPLATE_CREDENTIALS     Credentials to the Heat templates repo
 *   HEAT_TEMPLATE_BRANCH          Heat templates repo branch
 *   HEAT_STACK_NAME               Heat stack name
 *   HEAT_STACK_TEMPLATE           Heat stack HOT template
 *   HEAT_STACK_ENVIRONMENT        Heat stack environment parameters
 *   HEAT_STACK_ZONE               Heat stack availability zone
 *   HEAT_STACK_PUBLIC_NET         Heat stack floating IP pool
 *   HEAT_STACK_DELETE             Delete Heat stack when finished (bool)
 *   HEAT_STACK_CLEANUP_JOB        Name of job for deleting Heat stack
 *   HEAT_STACK_REUSE              Reuse an existing Heat stack (don't create a new one)
 *
 *   SALT_MASTER_CREDENTIALS       Credentials to the Salt API
 *   SALT_MASTER_PORT              Port of salt-api, defaults to 6969
 *
 *   OPENSTACK_API_URL             OpenStack API address
 *   OPENSTACK_API_CREDENTIALS     Credentials to the OpenStack API
 *   OPENSTACK_API_PROJECT         OpenStack project to connect to
 *   OPENSTACK_API_PROJECT_DOMAIN  Domain of the OpenStack project (Keystone v3)
 *   OPENSTACK_API_PROJECT_ID      ID of the OpenStack project
 *   OPENSTACK_API_USER_DOMAIN     Domain of the OpenStack user
 *   OPENSTACK_API_CLIENT          Version of the OpenStack python clients
 *   OPENSTACK_API_VERSION         Version of the OpenStack API (2/3)
 *
 *   SSH_PUBLIC_KEY                SSH public key to deploy on the stack nodes (optional)
 *
 */

common = new com.mirantis.mk.Common()
git = new com.mirantis.mk.Git()
openstack = new com.mirantis.mk.Openstack()
salt = new com.mirantis.mk.Salt()
orchestrate = new com.mirantis.mk.Orchestrate()
// Maximum number of stacks of this job type a single (non-jenkins) user may run at once
_MAX_PERMITTED_STACKS = 2

node {
    try {
        // connection objects
        def openstackCloud
        def saltMaster

        // value defaults
        def openstackVersion = OPENSTACK_API_CLIENT ? OPENSTACK_API_CLIENT : 'liberty'
        def openstackEnv = "${env.WORKSPACE}/venv"

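        // SSH_PUBLIC_KEY is optional; a missing parameter simply means no extra key gets deployed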
        try {
            sshPubKey = SSH_PUBLIC_KEY
        } catch (MissingPropertyException e) {
            sshPubKey = false
        }

        if (HEAT_STACK_REUSE.toBoolean() == true && HEAT_STACK_NAME == '') {
            error("If you want to reuse an existing stack, you need to provide its name")
        }

        if (HEAT_STACK_REUSE.toBoolean() == false) {
            // Don't allow a custom heat stack name; derive it from the build user and job
            wrap([$class: 'BuildUser']) {
                if (env.BUILD_USER_ID) {
                    HEAT_STACK_NAME = "${env.BUILD_USER_ID}-${JOB_NAME}-${BUILD_NUMBER}"
                } else {
                    HEAT_STACK_NAME = "jenkins-${JOB_NAME}-${BUILD_NUMBER}"
                }
                currentBuild.description = HEAT_STACK_NAME
            }
        }

        //
        // Bootstrap
        //

        stage('Download Heat templates') {
            git.checkoutGitRepository('template', HEAT_TEMPLATE_URL, HEAT_TEMPLATE_BRANCH, HEAT_TEMPLATE_CREDENTIALS)
        }

        stage('Install OpenStack CLI') {
            openstack.setupOpenstackVirtualenv(openstackEnv, openstackVersion)
        }

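        // Authenticate to OpenStack and enforce the per-user stack quota before touching Heat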
        stage('Connect to OpenStack cloud') {
            openstackCloud = openstack.createOpenstackEnv(
                OPENSTACK_API_URL, OPENSTACK_API_CREDENTIALS,
                OPENSTACK_API_PROJECT, OPENSTACK_API_PROJECT_DOMAIN,
                OPENSTACK_API_PROJECT_ID, OPENSTACK_API_USER_DOMAIN,
                OPENSTACK_API_VERSION)
            openstack.getKeystoneToken(openstackCloud, openstackEnv)
            wrap([$class: 'BuildUser']) {
                if (env.BUILD_USER_ID && !env.BUILD_USER_ID.equals("jenkins") && !HEAT_STACK_REUSE.toBoolean()) {
                    def existingStacks = openstack.getStacksForNameContains(openstackCloud, "${env.BUILD_USER_ID}-${JOB_NAME}", openstackEnv)
                    if (existingStacks.size() >= _MAX_PERMITTED_STACKS) {
                        HEAT_STACK_DELETE = "false"
                        throw new Exception("You cannot create a new stack, you already have ${_MAX_PERMITTED_STACKS} stacks of this type (${JOB_NAME}). \nStack names: ${existingStacks}")
                    }
                }
            }
        }

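        // Reuse an existing stack when HEAT_STACK_REUSE is set; otherwise create a fresh one from the HOT template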
        if (HEAT_STACK_REUSE.toBoolean() == false) {
            stage('Launch new Heat stack') {
                envParams = [
                    'instance_zone': HEAT_STACK_ZONE,
                    'public_net': HEAT_STACK_PUBLIC_NET
                ]
                openstack.createHeatStack(openstackCloud, HEAT_STACK_NAME, HEAT_STACK_TEMPLATE, envParams, HEAT_STACK_ENVIRONMENT, openstackEnv)
            }
        }

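        // The salt master address is published as the 'salt_master_ip' output of the Heat stack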
        stage('Connect to Salt master') {
            def saltMasterPort
            try {
                saltMasterPort = SALT_MASTER_PORT
            } catch (MissingPropertyException e) {
                saltMasterPort = 6969
            }
            saltMasterHost = openstack.getHeatStackOutputParam(openstackCloud, HEAT_STACK_NAME, 'salt_master_ip', openstackEnv)
            currentBuild.description = "${HEAT_STACK_NAME}: ${saltMasterHost}"
            saltMasterUrl = "http://${saltMasterHost}:${saltMasterPort}"
            saltMaster = salt.connection(saltMasterUrl, SALT_MASTER_CREDENTIALS)
        }

        //
        // Install
        //

        stage('Install core infra') {
            // salt.master, reclass
            // refresh_pillar
            // sync_all
            // linux, openssh, salt.minion, ntp

            orchestrate.installFoundationInfra(saltMaster)
            orchestrate.validateFoundationInfra(saltMaster)
        }

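        // Shared GlusterFS storage is brought up first; the Docker services deployed later rely on it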
        stage("Deploy GlusterFS") {
            salt.enforceState(saltMaster, 'I@glusterfs:server', 'glusterfs.server.service', true)
            retry(2) {
                salt.enforceState(saltMaster, 'ci01*', 'glusterfs.server.setup', true)
            }
            sleep(5)
            salt.enforceState(saltMaster, 'I@glusterfs:client', 'glusterfs.client', true)

            timeout(5) {
                println "Waiting for GlusterFS volumes to get mounted.."
                salt.cmdRun(saltMaster, 'I@glusterfs:client', 'while true; do systemctl -a|grep "GlusterFS File System"|grep -v mounted >/dev/null || break; done')
            }
            print common.prettyPrint(salt.cmdRun(saltMaster, 'I@glusterfs:client', 'mount|grep fuse.glusterfs || echo "Command failed"'))
        }

        stage("Deploy HAProxy") {
            salt.enforceState(saltMaster, 'I@haproxy:proxy', 'haproxy,keepalived')
        }

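        // Build the swarm: the master nodes first, then the remaining swarm members join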
        stage("Setup Docker Swarm") {
            salt.enforceState(saltMaster, 'I@docker:host', 'docker.host', true)
            salt.enforceState(saltMaster, 'I@docker:swarm:role:master', 'docker.swarm', true)
            salt.enforceState(saltMaster, 'I@docker:swarm:role:master', 'salt', true)
            // Refresh the Salt mine so swarm join data published by the master is visible to the other nodes
            salt.runSaltProcessStep(saltMaster, 'I@docker:swarm:role:master', 'mine.flush')
            salt.runSaltProcessStep(saltMaster, 'I@docker:swarm:role:master', 'mine.update')
            salt.enforceState(saltMaster, 'I@docker:swarm', 'docker.swarm', true)
            print common.prettyPrint(salt.cmdRun(saltMaster, 'I@docker:swarm:role:master', 'docker node ls'))
        }

        stage("Configure OSS services") {
            salt.enforceState(saltMaster, 'I@devops_portal:config', 'devops_portal.config')
            salt.enforceState(saltMaster, 'I@rundeck:server', 'rundeck.server')
        }

        stage("Deploy Docker services") {
            // We need /etc/aptly-publisher.yaml to be present before
            // services are deployed
            // XXX: for some unknown reason, refresh_pillar has to be
            // executed here
            salt.runSaltProcessStep(saltMaster, 'I@aptly:publisher', 'saltutil.refresh_pillar', [], null, true)
            salt.enforceState(saltMaster, 'I@aptly:publisher', 'aptly.publisher', true)
            retry(3) {
                sleep(5)
                salt.enforceState(saltMaster, 'I@docker:swarm:role:master', 'docker.client')
            }
            // XXX: Workaround to have `/var/lib/jenkins` present on all
            // nodes where jenkins_slave services are created.
            salt.runSaltProcessStep(saltMaster, 'I@docker:swarm', 'cmd.run', ['mkdir -p /var/lib/jenkins'])
        }

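        // Each service below is polled with a simple curl loop until it responds, and only then is its client state applied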
        stage("Configure CI/CD services") {
            salt.syncAll(saltMaster, '*')

            // Aptly
            timeout(10) {
                println "Waiting for Aptly to come up.."
                retry(2) {
                    // XXX: retry to work around the spurious VALUE_TRIMMED
                    // response from the salt master and to give a slow cloud
                    // some more time to settle down
                    salt.cmdRun(saltMaster, 'I@aptly:server', 'while true; do curl -sf http://172.16.10.254:8084/api/version >/dev/null && break; done')
                }
            }
            salt.enforceState(saltMaster, 'I@aptly:server', 'aptly', true)

            // OpenLDAP
            timeout(10) {
                println "Waiting for OpenLDAP to come up.."
                salt.cmdRun(saltMaster, 'I@openldap:client', 'while true; do curl -sf ldap://172.16.10.254 >/dev/null && break; done')
            }
            salt.enforceState(saltMaster, 'I@openldap:client', 'openldap', true)

            // Gerrit
            timeout(10) {
                println "Waiting for Gerrit to come up.."
                salt.cmdRun(saltMaster, 'I@gerrit:client', 'while true; do curl -sf 172.16.10.254:8080 >/dev/null && break; done')
            }
            salt.enforceState(saltMaster, 'I@gerrit:client', 'gerrit', true)

            // Jenkins
            timeout(10) {
                println "Waiting for Jenkins to come up.."
                salt.cmdRun(saltMaster, 'I@jenkins:client', 'while true; do curl -sf 172.16.10.254:8081 >/dev/null && break; done')
            }
            retry(2) {
                // XXX: needs a retry, because the first run installs python-jenkins
                // and thus makes the jenkins modules available for the second run
                salt.enforceState(saltMaster, 'I@jenkins:client', 'jenkins', true)
            }

            // Postgres client - initialize OSS services databases
            timeout(300) {
                println "Waiting for the postgresql database to come up.."
                salt.cmdRun(saltMaster, 'I@postgresql:client', 'while true; do if docker service logs postgresql_db | grep "ready to accept"; then break; else sleep 5; fi; done')
            }
            salt.enforceState(saltMaster, 'I@postgresql:client', 'postgresql.client', true, false)

            // Set up the postgres database integration between the Pushkin
            // notification service and the Security Monkey security audit service
            timeout(10) {
                println "Waiting for Pushkin to come up.."
                salt.cmdRun(saltMaster, 'I@postgresql:client', 'while true; do curl -sf 172.16.10.254:8887/apps >/dev/null && break; done')
            }
            salt.enforceState(saltMaster, 'I@postgresql:client', 'postgresql.client', true)

            // Rundeck
            timeout(10) {
                println "Waiting for Rundeck to come up.."
                salt.cmdRun(saltMaster, 'I@rundeck:client', 'while true; do curl -sf 172.16.10.254:4440 >/dev/null && break; done')
            }
            salt.enforceState(saltMaster, 'I@rundeck:client', 'rundeck.client', true)

            // Elasticsearch
            timeout(10) {
                println 'Waiting for Elasticsearch to come up..'
                salt.cmdRun(saltMaster, 'I@elasticsearch:client', 'while true; do curl -sf 172.16.10.254:9200 >/dev/null && break; done')
            }
            salt.enforceState(saltMaster, 'I@elasticsearch:client', 'elasticsearch.client', true)
        }

        stage("Finalize") {
            //
            // Deploy the user's ssh key
            //
            def adminUser
            def authorizedKeysFile
            def adminUserCmdOut = salt.cmdRun(saltMaster, 'I@salt:master', "[ ! -d /home/ubuntu ] || echo 'ubuntu user exists'")
            if (adminUserCmdOut =~ /ubuntu user exists/) {
                adminUser = "ubuntu"
                authorizedKeysFile = "/home/ubuntu/.ssh/authorized_keys"
            } else {
                adminUser = "root"
                authorizedKeysFile = "/root/.ssh/authorized_keys"
            }

            if (sshPubKey) {
                println "Deploying the provided ssh key to ${authorizedKeysFile}"
                salt.cmdRun(saltMaster, '*', "echo '${sshPubKey}' | tee -a ${authorizedKeysFile}")
            }

            //
            // Generate docs
            //
            try {
                try {
                    // Run the sphinx state to install sphinx-build, which is
                    // needed by the upcoming orchestration run
                    salt.enforceState(saltMaster, 'I@sphinx:server', 'sphinx')
                } catch (Throwable e) {
                    true
                }
                retry(3) {
                    // TODO: fix salt.orchestrateSystem
                    // print salt.orchestrateSystem(saltMaster, ['expression': '*', 'type': 'compound'], 'sphinx.orch.generate_doc')
                    def out = salt.cmdRun(saltMaster, 'I@salt:master', 'salt-run state.orchestrate sphinx.orch.generate_doc || echo "Command execution failed"')
                    print common.prettyPrint(out)
                    if (out =~ /Command execution failed/) {
                        throw new Exception("Command execution failed")
                    }
                }
            } catch (Throwable e) {
                // We don't want the sphinx docs to ruin the whole build, so
                // possible errors are just ignored here
                true
            }
            salt.enforceState(saltMaster, 'I@nginx:server', 'nginx')

            def failedSvc = salt.cmdRun(saltMaster, '*', """systemctl --failed | grep -E 'loaded[ \t]+failed' && echo 'Command execution failed' || true""")
            print common.prettyPrint(failedSvc)
            if (failedSvc =~ /Command execution failed/) {
                common.errorMsg("Some services are not running. Environment may not be fully functional!")
            }

            common.successMsg("""
============================================================
Your CI/CD lab has been deployed and you can enjoy it:
Use sshuttle to connect to your private subnet:

    sshuttle -r ${adminUser}@${saltMasterHost} 172.16.10.0/24

And visit services running at 172.16.10.254 (vip address):

    9600    HAProxy statistics
    8080    Gerrit
    8081    Jenkins
    8089    LDAP administration
    4440    Rundeck
    8084    DevOps Portal
    8091    Docker swarm visualizer
    8090    Reclass-generated documentation

If you provided SSH_PUBLIC_KEY, you can use it to log in;
otherwise you need the private key associated with this
heat template.

DON'T FORGET TO TERMINATE YOUR STACK WHEN YOU DON'T NEED IT!
============================================================""")
        }
    } catch (Throwable e) {
        // If there was an error or exception thrown, the build failed
        currentBuild.result = "FAILURE"
        throw e
    } finally {
        // Cleanup
        if (HEAT_STACK_DELETE.toBoolean() == true) {
            stage('Trigger cleanup job') {
                build(job: HEAT_STACK_CLEANUP_JOB, parameters: [
                    [$class: 'StringParameterValue', name: 'STACK_NAME', value: HEAT_STACK_NAME],
                    [$class: 'StringParameterValue', name: 'OPENSTACK_API_PROJECT', value: OPENSTACK_API_PROJECT],
                ])
            }
        }
    }
}