blob: f4688b76dcd353a3ea995f9020b0de5aef636fe8 [file] [log] [blame]
Hanna Arhipova71ecc272019-08-20 14:54:22 +03001import pytest
Hanna Arhipovaeb3a2112019-09-13 18:45:21 +03002import sys
3import os
Hanna Arhipova71ecc272019-08-20 14:54:22 +03004
5from tcp_tests import logger
6from tcp_tests import settings
Hanna Arhipova71ecc272019-08-20 14:54:22 +03007
Hanna Arhipovaeb3a2112019-09-13 18:45:21 +03008sys.path.append(os.getcwd())
9try:
10 from tcp_tests.fixtures import config_fixtures
11 from tcp_tests.managers import underlay_ssh_manager
12 from tcp_tests.managers import saltmanager as salt_manager
13except ImportError:
14 print("ImportError: Run the application from the tcp-qa directory or "
15 "set the PYTHONPATH environment variable to directory which contains"
16 " ./tcp_tests")
17 sys.exit(1)
Hanna Arhipova71ecc272019-08-20 14:54:22 +030018LOG = logger.logger
19
20
def has_only_similar(values_by_nodes):
    """Check that every node reported the same value.

    :param values_by_nodes: dict, mapping of node name to the value it
        reported
    :return: bool, True if all values in the dict are equal to each other
        (an empty dict is considered similar and yields True)
    """
    iterator = iter(values_by_nodes.values())
    try:
        reference = next(iterator)
    except StopIteration:
        # Nothing to compare: vacuously similar, matching all() on empty.
        return True
    return all(item == reference for item in iterator)
28
29
def get_control_plane_targets():
    """Collect salt targets of control-plane nodes for the update tests.

    Asks the Salt master for the minions matching keystone servers and
    nginx proxy nodes, then appends static glob targets for services
    whose presence is not auto-detected yet (see TODOs below).

    Called at collection time (from a ``parametrize`` decorator), so a
    failure to reach Salt must not abort the run — it is logged and an
    incomplete target list is returned instead.

    :return: list of strings, salt minion ids / glob patterns
    """
    config = config_fixtures.config()
    underlay = underlay_ssh_manager.UnderlaySSHManager(config)
    saltmanager = salt_manager.SaltManager(config, underlay)
    targets = list()
    try:
        targets += saltmanager.run_state(
            "I@keystone:server", 'test.ping')[0]['return'][0].keys()
        targets += saltmanager.run_state(
            "I@nginx:server and not I@salt:master",
            "test.ping")[0]['return'][0].keys()
    except Exception as err:
        # Catch Exception, not BaseException: the original broad clause
        # would also swallow KeyboardInterrupt/SystemExit, making the
        # test run impossible to interrupt during collection.
        LOG.warning("Can't retrieve data from Salt. "
                    "Maybe cluster is not deployed completely. "
                    "Err: {}".format(err))

    # TODO: add check for Manila existence
    # # Commented to avoid fails during OpenStack updates.
    # # Anyway we don't have deployments with Manila yet
    # targets.append('share*')
    # TODO: add check for Tenant Telemetry existence
    targets.append('mdb*')
    # TODO: add check for Barbican existence
    targets.append('kmn*')
    return targets
55
56
@pytest.fixture
def switch_to_proposed_pipelines(reclass_actions, salt_actions):
    """Point DriveTrain pipelines to the proposed release branch.

    Writes the jenkins_pipelines_branch parameter into the cluster
    model and re-applies the jenkins.client state so Jenkins picks up
    the new branch.
    """
    branch_key = "parameters._param.jenkins_pipelines_branch"
    proposed_branch = "release/proposed/2019.2.0"
    infra_init_yml = "cluster/*/infra/init.yml"

    reclass_actions.add_key(branch_key, proposed_branch, infra_init_yml)
    salt_actions.enforce_state("I@jenkins:client", "jenkins.client")
65
66
class TestUpdateMcpCluster(object):
    """
    Following the steps in
    https://docs.mirantis.com/mcp/master/mcp-operations-guide/update-upgrade/minor-update.html#minor-update

    Each test drives one stage of the MCP minor update: it edits the
    Reclass model, applies the relevant Salt states and then starts the
    corresponding DriveTrain (Jenkins on cid) pipeline, asserting the
    build finished with 'SUCCESS'.
    """

    @pytest.mark.grab_versions
    @pytest.mark.parametrize("_", [settings.ENV_NAME])
    @pytest.mark.run_mcp_update
    def test_update_drivetrain(self, salt_actions, drivetrain_actions,
                               show_step, _, switch_to_proposed_pipelines):
        """Updating DriveTrain component to release/proposed/2019.2.0 version

        Scenario:
            1. Add workaround for PROD-32751
            2. Run job git-mirror-downstream-mk-pipelines
            3. Run job git-mirror-downstream-pipeline-library
            4. If jobs are passed then start 'Deploy - upgrade MCP Drivetrain'

        Duration: ~70 min
        """
        salt = salt_actions
        dt = drivetrain_actions

        # #################### Add workaround for PROD-32751 #################
        show_step(1)

        # FIXME: workaround for PROD-32751
        # Commit any pending model changes so the upgrade pipeline does
        # not fail on a dirty /srv/salt/reclass working tree.
        salt.cmd_run("cfg01*", "cd /srv/salt/reclass; git add -u && \
            git commit --allow-empty -m 'Cluster model update'")

        # ################### Downstream mk-pipelines #########################
        show_step(2)
        job_name = 'git-mirror-downstream-mk-pipelines'
        job_parameters = {
            'BRANCHES': 'release/proposed/2019.2.0'
        }
        update_pipelines = dt.start_job_on_cid_jenkins(
            job_name=job_name,
            job_parameters=job_parameters)

        assert update_pipelines == 'SUCCESS'

        # ################### Downstream pipeline-library ####################
        show_step(3)
        job_name = 'git-mirror-downstream-pipeline-library'
        job_parameters = {
            'BRANCHES': 'release/proposed/2019.2.0'
        }
        update_pipeline_library = dt.start_job_on_cid_jenkins(
            job_name=job_name,
            job_parameters=job_parameters)

        assert update_pipeline_library == 'SUCCESS'

        # ################### Start 'Deploy - upgrade MCP Drivetrain' job #####
        show_step(4)

        job_name = 'upgrade-mcp-release'
        job_parameters = {
            'GIT_REFSPEC': 'release/proposed/2019.2.0',
            'MK_PIPELINES_REFSPEC': 'release/proposed/2019.2.0',
            'TARGET_MCP_VERSION': '2019.2.0'
        }
        update_drivetrain = dt.start_job_on_cid_jenkins(
            job_name=job_name,
            job_parameters=job_parameters,
            build_timeout=90*60)

        assert update_drivetrain == 'SUCCESS'

    @pytest.mark.grab_versions
    @pytest.mark.parametrize("_", [settings.ENV_NAME])
    @pytest.mark.run_mcp_update
    def test_update_glusterfs(self, salt_actions, reclass_actions,
                              drivetrain_actions, show_step, _):
        """ Upgrade GlusterFS
        Scenario:
        1. In infra/init.yml in Reclass, add the glusterfs_version parameter
        2. Start linux.system.repo state
        3. Start "update-glusterfs" job
        4. Check version for GlusterFS servers
        5. Check version for GlusterFS clients

        """
        salt = salt_actions
        reclass = reclass_actions
        dt = drivetrain_actions

        # ############## Change reclass ######################################
        show_step(1)
        reclass.add_key(
            "parameters._param.linux_system_repo_mcp_glusterfs_version_number",
            "5",
            "cluster/*/infra/init.yml"
        )
        # ################# Run linux.system state ###########################
        show_step(2)
        salt.enforce_state("*", "linux.system.repo")

        # ############## Start deploy-upgrade-galera job #####################
        show_step(3)
        job_name = 'update-glusterfs'

        update_glusterfs = dt.start_job_on_cid_jenkins(
            job_name=job_name,
            build_timeout=40 * 60)

        assert update_glusterfs == 'SUCCESS'

        # ################ Check GlusterFS version for servers ##############
        show_step(4)
        # cmd_run returns a sequence; element [0] is presumably the
        # per-node mapping used by has_only_similar — TODO confirm
        # against SaltManager.cmd_run.
        gluster_server_versions_by_nodes = salt.cmd_run(
            "I@glusterfs:server",
            "glusterd --version|head -n1")[0]

        assert has_only_similar(gluster_server_versions_by_nodes),\
            gluster_server_versions_by_nodes

        # ################ Check GlusterFS version for clients ##############
        show_step(5)
        gluster_client_versions_by_nodes = salt.cmd_run(
            "I@glusterfs:client",
            "glusterfs --version|head -n1")[0]

        assert has_only_similar(gluster_client_versions_by_nodes), \
            gluster_client_versions_by_nodes

    @pytest.mark.grab_versions
    @pytest.mark.parametrize("_", [settings.ENV_NAME])
    @pytest.mark.run_mcp_update
    def test_update_galera(self, salt_actions, reclass_actions,
                           drivetrain_actions, show_step, _):
        """ Upgrade Galera automatically

        Scenario:
            1. Include the Galera upgrade pipeline job to DriveTrain
            2. Apply the jenkins.client state on the Jenkins nodes
            3. set the openstack_upgrade_enabled parameter to true
            4. Refresh pillars
            5. Add repositories with new Galera packages
            6. Start job from Jenkins
        """
        salt = salt_actions
        reclass = reclass_actions
        dt = drivetrain_actions
        # ################### Enable pipeline #################################
        show_step(1)
        reclass.add_class(
            "system.jenkins.client.job.deploy.update.upgrade_galera",
            "cluster/*/cicd/control/leader.yml")
        show_step(2)
        salt.enforce_state("I@jenkins:client", "jenkins.client")

        # ############### Enable automatic upgrade ############################
        show_step(3)
        reclass.add_bool_key("parameters._param.openstack_upgrade_enabled",
                             "True",
                             "cluster/*/infra/init.yml")

        show_step(4)
        # "dbs*" targets the Galera/database nodes.
        salt.enforce_state("dbs*", "saltutil.refresh_pillar")

        # ############# Add repositories with new Galera packages #######
        show_step(5)
        salt.enforce_state("dbs*", "linux.system.repo")
        salt.enforce_state("cfg*", "salt.master")

        # #################### Login Jenkins on cid01 node ###################
        show_step(6)

        job_name = 'deploy-upgrade-galera'
        job_parameters = {
            # NOTE(review): string 'false', not bool — presumably the
            # Jenkins job expects string booleans; keep as-is.
            'INTERACTIVE': 'false'
        }

        update_galera = dt.start_job_on_cid_jenkins(
            job_name=job_name,
            job_parameters=job_parameters,
            build_timeout=40 * 60)

        assert update_galera == 'SUCCESS'

    @pytest.fixture
    def disable_automatic_failover_neutron_for_test(self, salt_actions):
        """
        On each OpenStack controller node, modify the neutron.conf file
        Restart the neutron-server service

        Yields the output of the neutron-server restart command; on
        teardown restores neutron.conf from the backup and restarts the
        service again.
        """
        def comment_line(node, file_name, word):
            """
            Adds '#' before the specific line in specific file

            :param node: string, salt target of node where the file locates
            :param file_name: string, full path to the file
            :param word: string, the begin of line which should be commented
            :return: None
            """
            salt_actions.cmd_run(node,
                                 "sed -i 's/^{word}/#{word}/' {file}".
                                 format(word=word,
                                        file=file_name))

        def add_line(node, file_name, line):
            """
            Appends line to the end of file

            :param node: string, salt target of node where the file locates
            :param file_name: string, full path to the file
            :param line: string, line that should be added
            :return: None
            """
            salt_actions.cmd_run(node, "echo {line} >> {file}".format(
                line=line,
                file=file_name))

        neutron_conf = '/etc/neutron/neutron.conf'
        neutron_server = "I@neutron:server"
        # ######## Create backup for config file #######################
        salt_actions.cmd_run(
            neutron_server,
            "cp -p {file} {file}.backup".format(file=neutron_conf))

        # ## Change parameters in neutron.conf'
        # Comment out existing values first, then append explicit
        # "= false" lines so the last definition wins.
        comment_line(neutron_server, neutron_conf,
                     "allow_automatic_l3agent_failover",)
        comment_line(neutron_server, neutron_conf,
                     "allow_automatic_dhcp_failover")
        add_line(neutron_server, neutron_conf,
                 "allow_automatic_dhcp_failover = false")
        add_line(neutron_server, neutron_conf,
                 "allow_automatic_l3agent_failover = false")

        # ## Apply changed config to the neutron-server service
        result = salt_actions.cmd_run(neutron_server,
                                      "service neutron-server restart")
        # TODO: add check that neutron-server is up and running
        yield result
        # ## Revert file changes
        salt_actions.cmd_run(
            neutron_server,
            "cp -p {file}.backup {file}".format(file=neutron_conf))
        salt_actions.cmd_run(neutron_server,
                             "service neutron-server restart")

    @pytest.fixture
    def disable_neutron_agents_for_test(self, salt_actions):
        """
        Disable the neutron services before the test and
        enable it after test

        Yields the output of the stop commands.
        """
        result = salt_actions.cmd_run("I@neutron:server", """
                service neutron-dhcp-agent stop && \
                service neutron-l3-agent stop && \
                service neutron-metadata-agent stop && \
                service neutron-openvswitch-agent stop
                """)
        yield result
        #
        salt_actions.cmd_run("I@neutron:server", """
                service neutron-dhcp-agent start && \
                service neutron-l3-agent start && \
                service neutron-metadata-agent start && \
                service neutron-openvswitch-agent start
                """)
        # TODO: add check that all services are UP and running

    @pytest.mark.grab_versions
    @pytest.mark.parametrize("_", [settings.ENV_NAME])
    @pytest.mark.run_mcp_update
    def test_update_rabbit(self, salt_actions, reclass_actions,
                           drivetrain_actions, show_step, _,
                           disable_automatic_failover_neutron_for_test,
                           disable_neutron_agents_for_test):
        """ Updates RabbitMQ
        Scenario:
            1. Include the RabbitMQ upgrade pipeline job to DriveTrain
            2. Add repositories with new RabbitMQ packages
            3. Start Deploy - upgrade RabbitMQ pipeline

        Updating RabbitMq should be completed before the OpenStack updating
        process starts

        The two neutron fixtures keep neutron quiet while RabbitMQ is
        restarted (no automatic agent failover, agents stopped).
        """
        salt = salt_actions
        reclass = reclass_actions
        dt = drivetrain_actions

        # ####### Include the RabbitMQ upgrade pipeline job to DriveTrain ####
        show_step(1)
        reclass.add_class(
            "system.jenkins.client.job.deploy.update.upgrade_rabbitmq",
            "cluster/*/cicd/control/leader.yml")
        salt.enforce_state("I@jenkins:client", "jenkins.client")

        reclass.add_bool_key("parameters._param.openstack_upgrade_enabled",
                             "True",
                             "cluster/*/infra/init.yml")
        salt.run_state("I@rabbitmq:server", "saltutil.refresh_pillar")

        # ########### Add repositories with new RabbitMQ packages ############
        show_step(2)
        salt.enforce_state("I@rabbitmq:server", "linux.system.repo")

        # ########### Start Deploy - upgrade RabbitMQ pipeline ############
        show_step(3)
        job_parameters = {
            'INTERACTIVE': 'false'
        }

        update_rabbit = dt.start_job_on_cid_jenkins(
            job_name='deploy-upgrade-rabbitmq',
            job_parameters=job_parameters,
            build_timeout=40 * 60
        )
        assert update_rabbit == 'SUCCESS'

    @pytest.mark.grab_versions
    @pytest.mark.parametrize("_", [settings.ENV_NAME])
    @pytest.mark.run_mcp_update
    def test_update_ceph(self, salt_actions, drivetrain_actions, show_step, _):
        """ Updates Ceph to the latest minor version

        Scenario:
            1. Add workaround for unhealth Ceph
            2. Start ceph-upgrade job with default parameters
            3. Check Ceph version for all nodes

        https://docs.mirantis.com/mcp/master/mcp-operations-guide/update-upgrade/minor-update/ceph-update.html
        """
        salt = salt_actions
        dt = drivetrain_actions

        # ###################### Add workaround for unhealth Ceph ############
        show_step(1)
        # Raise the pg object skew warning threshold so a known-noisy
        # health warning does not fail the upgrade pipeline.
        salt.cmd_run("I@ceph:radosgw",
                     "ceph config set 'mon pg warn max object skew' 20")
        # ###################### Start ceph-upgrade pipeline #################
        show_step(2)
        job_parameters = {}

        update_ceph = dt.start_job_on_cid_jenkins(
            job_name='ceph-update',
            job_parameters=job_parameters)

        assert update_ceph == 'SUCCESS'

        # ########## Verify Ceph version #####################################
        show_step(3)

        ceph_version_by_nodes = salt.cmd_run(
            "I@ceph:* and not I@ceph:monitoring and not I@ceph:backup:server",
            "ceph version")[0]

        assert has_only_similar(ceph_version_by_nodes), ceph_version_by_nodes
Hanna Arhipovaeb3a2112019-09-13 18:45:21 +0300421
422
class TestOpenstackUpdate(object):
    """OpenStack minor-update scenarios driven through DriveTrain jobs."""

    @pytest.mark.grab_versions
    @pytest.mark.run_mcp_update
    def test__pre_update__enable_pipeline_job(self,
                                              reclass_actions, salt_actions,
                                              show_step):
        """ Enable pipeline in the Drivetrain

        Scenario:
            1. Add deploy.update.* classes to the reclass
            2. Start jenkins.client salt state

        """
        show_step(1)
        # Register every update pipeline job in the cluster model.
        update_job_classes = (
            "system.jenkins.client.job.deploy.update.upgrade",
            "system.jenkins.client.job.deploy.update.upgrade_ovs_gateway",
            "system.jenkins.client.job.deploy.update.upgrade_compute",
        )
        for job_class in update_job_classes:
            reclass_actions.add_class(job_class,
                                      "cluster/*/cicd/control/leader.yml")

        show_step(2)
        result, errors = salt_actions.enforce_state("I@jenkins:client",
                                                    "jenkins.client")
        assert errors is None

    @pytest.mark.grab_versions
    @pytest.mark.parametrize('target', get_control_plane_targets())
    @pytest.mark.run_mcp_update
    def test__update__control_plane(self, drivetrain_actions,
                                    switch_to_proposed_pipelines, target):
        """Start 'Deploy - upgrade control VMs' for specific node
        """
        build_result = drivetrain_actions.start_job_on_cid_jenkins(
            job_name="deploy-upgrade-control",
            job_parameters={"TARGET_SERVERS": target,
                            "INTERACTIVE": False})

        assert build_result == 'SUCCESS'

    @pytest.mark.grab_versions
    @pytest.mark.run_mcp_update
    def test__update__data_plane(self, drivetrain_actions):
        """Start 'Deploy - upgrade OVS gateway'
        """
        build_result = drivetrain_actions.start_job_on_cid_jenkins(
            job_name="deploy-upgrade-ovs-gateway",
            job_parameters={"INTERACTIVE": False})

        assert build_result == 'SUCCESS'