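"""Clean up OpenStack resources left behind by test frameworks.

Resources whose names match the test masks defined below (cvp, Rally,
Tempest, SPT, fio) are deleted. The target cloud is taken from the
OS_TEST_CLOUD environment variable (default 'os-cloud') and must be
defined in clouds.yaml.

Example usage ('cleanup.py' is a placeholder for the actual file name):

    OS_TEST_CLOUD=os-cloud python cleanup.py -t   # dry run, report only
    python cleanup.py -S -P                       # also reset servers to
                                                  # ACTIVE and remove projects
"""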
import argparse
import os
import re
import sys

import openstack


# Send logs both to a log file and to stdout
openstack.enable_logging(debug=False, path='openstack.log', stream=sys.stdout)

# Connect to cloud
TEST_CLOUD = os.getenv('OS_TEST_CLOUD', 'os-cloud')
cloud = openstack.connect(cloud=TEST_CLOUD)
log = cloud.log

# Get cloud config (clouds.yaml vars)
config_obj = openstack.config.loader.OpenStackConfig()
cloud_config = config_obj.get_one(cloud=TEST_CLOUD)

compute = cloud.compute
identity = cloud.identity
image = cloud.image
network = cloud.network
orchestration = cloud.orchestration
object_store = cloud.object_store
volume = cloud.volume
load_balancer = cloud.load_balancer

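# Name masks identifying resources created by the test frameworks; names
# containing 'manual' or '-static-' are never matched. The stack mask is an
# extra pattern for Heat stacks, applied only when the -f option is given.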
mask = "cvp|s_rally|rally_|tempest-|tempest_|spt|fio"
full_mask = f"^(?!.*(manual|-static-)).*({mask}).*$"
mask_pattern = re.compile(full_mask, re.IGNORECASE)
stack_mask = "api-[0-9]+-[a-z]+"
stack_pattern = re.compile(stack_mask, re.IGNORECASE)


def get_resource_value(resource_key, default):
    try:
        return cloud_config.config['custom_vars'][resource_key]
    except KeyError:
        return default


def _filter_test_resources(resources, attribute, pattern=mask_pattern):
    filtered_resources = {}
    for item in resources:
        # If the object has no such attribute, fall back to an empty string
        value = getattr(item, attribute, '')
        # If the attribute value is None, use an empty string instead so the
        # regex search can still run
        if value is None:
            value = ''
        found = pattern.match(value)
        if found:
            filtered_resources[item.id] = getattr(item, attribute)
    return filtered_resources


def _log_resources_count(count, resource, pattern=mask):
    log.info(f"{count} {resource} containing '{pattern}' are found.")


def _log_resource_delete(id_, name, type_):
    log.info(f"... deleting {name} (id={id_}) {type_}")


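# Load balancers stuck in a PENDING_* state reject the regular delete call,
# so a raw DELETE request with the 'cascade' and 'force' options is sent
# straight to the Octavia endpoint instead.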
def _force_delete_load_balancer(id_):
    log.info(f"... ... force deleting {id_} load balancer")
    lb_ep = load_balancer.get_endpoint()
    lb_uri = f"{lb_ep}/lbaas/loadbalancers/{id_}"
    headers = {'X-Auth-Token': cloud.session.get_token(),
               'Content-Type': 'application/json'}
    params = {'cascade': 'true', 'force': 'true'}
    cloud.session.request(url=lb_uri, method='DELETE',
                          headers=headers, params=params)


def cleanup_users():
    users = identity.users()
    users_to_delete = _filter_test_resources(users, 'name')
    _log_resources_count(len(users_to_delete), 'user(s)')
    if args.dry_run:
        return
    for id_ in users_to_delete:
        _log_resource_delete(id_, users_to_delete[id_], 'user')
        identity.delete_user(id_)


def cleanup_roles():
    roles = identity.roles()
    roles_to_delete = _filter_test_resources(roles, 'name')
    _log_resources_count(len(roles_to_delete), 'role(s)')
    if args.dry_run:
        return
    for id_ in roles_to_delete:
        _log_resource_delete(id_, roles_to_delete[id_], 'role')
        identity.delete_role(id_)


def cleanup_projects():
    projects = identity.projects()
    projects_to_delete = _filter_test_resources(projects, 'name')
    _log_resources_count(len(projects_to_delete), 'project(s)')
    if args.dry_run:
        return
    for id_ in projects_to_delete:
        _log_resource_delete(id_, projects_to_delete[id_], 'project')
        identity.delete_project(id_)


def cleanup_regions():
    regions = identity.regions()
    regions_to_delete = _filter_test_resources(regions, 'id')
    _log_resources_count(len(regions_to_delete), 'region(s)')
    if args.dry_run:
        return
    for id_ in regions_to_delete:
        _log_resource_delete(id_, id_, 'region')
        identity.delete_region(id_)


def cleanup_services():
    services = identity.services()
    services_to_delete = _filter_test_resources(services, 'name')
    _log_resources_count(len(services_to_delete), 'service(s)')
    if args.dry_run:
        return
    for id_ in services_to_delete:
        _log_resource_delete(id_, services_to_delete[id_], 'service')
        identity.delete_service(id_)


def cleanup_stacks(stacks_alt=False):
    stacks = orchestration.stacks()
    stacks_to_delete = _filter_test_resources(stacks, 'name')
    _log_resources_count(len(stacks_to_delete), 'stack(s)')

    # Use additional pattern for searching/deleting test Heat resources,
    # if enabled
    if stacks_alt:
        stacks_alt_to_delete = _filter_test_resources(
            stacks, 'name', stack_pattern)
        _log_resources_count(len(stacks_alt_to_delete), 'stack(s)', stack_mask)
        stacks_to_delete.update(stacks_alt_to_delete)

    if args.dry_run:
        return

    for id_ in stacks_to_delete:
        _log_resource_delete(id_, stacks_to_delete[id_], 'stack')
        stack_obj = orchestration.get_stack(id_)
        orchestration.delete_stack(id_)
        orchestration.wait_for_delete(stack_obj)


def cleanup_flavors():
    flavors = compute.flavors()
    flavors_to_delete = _filter_test_resources(flavors, 'name')
    _log_resources_count(len(flavors_to_delete), 'flavor(s)')
    if args.dry_run:
        return
    for id_ in flavors_to_delete:
        _log_resource_delete(id_, flavors_to_delete[id_], 'flavor')
        compute.delete_flavor(id_)


def cleanup_images():
    images = image.images()
    images_to_delete = _filter_test_resources(images, 'name')
    _log_resources_count(len(images_to_delete), 'image(s)')
    if args.dry_run:
        return
    for id_ in images_to_delete:
        _log_resource_delete(id_, images_to_delete[id_], 'image')
        image.delete_image(id_)


def cleanup_keypairs():
    keypairs = compute.keypairs()
    keypairs_to_delete = _filter_test_resources(keypairs, 'name')
    _log_resources_count(len(keypairs_to_delete), 'keypair(s)')
    if args.dry_run:
        return
    for id_ in keypairs_to_delete:
        _log_resource_delete(id_, keypairs_to_delete[id_], 'keypair')
        compute.delete_keypair(id_)


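# Servers can optionally be reset to ACTIVE first (-S option), which bare
# metal nodes require before they can be deleted.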
def cleanup_servers():
    servers = compute.servers(all_projects=True)
    servers_to_delete = _filter_test_resources(servers, 'name')
    _log_resources_count(len(servers_to_delete), 'server(s)')
    if args.dry_run:
        return
    for id_ in servers_to_delete:
        if args.servers_active:
            log.info(
                f"... resetting {servers_to_delete[id_]} (id={id_}) server "
                "state to 'active'")
            compute.reset_server_state(id_, 'active')
        _log_resource_delete(id_, servers_to_delete[id_], 'server')
        compute.delete_server(id_)
        srv_obj = compute.get_server(id_)
        compute.wait_for_delete(srv_obj)


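# Snapshots and volumes are reset to 'available' before deletion so that
# resources stuck in other states can still be removed.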
def cleanup_snapshots():
    snapshots = volume.snapshots(all_projects=True)
    snapshots_to_delete = _filter_test_resources(snapshots, 'name')
    _log_resources_count(len(snapshots_to_delete), 'snapshot(s)')
    if args.dry_run:
        return
    for id_ in snapshots_to_delete:
        snapshot_obj = volume.get_snapshot(id_)
        volume.reset_snapshot(id_, 'available')
        _log_resource_delete(id_, snapshots_to_delete[id_], 'snapshot')
        volume.delete_snapshot(id_, force=True)
        volume.wait_for_delete(snapshot_obj)


def cleanup_volumes():
    volumes = volume.volumes(all_projects=True)
    volumes_to_delete = _filter_test_resources(volumes, 'name')
    _log_resources_count(len(volumes_to_delete), 'volume(s)')
    if args.dry_run:
        return
    for id_ in volumes_to_delete:
        volume.reset_volume_status(id_, 'available', 'detached', 'None')
        _log_resource_delete(id_, volumes_to_delete[id_], 'volume')
        volume.delete_volume(id_)
        vol_obj = volume.get_volume(id_)
        volume.wait_for_delete(vol_obj)


def cleanup_volume_groups():
    groups = volume.groups()
    groups_to_delete = _filter_test_resources(groups, 'name')
    _log_resources_count(len(groups_to_delete), 'volume group(s)')
    if args.dry_run:
        return
    for id_ in groups_to_delete:
        _log_resource_delete(id_, groups_to_delete[id_], 'volume group')
        volume.delete_group(id_)


def cleanup_volume_backups():
    backups = volume.backups(all_tenants=True)
    backups_to_delete = _filter_test_resources(backups, 'name')
    _log_resources_count(len(backups_to_delete), 'volume backup(s)')
    if args.dry_run:
        return
    for id_ in backups_to_delete:
        backup_obj = volume.get_backup(id_)
        _log_resource_delete(id_, backups_to_delete[id_], 'volume backup')
        volume.delete_backup(id_)
        volume.wait_for_delete(backup_obj)


def cleanup_volume_group_types():
    group_types = volume.group_types()
    group_types_to_delete = _filter_test_resources(group_types, 'name')
    _log_resources_count(len(group_types_to_delete), 'volume group type(s)')
    if args.dry_run:
        return
    for id_ in group_types_to_delete:
        _log_resource_delete(
            id_, group_types_to_delete[id_], 'volume group type')
        volume.delete_group_type(id_)


def cleanup_volume_types():
    volume_types = volume.types()
    volume_types_to_delete = _filter_test_resources(volume_types, 'name')
    _log_resources_count(len(volume_types_to_delete), 'volume type(s)')
    if args.dry_run:
        return
    for id_ in volume_types_to_delete:
        _log_resource_delete(id_, volume_types_to_delete[id_], 'volume type')
        volume.delete_type(id_)


def cleanup_sec_groups():
    sec_groups = network.security_groups()
    sec_groups_to_delete = _filter_test_resources(sec_groups, 'name')
    _log_resources_count(len(sec_groups_to_delete), 'security group(s)')
    if args.dry_run:
        return
    for id_ in sec_groups_to_delete:
        _log_resource_delete(id_, sec_groups_to_delete[id_], 'security group')
        network.delete_security_group(id_)


def cleanup_containers():
    containers = object_store.containers()
    containers_to_delete = _filter_test_resources(containers, 'name')
    _log_resources_count(len(containers_to_delete), 'container(s)')
    if args.dry_run:
        return
    for id_ in containers_to_delete:
        _log_resource_delete(id_, containers_to_delete[id_], 'container')
        object_store.delete_container(id_)


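# The external gateway is cleared and all non-HA interfaces are detached
# first, since Neutron refuses to delete a router that still has interfaces
# attached.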
def cleanup_routers():
    routers = network.routers()
    routers_to_delete = _filter_test_resources(routers, 'name')
    _log_resources_count(len(routers_to_delete), 'router(s)')
    if args.dry_run:
        return
    for id_ in routers_to_delete:
        _log_resource_delete(id_, routers_to_delete[id_], 'router')

        # Unset external gateway and remove ports from router
        log.info("... ... removing external gateway from the router")
        network.update_router(id_, external_gateway_info={})
        ports = network.ports(device_id=id_)
        for p in ports:
            if p.device_owner != 'network:router_ha_interface':
                log.info(f"... ... removing port {p.id} from the router")
                network.remove_interface_from_router(id_, port_id=p.id)

        network.delete_router(id_)


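# Ports and subnets are deleted explicitly first, since a network with ports
# still attached cannot be removed.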
def cleanup_networks():
    nets = network.networks()
    nets_to_delete = _filter_test_resources(nets, 'name')
    _log_resources_count(len(nets_to_delete), 'network(s)')
    if args.dry_run:
        return
    for id_ in nets_to_delete:
        _log_resource_delete(id_, nets_to_delete[id_], 'network')

        ports = network.ports(network_id=id_)
        for p in ports:
            log.info(
                f"... ... removing port {p.id} from the network")
            network.delete_port(p.id)
        subnets = network.subnets(network_id=id_)
        for s in subnets:
            log.info(
                f"... ... removing subnet {s.id} from the network")
            network.delete_subnet(s.id)

        network.delete_network(id_)


def cleanup_load_balancers():
    lbs = load_balancer.load_balancers()
    lbs_to_delete = _filter_test_resources(lbs, 'name')
    _log_resources_count(len(lbs_to_delete), 'load_balancer(s)')
    if args.dry_run:
        return
    for id_ in lbs_to_delete:
        _log_resource_delete(id_, lbs_to_delete[id_], 'load_balancer')
        try:
            load_balancer.delete_load_balancer(id_, cascade=True)
        except openstack.exceptions.ConflictException:
            # force delete the LB in case it is in some PENDING_* state
            _force_delete_load_balancer(id_)
        except Exception as e:
            log.info(f"... ... could not delete {id_} load balancer: {e}")


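# Only unassociated floating IPs (status DOWN, no fixed IP) belonging to the
# test projects are removed.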
def cleanup_floating_ips():
    projects = identity.projects()
    list_projects_to_delete = list(_filter_test_resources(projects, 'name'))
    floating_ips = network.ips()
    fips_to_delete = {}
    for ip in floating_ips:
        # filter only non-associated IPs, only inside target projects
        if (ip.status == 'DOWN') and (ip.fixed_ip_address is None):
            if ip.project_id in list_projects_to_delete:
                fips_to_delete[ip.id] = ip.floating_ip_address
    _log_resources_count(len(fips_to_delete), 'floating ip(s)')
    if args.dry_run:
        return
    for id_ in fips_to_delete:
        _log_resource_delete(id_, fips_to_delete[id_], 'floating ip')
        network.delete_ip(id_)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description='OpenStack test resources cleanup script')
    parser.add_argument(
        '-t', dest='dry_run', action='store_true',
        help='Dry run mode, no cleanup is done')
    parser.add_argument(
        '-P', dest='projects', action='store_true',
        help='Force cleanup of projects')
    parser.add_argument(
        '-S', dest='servers_active', action='store_true',
        help='Set servers to ACTIVE before deletion (required by bare metal)')
    parser.add_argument(
        '-f', dest='stacks_alt', action='store_true',
        help='Use additional mask for stack cleanup')

    args = parser.parse_args()

    if args.dry_run:
        log.info("Running in dry-run mode")
    if args.servers_active:
        log.info("Servers will be set to ACTIVE before cleanup")
    if args.projects:
        log.info("Project cleanup is enabled")
    if args.stacks_alt:
        log.info(
            f"Stacks will be cleaned up using additional '{stack_mask}' mask")

    cleanup_stacks(stacks_alt=args.stacks_alt)
    cleanup_load_balancers()
    cleanup_servers()
    cleanup_flavors()
    cleanup_volume_backups()
    cleanup_snapshots()
    cleanup_volumes()
    cleanup_volume_groups()
    cleanup_volume_group_types()
    cleanup_volume_types()
    cleanup_images()
    cleanup_sec_groups()
    cleanup_keypairs()
    cleanup_users()
    cleanup_roles()
    cleanup_services()
    cleanup_regions()
    cleanup_routers()
    cleanup_networks()
    cleanup_containers()
    cleanup_floating_ips()

    if args.projects:
        cleanup_projects()

    msg = "Cleanup is FINISHED"
    log.info(f"\n{'=' * len(msg)}\n{msg}")