blob: 9ae394fbe93bc3d9b9601f049e4028457ecd35e0 [file] [log] [blame]
Dmitriy Kruglovb811e642022-10-06 12:24:33 +02001import argparse
2import os
3import re
4import sys
5
6import openstack
7
8
# Send logs to both, a log file and stdout
openstack.enable_logging(debug=False, path='openstack.log', stream=sys.stdout)

# Connect to cloud; the cloud name is taken from OS_TEST_CLOUD env var,
# falling back to the 'os-cloud' entry of clouds.yaml
TEST_CLOUD = os.getenv('OS_TEST_CLOUD', 'os-cloud')
cloud = openstack.connect(cloud=TEST_CLOUD)
log = cloud.log

# Get cloud config (clouds.yaml vars)
config_obj = openstack.config.loader.OpenStackConfig()
cloud_config = config_obj.get_one(cloud=TEST_CLOUD)

# Shortcuts to the per-service proxies used by the cleanup functions below
compute = cloud.compute
identity = cloud.identity
image = cloud.image
network = cloud.network
orchestration = cloud.orchestration
volume = cloud.volume
load_balancer = cloud.load_balancer

# Check if Object Storage is present on the cloud, else skip
object_store_present = any(service.type == 'object-store' for service
                           in list(identity.services()))
if object_store_present:
    object_store = cloud.object_store

# Name fragments identifying test resources; full_mask also excludes
# anything containing 'manual' or '-static-' via a negative lookahead
mask = "cvp|s_rally|rally_|tempest-|tempest_|spt|fio"
full_mask = f"^(?!.*(manual|-static-)).*({mask}).*$"
mask_pattern = re.compile(full_mask, re.IGNORECASE)
# Additional mask for Heat stacks created by API tests (enabled with -f)
stack_mask = "api-[0-9]+-[a-z]+"
stack_pattern = re.compile(stack_mask, re.IGNORECASE)
41
def get_resource_value(resource_key, default):
    """Return a 'custom_vars' override from clouds.yaml, or *default*.

    :param resource_key: key to look up under the 'custom_vars' section
    :param default: value returned when the section or key is absent
    """
    try:
        custom_vars = cloud_config.config['custom_vars']
        return custom_vars[resource_key]
    except KeyError:
        return default
47
48
def _filter_test_resources(resources, attribute, pattern=mask_pattern):
    """Return {resource id: attribute value} for matching test resources.

    :param resources: iterable of SDK resource objects
    :param attribute: attribute name whose value is matched against pattern
    :param pattern: compiled regex applied with .match(); defaults to the
        module-level test-resource mask
    """
    filtered_resources = {}
    for item in resources:
        # If there is no attribute in object, use just empty string as value
        value = getattr(item, attribute, '')
        # If the attribute value is None, use empty string instead, to be
        # able to run regex search
        if value is None:
            value = ''
        found = pattern.match(value)
        if found:
            # Reuse the normalized value instead of a second, unguarded
            # getattr() which would raise if the attribute were missing
            filtered_resources[item.id] = value
    return filtered_resources
62
63
def _log_resources_count(count, resource, pattern=mask):
    """Log how many resources matching *pattern* were found."""
    message = f"{count} {resource} containing '{pattern}' are found."
    log.info(message)
66
67
68def _log_resource_delete(id_, name, type_):
69 log.info(f"... deleting {name} (id={id_}) {type_}")
70
71
def _force_delete_load_balancer(id_):
    """Cascade force-delete a load balancer via a raw Octavia API call."""
    log.info(f"... ... force deleting {id_} load balancer")
    endpoint = load_balancer.get_endpoint()
    cloud.session.request(
        url=f"{endpoint}/lbaas/loadbalancers/{id_}",
        method='DELETE',
        headers={'X-Auth-Token': cloud.session.get_token(),
                 'Content-Type': 'application/json'},
        params={'cascade': 'true', 'force': 'true'})
81
82
def cleanup_users():
    """Delete test users whose names match the cleanup mask."""
    to_delete = _filter_test_resources(identity.users(), 'name')
    _log_resources_count(len(to_delete), 'user(s)')
    if args.dry_run:
        return
    for user_id, user_name in to_delete.items():
        _log_resource_delete(user_id, user_name, 'user')
        identity.delete_user(user_id)
92
93
def cleanup_roles():
    """Delete test roles whose names match the cleanup mask."""
    to_delete = _filter_test_resources(identity.roles(), 'name')
    _log_resources_count(len(to_delete), 'role(s)')
    if args.dry_run:
        return
    for role_id, role_name in to_delete.items():
        _log_resource_delete(role_id, role_name, 'role')
        identity.delete_role(role_id)
103
104
def cleanup_projects():
    """Delete test projects whose names match the cleanup mask."""
    to_delete = _filter_test_resources(identity.projects(), 'name')
    _log_resources_count(len(to_delete), 'project(s)')
    if args.dry_run:
        return
    for project_id, project_name in to_delete.items():
        _log_resource_delete(project_id, project_name, 'project')
        identity.delete_project(project_id)
114
115
def cleanup_regions():
    """Delete test regions; regions are matched by id, not name."""
    to_delete = _filter_test_resources(identity.regions(), 'id')
    _log_resources_count(len(to_delete), 'region(s)')
    if args.dry_run:
        return
    for region_id in to_delete:
        # A region's id doubles as its display name in the log line
        _log_resource_delete(region_id, region_id, 'region')
        identity.delete_region(region_id)
125
126
def cleanup_services():
    """Delete test services whose names match the cleanup mask."""
    to_delete = _filter_test_resources(identity.services(), 'name')
    _log_resources_count(len(to_delete), 'service(s)')
    if args.dry_run:
        return
    for service_id, service_name in to_delete.items():
        _log_resource_delete(service_id, service_name, 'service')
        identity.delete_service(service_id)
136
137
def cleanup_stacks(stacks_alt=False):
    """Delete test Heat stacks, waiting for each deletion to finish.

    :param stacks_alt: when True, additionally delete stacks matching
        the alternative 'stack_mask' pattern.
    """
    # Materialize the listing: the SDK returns a generator, and it must
    # be iterated a second time when the alternative mask is enabled —
    # an exhausted generator would make the second filter find nothing.
    stacks = list(orchestration.stacks())
    stacks_to_delete = _filter_test_resources(stacks, 'name')
    _log_resources_count(len(stacks_to_delete), 'stack(s)')

    # Use additional pattern for searching/deleting test Heat resources,
    # if enabled
    if stacks_alt:
        stacks_alt_to_delete = _filter_test_resources(
            stacks, 'name', stack_pattern)
        _log_resources_count(len(stacks_alt_to_delete), 'stack(s)', stack_mask)
        stacks_to_delete.update(stacks_alt_to_delete)

    if args.dry_run:
        return

    for id_ in stacks_to_delete:
        _log_resource_delete(id_, stacks_to_delete[id_], 'stack')
        # Fetch the stack object first so its deletion can be waited on
        stack_obj = orchestration.get_stack(id_)
        orchestration.delete_stack(id_)
        orchestration.wait_for_delete(stack_obj)
Dmitriy Kruglovb811e642022-10-06 12:24:33 +0200159
160
def cleanup_flavors():
    """Delete test flavors whose names match the cleanup mask."""
    to_delete = _filter_test_resources(compute.flavors(), 'name')
    _log_resources_count(len(to_delete), 'flavor(s)')
    if args.dry_run:
        return
    for flavor_id, flavor_name in to_delete.items():
        _log_resource_delete(flavor_id, flavor_name, 'flavor')
        compute.delete_flavor(flavor_id)
170
171
def cleanup_images():
    """Delete test images whose names match the cleanup mask."""
    to_delete = _filter_test_resources(image.images(), 'name')
    _log_resources_count(len(to_delete), 'image(s)')
    if args.dry_run:
        return
    for image_id, image_name in to_delete.items():
        _log_resource_delete(image_id, image_name, 'image')
        image.delete_image(image_id)
181
182
def cleanup_keypairs():
    """Delete test keypairs whose names match the cleanup mask."""
    to_delete = _filter_test_resources(compute.keypairs(), 'name')
    _log_resources_count(len(to_delete), 'keypair(s)')
    if args.dry_run:
        return
    for keypair_id, keypair_name in to_delete.items():
        _log_resource_delete(keypair_id, keypair_name, 'keypair')
        compute.delete_keypair(keypair_id)
192
193
def cleanup_servers():
    """Delete test servers across all projects, waiting for each one."""
    to_delete = _filter_test_resources(
        compute.servers(all_projects=True), 'name')
    _log_resources_count(len(to_delete), 'server(s)')
    if args.dry_run:
        return
    for srv_id, srv_name in to_delete.items():
        if args.servers_active:
            # Per the -S option: reset to ACTIVE before deletion
            # (required by bare metal nodes)
            log.info(
                f"... resetting {srv_name} (id={srv_id}) server "
                "state to 'active'")
            compute.reset_server_state(srv_id, 'active')
        _log_resource_delete(srv_id, srv_name, 'server')
        compute.delete_server(srv_id)
        srv_obj = compute.get_server(srv_id)
        compute.wait_for_delete(srv_obj)
210
211
def cleanup_snapshots():
    """Delete test volume snapshots, forcing them deletable first."""
    to_delete = _filter_test_resources(
        volume.snapshots(all_projects=True), 'name')
    _log_resources_count(len(to_delete), 'snapshot(s)')
    if args.dry_run:
        return
    for snap_id, snap_name in to_delete.items():
        # Fetch the object first so its deletion can be waited on below
        snapshot_obj = volume.get_snapshot(snap_id)
        volume.reset_snapshot(snap_id, 'available')
        _log_resource_delete(snap_id, snap_name, 'snapshot')
        volume.delete_snapshot(snap_id, force=True)
        volume.wait_for_delete(snapshot_obj)
Dmitriy Kruglovb811e642022-10-06 12:24:33 +0200224
225
def cleanup_volumes():
    """Delete test volumes across all projects, waiting for each one."""
    to_delete = _filter_test_resources(
        volume.volumes(all_projects=True), 'name')
    _log_resources_count(len(to_delete), 'volume(s)')
    if args.dry_run:
        return
    for vol_id, vol_name in to_delete.items():
        # Force the volume into a deletable (available/detached) state
        volume.reset_volume_status(vol_id, 'available', 'detached', 'None')
        _log_resource_delete(vol_id, vol_name, 'volume')
        volume.delete_volume(vol_id)
        vol_obj = volume.get_volume(vol_id)
        volume.wait_for_delete(vol_obj)
238
239
def cleanup_volume_groups():
    """Delete test volume groups whose names match the cleanup mask."""
    to_delete = _filter_test_resources(volume.groups(), 'name')
    _log_resources_count(len(to_delete), 'volume group(s)')
    if args.dry_run:
        return
    for group_id, group_name in to_delete.items():
        _log_resource_delete(group_id, group_name, 'volume group')
        volume.delete_group(group_id)
249
250
def cleanup_volume_backups():
    """Delete test volume backups, waiting for each deletion."""
    to_delete = _filter_test_resources(
        volume.backups(all_tenants=True), 'name')
    _log_resources_count(len(to_delete), 'volume backup(s)')
    if args.dry_run:
        return
    for backup_id, backup_name in to_delete.items():
        # Fetch the object first so its deletion can be waited on below
        backup_obj = volume.get_backup(backup_id)
        _log_resource_delete(backup_id, backup_name, 'volume backup')
        volume.delete_backup(backup_id)
        volume.wait_for_delete(backup_obj)
262
263
def cleanup_volume_group_types():
    """Delete test volume group types matching the cleanup mask."""
    to_delete = _filter_test_resources(volume.group_types(), 'name')
    _log_resources_count(len(to_delete), 'volume group type(s)')
    if args.dry_run:
        return
    for gt_id, gt_name in to_delete.items():
        _log_resource_delete(gt_id, gt_name, 'volume group type')
        volume.delete_group_type(gt_id)
274
275
def cleanup_volume_types():
    """Delete test volume types whose names match the cleanup mask."""
    to_delete = _filter_test_resources(volume.types(), 'name')
    _log_resources_count(len(to_delete), 'volume type(s)')
    if args.dry_run:
        return
    for type_id, type_name in to_delete.items():
        _log_resource_delete(type_id, type_name, 'volume type')
        volume.delete_type(type_id)
285
286
def cleanup_sec_groups():
    """Delete test security groups matching the cleanup mask."""
    to_delete = _filter_test_resources(network.security_groups(), 'name')
    _log_resources_count(len(to_delete), 'security group(s)')
    if args.dry_run:
        return
    for sg_id, sg_name in to_delete.items():
        _log_resource_delete(sg_id, sg_name, 'security group')
        network.delete_security_group(sg_id)
296
297
def cleanup_containers():
    """Delete test Swift containers matching the cleanup mask.

    Only called when the cloud exposes an object-store service.
    """
    to_delete = _filter_test_resources(object_store.containers(), 'name')
    _log_resources_count(len(to_delete), 'container(s)')
    if args.dry_run:
        return
    for container_id, container_name in to_delete.items():
        _log_resource_delete(container_id, container_name, 'container')
        object_store.delete_container(container_id)
307
308
def cleanup_routers():
    """Delete test routers, detaching gateway and interfaces first."""
    to_delete = _filter_test_resources(network.routers(), 'name')
    _log_resources_count(len(to_delete), 'router(s)')
    if args.dry_run:
        return
    for router_id, router_name in to_delete.items():
        _log_resource_delete(router_id, router_name, 'router')

        # Unset external gateway and remove ports from router
        log.info("... ... removing external gateway from the router")
        network.update_router(router_id, external_gateway_info={})
        for port in network.ports(device_id=router_id):
            # HA interface ports are left for Neutron to manage
            if port.device_owner != 'network:router_ha_interface':
                log.info(f"... ... removing port {port.id} from the router")
                network.remove_interface_from_router(
                    router_id, port_id=port.id)

        network.delete_router(router_id)
328
329
def cleanup_networks():
    """Delete test networks along with their ports and subnets."""
    to_delete = _filter_test_resources(network.networks(), 'name')
    _log_resources_count(len(to_delete), 'network(s)')
    if args.dry_run:
        return
    for net_id, net_name in to_delete.items():
        _log_resource_delete(net_id, net_name, 'network')

        # Ports and subnets must be removed before the network itself
        for port in network.ports(network_id=net_id):
            log.info(
                f"... ... removing port {port.id} from the network")
            network.delete_port(port.id)
        for subnet in network.subnets(network_id=net_id):
            log.info(
                f"... ... removing subnet {subnet.id} from the network")
            network.delete_subnet(subnet.id)

        network.delete_network(net_id)
351
352
def cleanup_load_balancers():
    """Delete test load balancers, force-deleting stuck ones."""
    to_delete = _filter_test_resources(
        load_balancer.load_balancers(), 'name')
    _log_resources_count(len(to_delete), 'load_balancer(s)')
    if args.dry_run:
        return
    for lb_id, lb_name in to_delete.items():
        _log_resource_delete(lb_id, lb_name, 'load_balancer')
        try:
            load_balancer.delete_load_balancer(lb_id, cascade=True)
        except openstack.exceptions.ConflictException:
            # force delete the LB in case it is in some PENDING_* state
            _force_delete_load_balancer(lb_id)
        except Exception as e:
            # Best-effort: log and continue with the remaining LBs
            log.info(f"... ... could not delete {lb_id} load balancer: {e}")
368
369
def cleanup_floating_ips():
    """Delete unassociated floating IPs owned by test projects."""
    project_ids = list(
        _filter_test_resources(identity.projects(), 'name'))
    # filter only non-associated IPs, only inside target projects
    fips_to_delete = {
        ip.id: ip.floating_ip_address
        for ip in network.ips()
        if ip.status == 'DOWN'
        and ip.fixed_ip_address is None
        and ip.project_id in project_ids
    }
    _log_resources_count(len(fips_to_delete), 'floating ip(s)')
    if args.dry_run:
        return
    for fip_id, fip_address in fips_to_delete.items():
        _log_resource_delete(fip_id, fip_address, 'floating ip')
        network.delete_ip(fip_id)
386
387
if __name__ == "__main__":
    # CLI: all flags are optional toggles; defaults do a full cleanup of
    # everything except projects, with no dry run.
    parser = argparse.ArgumentParser(
        description='OpenStack test resources cleanup script')
    parser.add_argument(
        '-t', dest='dry_run', action='store_true',
        help='Dry run mode, no cleanup is done')
    parser.add_argument(
        '-P', dest='projects', action='store_true',
        help='Force cleanup of projects')
    parser.add_argument(
        '-S', dest='servers_active', action='store_true',
        # Fixed typo: 'reqiured' -> 'required'
        help='Set servers to ACTIVE before deletion (required by bare metal)')
    parser.add_argument(
        '-f', dest='stacks_alt', action='store_true',
        help='Use additional mask for stack cleanup')

    args = parser.parse_args()

    if args.dry_run:
        log.info("Running in dry-run mode")
    if args.servers_active:
        log.info("Servers will be set to ACTIVE before cleanup")
    if args.projects:
        log.info("Project cleanup is enabled")
    if args.stacks_alt:
        log.info(
            f"Stacks will be cleaned up using additional '{stack_mask}' mask")

    # Order matters: stacks/LBs/servers first, then the volumes and
    # images they may hold, then identity and network resources.
    cleanup_stacks(stacks_alt=args.stacks_alt)
    cleanup_load_balancers()
    cleanup_servers()
    cleanup_flavors()
    try:  # Skip if cinder-backup service is not enabled
        cleanup_volume_backups()
    except openstack.exceptions.ResourceNotFound:
        pass
    cleanup_snapshots()
    cleanup_volumes()
    cleanup_volume_groups()
    cleanup_volume_group_types()
    cleanup_volume_types()
    cleanup_images()
    cleanup_sec_groups()
    cleanup_keypairs()
    cleanup_users()
    cleanup_roles()
    cleanup_services()
    cleanup_regions()
    cleanup_routers()
    cleanup_networks()
    if object_store_present:
        cleanup_containers()
    cleanup_floating_ips()

    # Projects are only removed when explicitly requested with -P
    if args.projects:
        cleanup_projects()

    msg = "Cleanup is FINISHED"
    log.info(f"\n{'=' * len(msg)}\n{msg}")