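"""Clean up OpenStack resources left behind by test runs.

Resources whose names match the test-tool patterns (cvp, rally, tempest,
spt, fio) are deleted across all supported services. The target cloud is
taken from the OS_TEST_CLOUD environment variable ('os-cloud' by default).
Run with -t for a dry run that only reports what would be deleted.
"""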
import argparse
import os
import re
import sys

import openstack


# Send logs both to a log file and to stdout
openstack.enable_logging(debug=False, path='openstack.log', stream=sys.stdout)

# Connect to the cloud from OS_TEST_CLOUD (defaults to 'os-cloud')
TEST_CLOUD = os.getenv('OS_TEST_CLOUD', 'os-cloud')
cloud = openstack.connect(cloud=TEST_CLOUD)
log = cloud.log

# Get cloud config (clouds.yaml vars)
config_obj = openstack.config.loader.OpenStackConfig()
cloud_config = config_obj.get_one(cloud=TEST_CLOUD)

# Shortcuts for the per-service proxies
compute = cloud.compute
identity = cloud.identity
image = cloud.image
network = cloud.network
orchestration = cloud.orchestration
object_store = cloud.object_store
volume = cloud.volume

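# Name masks: match resources created by the test tools (cvp, rally,
# tempest, spt, fio) while excluding names containing 'manual' or '-static-'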
mask = "cvp|s_rally|rally_|tempest-|tempest_|spt|fio"
full_mask = f"^(?!.*(manual|-static-)).*({mask}).*$"
mask_pattern = re.compile(full_mask, re.IGNORECASE)
stack_mask = "api-[0-9]+-[a-z]+"
stack_pattern = re.compile(stack_mask, re.IGNORECASE)


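# Read an optional override from the 'custom_vars' section of clouds.yaml,
# falling back to the given default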
def get_resource_value(resource_key, default):
    try:
        return cloud_config.config['custom_vars'][resource_key]
    except KeyError:
        return default


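# Return {id: attribute} for the resources whose attribute matches pattern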
def _filter_test_resources(resources, attribute, pattern=mask_pattern):
    filtered_resources = {}
    for item in resources:
        # If the object has no such attribute, fall back to an empty string
        value = getattr(item, attribute, '')
        # If the attribute value is None, use an empty string instead, so
        # that the regex search can still run
        if value is None:
            value = ''
        found = pattern.match(value)
        if found:
            filtered_resources[item.id] = getattr(item, attribute)
    return filtered_resources


def _log_resources_count(count, resource, pattern=mask):
    log.info(f"{count} {resource} containing '{pattern}' found.")


def _log_resource_delete(id_, name, type_):
    log.info(f"... deleting {name} (id={id_}) {type_}")


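# Each cleanup_*() helper below lists the resources of one service, filters
# them by the test name pattern, logs the count, and then deletes them
# unless the script runs in dry-run mode (-t)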
def cleanup_users():
    users = identity.users()
    users_to_delete = _filter_test_resources(users, 'name')
    _log_resources_count(len(users_to_delete), 'user(s)')
    if args.dry_run:
        return
    for id_ in users_to_delete:
        _log_resource_delete(id_, users_to_delete[id_], 'user')
        identity.delete_user(id_)


def cleanup_roles():
    roles = identity.roles()
    roles_to_delete = _filter_test_resources(roles, 'name')
    _log_resources_count(len(roles_to_delete), 'role(s)')
    if args.dry_run:
        return
    for id_ in roles_to_delete:
        _log_resource_delete(id_, roles_to_delete[id_], 'role')
        identity.delete_role(id_)


def cleanup_projects():
    projects = identity.projects()
    projects_to_delete = _filter_test_resources(projects, 'name')
    _log_resources_count(len(projects_to_delete), 'project(s)')
    if args.dry_run:
        return
    for id_ in projects_to_delete:
        _log_resource_delete(id_, projects_to_delete[id_], 'project')
        identity.delete_project(id_)


def cleanup_regions():
    regions = identity.regions()
    regions_to_delete = _filter_test_resources(regions, 'id')
    _log_resources_count(len(regions_to_delete), 'region(s)')
    if args.dry_run:
        return
    for id_ in regions_to_delete:
        _log_resource_delete(id_, id_, 'region')
        identity.delete_region(id_)


def cleanup_services():
    services = identity.services()
    services_to_delete = _filter_test_resources(services, 'name')
    _log_resources_count(len(services_to_delete), 'service(s)')
    if args.dry_run:
        return
    for id_ in services_to_delete:
        _log_resource_delete(id_, services_to_delete[id_], 'service')
        identity.delete_service(id_)


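# Heat stacks; with stacks_alt=True, stacks matching the additional
# 'api-N-xyz' naming scheme (stack_mask) are deleted as well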
def cleanup_stacks(stacks_alt=False):
    stacks = orchestration.stacks()
    stacks_to_delete = _filter_test_resources(stacks, 'name')
    _log_resources_count(len(stacks_to_delete), 'stack(s)')

    # Use an additional pattern for searching/deleting test Heat
    # resources, if enabled
    if stacks_alt:
        stacks_alt_to_delete = _filter_test_resources(
            stacks, 'name', stack_pattern)
        _log_resources_count(len(stacks_alt_to_delete), 'stack(s)', stack_mask)
        stacks_to_delete.update(stacks_alt_to_delete)

    if args.dry_run:
        return

    for id_ in stacks_to_delete:
        _log_resource_delete(id_, stacks_to_delete[id_], 'stack')
        # wait_for_delete() needs a resource object, not a bare id
        stack_obj = orchestration.get_stack(id_)
        orchestration.delete_stack(id_)
        orchestration.wait_for_delete(stack_obj)


def cleanup_flavors():
    flavors = compute.flavors()
    flavors_to_delete = _filter_test_resources(flavors, 'name')
    _log_resources_count(len(flavors_to_delete), 'flavor(s)')
    if args.dry_run:
        return
    for id_ in flavors_to_delete:
        _log_resource_delete(id_, flavors_to_delete[id_], 'flavor')
        compute.delete_flavor(id_)


def cleanup_images():
    images = image.images()
    images_to_delete = _filter_test_resources(images, 'name')
    _log_resources_count(len(images_to_delete), 'image(s)')
    if args.dry_run:
        return
    for id_ in images_to_delete:
        _log_resource_delete(id_, images_to_delete[id_], 'image')
        image.delete_image(id_)


def cleanup_keypairs():
    keypairs = compute.keypairs()
    keypairs_to_delete = _filter_test_resources(keypairs, 'name')
    _log_resources_count(len(keypairs_to_delete), 'keypair(s)')
    if args.dry_run:
        return
    for id_ in keypairs_to_delete:
        _log_resource_delete(id_, keypairs_to_delete[id_], 'keypair')
        compute.delete_keypair(id_)


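# Servers are listed across all projects; with -S each one is first reset
# to ACTIVE, which bare-metal (Ironic) instances require before deletion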
def cleanup_servers():
    servers = compute.servers(all_projects=True)
    servers_to_delete = _filter_test_resources(servers, 'name')
    _log_resources_count(len(servers_to_delete), 'server(s)')
    if args.dry_run:
        return
    for id_ in servers_to_delete:
        if args.servers_active:
            log.info(
                f"... resetting {servers_to_delete[id_]} (id={id_}) server "
                "state to 'active'")
            compute.reset_server_state(id_, 'active')
        _log_resource_delete(id_, servers_to_delete[id_], 'server')
        compute.delete_server(id_)
        # Fetch the resource object so wait_for_delete() can poll it
        srv_obj = compute.get_server(id_)
        compute.wait_for_delete(srv_obj)


def cleanup_snapshots():
    snapshots = volume.snapshots(all_projects=True)
    snapshots_to_delete = _filter_test_resources(snapshots, 'name')
    _log_resources_count(len(snapshots_to_delete), 'snapshot(s)')
    if args.dry_run:
        return
    for id_ in snapshots_to_delete:
        # Make sure the snapshot is in a deletable state
        volume.reset_snapshot(id_, 'available')
        _log_resource_delete(id_, snapshots_to_delete[id_], 'snapshot')
        # wait_for_delete() needs a resource object, not a bare id
        snap_obj = volume.get_snapshot(id_)
        volume.delete_snapshot(id_)
        volume.wait_for_delete(snap_obj)


def cleanup_volumes():
    volumes = volume.volumes(all_projects=True)
    volumes_to_delete = _filter_test_resources(volumes, 'name')
    _log_resources_count(len(volumes_to_delete), 'volume(s)')
    if args.dry_run:
        return
    for id_ in volumes_to_delete:
        # Force the volume into a deletable state first
        volume.reset_volume_status(id_, 'available', 'detached', 'None')
        _log_resource_delete(id_, volumes_to_delete[id_], 'volume')
        volume.delete_volume(id_)
        vol_obj = volume.get_volume(id_)
        volume.wait_for_delete(vol_obj)


def cleanup_volume_groups():
    groups = volume.groups()
    groups_to_delete = _filter_test_resources(groups, 'name')
    _log_resources_count(len(groups_to_delete), 'volume group(s)')
    if args.dry_run:
        return
    for id_ in groups_to_delete:
        _log_resource_delete(id_, groups_to_delete[id_], 'volume group')
        volume.delete_group(id_)


def cleanup_volume_group_types():
    group_types = volume.group_types()
    group_types_to_delete = _filter_test_resources(group_types, 'name')
    _log_resources_count(len(group_types_to_delete), 'volume group type(s)')
    if args.dry_run:
        return
    for id_ in group_types_to_delete:
        _log_resource_delete(
            id_, group_types_to_delete[id_], 'volume group type')
        volume.delete_group_type(id_)


def cleanup_volume_types():
    volume_types = volume.types()
    volume_types_to_delete = _filter_test_resources(volume_types, 'name')
    _log_resources_count(len(volume_types_to_delete), 'volume type(s)')
    if args.dry_run:
        return
    for id_ in volume_types_to_delete:
        _log_resource_delete(id_, volume_types_to_delete[id_], 'volume type')
        volume.delete_type(id_)


def cleanup_sec_groups():
    sec_groups = network.security_groups()
    sec_groups_to_delete = _filter_test_resources(sec_groups, 'name')
    _log_resources_count(len(sec_groups_to_delete), 'security group(s)')
    if args.dry_run:
        return
    for id_ in sec_groups_to_delete:
        _log_resource_delete(id_, sec_groups_to_delete[id_], 'security group')
        network.delete_security_group(id_)


def cleanup_containers():
    containers = object_store.containers()
    containers_to_delete = _filter_test_resources(containers, 'name')
    _log_resources_count(len(containers_to_delete), 'container(s)')
    if args.dry_run:
        return
    for id_ in containers_to_delete:
        _log_resource_delete(id_, containers_to_delete[id_], 'container')
        object_store.delete_container(id_)


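# A router can only be deleted once its external gateway is unset and its
# ports are detached, so that teardown is done first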
def cleanup_routers():
    routers = network.routers()
    routers_to_delete = _filter_test_resources(routers, 'name')
    _log_resources_count(len(routers_to_delete), 'router(s)')
    if args.dry_run:
        return
    for id_ in routers_to_delete:
        _log_resource_delete(id_, routers_to_delete[id_], 'router')

        # Unset the external gateway and remove the ports from the router
        log.info("... ... removing external gateway from the router")
        network.update_router(id_, external_gateway_info={})
        ports = network.ports(device_id=id_)
        for p in ports:
            # HA interface ports are managed by Neutron itself
            if p.device_owner != 'network:router_ha_interface':
                log.info(f"... ... removing port {p.id} from the router")
                network.remove_interface_from_router(id_, port_id=p.id)

        network.delete_router(id_)


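# A network can only be deleted once its ports and subnets are gone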
def cleanup_networks():
    nets = network.networks()
    nets_to_delete = _filter_test_resources(nets, 'name')
    _log_resources_count(len(nets_to_delete), 'network(s)')
    if args.dry_run:
        return
    for id_ in nets_to_delete:
        _log_resource_delete(id_, nets_to_delete[id_], 'network')

        ports = network.ports(network_id=id_)
        for p in ports:
            log.info(
                f"... ... removing port {p.id} from the network")
            network.delete_port(p.id)
        subnets = network.subnets(network_id=id_)
        for s in subnets:
            log.info(
                f"... ... removing subnet {s.id} from the network")
            network.delete_subnet(s.id)

        network.delete_network(id_)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description='OpenStack test resources cleanup script')
    parser.add_argument(
        '-t', dest='dry_run', action='store_true',
        help='Dry run mode, no cleanup is done')
    parser.add_argument(
        '-P', dest='projects', action='store_true',
        help='Force cleanup of projects')
    parser.add_argument(
        '-S', dest='servers_active', action='store_true',
        help='Set servers to ACTIVE before deletion (required for bare metal)')
    parser.add_argument(
        '-f', dest='stacks_alt', action='store_true',
        help='Use additional mask for stack cleanup')

    args = parser.parse_args()

    if args.dry_run:
        log.info("Running in dry-run mode")
    if args.servers_active:
        log.info("Servers will be set to ACTIVE before cleanup")
    if args.projects:
        log.info("Project cleanup is enabled")
    if args.stacks_alt:
        log.info(
            f"Stacks will be cleaned up using additional '{stack_mask}' mask")

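    # Delete in dependency order: stacks own many of the other resources,
    # and servers must go before the volumes and networks they use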
    cleanup_stacks(stacks_alt=args.stacks_alt)
    cleanup_servers()
    cleanup_flavors()
    cleanup_snapshots()
    cleanup_volumes()
    cleanup_volume_groups()
    cleanup_volume_group_types()
    cleanup_volume_types()
    cleanup_images()
    cleanup_sec_groups()
    cleanup_keypairs()
    cleanup_users()
    cleanup_roles()
    cleanup_services()
    cleanup_regions()
    cleanup_routers()
    cleanup_networks()
    cleanup_containers()

    if args.projects:
        cleanup_projects()

    msg = "Cleanup is FINISHED"
    log.info(f"\n{'=' * len(msg)}\n{msg}")