-
Notifications
You must be signed in to change notification settings - Fork 14
/
cleanup.py
446 lines (370 loc) · 14.9 KB
/
cleanup.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
import argparse
import os
import re
import sys
import openstack

# Send logs to both, a log file and stdout
openstack.enable_logging(debug=False, path='openstack.log', stream=sys.stdout)

# Connect to cloud; the cloud name is taken from the OS_TEST_CLOUD env var
# and must match an entry in clouds.yaml
TEST_CLOUD = os.getenv('OS_TEST_CLOUD', 'os-cloud')
cloud = openstack.connect(cloud=TEST_CLOUD)
log = cloud.log

# Get cloud config (clouds.yaml vars)
config_obj = openstack.config.loader.OpenStackConfig()
cloud_config = config_obj.get_one(cloud=TEST_CLOUD)

# Shortcuts to the SDK service proxies used throughout this script
compute = cloud.compute
identity = cloud.identity
image = cloud.image
network = cloud.network
orchestration = cloud.orchestration
volume = cloud.volume
load_balancer = cloud.load_balancer

# Check if Object Storage is present on the cloud, else skip
object_store_present = any(service.type == 'object-store' for service
                           in list(identity.services()))
if object_store_present:
    object_store = cloud.object_store

# Default mask matching names of disposable test resources (rally, tempest,
# spt, fio, ...); the negative lookahead excludes anything named 'manual'
# or containing '-static-'
mask = "cvp|s_rally|rally_|tempest-|tempest_|spt|fio"
full_mask = f"^(?!.*(manual|-static-)).*({mask}).*$"
mask_pattern = re.compile(full_mask, re.IGNORECASE)

# Additional mask for Heat stacks created by API tests (enabled via -f)
stack_mask = "api-[0-9]+-[a-z]+"
stack_pattern = re.compile(stack_mask, re.IGNORECASE)
def get_resource_value(resource_key, default):
    """Return a custom variable from the cloud config (clouds.yaml).

    Looks up ``resource_key`` under the ``custom_vars`` section; returns
    ``default`` when either the section or the key is absent.
    """
    custom_vars = cloud_config.config.get('custom_vars', {})
    return custom_vars.get(resource_key, default)
def _filter_test_resources(resources, attribute, pattern=mask_pattern):
    """Filter resources whose *attribute* matches *pattern*.

    :param resources: iterable of SDK resource objects
    :param attribute: attribute name to match against (e.g. 'name' or 'id')
    :param pattern: compiled regex; defaults to the global test-resource mask
    :return: dict mapping resource id to the matched attribute value
    """
    filtered_resources = {}
    for item in resources:
        # If there is no attribute in object, use just empty string as value
        value = getattr(item, attribute, '')
        # If the attribute value is None, use empty string instead, to be
        # able to run regex search
        if value is None:
            value = ''
        found = pattern.match(value)
        if found:
            # 'value' already holds the attribute (the mask never matches an
            # empty string), so avoid a redundant second getattr() call
            filtered_resources[item.id] = value
    return filtered_resources
def _log_resources_count(count, resource, pattern=mask):
    """Log how many resources matching *pattern* were discovered."""
    message = f"{count} {resource} containing '{pattern}' are found."
    log.info(message)
def _log_resource_delete(id_, name, type_):
    """Log the deletion of a single resource."""
    message = f"... deleting {name} (id={id_}) {type_}"
    log.info(message)
def _force_delete_load_balancer(id_):
    """Force-delete a load balancer via a raw Octavia API request.

    Fallback used when the regular SDK delete raises a conflict
    (e.g. the LB appears stuck in a PENDING_* state).
    """
    log.info(f"... ... force deleting {id_} load balancer")
    endpoint = load_balancer.get_endpoint()
    url = f"{endpoint}/lbaas/loadbalancers/{id_}"
    request_headers = {
        'X-Auth-Token': cloud.session.get_token(),
        'Content-Type': 'application/json',
    }
    query = {'cascade': 'true', 'force': 'true'}
    cloud.session.request(url=url, method='DELETE',
                          headers=request_headers, params=query)
def cleanup_users():
    """Delete identity users whose names match the test-resource mask."""
    users_to_delete = _filter_test_resources(identity.users(), 'name')
    _log_resources_count(len(users_to_delete), 'user(s)')
    if args.dry_run:
        return
    for user_id, user_name in users_to_delete.items():
        _log_resource_delete(user_id, user_name, 'user')
        identity.delete_user(user_id)
def cleanup_roles():
    """Delete identity roles whose names match the test-resource mask."""
    roles_to_delete = _filter_test_resources(identity.roles(), 'name')
    _log_resources_count(len(roles_to_delete), 'role(s)')
    if args.dry_run:
        return
    for role_id, role_name in roles_to_delete.items():
        _log_resource_delete(role_id, role_name, 'role')
        identity.delete_role(role_id)
def cleanup_projects():
    """Delete identity projects whose names match the test-resource mask."""
    projects_to_delete = _filter_test_resources(identity.projects(), 'name')
    _log_resources_count(len(projects_to_delete), 'project(s)')
    if args.dry_run:
        return
    for project_id, project_name in projects_to_delete.items():
        _log_resource_delete(project_id, project_name, 'project')
        identity.delete_project(project_id)
def cleanup_regions():
    """Delete identity regions whose ids match the test-resource mask."""
    regions_to_delete = _filter_test_resources(identity.regions(), 'id')
    _log_resources_count(len(regions_to_delete), 'region(s)')
    if args.dry_run:
        return
    for region_id in regions_to_delete:
        # Regions are matched on 'id', so the id doubles as the display name
        _log_resource_delete(region_id, region_id, 'region')
        identity.delete_region(region_id)
def cleanup_services():
    """Delete identity services whose names match the test-resource mask."""
    services_to_delete = _filter_test_resources(identity.services(), 'name')
    _log_resources_count(len(services_to_delete), 'service(s)')
    if args.dry_run:
        return
    for service_id, service_name in services_to_delete.items():
        _log_resource_delete(service_id, service_name, 'service')
        identity.delete_service(service_id)
def cleanup_stacks(stacks_alt=False):
    """Delete Heat stacks matching the test-resource mask.

    :param stacks_alt: when True, also match stacks against the additional
        'api-N-xxx' pattern (``stack_mask``)
    """
    # Materialize the listing: SDK proxy list calls return a generator, and
    # the original single-pass generator would be exhausted before the
    # second (alternative-mask) filtering pass below
    stacks = list(orchestration.stacks())
    stacks_to_delete = _filter_test_resources(stacks, 'name')
    _log_resources_count(len(stacks_to_delete), 'stack(s)')
    # Use additional pattern for searching/deleting test Heat resources,
    # if enabled
    if stacks_alt:
        stacks_alt_to_delete = _filter_test_resources(
            stacks, 'name', stack_pattern)
        _log_resources_count(len(stacks_alt_to_delete), 'stack(s)', stack_mask)
        stacks_to_delete.update(stacks_alt_to_delete)
    if args.dry_run:
        return
    for id_ in stacks_to_delete:
        _log_resource_delete(id_, stacks_to_delete[id_], 'stack')
        # Fetch the stack object first so we can wait for its deletion
        stack_obj = orchestration.get_stack(id_)
        orchestration.delete_stack(id_)
        orchestration.wait_for_delete(stack_obj)
def cleanup_flavors():
    """Delete compute flavors whose names match the test-resource mask."""
    flavors_to_delete = _filter_test_resources(compute.flavors(), 'name')
    _log_resources_count(len(flavors_to_delete), 'flavor(s)')
    if args.dry_run:
        return
    for flavor_id, flavor_name in flavors_to_delete.items():
        _log_resource_delete(flavor_id, flavor_name, 'flavor')
        compute.delete_flavor(flavor_id)
def cleanup_images():
    """Delete images whose names match the test-resource mask."""
    images_to_delete = _filter_test_resources(image.images(), 'name')
    _log_resources_count(len(images_to_delete), 'image(s)')
    if args.dry_run:
        return
    for image_id, image_name in images_to_delete.items():
        _log_resource_delete(image_id, image_name, 'image')
        image.delete_image(image_id)
def cleanup_keypairs():
    """Delete compute keypairs whose names match the test-resource mask."""
    keypairs_to_delete = _filter_test_resources(compute.keypairs(), 'name')
    _log_resources_count(len(keypairs_to_delete), 'keypair(s)')
    if args.dry_run:
        return
    for keypair_id, keypair_name in keypairs_to_delete.items():
        _log_resource_delete(keypair_id, keypair_name, 'keypair')
        compute.delete_keypair(keypair_id)
def cleanup_servers():
    """Delete servers (across all projects) matching the test-resource mask.

    With ``-S`` (``args.servers_active``) each server is first reset to the
    'active' state, which the help text notes is required for bare metal.
    """
    servers = compute.servers(all_projects=True)
    servers_to_delete = _filter_test_resources(servers, 'name')
    _log_resources_count(len(servers_to_delete), 'server(s)')
    if args.dry_run:
        return
    for id_ in servers_to_delete:
        if args.servers_active:
            log.info(
                f"... resetting {servers_to_delete[id_]} (id={id_}) server "
                "state to 'active'")
            compute.reset_server_state(id_, 'active')
        _log_resource_delete(id_, servers_to_delete[id_], 'server')
        # Fetch the server object BEFORE deleting it: looking it up after
        # delete_server() can already fail with ResourceNotFound if the
        # deletion completes quickly (same order as the snapshot/backup
        # cleanup functions)
        srv_obj = compute.get_server(id_)
        compute.delete_server(id_)
        compute.wait_for_delete(srv_obj)
def cleanup_snapshots():
    """Delete volume snapshots (all projects) matching the mask."""
    snapshots_to_delete = _filter_test_resources(
        volume.snapshots(all_projects=True), 'name')
    _log_resources_count(len(snapshots_to_delete), 'snapshot(s)')
    if args.dry_run:
        return
    for snap_id, snap_name in snapshots_to_delete.items():
        snapshot_obj = volume.get_snapshot(snap_id)
        # Reset the snapshot state so the delete request is accepted
        volume.reset_snapshot(snap_id, 'available')
        _log_resource_delete(snap_id, snap_name, 'snapshot')
        volume.delete_snapshot(snap_id, force=True)
        volume.wait_for_delete(snapshot_obj)
def cleanup_volumes():
    """Delete volumes (across all projects) matching the test-resource mask."""
    volumes = volume.volumes(all_projects=True)
    volumes_to_delete = _filter_test_resources(volumes, 'name')
    _log_resources_count(len(volumes_to_delete), 'volume(s)')
    if args.dry_run:
        return
    for id_ in volumes_to_delete:
        # Reset status/attachment so the volume becomes deletable
        volume.reset_volume_status(id_, 'available', 'detached', 'None')
        _log_resource_delete(id_, volumes_to_delete[id_], 'volume')
        # Fetch the volume object BEFORE deleting it: looking it up after
        # delete_volume() can already fail with ResourceNotFound if the
        # deletion completes quickly (same order as cleanup_snapshots)
        vol_obj = volume.get_volume(id_)
        volume.delete_volume(id_)
        volume.wait_for_delete(vol_obj)
def cleanup_volume_groups():
    """Delete volume groups whose names match the test-resource mask."""
    groups_to_delete = _filter_test_resources(volume.groups(), 'name')
    _log_resources_count(len(groups_to_delete), 'volume group(s)')
    if args.dry_run:
        return
    for group_id, group_name in groups_to_delete.items():
        _log_resource_delete(group_id, group_name, 'volume group')
        volume.delete_group(group_id)
def cleanup_volume_backups():
    """Delete volume backups (all tenants) matching the test-resource mask."""
    backups_to_delete = _filter_test_resources(
        volume.backups(all_tenants=True), 'name')
    _log_resources_count(len(backups_to_delete), 'volume backup(s)')
    if args.dry_run:
        return
    for backup_id, backup_name in backups_to_delete.items():
        # Fetch the object first so we can wait for its deletion afterwards
        backup_obj = volume.get_backup(backup_id)
        _log_resource_delete(backup_id, backup_name, 'volume backup')
        volume.delete_backup(backup_id)
        volume.wait_for_delete(backup_obj)
def cleanup_volume_group_types():
    """Delete volume group types whose names match the test-resource mask."""
    group_types_to_delete = _filter_test_resources(
        volume.group_types(), 'name')
    _log_resources_count(len(group_types_to_delete), 'volume group type(s)')
    if args.dry_run:
        return
    for type_id, type_name in group_types_to_delete.items():
        _log_resource_delete(type_id, type_name, 'volume group type')
        volume.delete_group_type(type_id)
def cleanup_volume_types():
    """Delete volume types whose names match the test-resource mask."""
    types_to_delete = _filter_test_resources(volume.types(), 'name')
    _log_resources_count(len(types_to_delete), 'volume type(s)')
    if args.dry_run:
        return
    for type_id, type_name in types_to_delete.items():
        _log_resource_delete(type_id, type_name, 'volume type')
        volume.delete_type(type_id)
def cleanup_sec_groups():
    """Delete security groups whose names match the test-resource mask."""
    sec_groups_to_delete = _filter_test_resources(
        network.security_groups(), 'name')
    _log_resources_count(len(sec_groups_to_delete), 'security group(s)')
    if args.dry_run:
        return
    for sg_id, sg_name in sec_groups_to_delete.items():
        _log_resource_delete(sg_id, sg_name, 'security group')
        network.delete_security_group(sg_id)
def cleanup_containers():
    """Delete object-store containers matching the test-resource mask."""
    containers_to_delete = _filter_test_resources(
        object_store.containers(), 'name')
    _log_resources_count(len(containers_to_delete), 'container(s)')
    if args.dry_run:
        return
    for container_id, container_name in containers_to_delete.items():
        _log_resource_delete(container_id, container_name, 'container')
        object_store.delete_container(container_id)
def cleanup_routers():
    """Delete routers whose names match the test-resource mask.

    Before each deletion the external gateway is unset and the router's
    ports are detached (HA interface ports excluded), so that the router
    can then be removed.
    """
    routers = network.routers()
    routers_to_delete = _filter_test_resources(routers, 'name')
    _log_resources_count(len(routers_to_delete), 'router(s)')
    if args.dry_run:
        return
    for id_ in routers_to_delete:
        _log_resource_delete(id_, routers_to_delete[id_], 'router')
        # Unset external gateway and remove ports from router
        log.info("... ... removing external gateway from the router")
        network.update_router(id_, external_gateway_info={})
        ports = network.ports(device_id=id_)
        for p in ports:
            # Skip HA interface ports — presumably Neutron manages them
            # together with the router itself (TODO confirm)
            if p.device_owner != 'network:router_ha_interface':
                log.info(f"... ... removing port {p.id} from the router")
                network.remove_interface_from_router(id_, port_id=p.id)
        network.delete_router(id_)
def cleanup_networks():
    """Delete networks matching the mask, detaching ports/subnets first."""
    nets_to_delete = _filter_test_resources(network.networks(), 'name')
    _log_resources_count(len(nets_to_delete), 'network(s)')
    if args.dry_run:
        return
    for net_id, net_name in nets_to_delete.items():
        _log_resource_delete(net_id, net_name, 'network')
        # Remove all ports, then all subnets, before deleting the network
        for port in network.ports(network_id=net_id):
            log.info(
                f"... ... removing port {port.id} from the network")
            network.delete_port(port.id)
        for subnet in network.subnets(network_id=net_id):
            log.info(
                f"... ... removing subnet {subnet.id} from the network")
            network.delete_subnet(subnet.id)
        network.delete_network(net_id)
def cleanup_load_balancers():
    """Delete load balancers matching the mask, force-deleting on conflict."""
    lbs_to_delete = _filter_test_resources(
        load_balancer.load_balancers(), 'name')
    _log_resources_count(len(lbs_to_delete), 'load_balancer(s)')
    if args.dry_run:
        return
    for lb_id, lb_name in lbs_to_delete.items():
        _log_resource_delete(lb_id, lb_name, 'load_balancer')
        try:
            load_balancer.delete_load_balancer(lb_id, cascade=True)
        except openstack.exceptions.ConflictException:
            # force delete the LB in case it is in some PENDING_* state
            _force_delete_load_balancer(lb_id)
        except Exception as e:
            # Best-effort cleanup: log the failure and move on to the next LB
            log.info(f"... ... could not delete {lb_id} load balancer: {e}")
def cleanup_floating_ips():
    """Delete unassociated floating IPs that belong to the test projects."""
    projects = identity.projects()
    target_projects = set(_filter_test_resources(projects, 'name'))
    fips_to_delete = {}
    for ip in network.ips():
        # filter only non-associated IPs, only inside target projects
        unassociated = ip.status == 'DOWN' and ip.fixed_ip_address is None
        if unassociated and ip.project_id in target_projects:
            fips_to_delete[ip.id] = ip.floating_ip_address
    _log_resources_count(len(fips_to_delete), 'floating ip(s)')
    if args.dry_run:
        return
    for fip_id, fip_address in fips_to_delete.items():
        _log_resource_delete(fip_id, fip_address, 'floating ip')
        network.delete_ip(fip_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description='OpenStack test resources cleanup script')
    parser.add_argument(
        '-t', dest='dry_run', action='store_true',
        help='Dry run mode, no cleanup is done')
    parser.add_argument(
        '-P', dest='projects', action='store_true',
        help='Force cleanup of projects')
    parser.add_argument(
        '-S', dest='servers_active', action='store_true',
        # Fixed typo in user-facing help text: "reqiured" -> "required"
        help='Set servers to ACTIVE before deletion (required by bare metal)')
    parser.add_argument(
        '-f', dest='stacks_alt', action='store_true',
        help='Use additional mask for stack cleanup')
    args = parser.parse_args()

    if args.dry_run:
        log.info("Running in dry-run mode")
    if args.servers_active:
        log.info("Servers will be set to ACTIVE before cleanup")
    if args.projects:
        log.info("Project cleanup is enabled")
    if args.stacks_alt:
        log.info(
            f"Stacks will be cleaned up using additional '{stack_mask}' mask")

    # Cleanup order: consumers first (stacks, LBs, servers), then the
    # resources they depend on (volumes, images, networks), identity last
    cleanup_stacks(stacks_alt=args.stacks_alt)
    cleanup_load_balancers()
    cleanup_servers()
    cleanup_flavors()
    try:  # Skip if cinder-backup service is not enabled
        cleanup_volume_backups()
    except openstack.exceptions.ResourceNotFound:
        pass
    cleanup_snapshots()
    cleanup_volumes()
    cleanup_volume_groups()
    cleanup_volume_group_types()
    cleanup_volume_types()
    cleanup_images()
    cleanup_sec_groups()
    cleanup_keypairs()
    cleanup_users()
    cleanup_roles()
    cleanup_services()
    cleanup_regions()
    cleanup_routers()
    cleanup_networks()
    if object_store_present:
        cleanup_containers()
    cleanup_floating_ips()
    if args.projects:
        cleanup_projects()

    msg = "Cleanup is FINISHED"
    log.info(f"\n{'=' * len(msg)}\n{msg}")