Allow tempest cleanup to delete resources based on prefix
A warning advising to run cleanup with --dry-run first is added to the
command description. The cleanup behavior is extended to allow users to
delete only the resources whose names start with a given prefix.
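
For example, to preview and then delete only the resources created
with the default 'tempest' prefix:

  tempest cleanup --dry-run --prefix tempest
  tempest cleanup --prefix tempest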
Closes-Bug: #1945082
Change-Id: I65dfe051c891b3679538acec713e8616746c47f6
diff --git a/playbooks/devstack-tempest-ipv6.yaml b/playbooks/devstack-tempest-ipv6.yaml
index 568077e..89eec6d 100644
--- a/playbooks/devstack-tempest-ipv6.yaml
+++ b/playbooks/devstack-tempest-ipv6.yaml
@@ -17,6 +17,16 @@
# fail early if anything missing the IPv6 settings or deployments.
- devstack-ipv6-only-deployments-verification
tasks:
+ - name: Run tempest cleanup init-saved-state
+ include_role:
+ name: tempest-cleanup
+ vars:
+ init_saved_state: true
+ when: (run_tempest_dry_cleanup is defined and run_tempest_dry_cleanup | bool) or
+ (run_tempest_cleanup is defined and run_tempest_cleanup | bool) or
+ (run_tempest_fail_if_leaked_resources is defined and run_tempest_fail_if_leaked_resources | bool) or
+ (run_tempest_cleanup_prefix is defined and run_tempest_cleanup_prefix | bool)
+
- name: Run Tempest version <= 26.0.0
include_role:
name: run-tempest-26
@@ -30,3 +40,15 @@
when:
- zuul.branch is defined
- zuul.branch not in ["stable/ocata", "stable/pike", "stable/queens", "stable/rocky", "stable/stein"]
+
+ - name: Run tempest cleanup dry-run
+ include_role:
+ name: tempest-cleanup
+ vars:
+ dry_run: true
+ when: run_tempest_dry_cleanup is defined and run_tempest_dry_cleanup | bool
+
+ - name: Run tempest cleanup
+ include_role:
+ name: tempest-cleanup
+ when: run_tempest_cleanup is defined and run_tempest_cleanup | bool
diff --git a/playbooks/devstack-tempest.yaml b/playbooks/devstack-tempest.yaml
index 269999c..f9b1db0 100644
--- a/playbooks/devstack-tempest.yaml
+++ b/playbooks/devstack-tempest.yaml
@@ -27,7 +27,8 @@
init_saved_state: true
when: (run_tempest_dry_cleanup is defined and run_tempest_dry_cleanup | bool) or
(run_tempest_cleanup is defined and run_tempest_cleanup | bool) or
- (run_tempest_fail_if_leaked_resources is defined and run_tempest_fail_if_leaked_resources | bool)
+ (run_tempest_fail_if_leaked_resources is defined and run_tempest_fail_if_leaked_resources | bool) or
+ (run_tempest_cleanup_prefix is defined and run_tempest_cleanup_prefix | bool)
- name: Run Tempest version <= 26.0.0
include_role:
diff --git a/releasenotes/notes/Allow-tempest-cleanup-delete-resources-based-on-prefix-96d9562f1f30e979.yaml b/releasenotes/notes/Allow-tempest-cleanup-delete-resources-based-on-prefix-96d9562f1f30e979.yaml
new file mode 100644
index 0000000..872f664
--- /dev/null
+++ b/releasenotes/notes/Allow-tempest-cleanup-delete-resources-based-on-prefix-96d9562f1f30e979.yaml
@@ -0,0 +1,10 @@
+---
+features:
+ - |
+ We add a new argument, ``--prefix``, to the ``tempest cleanup`` tool that
+ allows users to delete only the resources that match the prefix. When this
+ option is used, the ``saved_state.json`` file is not needed (there is no
+ need to run with ``--init-saved-state`` first). If one is present, it will
+ be ignored and the cleanup will be done based on the given prefix only.
+ Note that some resources are not named and thus will not be deleted when
+ filtering based on the prefix.
diff --git a/roles/tempest-cleanup/README.rst b/roles/tempest-cleanup/README.rst
index d1fad90..d43319c 100644
--- a/roles/tempest-cleanup/README.rst
+++ b/roles/tempest-cleanup/README.rst
@@ -40,6 +40,12 @@
some must have been leaked. This can be also used to verify that tempest
cleanup was successful.
+.. zuul:rolevar:: run_tempest_cleanup_prefix
+ :default: false
+
+ When true, tempest cleanup will be called with ``--prefix tempest`` to
+ delete only the resources whose names match the prefix. This option can
+ be used together with ``dry_run``.
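+
+ For example, a job can enable prefix-based cleanup with::
+
+   run_tempest_cleanup: true
+   run_tempest_cleanup_prefix: true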
Role usage
----------
diff --git a/roles/tempest-cleanup/defaults/main.yaml b/roles/tempest-cleanup/defaults/main.yaml
index ce78bdb..8060b29 100644
--- a/roles/tempest-cleanup/defaults/main.yaml
+++ b/roles/tempest-cleanup/defaults/main.yaml
@@ -2,3 +2,4 @@
init_saved_state: false
dry_run: false
run_tempest_fail_if_leaked_resources: false
+run_tempest_cleanup_prefix: false
diff --git a/roles/tempest-cleanup/tasks/dry_run.yaml b/roles/tempest-cleanup/tasks/dry_run.yaml
index 46749ab..07e1b63 100644
--- a/roles/tempest-cleanup/tasks/dry_run.yaml
+++ b/roles/tempest-cleanup/tasks/dry_run.yaml
@@ -5,3 +5,12 @@
command: tox -evenv-tempest -- tempest cleanup --dry-run --debug
args:
chdir: "{{ devstack_base_dir }}/tempest"
+ when: not run_tempest_cleanup_prefix
+
+- name: Run tempest cleanup dry-run with tempest prefix
+ become: yes
+ become_user: tempest
+ command: tox -evenv-tempest -- tempest cleanup --dry-run --debug --prefix tempest
+ args:
+ chdir: "{{ devstack_base_dir }}/tempest"
+ when: run_tempest_cleanup_prefix
\ No newline at end of file
diff --git a/roles/tempest-cleanup/tasks/main.yaml b/roles/tempest-cleanup/tasks/main.yaml
index c1d63f0..7ef4928 100644
--- a/roles/tempest-cleanup/tasks/main.yaml
+++ b/roles/tempest-cleanup/tasks/main.yaml
@@ -27,6 +27,15 @@
command: tox -evenv-tempest -- tempest cleanup --debug
args:
chdir: "{{ devstack_base_dir }}/tempest"
+ when: not run_tempest_cleanup_prefix
+
+ - name: Run tempest cleanup with tempest prefix
+ become: yes
+ become_user: tempest
+ command: tox -evenv-tempest -- tempest cleanup --debug --prefix tempest
+ args:
+ chdir: "{{ devstack_base_dir }}/tempest"
+ when: run_tempest_cleanup_prefix
- when:
- run_tempest_fail_if_leaked_resources
diff --git a/tempest/cmd/cleanup.py b/tempest/cmd/cleanup.py
index a8a344a..2a406de 100644
--- a/tempest/cmd/cleanup.py
+++ b/tempest/cmd/cleanup.py
@@ -28,6 +28,10 @@
.. warning::
+ We advise not to run tempest cleanup on production environments.
+
+.. warning::
+
If step 1 is skipped in the example below, the cleanup procedure
may delete resources that existed in the cloud before the test run. This
may cause an unwanted destruction of cloud resources, so use caution with
@@ -45,7 +49,10 @@
* ``--init-saved-state``: Initializes the saved state of the OpenStack
deployment and will output a ``saved_state.json`` file containing resources
from your deployment that will be preserved from the cleanup command. This
- should be done prior to running Tempest tests.
+ should be done prior to running Tempest tests. Note that resources created
+ by other users of your cloud after ``--init-saved-state`` has been run will
+ not be protected, as they will not be present in the
+ ``saved_state.json`` file.
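+
+ For example::
+
+   $ tempest cleanup --init-saved-state
+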
* ``--delete-tempest-conf-objects``: If option is present, then the command
will delete the admin project in addition to the resources associated with
@@ -58,7 +65,27 @@
global objects that will be removed (domains, flavors, images, roles,
projects, and users). Once the cleanup command is executed (e.g. run without
parameters), running it again with ``--dry-run`` should yield an empty
- report.
+ report. We STRONGLY ENCOURAGE you to run ``tempest cleanup`` with
+ ``--dry-run`` first and then verify that the resources listed in the
+ ``dry_run.json`` file are indeed meant to be deleted.
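+
+ For example::
+
+   $ tempest cleanup --dry-run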
+
+* ``--prefix``: Only resources that match the prefix will be deleted. When this
+ option is used, ``saved_state.json`` file is not needed (no need to run with
+ ``--init-saved-state`` first).
+
+ All tempest resources are created with the prefix value taken from the
+ config option ``resource_name_prefix`` in tempest.conf. To clean up only
+ the resources created by tempest, you should use the prefix set in your
+ tempest.conf (the default value of ``resource_name_prefix`` is ``tempest``).
+
+ Note that some resources are not named and thus will not be deleted when
+ filtering based on the prefix. This option will be ignored when
+ ``--init-saved-state`` is used so that it can capture the true init state -
+ all resources present at that moment. If a ``saved_state.json`` file is
+ present (e.g. because you ran tempest cleanup with ``--init-saved-state``
+ before) and you run tempest cleanup with ``--prefix``, the
+ ``saved_state.json`` file will be ignored and the cleanup will be done
+ based on the passed prefix only.
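+
+ For example, to preview and then delete only the resources whose names
+ start with the default ``tempest`` prefix::
+
+   $ tempest cleanup --dry-run --prefix tempest
+   $ tempest cleanup --prefix tempest
+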
* ``--help``: Print the help text for the command and parameters.
@@ -157,6 +184,7 @@
is_dry_run = self.options.dry_run
is_preserve = not self.options.delete_tempest_conf_objects
is_save_state = False
+ cleanup_prefix = self.options.prefix
if is_dry_run:
self.dry_run_data["_projects_to_clean"] = {}
@@ -168,7 +196,8 @@
'is_dry_run': is_dry_run,
'saved_state_json': self.json_data,
'is_preserve': False,
- 'is_save_state': is_save_state}
+ 'is_save_state': is_save_state,
+ 'prefix': cleanup_prefix}
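+ # the prefix is handed to every cleanup service so that list()
+ # can filter the resources by name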
project_service = cleanup_service.ProjectService(admin_mgr, **kwargs)
projects = project_service.list()
LOG.info("Processing %s projects", len(projects))
@@ -182,6 +211,7 @@
'saved_state_json': self.json_data,
'is_preserve': is_preserve,
'is_save_state': is_save_state,
+ 'prefix': cleanup_prefix,
'got_exceptions': self.GOT_EXCEPTIONS}
LOG.info("Processing global services")
for service in self.global_services:
@@ -206,6 +236,7 @@
project_id = project['id']
project_name = project['name']
project_data = None
+ cleanup_prefix = self.options.prefix
if is_dry_run:
project_data = dry_run_data["_projects_to_clean"][project_id] = {}
project_data['name'] = project_name
@@ -216,6 +247,7 @@
'is_preserve': is_preserve,
'is_save_state': False,
'project_id': project_id,
+ 'prefix': cleanup_prefix,
'got_exceptions': self.GOT_EXCEPTIONS}
for service in self.project_associated_services:
svc = service(self.admin_mgr, **kwargs)
@@ -243,10 +275,26 @@
help="Generate JSON file:" + DRY_RUN_JSON +
", that reports the objects that would have "
"been deleted had a full cleanup been run.")
+ parser.add_argument('--prefix', dest='prefix', default=None,
+ help="Only resources that match the prefix will "
+ "be deleted (resources in saved_state.json are "
+ "not taken into account). All tempest resources "
+ "are created with the prefix value set by "
+ "resource_name_prefix in tempest.conf; the "
+ "default prefix is tempest. Note that some "
+ "resources are not named and thus will not be "
+ "deleted when filtering based on the prefix. "
+ "This option will be ignored when --init-saved-state "
+ "is used so that it can capture the true init "
+ "state - all resources present at that moment.")
return parser
def get_description(self):
- return 'Cleanup after tempest run'
+ return ('tempest cleanup tool; read the full documentation before '
+ 'using this tool. We advise not to run it on production '
+ 'environments. On environments where other users may also '
+ 'create resources, we strongly advise using the --dry-run '
+ 'argument first and verifying the content of dry_run.json.')
def _init_state(self):
LOG.info("Initializing saved state.")
@@ -257,6 +305,10 @@
'saved_state_json': data,
'is_preserve': False,
'is_save_state': True,
+ # must be None as we want to capture true init state
+ # (all resources present) thus no filtering based
+ # on the prefix
+ 'prefix': None,
'got_exceptions': self.GOT_EXCEPTIONS}
for service in self.global_services:
svc = service(admin_mgr, **kwargs)
diff --git a/tempest/cmd/cleanup_service.py b/tempest/cmd/cleanup_service.py
index f2370f3..8651ab0 100644
--- a/tempest/cmd/cleanup_service.py
+++ b/tempest/cmd/cleanup_service.py
@@ -115,6 +115,16 @@
return [item for item in item_list
if item['tenant_id'] == self.tenant_id]
+ def _filter_by_prefix(self, item_list):
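+ # keep only the resources whose name starts with the given prefix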
+ items = [item for item in item_list
+ if item['name'].startswith(self.prefix)]
+ return items
+
+ def _filter_out_ids_from_saved(self, item_list, attr):
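+ # drop resources whose id is recorded in saved_state.json under
+ # the given attribute, e.g. 'servers' or 'volumes'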
+ items = [item for item in item_list if item['id']
+ not in self.saved_state_json[attr].keys()]
+ return items
+
def list(self):
pass
@@ -156,10 +166,11 @@
def list(self):
client = self.client
snaps = client.list_snapshots()['snapshots']
- if not self.is_save_state:
+ if self.prefix:
+ snaps = self._filter_by_prefix(snaps)
+ elif not self.is_save_state:
# recreate list removing saved snapshots
- snaps = [snap for snap in snaps if snap['id']
- not in self.saved_state_json['snapshots'].keys()]
+ snaps = self._filter_out_ids_from_saved(snaps, 'snapshots')
LOG.debug("List count, %s Snapshots", len(snaps))
return snaps
@@ -194,10 +205,11 @@
client = self.client
servers_body = client.list_servers()
servers = servers_body['servers']
- if not self.is_save_state:
+ if self.prefix:
+ servers = self._filter_by_prefix(servers)
+ elif not self.is_save_state:
# recreate list removing saved servers
- servers = [server for server in servers if server['id']
- not in self.saved_state_json['servers'].keys()]
+ servers = self._filter_out_ids_from_saved(servers, 'servers')
LOG.debug("List count, %s Servers", len(servers))
return servers
@@ -227,10 +239,11 @@
def list(self):
client = self.server_groups_client
sgs = client.list_server_groups()['server_groups']
- if not self.is_save_state:
+ if self.prefix:
+ sgs = self._filter_by_prefix(sgs)
+ elif not self.is_save_state:
# recreate list removing saved server_groups
- sgs = [sg for sg in sgs if sg['id']
- not in self.saved_state_json['server_groups'].keys()]
+ sgs = self._filter_out_ids_from_saved(sgs, 'server_groups')
LOG.debug("List count, %s Server Groups", len(sgs))
return sgs
@@ -263,7 +276,9 @@
def list(self):
client = self.client
keypairs = client.list_keypairs()['keypairs']
- if not self.is_save_state:
+ if self.prefix:
+ keypairs = self._filter_by_prefix(keypairs)
+ elif not self.is_save_state:
# recreate list removing saved keypairs
keypairs = [keypair for keypair in keypairs
if keypair['keypair']['name']
@@ -302,10 +317,11 @@
def list(self):
client = self.client
vols = client.list_volumes()['volumes']
- if not self.is_save_state:
+ if self.prefix:
+ vols = self._filter_by_prefix(vols)
+ elif not self.is_save_state:
# recreate list removing saved volumes
- vols = [vol for vol in vols if vol['id']
- not in self.saved_state_json['volumes'].keys()]
+ vols = self._filter_out_ids_from_saved(vols, 'volumes')
LOG.debug("List count, %s Volumes", len(vols))
return vols
@@ -336,6 +352,10 @@
self.client = manager.volume_quotas_client_latest
def delete(self):
+ if self.prefix:
+ # this means we're cleaning resources based on a certain prefix;
+ # this resource doesn't have a name, therefore do nothing
+ return
client = self.client
try:
LOG.debug("Deleting Volume Quotas for project with id %s",
@@ -346,6 +366,10 @@
self.project_id)
def dry_run(self):
+ if self.prefix:
+ # this means we're cleaning resources based on a certain prefix;
+ # this resource doesn't have a name, therefore do nothing
+ return
quotas = self.client.show_quota_set(
self.project_id, params={'usage': True})['quota_set']
self.data['volume_quotas'] = quotas
@@ -358,6 +382,10 @@
self.limits_client = manager.limits_client
def delete(self):
+ if self.prefix:
+ # this means we're cleaning resources based on a certain prefix;
+ # this resource doesn't have a name, therefore do nothing
+ return
client = self.client
try:
LOG.debug("Deleting Nova Quotas for project with id %s",
@@ -368,6 +396,10 @@
self.project_id)
def dry_run(self):
+ if self.prefix:
+ # this means we're cleaning resources based on a certain prefix;
+ # this resource doesn't have a name, therefore do nothing
+ return
client = self.limits_client
quotas = client.show_limits()['limits']
self.data['compute_quotas'] = quotas['absolute']
@@ -379,6 +411,10 @@
self.client = manager.network_quotas_client
def delete(self):
+ if self.prefix:
+ # this means we're cleaning resources based on a certain prefix;
+ # this resource doesn't have a name, therefore do nothing
+ return
client = self.client
try:
LOG.debug("Deleting Network Quotas for project with id %s",
@@ -389,6 +425,10 @@
self.project_id)
def dry_run(self):
+ if self.prefix:
+ # this means we're cleaning resources based on a certain prefix;
+ # this resource doesn't have a name, therefore do nothing
+ return
resp = [quota for quota in self.client.list_quotas()['quotas']
if quota['project_id'] == self.project_id]
self.data['network_quotas'] = resp
@@ -422,11 +462,13 @@
client = self.networks_client
networks = client.list_networks(**self.tenant_filter)
networks = networks['networks']
-
- if not self.is_save_state:
- # recreate list removing saved networks
- networks = [network for network in networks if network['id']
- not in self.saved_state_json['networks'].keys()]
+ if self.prefix:
+ networks = self._filter_by_prefix(networks)
+ else:
+ if not self.is_save_state:
+ # recreate list removing saved networks
+ networks = self._filter_out_ids_from_saved(
+ networks, 'networks')
# filter out networks declared in tempest.conf
if self.is_preserve:
networks = [network for network in networks
@@ -458,14 +500,17 @@
class NetworkFloatingIpService(BaseNetworkService):
def list(self):
+ if self.prefix:
+ # this means we're cleaning resources based on a certain prefix;
+ # this resource doesn't have a name, therefore return an empty list
+ return []
client = self.floating_ips_client
flips = client.list_floatingips(**self.tenant_filter)
flips = flips['floatingips']
if not self.is_save_state:
# recreate list removing saved flips
- flips = [flip for flip in flips if flip['id']
- not in self.saved_state_json['floatingips'].keys()]
+ flips = self._filter_out_ids_from_saved(flips, 'floatingips')
LOG.debug("List count, %s Network Floating IPs", len(flips))
return flips
@@ -498,15 +543,15 @@
client = self.routers_client
routers = client.list_routers(**self.tenant_filter)
routers = routers['routers']
-
- if not self.is_save_state:
- # recreate list removing saved routers
- routers = [router for router in routers if router['id']
- not in self.saved_state_json['routers'].keys()]
+ if self.prefix:
+ routers = self._filter_by_prefix(routers)
+ else:
+ if not self.is_save_state:
+ # recreate list removing saved routers
+ routers = self._filter_out_ids_from_saved(routers, 'routers')
if self.is_preserve:
routers = [router for router in routers
if router['id'] != CONF_PUB_ROUTER]
-
LOG.debug("List count, %s Routers", len(routers))
return routers
@@ -547,15 +592,19 @@
class NetworkMeteringLabelRuleService(NetworkService):
def list(self):
+ if self.prefix:
+ # this means we're cleaning resources based on a certain prefix;
+ # this resource doesn't have a name, therefore return an empty list
+ return []
client = self.metering_label_rules_client
rules = client.list_metering_label_rules()
rules = rules['metering_label_rules']
rules = self._filter_by_tenant_id(rules)
if not self.is_save_state:
- saved_rules = self.saved_state_json['metering_label_rules'].keys()
# recreate list removing saved rules
+ rules = self._filter_out_ids_from_saved(
+ rules, 'metering_label_rules')
- rules = [rule for rule in rules if rule['id'] not in saved_rules]
LOG.debug("List count, %s Metering Label Rules", len(rules))
return rules
@@ -589,11 +638,12 @@
labels = client.list_metering_labels()
labels = labels['metering_labels']
labels = self._filter_by_tenant_id(labels)
-
- if not self.is_save_state:
+ if self.prefix:
+ labels = self._filter_by_prefix(labels)
+ elif not self.is_save_state:
# recreate list removing saved labels
- labels = [label for label in labels if label['id']
- not in self.saved_state_json['metering_labels'].keys()]
+ labels = self._filter_out_ids_from_saved(
+ labels, 'metering_labels')
LOG.debug("List count, %s Metering Labels", len(labels))
return labels
@@ -627,14 +677,14 @@
client.list_ports(**self.tenant_filter)['ports']
if port["device_owner"] == "" or
port["device_owner"].startswith("compute:")]
-
- if not self.is_save_state:
- # recreate list removing saved ports
- ports = [port for port in ports if port['id']
- not in self.saved_state_json['ports'].keys()]
+ if self.prefix:
+ ports = self._filter_by_prefix(ports)
+ else:
+ if not self.is_save_state:
+ # recreate list removing saved ports
+ ports = self._filter_out_ids_from_saved(ports, 'ports')
if self.is_preserve:
ports = self._filter_by_conf_networks(ports)
-
LOG.debug("List count, %s Ports", len(ports))
return ports
@@ -667,16 +717,18 @@
secgroups = [secgroup for secgroup in
client.list_security_groups(**filter)['security_groups']
if secgroup['name'] != 'default']
-
- if not self.is_save_state:
- # recreate list removing saved security_groups
- secgroups = [secgroup for secgroup in secgroups if secgroup['id']
- not in self.saved_state_json['security_groups'].keys()
- ]
+ if self.prefix:
+ secgroups = self._filter_by_prefix(secgroups)
+ else:
+ if not self.is_save_state:
+ # recreate list removing saved security_groups
+ secgroups = self._filter_out_ids_from_saved(
+ secgroups, 'security_groups')
if self.is_preserve:
- secgroups = [secgroup for secgroup in secgroups
- if secgroup['security_group_rules'][0]['project_id']
- not in CONF_PROJECTS]
+ secgroups = [
+ secgroup for secgroup in secgroups
+ if secgroup['security_group_rules'][0]['project_id']
+ not in CONF_PROJECTS]
LOG.debug("List count, %s security_groups", len(secgroups))
return secgroups
@@ -708,10 +760,12 @@
client = self.subnets_client
subnets = client.list_subnets(**self.tenant_filter)
subnets = subnets['subnets']
- if not self.is_save_state:
- # recreate list removing saved subnets
- subnets = [subnet for subnet in subnets if subnet['id']
- not in self.saved_state_json['subnets'].keys()]
+ if self.prefix:
+ subnets = self._filter_by_prefix(subnets)
+ else:
+ if not self.is_save_state:
+ # recreate list removing saved subnets
+ subnets = self._filter_out_ids_from_saved(subnets, 'subnets')
if self.is_preserve:
subnets = self._filter_by_conf_networks(subnets)
LOG.debug("List count, %s Subnets", len(subnets))
@@ -743,10 +797,12 @@
def list(self):
client = self.subnetpools_client
pools = client.list_subnetpools(**self.tenant_filter)['subnetpools']
- if not self.is_save_state:
- # recreate list removing saved subnet pools
- pools = [pool for pool in pools if pool['id']
- not in self.saved_state_json['subnetpools'].keys()]
+ if self.prefix:
+ pools = self._filter_by_prefix(pools)
+ else:
+ if not self.is_save_state:
+ # recreate list removing saved subnet pools
+ pools = self._filter_out_ids_from_saved(pools, 'subnetpools')
if self.is_preserve:
pools = [pool for pool in pools if pool['project_id']
not in CONF_PROJECTS]
@@ -782,11 +838,15 @@
self.client = manager.regions_client
def list(self):
+ if self.prefix:
+ # this means we're cleaning resources based on a certain prefix;
+ # this resource doesn't have a name, therefore return an empty list
+ return []
client = self.client
regions = client.list_regions()
if not self.is_save_state:
- regions = [region for region in regions['regions'] if region['id']
- not in self.saved_state_json['regions'].keys()]
+ regions = self._filter_out_ids_from_saved(
+ regions['regions'], 'regions')
LOG.debug("List count, %s Regions", len(regions))
return regions
else:
@@ -824,11 +884,12 @@
def list(self):
client = self.client
flavors = client.list_flavors({"is_public": None})['flavors']
- if not self.is_save_state:
- # recreate list removing saved flavors
- flavors = [flavor for flavor in flavors if flavor['id']
- not in self.saved_state_json['flavors'].keys()]
-
+ if self.prefix:
+ flavors = self._filter_by_prefix(flavors)
+ else:
+ if not self.is_save_state:
+ # recreate list removing saved flavors
+ flavors = self._filter_out_ids_from_saved(flavors, 'flavors')
if self.is_preserve:
flavors = [flavor for flavor in flavors
if flavor['id'] not in CONF_FLAVORS]
@@ -871,10 +932,11 @@
marker = urllib.parse_qs(parsed.query)['marker'][0]
response = client.list_images(params={"marker": marker})
images.extend(response['images'])
-
- if not self.is_save_state:
- images = [image for image in images if image['id']
- not in self.saved_state_json['images'].keys()]
+ if self.prefix:
+ images = self._filter_by_prefix(images)
+ else:
+ if not self.is_save_state:
+ images = self._filter_out_ids_from_saved(images, 'images')
if self.is_preserve:
images = [image for image in images
if image['id'] not in CONF_IMAGES]
@@ -910,19 +972,17 @@
def list(self):
users = self.client.list_users()['users']
-
- if not self.is_save_state:
- users = [user for user in users if user['id']
- not in self.saved_state_json['users'].keys()]
-
+ if self.prefix:
+ users = self._filter_by_prefix(users)
+ else:
+ if not self.is_save_state:
+ users = self._filter_out_ids_from_saved(users, 'users')
if self.is_preserve:
users = [user for user in users if user['name']
not in CONF_USERS]
-
elif not self.is_save_state: # Never delete admin user
users = [user for user in users if user['name'] !=
CONF.auth.admin_username]
-
LOG.debug("List count, %s Users after reconcile", len(users))
return users
@@ -955,13 +1015,14 @@
def list(self):
try:
roles = self.client.list_roles()['roles']
- # reconcile roles with saved state and never list admin role
- if not self.is_save_state:
- roles = [role for role in roles if
- (role['id'] not in
- self.saved_state_json['roles'].keys() and
- role['name'] != CONF.identity.admin_role)]
- LOG.debug("List count, %s Roles after reconcile", len(roles))
+ if self.prefix:
+ roles = self._filter_by_prefix(roles)
+ elif not self.is_save_state:
+ # reconcile roles with saved state and never list admin role
+ roles = self._filter_out_ids_from_saved(roles, 'roles')
+ roles = [role for role in roles
+ if role['name'] != CONF.identity.admin_role]
+ LOG.debug("List count, %s Roles after reconcile", len(roles))
return roles
except Exception:
LOG.exception("Cannot retrieve Roles.")
@@ -995,18 +1056,17 @@
def list(self):
projects = self.client.list_projects()['projects']
- if not self.is_save_state:
- project_ids = self.saved_state_json['projects']
- projects = [project
- for project in projects
- if (project['id'] not in project_ids and
- project['name'] != CONF.auth.admin_project_name)]
-
+ if self.prefix:
+ projects = self._filter_by_prefix(projects)
+ else:
+ if not self.is_save_state:
+ projects = self._filter_out_ids_from_saved(
+ projects, 'projects')
+ projects = [project for project in projects
+ if project['name'] != CONF.auth.admin_project_name]
if self.is_preserve:
- projects = [project
- for project in projects
+ projects = [project for project in projects
if project['name'] not in CONF_PROJECTS]
-
LOG.debug("List count, %s Projects after reconcile", len(projects))
return projects
@@ -1039,10 +1099,10 @@
def list(self):
client = self.client
domains = client.list_domains()['domains']
- if not self.is_save_state:
- domains = [domain for domain in domains if domain['id']
- not in self.saved_state_json['domains'].keys()]
-
+ if self.prefix:
+ domains = self._filter_by_prefix(domains)
+ elif not self.is_save_state:
+ domains = self._filter_out_ids_from_saved(domains, 'domains')
LOG.debug("List count, %s Domains after reconcile", len(domains))
return domains
diff --git a/tempest/tests/cmd/test_cleanup_services.py b/tempest/tests/cmd/test_cleanup_services.py
index 2301be6..6b3b4b7 100644
--- a/tempest/tests/cmd/test_cleanup_services.py
+++ b/tempest/tests/cmd/test_cleanup_services.py
@@ -44,6 +44,7 @@
'saved_state_json': {'saved': 'data'},
'is_preserve': False,
'is_save_state': True,
+ 'prefix': 'tempest',
'tenant_id': 'project_id',
'got_exceptions': []}
base = cleanup_service.BaseService(kwargs)
@@ -54,6 +55,7 @@
self.assertTrue(base.is_save_state)
self.assertEqual(base.tenant_filter['project_id'], kwargs['tenant_id'])
self.assertEqual(base.got_exceptions, kwargs['got_exceptions'])
+ self.assertEqual(base.prefix, kwargs['prefix'])
def test_not_implemented_ex(self):
kwargs = {'data': {'data': 'test'},
@@ -61,6 +63,7 @@
'saved_state_json': {'saved': 'data'},
'is_preserve': False,
'is_save_state': False,
+ 'prefix': 'tempest',
'tenant_id': 'project_id',
'got_exceptions': []}
base = self.TestException(kwargs)
@@ -188,7 +191,8 @@
service_name = 'default'
def _create_cmd_service(self, service_type, is_save_state=False,
- is_preserve=False, is_dry_run=False):
+ is_preserve=False, is_dry_run=False,
+ prefix=''):
creds = fake_credentials.FakeKeystoneV3Credentials()
os = clients.Manager(creds)
return getattr(cleanup_service, service_type)(
@@ -196,6 +200,7 @@
is_save_state=is_save_state,
is_preserve=is_preserve,
is_dry_run=is_dry_run,
+ prefix=prefix,
project_id='b8e3ece07bb049138d224436756e3b57',
data={},
saved_state_json=self.saved_state
diff --git a/zuul.d/tempest-specific.yaml b/zuul.d/tempest-specific.yaml
index 10490b4..296682e 100644
--- a/zuul.d/tempest-specific.yaml
+++ b/zuul.d/tempest-specific.yaml
@@ -58,6 +58,8 @@
Base integration test with Neutron networking, IPv6 and py3.
vars:
tox_envlist: full
+ run_tempest_cleanup: true
+ run_tempest_cleanup_prefix: true
devstack_localrc:
USE_PYTHON3: true
FORCE_CONFIG_DRIVE: true