Convert scenario test tearDown to addCleanup
This commit removes the resource management code used in the scenario
tests and replaces it with addCleanup. The tearDownClass code kept a
LIFO queue for deleting resources created during tests, which is
exactly what addCleanup provides. Additionally, the previous
tearDownClass code only handled created objects; it couldn't undo
actions, such as volume attach, that change a resource's state.
addCleanup doesn't have that limitation.
To handle async delete actions, a list of waits is added and iterated
over as the last step of clearing the addCleanup queue. That way all
the delete calls are made up front, and the tests won't succeed unless
the deletes eventually complete. This is the same basic approach used
in the API tests to limit cleanup execution time, except here it is
multi-resource because of the nature of the scenario tests.
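
For illustration, a minimal sketch of the resulting flow (simplified
from the manager.py changes below; delete_timeout() is assumed to
block until the named resource is gone):

    def setUp(self):
        super(OfficialClientTest, self).setUp()
        self.cleanup_waits = []
        # registered first, so it runs last: addCleanup is LIFO
        self.addCleanup(self._wait_for_cleanups)

    def addCleanup_with_wait(self, things, thing_id):
        # fire the delete during cleanup...
        self.addCleanup(things.delete, thing_id)
        # ...and record a wait that is checked at the very end
        self.cleanup_waits.append({'things': things,
                                   'thing_id': thing_id})

    def _wait_for_cleanups(self):
        for wait in self.cleanup_waits:
            self.delete_timeout(**wait)
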
Change-Id: I315c9c67a70ab6a209d4996e2926a8a2d06c9bc1
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index e6593db..ca79325 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -22,6 +22,7 @@
import time
from cinderclient import exceptions as cinder_exceptions
+import glanceclient
from heatclient import exc as heat_exceptions
import netaddr
from neutronclient.common import exceptions as exc
@@ -84,8 +85,6 @@
cls.orchestration_client = cls.manager.orchestration_client
cls.data_processing_client = cls.manager.data_processing_client
cls.ceilometer_client = cls.manager.ceilometer_client
- cls.resource_keys = {}
- cls.os_resources = []
@classmethod
def _get_credentials(cls, get_creds, ctype):
@@ -110,72 +109,85 @@
return cls._get_credentials(cls.isolated_creds.get_admin_creds,
'identity_admin')
- @staticmethod
- def cleanup_resource(resource, test_name):
+ def setUp(self):
+ super(OfficialClientTest, self).setUp()
+ self.cleanup_waits = []
+ # NOTE(mtreinish): This is safe to do in setUp instead of setUpClass
+ # because scenario tests in the same test class should not share
+ # resources. If resources were shared between test cases then it
+ # should be a single scenario test instead of multiple ones.
- LOG.debug("Deleting %r from shared resources of %s" %
- (resource, test_name))
+ # NOTE(yfried): this list is cleaned at the end of each test method
+ # and not at the end of the class
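+ # addCleanup is LIFO; registering this wait loop first guarantees
+ # it runs after every individual delete cleanup has fired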
+ self.addCleanup(self._wait_for_cleanups)
+
+ @staticmethod
+ def not_found_exception(exception):
+ """
+ @return: True if exception is of NotFound type
+ """
+ NOT_FOUND_LIST = ['NotFound', 'HTTPNotFound']
+ return (exception.__class__.__name__ in NOT_FOUND_LIST
+ or (hasattr(exception, 'status_code') and
+ exception.status_code == 404))
+
+ def delete_wrapper(self, thing):
+ """Ignores NotFound exceptions for delete operations.
+
+ @param thing: object with delete() method.
+ OpenStack resources are assumed to have a delete() method which
+ destroys the resource
+ """
+
try:
- # OpenStack resources are assumed to have a delete()
- # method which destroys the resource...
- resource.delete()
+ thing.delete()
except Exception as e:
# If the resource is already missing, mission accomplished.
- # - Status code tolerated as a workaround for bug 1247568
- # - HTTPNotFound tolerated as this is currently raised when
- # attempting to delete an already-deleted heat stack.
- if (e.__class__.__name__ in ('NotFound', 'HTTPNotFound') or
- (hasattr(e, 'status_code') and e.status_code == 404)):
- return
- raise
-
- def is_deletion_complete():
- # Deletion testing is only required for objects whose
- # existence cannot be checked via retrieval.
- if isinstance(resource, dict):
- return True
- try:
- resource.get()
- except Exception as e:
- # Clients are expected to return an exception
- # called 'NotFound' if retrieval fails.
- if e.__class__.__name__ == 'NotFound':
- return True
+ if not self.not_found_exception(e):
raise
- return False
- # Block until resource deletion has completed or timed-out
- tempest.test.call_until_true(is_deletion_complete, 10, 1)
+ def _wait_for_cleanups(self):
+ """To handle async delete actions, a list of waits is added
+ which will be iterated over as the last step of clearing the
+ cleanup queue. That way all the delete calls are made up front
+ and the tests won't succeed unless the deletes are eventually
+ successful. This is the same basic approach used in the api tests to
+ limit cleanup execution time except here it is multi-resource,
+ because of the nature of the scenario tests.
+ """
+ for wait in self.cleanup_waits:
+ self.delete_timeout(**wait)
- @classmethod
- def tearDownClass(cls):
- # NOTE(jaypipes): Because scenario tests are typically run in a
- # specific order, and because test methods in scenario tests
- # generally create resources in a particular order, we destroy
- # resources in the reverse order in which resources are added to
- # the scenario test class object
- while cls.os_resources:
- thing = cls.os_resources.pop()
- cls.cleanup_resource(thing, cls.__name__)
- cls.isolated_creds.clear_isolated_creds()
- super(OfficialClientTest, cls).tearDownClass()
+ def addCleanup_with_wait(self, things, thing_id,
+ error_status='ERROR',
+ exc_type=nova_exceptions.NotFound,
+ cleanup_callable=None, cleanup_args=[],
+ cleanup_kwargs={}):
+ """Adds wait for ansyc resource deletion at the end of cleanups
- @classmethod
- def set_resource(cls, key, thing):
- LOG.debug("Adding %r to shared resources of %s" %
- (thing, cls.__name__))
- cls.resource_keys[key] = thing
- cls.os_resources.append(thing)
-
- @classmethod
- def get_resource(cls, key):
- return cls.resource_keys[key]
-
- @classmethod
- def remove_resource(cls, key):
- thing = cls.resource_keys[key]
- cls.os_resources.remove(thing)
- del cls.resource_keys[key]
+ @param things: resource manager used to delete the resource
+ @param thing_id: id of the resource to delete
+ @param error_status: see manager.delete_timeout()
+ @param exc_type: see manager.delete_timeout()
+ @param cleanup_callable: method to pass to self.addCleanup with
+ the following *cleanup_args, **cleanup_kwargs.
+ Usually a delete method. If not provided, defaults to:
+ things.delete(thing_id)
+ """
+ if cleanup_callable is None:
+ LOG.debug("no delete method passed. using {rclass}.delete({id}) as"
+ " default".format(rclass=things, id=thing_id))
+ self.addCleanup(things.delete, thing_id)
+ else:
+ self.addCleanup(cleanup_callable, *cleanup_args, **cleanup_kwargs)
+ wait_dict = {
+ 'things': things,
+ 'thing_id': thing_id,
+ 'error_status': error_status,
+ 'not_found_exception': exc_type,
+ }
+ self.cleanup_waits.append(wait_dict)
def status_timeout(self, things, thing_id, expected_status,
error_status='ERROR',
@@ -227,8 +239,11 @@
except not_found_exception:
if allow_notfound:
return True
- else:
- raise
+ raise
+ except Exception as e:
+ if allow_notfound and self.not_found_exception(e):
+ return True
+ raise
new_status = thing.status
@@ -288,6 +303,7 @@
for ruleset in rulesets:
sg_rule = client.security_group_rules.create(secgroup_id,
**ruleset)
+ self.addCleanup(self.delete_wrapper, sg_rule)
rules.append(sg_rule)
return rules
@@ -301,7 +317,7 @@
secgroup = client.security_groups.create(sg_name, sg_desc)
self.assertEqual(secgroup.name, sg_name)
self.assertEqual(secgroup.description, sg_desc)
- self.set_resource(sg_name, secgroup)
+ self.addCleanup(self.delete_wrapper, secgroup)
# Add rules to the security group
self._create_loginable_secgroup_rule_nova(client, secgroup.id)
@@ -309,7 +325,17 @@
return secgroup
def create_server(self, client=None, name=None, image=None, flavor=None,
- wait=True, create_kwargs={}):
+ wait_on_boot=True, wait_on_delete=True,
+ create_kwargs={}):
+ """Creates VM instance.
+
+ @param client: compute client to create the instance
+ @param image: image from which to create the instance
+ @param wait_on_boot: wait for status ACTIVE before continuing
+ @param wait_on_delete: force synchronous delete on cleanup
+ @param create_kwargs: additional details for instance creation
+ @return: client.server object
+ """
if client is None:
client = self.compute_client
if name is None:
@@ -343,19 +369,25 @@
name, image, flavor)
server = client.servers.create(name, image, flavor, **create_kwargs)
self.assertEqual(server.name, name)
- self.set_resource(name, server)
- if wait:
+ if wait_on_delete:
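+ # cleanups run LIFO: the delete registered below fires first,
+ # then delete_timeout blocks until the server is actually gone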
+ self.addCleanup(self.delete_timeout,
+ self.compute_client.servers,
+ server.id)
+ self.addCleanup_with_wait(self.compute_client.servers, server.id,
+ cleanup_callable=self.delete_wrapper,
+ cleanup_args=[server])
+ if wait_on_boot:
self.status_timeout(client.servers, server.id, 'ACTIVE')
# The instance retrieved on creation is missing network
# details, necessitating retrieval after it becomes active to
# ensure correct details.
server = client.servers.get(server.id)
- self.set_resource(name, server)
LOG.debug("Created server: %s", server)
return server
def create_volume(self, client=None, size=1, name=None,
- snapshot_id=None, imageRef=None, volume_type=None):
+ snapshot_id=None, imageRef=None, volume_type=None,
+ wait_on_delete=True):
if client is None:
client = self.volume_client
if name is None:
@@ -365,7 +397,12 @@
snapshot_id=snapshot_id,
imageRef=imageRef,
volume_type=volume_type)
- self.set_resource(name, volume)
+ if wait_on_delete:
+ self.addCleanup(self.delete_timeout,
+ self.volume_client.volumes,
+ volume.id)
+ self.addCleanup_with_wait(self.volume_client.volumes, volume.id,
+ exc_type=cinder_exceptions.NotFound)
self.assertEqual(name, volume.display_name)
self.status_timeout(client.volumes, volume.id, 'available')
LOG.debug("Created volume: %s", volume)
@@ -381,7 +418,8 @@
name = data_utils.rand_name('scenario-snapshot-')
LOG.debug("Creating a snapshot image for server: %s", server.name)
image_id = compute_client.servers.create_image(server, name)
- self.addCleanup(image_client.images.delete, image_id)
+ self.addCleanup_with_wait(self.image_client.images, image_id,
+ exc_type=glanceclient.exc.HTTPNotFound)
self.status_timeout(image_client.images, image_id, 'active')
snapshot_image = image_client.images.get(image_id)
self.assertEqual(name, snapshot_image.name)
@@ -396,7 +434,7 @@
name = data_utils.rand_name('scenario-keypair-')
keypair = client.keypairs.create(name)
self.assertEqual(keypair.name, name)
- self.set_resource(name, keypair)
+ self.addCleanup(self.delete_wrapper, keypair)
return keypair
def get_remote_client(self, server_or_ip, username=None, private_key=None):
@@ -590,9 +628,12 @@
'key_name': self.keypair.id
}
self.instance = self.create_server(
- wait=False, create_kwargs=create_kwargs)
+ wait_on_boot=False, create_kwargs=create_kwargs)
- self.set_resource('instance', self.instance)
+ self.addCleanup_with_wait(self.compute_client.servers,
+ self.instance.id,
+ cleanup_callable=self.delete_wrapper,
+ cleanup_args=[self.instance])
self.wait_node(self.instance.id)
self.node = self.get_node(instance_id=self.instance.id)
@@ -617,7 +658,6 @@
def terminate_instance(self):
self.instance.delete()
- self.remove_resource('instance')
self.wait_power_state(self.node.uuid, BaremetalPowerStates.POWER_OFF)
self.wait_provisioning_state(
self.node.uuid,
@@ -643,11 +683,6 @@
self.status_timeout(
self.volume_client.volumes, self.volume.id, status)
- def _wait_for_volume_deletion(self):
- self.delete_timeout(
- self.volume_client.volumes, self.volume.id,
- not_found_exception=cinder_exceptions.NotFound)
-
def nova_boot(self):
self.keypair = self.create_keypair()
create_kwargs = {'key_name': self.keypair.name}
@@ -698,10 +733,6 @@
volume = self.volume_client.volumes.get(self.volume.id)
self.assertEqual('available', volume.status)
- def cinder_delete_encrypted(self):
- self.volume_client.volumes.delete(self.volume.id)
- self._wait_for_volume_deletion()
-
class NetworkScenarioTest(OfficialClientTest):
"""
@@ -740,7 +771,7 @@
network = net_common.DeletableNetwork(client=self.network_client,
**result['network'])
self.assertEqual(network.name, name)
- self.set_resource(name, network)
+ self.addCleanup(self.delete_wrapper, network)
return network
def _list_networks(self, **kwargs):
@@ -816,7 +847,7 @@
subnet = net_common.DeletableSubnet(client=self.network_client,
**result['subnet'])
self.assertEqual(subnet.cidr, str_cidr)
- self.set_resource(data_utils.rand_name(namestart), subnet)
+ self.addCleanup(self.delete_wrapper, subnet)
return subnet
def _create_port(self, network, namestart='port-quotatest-'):
@@ -829,7 +860,7 @@
self.assertIsNotNone(result, 'Unable to allocate port')
port = net_common.DeletablePort(client=self.network_client,
**result['port'])
- self.set_resource(name, port)
+ self.addCleanup(self.delete_wrapper, port)
return port
def _get_server_port_id(self, server, ip_addr=None):
@@ -852,7 +883,7 @@
floating_ip = net_common.DeletableFloatingIp(
client=self.network_client,
**result['floatingip'])
- self.set_resource(data_utils.rand_name('floatingip-'), floating_ip)
+ self.addCleanup(self.delete_wrapper, floating_ip)
return floating_ip
def _associate_floating_ip(self, floating_ip, server):
@@ -897,7 +928,7 @@
pool = net_common.DeletablePool(client=self.network_client,
**resp['pool'])
self.assertEqual(pool['name'], name)
- self.set_resource(name, pool)
+ self.addCleanup(self.delete_wrapper, pool)
return pool
def _create_member(self, address, protocol_port, pool_id):
@@ -912,7 +943,7 @@
resp = self.network_client.create_member(body)
member = net_common.DeletableMember(client=self.network_client,
**resp['member'])
- self.set_resource(data_utils.rand_name('member-'), member)
+ self.addCleanup(self.delete_wrapper, member)
return member
def _create_vip(self, protocol, protocol_port, subnet_id, pool_id):
@@ -931,7 +962,7 @@
vip = net_common.DeletableVip(client=self.network_client,
**resp['vip'])
self.assertEqual(vip['name'], name)
- self.set_resource(name, vip)
+ self.addCleanup(self.delete_wrapper, vip)
return vip
def _check_vm_connectivity(self, ip_address,
@@ -1073,7 +1104,7 @@
self.assertEqual(secgroup.name, sg_name)
self.assertEqual(tenant_id, secgroup.tenant_id)
self.assertEqual(secgroup.description, sg_desc)
- self.set_resource(sg_name, secgroup)
+ self.addCleanup(self.delete_wrapper, secgroup)
return secgroup
def _default_security_group(self, tenant_id, client=None):
@@ -1132,6 +1163,7 @@
client=client,
**sg_rule['security_group_rule']
)
+ self.addCleanup(self.delete_wrapper, sg_rule)
self.assertEqual(secgroup.tenant_id, sg_rule.tenant_id)
self.assertEqual(secgroup.id, sg_rule.security_group_id)
@@ -1230,7 +1262,7 @@
router = net_common.DeletableRouter(client=self.network_client,
**result['router'])
self.assertEqual(router.name, name)
- self.set_resource(name, router)
+ self.addCleanup(self.delete_wrapper, router)
return router
def _create_networks(self, tenant_id=None):
diff --git a/tempest/scenario/orchestration/test_autoscaling.py b/tempest/scenario/orchestration/test_autoscaling.py
index 82ba3c5..aa7b6f8 100644
--- a/tempest/scenario/orchestration/test_autoscaling.py
+++ b/tempest/scenario/orchestration/test_autoscaling.py
@@ -59,7 +59,7 @@
# if a keypair was set, do not delete the stack on exit to allow
# for manual post-mortems
if not CONF.orchestration.keypair_name:
- self.set_resource('stack', self.stack)
+ self.addCleanup(self.client.stacks.delete, self.stack)
@test.skip_because(bug="1257575")
@test.attr(type='slow')
diff --git a/tempest/scenario/test_aggregates_basic_ops.py b/tempest/scenario/test_aggregates_basic_ops.py
index 6817c48..0059619 100644
--- a/tempest/scenario/test_aggregates_basic_ops.py
+++ b/tempest/scenario/test_aggregates_basic_ops.py
@@ -42,13 +42,12 @@
availability_zone = kwargs['availability_zone']
self.assertEqual(aggregate.name, aggregate_name)
self.assertEqual(aggregate.availability_zone, availability_zone)
- self.set_resource(aggregate.id, aggregate)
+ self.addCleanup(self._delete_aggregate, aggregate)
LOG.debug("Aggregate %s created." % (aggregate.name))
return aggregate
def _delete_aggregate(self, aggregate):
self.compute_client.aggregates.delete(aggregate.id)
- self.remove_resource(aggregate.id)
LOG.debug("Aggregate %s deleted. " % (aggregate.name))
def _get_host_name(self):
@@ -60,6 +59,7 @@
def _add_host(self, aggregate_name, host):
aggregate = self.compute_client.aggregates.add_host(aggregate_name,
host)
+ self.addCleanup(self._remove_host, aggregate, host)
self.assertIn(host, aggregate.hosts)
LOG.debug("Host %s added to Aggregate %s." % (host, aggregate.name))
@@ -128,6 +128,3 @@
metadata.update(additional_metadata)
self._check_aggregate_details(aggregate, aggregate.name, az, [host],
metadata)
-
- self._remove_host(aggregate, host)
- self._delete_aggregate(aggregate)
diff --git a/tempest/scenario/test_encrypted_cinder_volumes.py b/tempest/scenario/test_encrypted_cinder_volumes.py
index f223cbf..366cd93 100644
--- a/tempest/scenario/test_encrypted_cinder_volumes.py
+++ b/tempest/scenario/test_encrypted_cinder_volumes.py
@@ -29,7 +29,6 @@
* Creates an encryption type (as admin)
* Creates a volume of that encryption type (as a regular user)
* Attaches and detaches the encrypted volume to the instance
- * Deletes the encrypted volume
"""
def launch_instance(self):
@@ -49,21 +48,16 @@
self.nova_volume_attach()
self.nova_volume_detach()
- def delete_volume(self):
- self.cinder_delete_encrypted()
-
@test.services('compute', 'volume', 'image')
def test_encrypted_cinder_volumes_luks(self):
self.launch_instance()
self.create_encrypted_volume('nova.volume.encryptors.'
'luks.LuksEncryptor')
self.attach_detach_volume()
- self.delete_volume()
@test.services('compute', 'volume', 'image')
def test_encrypted_cinder_volumes_cryptsetup(self):
self.launch_instance()
self.create_encrypted_volume('nova.volume.encryptors.'
'cryptsetup.CryptsetupEncryptor')
- self.attach_detach_volume()
- self.delete_volume()
+ self.attach_detach_volume()
\ No newline at end of file
diff --git a/tempest/scenario/test_large_ops.py b/tempest/scenario/test_large_ops.py
index ed5743c..15cf13b 100644
--- a/tempest/scenario/test_large_ops.py
+++ b/tempest/scenario/test_large_ops.py
@@ -60,7 +60,13 @@
# needed because of bug 1199788
self.servers = [x for x in client.servers.list() if name in x.name]
for server in self.servers:
- self.set_resource(server.name, server)
+ # after deleting all servers, wait for them all to clear
+ # before cleanup continues
+ self.addCleanup(self.delete_timeout,
+ self.compute_client.servers,
+ server.id)
+ for server in self.servers:
+ self.addCleanup_with_wait(self.compute_client.servers, server.id)
self._wait_for_server_status('ACTIVE')
def _large_ops_scenario(self):
diff --git a/tempest/scenario/test_load_balancer_basic.py b/tempest/scenario/test_load_balancer_basic.py
index 826da48..e041fd2 100644
--- a/tempest/scenario/test_load_balancer_basic.py
+++ b/tempest/scenario/test_load_balancer_basic.py
@@ -74,15 +74,11 @@
self.server_fixed_ips = {}
self._create_security_group()
- def cleanup_wrapper(self, resource):
- self.cleanup_resource(resource, self.__class__.__name__)
-
def _create_security_group(self):
self.security_group = self._create_security_group_neutron(
tenant_id=self.tenant_id)
self._create_security_group_rules_for_port(self.port1)
self._create_security_group_rules_for_port(self.port2)
- self.addCleanup(self.cleanup_wrapper, self.security_group)
def _create_security_group_rules_for_port(self, port):
rule = {
@@ -99,7 +95,6 @@
def _create_server(self, name):
keypair = self.create_keypair(name='keypair-%s' % name)
- self.addCleanup(self.cleanup_wrapper, keypair)
security_groups = [self.security_group.name]
net = self._list_networks(tenant_id=self.tenant_id)[0]
create_kwargs = {
@@ -111,14 +106,12 @@
}
server = self.create_server(name=name,
create_kwargs=create_kwargs)
- self.addCleanup(self.cleanup_wrapper, server)
self.servers_keypairs[server.id] = keypair
if (config.network.public_network_id and not
config.network.tenant_networks_reachable):
public_network_id = config.network.public_network_id
floating_ip = self._create_floating_ip(
server, public_network_id)
- self.addCleanup(self.cleanup_wrapper, floating_ip)
self.floating_ips[floating_ip] = server
self.server_ips[server.id] = floating_ip.floating_ip_address
else:
@@ -211,7 +204,6 @@
lb_method='ROUND_ROBIN',
protocol='HTTP',
subnet_id=self.subnet.id)
- self.addCleanup(self.cleanup_wrapper, self.pool)
self.assertTrue(self.pool)
def _create_members(self):
@@ -227,17 +219,14 @@
member1 = self._create_member(address=ip,
protocol_port=self.port1,
pool_id=self.pool.id)
- self.addCleanup(self.cleanup_wrapper, member1)
member2 = self._create_member(address=ip,
protocol_port=self.port2,
pool_id=self.pool.id)
- self.addCleanup(self.cleanup_wrapper, member2)
self.members.extend([member1, member2])
else:
member = self._create_member(address=ip,
protocol_port=self.port1,
pool_id=self.pool.id)
- self.addCleanup(self.cleanup_wrapper, member)
self.members.append(member)
self.assertTrue(self.members)
@@ -246,7 +235,6 @@
port_id = vip.port_id
floating_ip = self._create_floating_ip(vip, public_network_id,
port_id=port_id)
- self.addCleanup(self.cleanup_wrapper, floating_ip)
self.floating_ips.setdefault(vip.id, [])
self.floating_ips[vip.id].append(floating_ip)
@@ -257,7 +245,6 @@
protocol_port=80,
subnet_id=self.subnet.id,
pool_id=self.pool.id)
- self.addCleanup(self.cleanup_wrapper, self.vip)
self.status_timeout(NeutronRetriever(self.network_client,
self.network_client.vip_path,
net_common.DeletableVip),
diff --git a/tempest/scenario/test_minimum_basic.py b/tempest/scenario/test_minimum_basic.py
index 0406217..29fdc74 100644
--- a/tempest/scenario/test_minimum_basic.py
+++ b/tempest/scenario/test_minimum_basic.py
@@ -85,7 +85,7 @@
def nova_floating_ip_create(self):
self.floating_ip = self.compute_client.floating_ips.create()
- self.addCleanup(self.floating_ip.delete)
+ self.addCleanup(self.delete_wrapper, self.floating_ip)
def nova_floating_ip_add(self):
self.server.add_floating_ip(self.floating_ip)
diff --git a/tempest/scenario/test_network_advanced_server_ops.py b/tempest/scenario/test_network_advanced_server_ops.py
index f1cd320..431de9a 100644
--- a/tempest/scenario/test_network_advanced_server_ops.py
+++ b/tempest/scenario/test_network_advanced_server_ops.py
@@ -50,23 +50,15 @@
cls.enabled = False
raise cls.skipException(msg)
- def cleanup_wrapper(self, resource):
- self.cleanup_resource(resource, self.__class__.__name__)
-
def setUp(self):
super(TestNetworkAdvancedServerOps, self).setUp()
key_name = data_utils.rand_name('keypair-smoke-')
self.keypair = self.create_keypair(name=key_name)
- self.addCleanup(self.cleanup_wrapper, self.keypair)
security_group =\
self._create_security_group_neutron(tenant_id=self.tenant_id)
- self.addCleanup(self.cleanup_wrapper, security_group)
network = self._create_network(self.tenant_id)
- self.addCleanup(self.cleanup_wrapper, network)
router = self._get_router(self.tenant_id)
- self.addCleanup(self.cleanup_wrapper, router)
subnet = self._create_subnet(network)
- self.addCleanup(self.cleanup_wrapper, subnet)
subnet.add_to_router(router.id)
public_network_id = CONF.network.public_network_id
create_kwargs = {
@@ -79,10 +71,8 @@
server_name = data_utils.rand_name('server-smoke-%d-')
self.server = self.create_server(name=server_name,
create_kwargs=create_kwargs)
- self.addCleanup(self.cleanup_wrapper, self.server)
self.floating_ip = self._create_floating_ip(self.server,
public_network_id)
- self.addCleanup(self.cleanup_wrapper, self.floating_ip)
def _check_network_connectivity(self, should_connect=True):
username = CONF.compute.image_ssh_user
diff --git a/tempest/scenario/test_network_basic_ops.py b/tempest/scenario/test_network_basic_ops.py
index c84d4b9..7dc817d 100644
--- a/tempest/scenario/test_network_basic_ops.py
+++ b/tempest/scenario/test_network_basic_ops.py
@@ -96,17 +96,11 @@
raise cls.skipException(msg)
cls.check_preconditions()
- def cleanup_wrapper(self, resource):
- self.cleanup_resource(resource, self.__class__.__name__)
-
def setUp(self):
super(TestNetworkBasicOps, self).setUp()
self.security_group = \
self._create_security_group_neutron(tenant_id=self.tenant_id)
- self.addCleanup(self.cleanup_wrapper, self.security_group)
self.network, self.subnet, self.router = self._create_networks()
- for r in [self.network, self.router, self.subnet]:
- self.addCleanup(self.cleanup_wrapper, r)
self.check_networks()
self.servers = {}
name = data_utils.rand_name('server-smoke')
@@ -144,7 +138,6 @@
def _create_server(self, name, network):
keypair = self.create_keypair(name='keypair-%s' % name)
- self.addCleanup(self.cleanup_wrapper, keypair)
security_groups = [self.security_group.name]
create_kwargs = {
'nics': [
@@ -154,7 +147,6 @@
'security_groups': security_groups,
}
server = self.create_server(name=name, create_kwargs=create_kwargs)
- self.addCleanup(self.cleanup_wrapper, server)
return dict(server=server, keypair=keypair)
def _check_tenant_network_connectivity(self):
@@ -171,7 +163,6 @@
for server in self.servers.keys():
floating_ip = self._create_floating_ip(server, public_network_id)
self.floating_ip_tuple = Floating_IP_tuple(floating_ip, server)
- self.addCleanup(self.cleanup_wrapper, floating_ip)
def _check_public_network_connectivity(self, should_connect=True,
msg=None):
@@ -204,11 +195,9 @@
def _create_new_network(self):
self.new_net = self._create_network(self.tenant_id)
- self.addCleanup(self.cleanup_wrapper, self.new_net)
self.new_subnet = self._create_subnet(
network=self.new_net,
gateway_ip=None)
- self.addCleanup(self.cleanup_wrapper, self.new_subnet)
def _hotplug_server(self):
old_floating_ip, server = self.floating_ip_tuple
@@ -226,7 +215,10 @@
port_id=None,
fixed_ip=None)
# move server to the head of the cleanup list
- self.addCleanup(self.cleanup_wrapper, server)
+ self.addCleanup(self.delete_timeout,
+ self.compute_client.servers,
+ server.id)
+ self.addCleanup(self.delete_wrapper, server)
def check_ports():
port_list = [port for port in
diff --git a/tempest/scenario/test_security_groups_basic_ops.py b/tempest/scenario/test_security_groups_basic_ops.py
index dd89dc0..8058b3d 100644
--- a/tempest/scenario/test_security_groups_basic_ops.py
+++ b/tempest/scenario/test_security_groups_basic_ops.py
@@ -165,7 +165,6 @@
def _create_tenant_keypairs(self, tenant_id):
keypair = self.create_keypair(
name=data_utils.rand_name('keypair-smoke-'))
- self.addCleanup(self.cleanup_wrapper, keypair)
self.tenants[tenant_id].keypair = keypair
def _create_tenant_security_groups(self, tenant):
@@ -173,14 +172,12 @@
namestart='secgroup_access-',
tenant_id=tenant.creds.tenant_id
)
- self.addCleanup(self.cleanup_wrapper, access_sg)
# don't use default secgroup since it allows in-tenant traffic
def_sg = self._create_empty_security_group(
namestart='secgroup_general-',
tenant_id=tenant.creds.tenant_id
)
- self.addCleanup(self.cleanup_wrapper, def_sg)
tenant.security_groups.update(access=access_sg, default=def_sg)
ssh_rule = dict(
protocol='tcp',
@@ -188,9 +185,7 @@
port_range_max=22,
direction='ingress',
)
- rule = self._create_security_group_rule(secgroup=access_sg,
- **ssh_rule)
- self.addCleanup(self.cleanup_wrapper, rule)
+ self._create_security_group_rule(secgroup=access_sg, **ssh_rule)
def _verify_network_details(self, tenant):
# Checks that we see the newly created network/subnet/router via
@@ -238,7 +233,6 @@
'tenant_id': tenant.creds.tenant_id
}
server = self.create_server(name=name, create_kwargs=create_kwargs)
- self.addCleanup(self.cleanup_wrapper, server)
return server
def _create_tenant_servers(self, tenant, num=1):
@@ -269,13 +263,10 @@
def _assign_floating_ips(self, server):
public_network_id = CONF.network.public_network_id
floating_ip = self._create_floating_ip(server, public_network_id)
- self.addCleanup(self.cleanup_wrapper, floating_ip)
self.floating_ips.setdefault(server, floating_ip)
def _create_tenant_network(self, tenant):
network, subnet, router = self._create_networks(tenant.creds.tenant_id)
- for r in [network, router, subnet]:
- self.addCleanup(self.cleanup_wrapper, r)
tenant.set_network(network, subnet, router)
def _set_compute_context(self, tenant):
@@ -355,11 +346,10 @@
remote_group_id=tenant.security_groups['default'].id,
direction='ingress'
)
- rule = self._create_security_group_rule(
+ self._create_security_group_rule(
secgroup=tenant.security_groups['default'],
**ruleset
)
- self.addCleanup(self.cleanup_wrapper, rule)
access_point_ssh = self._connect_to_access_point(tenant)
for server in tenant.servers:
self._check_connectivity(access_point=access_point_ssh,
@@ -385,11 +375,10 @@
protocol='icmp',
direction='ingress'
)
- rule_s2d = self._create_security_group_rule(
+ self._create_security_group_rule(
secgroup=dest_tenant.security_groups['default'],
**ruleset
)
- self.addCleanup(self.cleanup_wrapper, rule_s2d)
access_point_ssh = self._connect_to_access_point(source_tenant)
ip = self._get_server_ip(dest_tenant.access_point,
floating=self.floating_ip_access)
@@ -399,11 +388,10 @@
self._test_cross_tenant_block(dest_tenant, source_tenant)
# allow reverse traffic and check
- rule_d2s = self._create_security_group_rule(
+ self._create_security_group_rule(
secgroup=source_tenant.security_groups['default'],
**ruleset
)
- self.addCleanup(self.cleanup_wrapper, rule_d2s)
access_point_ssh_2 = self._connect_to_access_point(dest_tenant)
ip = self._get_server_ip(source_tenant.access_point,
diff --git a/tempest/scenario/test_server_basic_ops.py b/tempest/scenario/test_server_basic_ops.py
index 54f1d9e..38686d9 100644
--- a/tempest/scenario/test_server_basic_ops.py
+++ b/tempest/scenario/test_server_basic_ops.py
@@ -74,23 +74,17 @@
'key_name': self.keypair.id,
'security_groups': security_groups
}
- instance = self.create_server(image=self.image_ref,
- flavor=self.flavor_ref,
- create_kwargs=create_kwargs)
- self.set_resource('instance', instance)
-
- def terminate_instance(self):
- instance = self.get_resource('instance')
- instance.delete()
- self.remove_resource('instance')
+ self.instance = self.create_server(image=self.image_ref,
+ flavor=self.flavor_ref,
+ create_kwargs=create_kwargs)
def verify_ssh(self):
if self.run_ssh:
# Obtain a floating IP
floating_ip = self.compute_client.floating_ips.create()
+ self.addCleanup(self.delete_wrapper, floating_ip)
# Attach a floating IP
- instance = self.get_resource('instance')
- instance.add_floating_ip(floating_ip)
+ self.instance.add_floating_ip(floating_ip)
# Check ssh
try:
self.get_remote_client(
@@ -108,4 +102,4 @@
self.security_group = self._create_security_group_nova()
self.boot_instance()
self.verify_ssh()
- self.terminate_instance()
+ self.instance.delete()
diff --git a/tempest/scenario/test_snapshot_pattern.py b/tempest/scenario/test_snapshot_pattern.py
index d41490a..7dd662d 100644
--- a/tempest/scenario/test_snapshot_pattern.py
+++ b/tempest/scenario/test_snapshot_pattern.py
@@ -65,7 +65,7 @@
def _create_floating_ip(self):
floating_ip = self.compute_client.floating_ips.create()
- self.addCleanup(floating_ip.delete)
+ self.addCleanup(self.delete_wrapper, floating_ip)
return floating_ip
def _set_floating_ip_to_server(self, server, floating_ip):
diff --git a/tempest/scenario/test_stamp_pattern.py b/tempest/scenario/test_stamp_pattern.py
index 20561ae..be27024 100644
--- a/tempest/scenario/test_stamp_pattern.py
+++ b/tempest/scenario/test_stamp_pattern.py
@@ -74,7 +74,7 @@
def _create_floating_ip(self):
floating_ip = self.compute_client.floating_ips.create()
- self.addCleanup(floating_ip.delete)
+ self.addCleanup(self.delete_wrapper, floating_ip)
return floating_ip
def _add_floating_ip(self, server, floating_ip):
diff --git a/tempest/scenario/test_volume_boot_pattern.py b/tempest/scenario/test_volume_boot_pattern.py
index 4905dbf..bf5d1f6 100644
--- a/tempest/scenario/test_volume_boot_pattern.py
+++ b/tempest/scenario/test_volume_boot_pattern.py
@@ -10,6 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+from cinderclient import exceptions as cinder_exc
+
from tempest.common.utils import data_utils
from tempest import config
from tempest.openstack.common import log
@@ -69,7 +71,8 @@
snap = volume_snapshots.create(volume_id=vol_id,
force=True,
display_name=snap_name)
- self.set_resource(snap.id, snap)
+ self.addCleanup_with_wait(self.volume_client.volume_snapshots, snap.id,
+ exc_type=cinder_exc.NotFound)
self.status_timeout(volume_snapshots,
snap.id,
'available')
@@ -100,8 +103,7 @@
def _ssh_to_server(self, server, keypair):
if CONF.compute.use_floatingip_for_ssh:
floating_ip = self.compute_client.floating_ips.create()
- fip_name = data_utils.rand_name('scenario-fip')
- self.set_resource(fip_name, floating_ip)
+ self.addCleanup(self.delete_wrapper, floating_ip)
server.add_floating_ip(floating_ip)
ip = floating_ip.ip
else: